From 37baad950f465c134ab6ac68f1b1f0f7d40b9cf1 Mon Sep 17 00:00:00 2001 From: Ashwin Hendre Date: Fri, 22 Nov 2024 11:41:16 +0530 Subject: [PATCH] Create region-zone-sysType hierarchy --- .../asset/cluster/tfvars_20241219120242.go | 1189 +++++++++++++++++ .../asset/cluster/tfvars_20241219121700.go | 1186 ++++++++++++++++ .../platformprovisioncheck_20241219120242.go | 206 +++ .../platformprovisioncheck_20241219121357.go | 203 +++ .../powervs/validation_20241219120242.go | 321 +++++ .../powervs/validation_20241219121130.go | 317 +++++ .../powervs/validation_test_20241219120242.go | 887 ++++++++++++ .../powervs/validation_test_20241219121335.go | 881 ++++++++++++ .../asset/machines/worker_20241219120242.go | 842 ++++++++++++ .../asset/machines/worker_20241219120647.go | 839 ++++++++++++ .../powervs/powervs_regions_20241219120242.go | 324 +++++ .../powervs/powervs_regions_20241219120532.go | 261 ++++ .../powervs/powervs_regions_20241219162541.go | 263 ++++ .../powervs/powervs_regions_20241219162543.go | 263 ++++ pkg/asset/cluster/tfvars.go | 10 + .../installconfig/platformprovisioncheck.go | 5 + pkg/asset/installconfig/powervs/regions.go | 6 +- pkg/asset/installconfig/powervs/validation.go | 25 +- .../installconfig/powervs/validation_test.go | 140 +- pkg/asset/machines/worker.go | 9 + pkg/types/powervs/powervs_regions.go | 188 ++- 21 files changed, 8325 insertions(+), 40 deletions(-) create mode 100644 .history/pkg/asset/cluster/tfvars_20241219120242.go create mode 100644 .history/pkg/asset/cluster/tfvars_20241219121700.go create mode 100644 .history/pkg/asset/installconfig/platformprovisioncheck_20241219120242.go create mode 100644 .history/pkg/asset/installconfig/platformprovisioncheck_20241219121357.go create mode 100644 .history/pkg/asset/installconfig/powervs/validation_20241219120242.go create mode 100644 .history/pkg/asset/installconfig/powervs/validation_20241219121130.go create mode 100644 
.history/pkg/asset/installconfig/powervs/validation_test_20241219120242.go create mode 100644 .history/pkg/asset/installconfig/powervs/validation_test_20241219121335.go create mode 100644 .history/pkg/asset/machines/worker_20241219120242.go create mode 100644 .history/pkg/asset/machines/worker_20241219120647.go create mode 100644 .history/pkg/types/powervs/powervs_regions_20241219120242.go create mode 100644 .history/pkg/types/powervs/powervs_regions_20241219120532.go create mode 100644 .history/pkg/types/powervs/powervs_regions_20241219162541.go create mode 100644 .history/pkg/types/powervs/powervs_regions_20241219162543.go diff --git a/.history/pkg/asset/cluster/tfvars_20241219120242.go b/.history/pkg/asset/cluster/tfvars_20241219120242.go new file mode 100644 index 00000000000..be8c9ddfedd --- /dev/null +++ b/.history/pkg/asset/cluster/tfvars_20241219120242.go @@ -0,0 +1,1189 @@ +package cluster + +import ( + "context" + "encoding/json" + "fmt" + "math/rand" + "os" + "path" + "strconv" + "strings" + "time" + + "github.com/IBM/vpc-go-sdk/vpcv1" + igntypes "github.com/coreos/ignition/v2/config/v3_2/types" + coreosarch "github.com/coreos/stream-metadata-go/arch" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "github.com/vmware/govmomi/object" + "github.com/vmware/govmomi/vim25/mo" + "sigs.k8s.io/yaml" + + configv1 "github.com/openshift/api/config/v1" + machinev1 "github.com/openshift/api/machine/v1" + machinev1beta1 "github.com/openshift/api/machine/v1beta1" + libvirtprovider "github.com/openshift/cluster-api-provider-libvirt/pkg/apis/libvirtproviderconfig/v1beta1" + ovirtprovider "github.com/openshift/cluster-api-provider-ovirt/pkg/apis/ovirtprovider/v1beta1" + "github.com/openshift/installer/pkg/asset" + "github.com/openshift/installer/pkg/asset/ignition" + "github.com/openshift/installer/pkg/asset/ignition/bootstrap" + baremetalbootstrap "github.com/openshift/installer/pkg/asset/ignition/bootstrap/baremetal" + 
"github.com/openshift/installer/pkg/asset/ignition/machine" + "github.com/openshift/installer/pkg/asset/installconfig" + awsconfig "github.com/openshift/installer/pkg/asset/installconfig/aws" + aztypes "github.com/openshift/installer/pkg/asset/installconfig/azure" + gcpconfig "github.com/openshift/installer/pkg/asset/installconfig/gcp" + ovirtconfig "github.com/openshift/installer/pkg/asset/installconfig/ovirt" + powervsconfig "github.com/openshift/installer/pkg/asset/installconfig/powervs" + vsphereconfig "github.com/openshift/installer/pkg/asset/installconfig/vsphere" + "github.com/openshift/installer/pkg/asset/machines" + "github.com/openshift/installer/pkg/asset/manifests" + "github.com/openshift/installer/pkg/asset/openshiftinstall" + "github.com/openshift/installer/pkg/asset/rhcos" + rhcospkg "github.com/openshift/installer/pkg/rhcos" + "github.com/openshift/installer/pkg/tfvars" + alibabacloudtfvars "github.com/openshift/installer/pkg/tfvars/alibabacloud" + awstfvars "github.com/openshift/installer/pkg/tfvars/aws" + azuretfvars "github.com/openshift/installer/pkg/tfvars/azure" + baremetaltfvars "github.com/openshift/installer/pkg/tfvars/baremetal" + gcptfvars "github.com/openshift/installer/pkg/tfvars/gcp" + ibmcloudtfvars "github.com/openshift/installer/pkg/tfvars/ibmcloud" + libvirttfvars "github.com/openshift/installer/pkg/tfvars/libvirt" + nutanixtfvars "github.com/openshift/installer/pkg/tfvars/nutanix" + openstacktfvars "github.com/openshift/installer/pkg/tfvars/openstack" + ovirttfvars "github.com/openshift/installer/pkg/tfvars/ovirt" + powervstfvars "github.com/openshift/installer/pkg/tfvars/powervs" + vspheretfvars "github.com/openshift/installer/pkg/tfvars/vsphere" + "github.com/openshift/installer/pkg/types" + "github.com/openshift/installer/pkg/types/alibabacloud" + "github.com/openshift/installer/pkg/types/aws" + "github.com/openshift/installer/pkg/types/azure" + "github.com/openshift/installer/pkg/types/baremetal" + 
"github.com/openshift/installer/pkg/types/external" + "github.com/openshift/installer/pkg/types/gcp" + "github.com/openshift/installer/pkg/types/ibmcloud" + "github.com/openshift/installer/pkg/types/libvirt" + "github.com/openshift/installer/pkg/types/none" + "github.com/openshift/installer/pkg/types/nutanix" + "github.com/openshift/installer/pkg/types/openstack" + "github.com/openshift/installer/pkg/types/ovirt" + "github.com/openshift/installer/pkg/types/powervs" + "github.com/openshift/installer/pkg/types/vsphere" + ibmcloudprovider "github.com/openshift/machine-api-provider-ibmcloud/pkg/apis/ibmcloudprovider/v1" +) + +const ( + // GCPFirewallPermission is the role/permission to create or skip the creation of + // firewall rules for GCP during an xpn installation. + GCPFirewallPermission = "compute.firewalls.create" + + // TfVarsFileName is the filename for Terraform variables. + TfVarsFileName = "terraform.tfvars.json" + + // TfPlatformVarsFileName is the name for platform-specific + // Terraform variable files. + // + // https://www.terraform.io/docs/configuration/variables.html#variable-files + TfPlatformVarsFileName = "terraform.platform.auto.tfvars.json" + + tfvarsAssetName = "Terraform Variables" +) + +// TerraformVariables depends on InstallConfig, Manifests, +// and Ignition to generate the terraform.tfvars. +type TerraformVariables struct { + FileList []*asset.File +} + +var _ asset.WritableAsset = (*TerraformVariables)(nil) + +// Name returns the human-friendly name of the asset. 
+func (t *TerraformVariables) Name() string { + return tfvarsAssetName +} + +// Dependencies returns the dependency of the TerraformVariable +func (t *TerraformVariables) Dependencies() []asset.Asset { + return []asset.Asset{ + &installconfig.ClusterID{}, + &installconfig.InstallConfig{}, + new(rhcos.Image), + new(rhcos.Release), + new(rhcos.BootstrapImage), + &bootstrap.Bootstrap{}, + &machine.Master{}, + &machines.Master{}, + &machines.Worker{}, + &baremetalbootstrap.IronicCreds{}, + &installconfig.PlatformProvisionCheck{}, + &manifests.Manifests{}, + } +} + +// Generate generates the terraform.tfvars file. +func (t *TerraformVariables) Generate(parents asset.Parents) error { + ctx := context.TODO() + clusterID := &installconfig.ClusterID{} + installConfig := &installconfig.InstallConfig{} + bootstrapIgnAsset := &bootstrap.Bootstrap{} + masterIgnAsset := &machine.Master{} + mastersAsset := &machines.Master{} + workersAsset := &machines.Worker{} + manifestsAsset := &manifests.Manifests{} + rhcosImage := new(rhcos.Image) + rhcosRelease := new(rhcos.Release) + rhcosBootstrapImage := new(rhcos.BootstrapImage) + ironicCreds := &baremetalbootstrap.IronicCreds{} + parents.Get(clusterID, installConfig, bootstrapIgnAsset, masterIgnAsset, mastersAsset, workersAsset, manifestsAsset, rhcosImage, rhcosRelease, rhcosBootstrapImage, ironicCreds) + + platform := installConfig.Config.Platform.Name() + switch platform { + case external.Name, none.Name: + return errors.Errorf("cannot create the cluster because %q is a UPI platform", platform) + } + + masterIgn := string(masterIgnAsset.Files()[0].Data) + bootstrapIgn, err := injectInstallInfo(bootstrapIgnAsset.Files()[0].Data) + if err != nil { + return errors.Wrap(err, "unable to inject installation info") + } + + var useIPv4, useIPv6 bool + for _, network := range installConfig.Config.Networking.ServiceNetwork { + if network.IP.To4() != nil { + useIPv4 = true + } else { + useIPv6 = true + } + } + + machineV4CIDRs, machineV6CIDRs 
:= []string{}, []string{} + for _, network := range installConfig.Config.Networking.MachineNetwork { + if network.CIDR.IPNet.IP.To4() != nil { + machineV4CIDRs = append(machineV4CIDRs, network.CIDR.IPNet.String()) + } else { + machineV6CIDRs = append(machineV6CIDRs, network.CIDR.IPNet.String()) + } + } + + masterCount := len(mastersAsset.MachineFiles) + mastersSchedulable := false + for _, f := range manifestsAsset.Files() { + if f.Filename == manifests.SchedulerCfgFilename { + schedulerConfig := configv1.Scheduler{} + err = yaml.Unmarshal(f.Data, &schedulerConfig) + if err != nil { + return errors.Wrapf(err, "failed to unmarshall %s", manifests.SchedulerCfgFilename) + } + mastersSchedulable = schedulerConfig.Spec.MastersSchedulable + break + } + } + + data, err := tfvars.TFVars( + clusterID.InfraID, + installConfig.Config.ClusterDomain(), + installConfig.Config.BaseDomain, + machineV4CIDRs, + machineV6CIDRs, + useIPv4, + useIPv6, + bootstrapIgn, + masterIgn, + masterCount, + mastersSchedulable, + ) + if err != nil { + return errors.Wrap(err, "failed to get Terraform variables") + } + t.FileList = []*asset.File{ + { + Filename: TfVarsFileName, + Data: data, + }, + } + + if masterCount == 0 { + return errors.Errorf("master slice cannot be empty") + } + + switch platform { + case aws.Name: + var vpc string + var privateSubnets []string + var publicSubnets []string + + if len(installConfig.Config.Platform.AWS.Subnets) > 0 { + subnets, err := installConfig.AWS.PrivateSubnets(ctx) + if err != nil { + return err + } + + for id := range subnets { + privateSubnets = append(privateSubnets, id) + } + + subnets, err = installConfig.AWS.PublicSubnets(ctx) + if err != nil { + return err + } + + for id := range subnets { + publicSubnets = append(publicSubnets, id) + } + + vpc, err = installConfig.AWS.VPC(ctx) + if err != nil { + return err + } + } + + sess, err := installConfig.AWS.Session(ctx) + if err != nil { + return err + } + object := "bootstrap.ign" + bucket := 
fmt.Sprintf("%s-bootstrap", clusterID.InfraID) + url, err := awsconfig.PresignedS3URL(sess, installConfig.Config.Platform.AWS.Region, bucket, object) + if err != nil { + return err + } + masters, err := mastersAsset.Machines() + if err != nil { + return err + } + masterConfigs := make([]*machinev1beta1.AWSMachineProviderConfig, len(masters)) + for i, m := range masters { + masterConfigs[i] = m.Spec.ProviderSpec.Value.Object.(*machinev1beta1.AWSMachineProviderConfig) + } + workers, err := workersAsset.MachineSets() + if err != nil { + return err + } + workerConfigs := make([]*machinev1beta1.AWSMachineProviderConfig, len(workers)) + for i, m := range workers { + workerConfigs[i] = m.Spec.Template.Spec.ProviderSpec.Value.Object.(*machinev1beta1.AWSMachineProviderConfig) + } + osImage := strings.SplitN(string(*rhcosImage), ",", 2) + osImageID := osImage[0] + osImageRegion := installConfig.Config.AWS.Region + if len(osImage) == 2 { + osImageRegion = osImage[1] + } + + workerIAMRoleName := "" + if mp := installConfig.Config.WorkerMachinePool(); mp != nil { + awsMP := &aws.MachinePool{} + awsMP.Set(installConfig.Config.AWS.DefaultMachinePlatform) + awsMP.Set(mp.Platform.AWS) + workerIAMRoleName = awsMP.IAMRole + } + + var securityGroups []string + if mp := installConfig.Config.AWS.DefaultMachinePlatform; mp != nil { + securityGroups = mp.AdditionalSecurityGroupIDs + } + masterIAMRoleName := "" + if mp := installConfig.Config.ControlPlane; mp != nil { + awsMP := &aws.MachinePool{} + awsMP.Set(installConfig.Config.AWS.DefaultMachinePlatform) + awsMP.Set(mp.Platform.AWS) + masterIAMRoleName = awsMP.IAMRole + if len(awsMP.AdditionalSecurityGroupIDs) > 0 { + securityGroups = awsMP.AdditionalSecurityGroupIDs + } + } + + // AWS Zones is used to determine which route table the edge zone will be associated. 
+ allZones, err := installConfig.AWS.AllZones(ctx) + if err != nil { + return err + } + + data, err := awstfvars.TFVars(awstfvars.TFVarsSources{ + VPC: vpc, + PrivateSubnets: privateSubnets, + PublicSubnets: publicSubnets, + AvailabilityZones: allZones, + InternalZone: installConfig.Config.AWS.HostedZone, + InternalZoneRole: installConfig.Config.AWS.HostedZoneRole, + Services: installConfig.Config.AWS.ServiceEndpoints, + Publish: installConfig.Config.Publish, + MasterConfigs: masterConfigs, + WorkerConfigs: workerConfigs, + AMIID: osImageID, + AMIRegion: osImageRegion, + IgnitionBucket: bucket, + IgnitionPresignedURL: url, + AdditionalTrustBundle: installConfig.Config.AdditionalTrustBundle, + MasterIAMRoleName: masterIAMRoleName, + WorkerIAMRoleName: workerIAMRoleName, + Architecture: installConfig.Config.ControlPlane.Architecture, + Proxy: installConfig.Config.Proxy, + PreserveBootstrapIgnition: installConfig.Config.AWS.PreserveBootstrapIgnition, + MasterSecurityGroups: securityGroups, + }) + if err != nil { + return errors.Wrapf(err, "failed to get %s Terraform variables", platform) + } + t.FileList = append(t.FileList, &asset.File{ + Filename: TfPlatformVarsFileName, + Data: data, + }) + case azure.Name: + session, err := installConfig.Azure.Session() + if err != nil { + return err + } + + auth := azuretfvars.Auth{ + SubscriptionID: session.Credentials.SubscriptionID, + ClientID: session.Credentials.ClientID, + ClientSecret: session.Credentials.ClientSecret, + TenantID: session.Credentials.TenantID, + ClientCertificatePath: session.Credentials.ClientCertificatePath, + ClientCertificatePassword: session.Credentials.ClientCertificatePassword, + UseMSI: session.AuthType == aztypes.ManagedIdentityAuth, + } + masters, err := mastersAsset.Machines() + if err != nil { + return err + } + masterConfigs := make([]*machinev1beta1.AzureMachineProviderSpec, len(masters)) + for i, m := range masters { + masterConfigs[i] = 
m.Spec.ProviderSpec.Value.Object.(*machinev1beta1.AzureMachineProviderSpec) + } + workers, err := workersAsset.MachineSets() + if err != nil { + return err + } + workerConfigs := make([]*machinev1beta1.AzureMachineProviderSpec, len(workers)) + for i, w := range workers { + workerConfigs[i] = w.Spec.Template.Spec.ProviderSpec.Value.Object.(*machinev1beta1.AzureMachineProviderSpec) + } + client := aztypes.NewClient(session) + hyperVGeneration, err := client.GetHyperVGenerationVersion(context.TODO(), masterConfigs[0].VMSize, masterConfigs[0].Location, "") + if err != nil { + return err + } + + preexistingnetwork := installConfig.Config.Azure.VirtualNetwork != "" + + var bootstrapIgnStub, bootstrapIgnURLPlaceholder string + if installConfig.Azure.CloudName == azure.StackCloud { + // Due to the SAS created in Terraform to limit access to bootstrap ignition, we cannot know the URL in advance. + // Instead, we will pass a placeholder string in the ignition to be replaced in TF once the value is known. 
+ bootstrapIgnURLPlaceholder = "BOOTSTRAP_IGNITION_URL_PLACEHOLDER" + shim, err := bootstrap.GenerateIgnitionShimWithCertBundleAndProxy(bootstrapIgnURLPlaceholder, installConfig.Config.AdditionalTrustBundle, installConfig.Config.Proxy) + if err != nil { + return errors.Wrap(err, "failed to create stub Ignition config for bootstrap") + } + bootstrapIgnStub = string(shim) + } + + data, err := azuretfvars.TFVars( + azuretfvars.TFVarsSources{ + Auth: auth, + CloudName: installConfig.Config.Azure.CloudName, + ARMEndpoint: installConfig.Config.Azure.ARMEndpoint, + ResourceGroupName: installConfig.Config.Azure.ResourceGroupName, + BaseDomainResourceGroupName: installConfig.Config.Azure.BaseDomainResourceGroupName, + MasterConfigs: masterConfigs, + WorkerConfigs: workerConfigs, + ImageURL: string(*rhcosImage), + ImageRelease: rhcosRelease.GetAzureReleaseVersion(), + PreexistingNetwork: preexistingnetwork, + Publish: installConfig.Config.Publish, + OutboundType: installConfig.Config.Azure.OutboundType, + BootstrapIgnStub: bootstrapIgnStub, + BootstrapIgnitionURLPlaceholder: bootstrapIgnURLPlaceholder, + HyperVGeneration: hyperVGeneration, + VMArchitecture: installConfig.Config.ControlPlane.Architecture, + InfrastructureName: clusterID.InfraID, + }, + ) + if err != nil { + return errors.Wrapf(err, "failed to get %s Terraform variables", platform) + } + t.FileList = append(t.FileList, &asset.File{ + Filename: TfPlatformVarsFileName, + Data: data, + }) + case gcp.Name: + sess, err := gcpconfig.GetSession(ctx) + if err != nil { + return err + } + + auth := gcptfvars.Auth{ + ProjectID: installConfig.Config.GCP.ProjectID, + NetworkProjectID: installConfig.Config.GCP.NetworkProjectID, + ServiceAccount: string(sess.Credentials.JSON), + } + + client, err := gcpconfig.NewClient(context.Background()) + if err != nil { + return err + } + + // In the case of a shared vpn, the firewall rules should only be created if the user has permissions to do so + createFirewallRules := true + if 
installConfig.Config.GCP.NetworkProjectID != "" { + permissions, err := client.GetProjectPermissions(context.Background(), installConfig.Config.GCP.NetworkProjectID, []string{ + GCPFirewallPermission, + }) + if err != nil { + return err + } + createFirewallRules = permissions.Has(GCPFirewallPermission) + } + + masters, err := mastersAsset.Machines() + if err != nil { + return err + } + masterConfigs := make([]*machinev1beta1.GCPMachineProviderSpec, len(masters)) + for i, m := range masters { + masterConfigs[i] = m.Spec.ProviderSpec.Value.Object.(*machinev1beta1.GCPMachineProviderSpec) + } + workers, err := workersAsset.MachineSets() + if err != nil { + return err + } + workerConfigs := make([]*machinev1beta1.GCPMachineProviderSpec, len(workers)) + for i, w := range workers { + workerConfigs[i] = w.Spec.Template.Spec.ProviderSpec.Value.Object.(*machinev1beta1.GCPMachineProviderSpec) + } + preexistingnetwork := installConfig.Config.GCP.Network != "" + + // Search the project for a dns zone with the specified base domain. 
+ publicZoneName := "" + if installConfig.Config.Publish == types.ExternalPublishingStrategy { + publicZone, err := client.GetDNSZone(ctx, installConfig.Config.GCP.ProjectID, installConfig.Config.BaseDomain, true) + if err != nil { + return errors.Wrapf(err, "failed to get GCP public zone") + } + publicZoneName = publicZone.Name + } + + privateZoneName := "" + if installConfig.Config.GCP.NetworkProjectID != "" { + privateZone, err := client.GetDNSZone(ctx, installConfig.Config.GCP.ProjectID, installConfig.Config.ClusterDomain(), false) + if err != nil { + return errors.Wrapf(err, "failed to get GCP private zone") + } + if privateZone != nil { + privateZoneName = privateZone.Name + } + } + + archName := coreosarch.RpmArch(string(installConfig.Config.ControlPlane.Architecture)) + st, err := rhcospkg.FetchCoreOSBuild(ctx) + if err != nil { + return err + } + streamArch, err := st.GetArchitecture(archName) + if err != nil { + return err + } + + img := streamArch.Images.Gcp + if img == nil { + return fmt.Errorf("%s: No GCP build found", st.FormatPrefix(archName)) + } + + data, err := gcptfvars.TFVars( + gcptfvars.TFVarsSources{ + Auth: auth, + MasterConfigs: masterConfigs, + WorkerConfigs: workerConfigs, + CreateFirewallRules: createFirewallRules, + PreexistingNetwork: preexistingnetwork, + PublicZoneName: publicZoneName, + PrivateZoneName: privateZoneName, + PublishStrategy: installConfig.Config.Publish, + InfrastructureName: clusterID.InfraID, + }, + ) + if err != nil { + return errors.Wrapf(err, "failed to get %s Terraform variables", platform) + } + t.FileList = append(t.FileList, &asset.File{ + Filename: TfPlatformVarsFileName, + Data: data, + }) + case ibmcloud.Name: + client, err := installConfig.IBMCloud.Client() + if err != nil { + return err + } + auth := ibmcloudtfvars.Auth{ + APIKey: client.GetAPIKey(), + } + + // Get master and worker machine info + masters, err := mastersAsset.Machines() + if err != nil { + return err + } + masterConfigs := 
make([]*ibmcloudprovider.IBMCloudMachineProviderSpec, len(masters)) + for i, m := range masters { + masterConfigs[i] = m.Spec.ProviderSpec.Value.Object.(*ibmcloudprovider.IBMCloudMachineProviderSpec) + } + workers, err := workersAsset.MachineSets() + if err != nil { + return err + } + workerConfigs := make([]*ibmcloudprovider.IBMCloudMachineProviderSpec, len(workers)) + for i, w := range workers { + workerConfigs[i] = w.Spec.Template.Spec.ProviderSpec.Value.Object.(*ibmcloudprovider.IBMCloudMachineProviderSpec) + } + + // Set existing network (boolean of whether one is being used) + preexistingVPC := installConfig.Config.Platform.IBMCloud.GetVPCName() != "" + + // Set machine pool info + var masterMachinePool ibmcloud.MachinePool + var workerMachinePool ibmcloud.MachinePool + if installConfig.Config.Platform.IBMCloud.DefaultMachinePlatform != nil { + masterMachinePool.Set(installConfig.Config.Platform.IBMCloud.DefaultMachinePlatform) + workerMachinePool.Set(installConfig.Config.Platform.IBMCloud.DefaultMachinePlatform) + } + if installConfig.Config.ControlPlane.Platform.IBMCloud != nil { + masterMachinePool.Set(installConfig.Config.ControlPlane.Platform.IBMCloud) + } + if worker := installConfig.Config.WorkerMachinePool(); worker != nil { + workerMachinePool.Set(worker.Platform.IBMCloud) + } + + // Get master dedicated host info + var masterDedicatedHosts []ibmcloudtfvars.DedicatedHost + for _, dhost := range masterMachinePool.DedicatedHosts { + if dhost.Name != "" { + dh, err := client.GetDedicatedHostByName(ctx, dhost.Name, installConfig.Config.Platform.IBMCloud.Region) + if err != nil { + return err + } + masterDedicatedHosts = append(masterDedicatedHosts, ibmcloudtfvars.DedicatedHost{ + ID: *dh.ID, + }) + } else { + masterDedicatedHosts = append(masterDedicatedHosts, ibmcloudtfvars.DedicatedHost{ + Profile: dhost.Profile, + }) + } + } + + // Get worker dedicated host info + var workerDedicatedHosts []ibmcloudtfvars.DedicatedHost + for _, dhost := range 
workerMachinePool.DedicatedHosts { + if dhost.Name != "" { + dh, err := client.GetDedicatedHostByName(ctx, dhost.Name, installConfig.Config.Platform.IBMCloud.Region) + if err != nil { + return err + } + workerDedicatedHosts = append(workerDedicatedHosts, ibmcloudtfvars.DedicatedHost{ + ID: *dh.ID, + }) + } else { + workerDedicatedHosts = append(workerDedicatedHosts, ibmcloudtfvars.DedicatedHost{ + Profile: dhost.Profile, + }) + } + } + + var cisCRN, dnsID string + vpcPermitted := false + + if installConfig.Config.Publish == types.InternalPublishingStrategy { + // Get DNSInstanceCRN from InstallConfig metadata + dnsInstance, err := installConfig.IBMCloud.DNSInstance(ctx) + if err != nil { + return err + } + if dnsInstance != nil { + dnsID = dnsInstance.ID + } + // If the VPC already exists and the cluster is Private, check if the VPC is already a Permitted Network on DNS Instance + if preexistingVPC { + vpcPermitted, err = installConfig.IBMCloud.IsVPCPermittedNetwork(ctx, installConfig.Config.Platform.IBMCloud.VPCName) + if err != nil { + return err + } + } + } else { + // Get CISInstanceCRN from InstallConfig metadata + cisCRN, err = installConfig.IBMCloud.CISInstanceCRN(ctx) + if err != nil { + return err + } + } + + data, err = ibmcloudtfvars.TFVars( + ibmcloudtfvars.TFVarsSources{ + Auth: auth, + CISInstanceCRN: cisCRN, + DNSInstanceID: dnsID, + ImageURL: string(*rhcosImage), + MasterConfigs: masterConfigs, + MasterDedicatedHosts: masterDedicatedHosts, + NetworkResourceGroupName: installConfig.Config.Platform.IBMCloud.NetworkResourceGroupName, + PreexistingVPC: preexistingVPC, + PublishStrategy: installConfig.Config.Publish, + ResourceGroupName: installConfig.Config.Platform.IBMCloud.ResourceGroupName, + VPCPermitted: vpcPermitted, + WorkerConfigs: workerConfigs, + WorkerDedicatedHosts: workerDedicatedHosts, + }, + ) + if err != nil { + return errors.Wrapf(err, "failed to get %s Terraform variables", platform) + } + t.FileList = append(t.FileList, &asset.File{ + 
Filename: TfPlatformVarsFileName, + Data: data, + }) + case libvirt.Name: + masters, err := mastersAsset.Machines() + if err != nil { + return err + } + // convert options list to a list of mappings which can be consumed by terraform + var dnsmasqoptions []map[string]string + for _, option := range installConfig.Config.Platform.Libvirt.Network.DnsmasqOptions { + dnsmasqoptions = append(dnsmasqoptions, + map[string]string{ + "option_name": option.Name, + "option_value": option.Value}) + } + + data, err = libvirttfvars.TFVars( + libvirttfvars.TFVarsSources{ + MasterConfig: masters[0].Spec.ProviderSpec.Value.Object.(*libvirtprovider.LibvirtMachineProviderConfig), + OsImage: string(*rhcosImage), + MachineCIDR: &installConfig.Config.Networking.MachineNetwork[0].CIDR.IPNet, + Bridge: installConfig.Config.Platform.Libvirt.Network.IfName, + MasterCount: masterCount, + Architecture: installConfig.Config.ControlPlane.Architecture, + DnsmasqOptions: dnsmasqoptions, + }, + ) + if err != nil { + return errors.Wrapf(err, "failed to get %s Terraform variables", platform) + } + t.FileList = append(t.FileList, &asset.File{ + Filename: TfPlatformVarsFileName, + Data: data, + }) + case openstack.Name: + data, err = openstacktfvars.TFVars( + installConfig, + mastersAsset, + workersAsset, + string(*rhcosImage), + clusterID, + bootstrapIgn, + ) + if err != nil { + return errors.Wrapf(err, "failed to get %s Terraform variables", platform) + } + t.FileList = append(t.FileList, &asset.File{ + Filename: TfPlatformVarsFileName, + Data: data, + }) + case baremetal.Name: + var imageCacheIP string + if installConfig.Config.Platform.BareMetal.ProvisioningNetwork == baremetal.DisabledProvisioningNetwork { + imageCacheIP = installConfig.Config.Platform.BareMetal.APIVIPs[0] + } else { + imageCacheIP = installConfig.Config.Platform.BareMetal.BootstrapProvisioningIP + } + + data, err = baremetaltfvars.TFVars( + *installConfig.Config.ControlPlane.Replicas, + 
installConfig.Config.Platform.BareMetal.LibvirtURI, + installConfig.Config.Platform.BareMetal.APIVIPs, + imageCacheIP, + string(*rhcosBootstrapImage), + installConfig.Config.Platform.BareMetal.ExternalBridge, + installConfig.Config.Platform.BareMetal.ExternalMACAddress, + installConfig.Config.Platform.BareMetal.ProvisioningBridge, + installConfig.Config.Platform.BareMetal.ProvisioningMACAddress, + installConfig.Config.Platform.BareMetal.Hosts, + mastersAsset.HostFiles, + string(*rhcosImage), + ironicCreds.Username, + ironicCreds.Password, + masterIgn, + ) + if err != nil { + return errors.Wrapf(err, "failed to get %s Terraform variables", platform) + } + t.FileList = append(t.FileList, &asset.File{ + Filename: TfPlatformVarsFileName, + Data: data, + }) + case ovirt.Name: + config, err := ovirtconfig.NewConfig() + if err != nil { + return err + } + con, err := ovirtconfig.NewConnection() + if err != nil { + return err + } + defer con.Close() + + if installConfig.Config.Platform.Ovirt.VNICProfileID == "" { + profiles, err := ovirtconfig.FetchVNICProfileByClusterNetwork( + con, + installConfig.Config.Platform.Ovirt.ClusterID, + installConfig.Config.Platform.Ovirt.NetworkName) + if err != nil { + return errors.Wrapf(err, "failed to compute values for Engine platform") + } + if len(profiles) != 1 { + return fmt.Errorf("failed to compute values for Engine platform, "+ + "there are multiple vNIC profiles. 
found %v vNIC profiles for network %s", + len(profiles), installConfig.Config.Platform.Ovirt.NetworkName) + } + installConfig.Config.Platform.Ovirt.VNICProfileID = profiles[0].MustId() + } + + masters, err := mastersAsset.Machines() + if err != nil { + return err + } + + data, err := ovirttfvars.TFVars( + ovirttfvars.Auth{ + URL: config.URL, + Username: config.Username, + Password: config.Password, + Cafile: config.CAFile, + Cabundle: config.CABundle, + Insecure: config.Insecure, + }, + installConfig.Config.Platform.Ovirt.ClusterID, + installConfig.Config.Platform.Ovirt.StorageDomainID, + installConfig.Config.Platform.Ovirt.NetworkName, + installConfig.Config.Platform.Ovirt.VNICProfileID, + string(*rhcosImage), + clusterID.InfraID, + masters[0].Spec.ProviderSpec.Value.Object.(*ovirtprovider.OvirtMachineProviderSpec), + installConfig.Config.Platform.Ovirt.AffinityGroups, + ) + if err != nil { + return errors.Wrapf(err, "failed to get %s Terraform variables", platform) + } + t.FileList = append(t.FileList, &asset.File{ + Filename: TfPlatformVarsFileName, + Data: data, + }) + case powervs.Name: + APIKey, err := installConfig.PowerVS.APIKey(ctx) + if err != nil { + return err + } + + masters, err := mastersAsset.Machines() + if err != nil { + return err + } + + var ( + cisCRN, dnsCRN, vpcGatewayName, vpcSubnet string + vpcPermitted, vpcGatewayAttached bool + ) + if len(installConfig.Config.PowerVS.VPCSubnets) > 0 { + vpcSubnet = installConfig.Config.PowerVS.VPCSubnets[0] + } + switch installConfig.Config.Publish { + case types.InternalPublishingStrategy: + // Get DNSInstanceCRN from InstallConfig metadata + dnsCRN, err = installConfig.PowerVS.DNSInstanceCRN(ctx) + if err != nil { + return err + } + + // If the VPC already exists and the cluster is Private, check if the VPC is already a Permitted Network on DNS Instance + if installConfig.Config.PowerVS.VPCName != "" { + vpcPermitted, err = installConfig.PowerVS.IsVPCPermittedNetwork(ctx, 
installConfig.Config.Platform.PowerVS.VPCName, installConfig.Config.BaseDomain) + if err != nil { + return err + } + vpcGatewayName, vpcGatewayAttached, err = installConfig.PowerVS.GetExistingVPCGateway(ctx, installConfig.Config.Platform.PowerVS.VPCName, vpcSubnet) + if err != nil { + return err + } + } + case types.ExternalPublishingStrategy: + // Get CISInstanceCRN from InstallConfig metadata + cisCRN, err = installConfig.PowerVS.CISInstanceCRN(ctx) + if err != nil { + return err + } + default: + return errors.New("unknown publishing strategy") + } + + masterConfigs := make([]*machinev1.PowerVSMachineProviderConfig, len(masters)) + for i, m := range masters { + masterConfigs[i] = m.Spec.ProviderSpec.Value.Object.(*machinev1.PowerVSMachineProviderConfig) + } + + client, err := powervsconfig.NewClient() + if err != nil { + return err + } + var ( + vpcRegion, vpcZone string + ) + vpcName := installConfig.Config.PowerVS.VPCName + if vpcName != "" { + var vpc *vpcv1.VPC + vpc, err = client.GetVPCByName(ctx, vpcName) + if err != nil { + return err + } + var crnElems = strings.SplitN(*vpc.CRN, ":", 8) + vpcRegion = crnElems[5] + } else { + specified := installConfig.Config.PowerVS.VPCRegion + if specified != "" { + if powervs.ValidateVPCRegion(specified) { + vpcRegion = specified + } else { + return errors.New("unknown VPC region") + } + } else if vpcRegion, err = powervs.VPCRegionForPowerVSRegion(installConfig.Config.PowerVS.Region); err != nil { + return err + } + } + if vpcSubnet != "" { + var sn *vpcv1.Subnet + sn, err = client.GetSubnetByName(ctx, vpcSubnet, vpcRegion) + if err != nil { + return err + } + vpcZone = *sn.Zone.Name + } else { + rand.Seed(time.Now().UnixNano()) + vpcZone = fmt.Sprintf("%s-%d", vpcRegion, rand.Intn(2)+1) //nolint:gosec // we don't need a crypto secure number + } + + // Resolved merge conflict: keep the PER/transit-gateway probe from HEAD + // (transitGatewayEnabled is consumed below in TFVarsSources) AND the + // incoming sysType-defaulting logic from the region-zone-sysType change. + err = powervsconfig.ValidatePERAvailability(client, installConfig.Config) + transitGatewayEnabled := err == nil + + cpStanza 
:= installConfig.Config.ControlPlane + if cpStanza == nil || cpStanza.Platform.PowerVS == nil || cpStanza.Platform.PowerVS.SysType == "" { + sysTypes, err := powervs.AvailableSysTypes(installConfig.Config.PowerVS.Region, installConfig.Config.PowerVS.Zone) + if err != nil { + return err + } + for i := range masters { + masterConfigs[i].SystemType = sysTypes[0] + } + } + + serviceInstanceCRN, err := client.ServiceInstanceIDToCRN(ctx, installConfig.Config.PowerVS.ServiceInstanceID) + if err != nil { + return err + } + if serviceInstanceCRN == "" { + return fmt.Errorf("the service instance CRN is empty for the given ID") + } + + osImage := strings.SplitN(string(*rhcosImage), "/", 2) + data, err = powervstfvars.TFVars( + powervstfvars.TFVarsSources{ + MasterConfigs: masterConfigs, + Region: installConfig.Config.Platform.PowerVS.Region, + Zone: installConfig.Config.Platform.PowerVS.Zone, + APIKey: APIKey, + SSHKey: installConfig.Config.SSHKey, + PowerVSResourceGroup: installConfig.Config.PowerVS.PowerVSResourceGroup, + ImageBucketName: osImage[0], + ImageBucketFileName: osImage[1], + NetworkName: installConfig.Config.PowerVS.PVSNetworkName, + VPCRegion: vpcRegion, + VPCZone: vpcZone, + VPCName: vpcName, + VPCSubnetName: vpcSubnet, + VPCPermitted: vpcPermitted, + VPCGatewayName: vpcGatewayName, + VPCGatewayAttached: vpcGatewayAttached, + CloudConnectionName: installConfig.Config.PowerVS.CloudConnectionName, + CISInstanceCRN: cisCRN, + DNSInstanceCRN: dnsCRN, + PublishStrategy: installConfig.Config.Publish, + EnableSNAT: len(installConfig.Config.DeprecatedImageContentSources) == 0 && len(installConfig.Config.ImageDigestSources) == 0, + TransitGatewayEnabled: transitGatewayEnabled, + ServiceInstanceCRN: serviceInstanceCRN, + }, + ) + if err != nil { + return errors.Wrapf(err, "failed to get %s Terraform variables", platform) + } + t.FileList = append(t.FileList, &asset.File{ + 
Filename: TfPlatformVarsFileName, + Data: data, + }) + + case vsphere.Name: + networkFailureDomainMap := make(map[string]string) + ctx, cancel := context.WithTimeout(context.TODO(), 60*time.Second) + defer cancel() + + vim25Client, _, cleanup, err := vsphereconfig.CreateVSphereClients(context.TODO(), + installConfig.Config.VSphere.VCenters[0].Server, + installConfig.Config.VSphere.VCenters[0].Username, + installConfig.Config.VSphere.VCenters[0].Password) + if err != nil { + return errors.Wrapf(err, "unable to connect to vCenter %s. Ensure provided information is correct and client certs have been added to system trust", installConfig.Config.VSphere.VCenters[0].Server) + } + defer cleanup() + + finder := vsphereconfig.NewFinder(vim25Client) + + controlPlanes, err := mastersAsset.Machines() + if err != nil { + return err + } + controlPlaneConfigs := make([]*machinev1beta1.VSphereMachineProviderSpec, len(controlPlanes)) + for i, c := range controlPlanes { + var clusterMo mo.ClusterComputeResource + controlPlaneConfigs[i] = c.Spec.ProviderSpec.Value.Object.(*machinev1beta1.VSphereMachineProviderSpec) + + rpObj, err := finder.ResourcePool(ctx, controlPlaneConfigs[i].Workspace.ResourcePool) + if err != nil { + return err + } + + clusterRef, err := rpObj.Owner(ctx) + if err != nil { + return err + } + + // When using finder.ObjectReference the InventoryPath is defined + // NewClusterComputeResource I don't believe assigns that value. 
+ clusterObjRef, err := finder.ObjectReference(ctx, clusterRef.Reference()) + if err != nil { + return err + } + + clusterObj, ok := clusterObjRef.(*object.ClusterComputeResource) + if !ok { + return errors.New("unable to convert cluster object reference to object cluster compute resource") + } + err = clusterObj.Properties(ctx, clusterRef.Reference(), []string{"name", "summary"}, &clusterMo) + if err != nil { + return err + } + + networkPath := path.Join(clusterObj.InventoryPath, controlPlaneConfigs[i].Network.Devices[0].NetworkName) + netObj, err := finder.Network(ctx, networkPath) + if err != nil { + return err + } + + controlPlaneConfigs[i].Network.Devices[0].NetworkName = netObj.Reference().Value + } + + for _, fd := range installConfig.Config.VSphere.FailureDomains { + // Must use the Managed Object ID for a port group (e.g. dvportgroup-5258) + // instead of the name since port group names aren't always unique in vSphere. + // https://bugzilla.redhat.com/show_bug.cgi?id=1918005 + + networkPath := path.Join(fd.Topology.ComputeCluster, fd.Topology.Networks[0]) + netObj, err := finder.Network(ctx, networkPath) + if err != nil { + return errors.Wrap(err, "failed to get vSphere network ID") + } + + networkFailureDomainMap[fd.Name] = netObj.Reference().Value + } + + data, err = vspheretfvars.TFVars( + vspheretfvars.TFVarsSources{ + ControlPlaneConfigs: controlPlaneConfigs, + ImageURL: string(*rhcosImage), + DiskType: installConfig.Config.Platform.VSphere.DiskType, + NetworksInFailureDomain: networkFailureDomainMap, + InfraID: clusterID.InfraID, + InstallConfig: installConfig, + ControlPlaneMachines: controlPlanes, + }, + ) + if err != nil { + return errors.Wrapf(err, "failed to get %s Terraform variables", platform) + } + t.FileList = append(t.FileList, &asset.File{ + Filename: TfPlatformVarsFileName, + Data: data, + }) + + case alibabacloud.Name: + client, err := installConfig.AlibabaCloud.Client() + if err != nil { + return errors.Wrapf(err, "failed to create new 
client use region %s", installConfig.Config.Platform.AlibabaCloud.Region) + } + bucket := fmt.Sprintf("%s-bootstrap", clusterID.InfraID) + object := "bootstrap.ign" + signURL, err := client.GetOSSObjectSignURL(bucket, object) + if err != nil { + return errors.Wrapf(err, "failed to get a presigned URL for OSS object %s", object) + } + + auth := alibabacloudtfvars.Auth{ + AccessKey: client.AccessKeyID, + SecretKey: client.AccessKeySecret, + } + + masters, err := mastersAsset.Machines() + if err != nil { + return errors.Wrapf(err, "failed to get master machine info") + } + masterConfigs := make([]*machinev1.AlibabaCloudMachineProviderConfig, len(masters)) + for i, m := range masters { + masterConfigs[i] = m.Spec.ProviderSpec.Value.Object.(*machinev1.AlibabaCloudMachineProviderConfig) + } + workers, err := workersAsset.MachineSets() + if err != nil { + return errors.Wrapf(err, "failed to get worker machine info") + } + workerConfigs := make([]*machinev1.AlibabaCloudMachineProviderConfig, len(workers)) + for i, w := range workers { + workerConfigs[i] = w.Spec.Template.Spec.ProviderSpec.Value.Object.(*machinev1.AlibabaCloudMachineProviderConfig) + } + + natGatewayZones, err := client.ListEnhanhcedNatGatewayAvailableZones() + if err != nil { + return errors.Wrapf(err, "failed to list available zones for NAT gateway") + } + natGatewayZoneID := natGatewayZones.Zones[0].ZoneId + + vswitchIDs := []string{} + if len(installConfig.Config.AlibabaCloud.VSwitchIDs) > 0 { + vswitchIDs = installConfig.Config.AlibabaCloud.VSwitchIDs + } + data, err := alibabacloudtfvars.TFVars( + alibabacloudtfvars.TFVarsSources{ + Auth: auth, + VpcID: installConfig.Config.AlibabaCloud.VpcID, + VSwitchIDs: vswitchIDs, + PrivateZoneID: installConfig.Config.AlibabaCloud.PrivateZoneID, + ResourceGroupID: installConfig.Config.AlibabaCloud.ResourceGroupID, + BaseDomain: installConfig.Config.BaseDomain, + NatGatewayZoneID: natGatewayZoneID, + MasterConfigs: masterConfigs, + WorkerConfigs: workerConfigs, + 
IgnitionBucket: bucket, + IgnitionPresignedURL: signURL, + AdditionalTrustBundle: installConfig.Config.AdditionalTrustBundle, + Architecture: installConfig.Config.ControlPlane.Architecture, + Publish: installConfig.Config.Publish, + Proxy: installConfig.Config.Proxy, + }, + ) + if err != nil { + return errors.Wrapf(err, "failed to get %s Terraform variables", platform) + } + t.FileList = append(t.FileList, &asset.File{ + Filename: TfPlatformVarsFileName, + Data: data, + }) + case nutanix.Name: + if rhcosImage == nil { + return errors.New("unable to retrieve rhcos image") + } + controlPlanes, err := mastersAsset.Machines() + if err != nil { + return errors.Wrapf(err, "error getting control plane machines") + } + controlPlaneConfigs := make([]*machinev1.NutanixMachineProviderConfig, len(controlPlanes)) + for i, c := range controlPlanes { + controlPlaneConfigs[i] = c.Spec.ProviderSpec.Value.Object.(*machinev1.NutanixMachineProviderConfig) + } + + imgURI := string(*rhcosImage) + if installConfig.Config.Nutanix.ClusterOSImage != "" { + imgURI = installConfig.Config.Nutanix.ClusterOSImage + } + data, err = nutanixtfvars.TFVars( + nutanixtfvars.TFVarsSources{ + PrismCentralAddress: installConfig.Config.Nutanix.PrismCentral.Endpoint.Address, + Port: strconv.Itoa(int(installConfig.Config.Nutanix.PrismCentral.Endpoint.Port)), + Username: installConfig.Config.Nutanix.PrismCentral.Username, + Password: installConfig.Config.Nutanix.PrismCentral.Password, + ImageURI: imgURI, + BootstrapIgnitionData: bootstrapIgn, + ClusterID: clusterID.InfraID, + ControlPlaneConfigs: controlPlaneConfigs, + }, + ) + if err != nil { + return errors.Wrapf(err, "failed to get %s Terraform variables", platform) + } + t.FileList = append(t.FileList, &asset.File{ + Filename: TfPlatformVarsFileName, + Data: data, + }) + default: + logrus.Warnf("unrecognized platform %s", platform) + } + + return nil +} + +// Files returns the files generated by the asset. 
+func (t *TerraformVariables) Files() []*asset.File { + return t.FileList +} + +// Load reads the terraform.tfvars from disk. +func (t *TerraformVariables) Load(f asset.FileFetcher) (found bool, err error) { + file, err := f.FetchByName(TfVarsFileName) + if err != nil { + if os.IsNotExist(err) { + return false, nil + } + return false, err + } + t.FileList = []*asset.File{file} + + switch file, err := f.FetchByName(TfPlatformVarsFileName); { + case err == nil: + t.FileList = append(t.FileList, file) + case !os.IsNotExist(err): + return false, err + } + + return true, nil +} + +// injectInstallInfo adds information about the installer and its invoker as a +// ConfigMap to the provided bootstrap Ignition config. +func injectInstallInfo(bootstrap []byte) (string, error) { + config := &igntypes.Config{} + if err := json.Unmarshal(bootstrap, &config); err != nil { + return "", errors.Wrap(err, "failed to unmarshal bootstrap Ignition config") + } + + cm, err := openshiftinstall.CreateInstallConfigMap("openshift-install") + if err != nil { + return "", errors.Wrap(err, "failed to generate openshift-install config") + } + + config.Storage.Files = append(config.Storage.Files, ignition.FileFromString("/opt/openshift/manifests/openshift-install.yaml", "root", 0644, cm)) + + ign, err := ignition.Marshal(config) + if err != nil { + return "", errors.Wrap(err, "failed to marshal bootstrap Ignition config") + } + + return string(ign), nil +} diff --git a/.history/pkg/asset/cluster/tfvars_20241219121700.go b/.history/pkg/asset/cluster/tfvars_20241219121700.go new file mode 100644 index 00000000000..c2cc6b4f819 --- /dev/null +++ b/.history/pkg/asset/cluster/tfvars_20241219121700.go @@ -0,0 +1,1186 @@ +package cluster + +import ( + "context" + "encoding/json" + "fmt" + "math/rand" + "os" + "path" + "strconv" + "strings" + "time" + + "github.com/IBM/vpc-go-sdk/vpcv1" + igntypes "github.com/coreos/ignition/v2/config/v3_2/types" + coreosarch "github.com/coreos/stream-metadata-go/arch" + 
"github.com/pkg/errors" + "github.com/sirupsen/logrus" + "github.com/vmware/govmomi/object" + "github.com/vmware/govmomi/vim25/mo" + "sigs.k8s.io/yaml" + + configv1 "github.com/openshift/api/config/v1" + machinev1 "github.com/openshift/api/machine/v1" + machinev1beta1 "github.com/openshift/api/machine/v1beta1" + libvirtprovider "github.com/openshift/cluster-api-provider-libvirt/pkg/apis/libvirtproviderconfig/v1beta1" + ovirtprovider "github.com/openshift/cluster-api-provider-ovirt/pkg/apis/ovirtprovider/v1beta1" + "github.com/openshift/installer/pkg/asset" + "github.com/openshift/installer/pkg/asset/ignition" + "github.com/openshift/installer/pkg/asset/ignition/bootstrap" + baremetalbootstrap "github.com/openshift/installer/pkg/asset/ignition/bootstrap/baremetal" + "github.com/openshift/installer/pkg/asset/ignition/machine" + "github.com/openshift/installer/pkg/asset/installconfig" + awsconfig "github.com/openshift/installer/pkg/asset/installconfig/aws" + aztypes "github.com/openshift/installer/pkg/asset/installconfig/azure" + gcpconfig "github.com/openshift/installer/pkg/asset/installconfig/gcp" + ovirtconfig "github.com/openshift/installer/pkg/asset/installconfig/ovirt" + powervsconfig "github.com/openshift/installer/pkg/asset/installconfig/powervs" + vsphereconfig "github.com/openshift/installer/pkg/asset/installconfig/vsphere" + "github.com/openshift/installer/pkg/asset/machines" + "github.com/openshift/installer/pkg/asset/manifests" + "github.com/openshift/installer/pkg/asset/openshiftinstall" + "github.com/openshift/installer/pkg/asset/rhcos" + rhcospkg "github.com/openshift/installer/pkg/rhcos" + "github.com/openshift/installer/pkg/tfvars" + alibabacloudtfvars "github.com/openshift/installer/pkg/tfvars/alibabacloud" + awstfvars "github.com/openshift/installer/pkg/tfvars/aws" + azuretfvars "github.com/openshift/installer/pkg/tfvars/azure" + baremetaltfvars "github.com/openshift/installer/pkg/tfvars/baremetal" + gcptfvars 
"github.com/openshift/installer/pkg/tfvars/gcp" + ibmcloudtfvars "github.com/openshift/installer/pkg/tfvars/ibmcloud" + libvirttfvars "github.com/openshift/installer/pkg/tfvars/libvirt" + nutanixtfvars "github.com/openshift/installer/pkg/tfvars/nutanix" + openstacktfvars "github.com/openshift/installer/pkg/tfvars/openstack" + ovirttfvars "github.com/openshift/installer/pkg/tfvars/ovirt" + powervstfvars "github.com/openshift/installer/pkg/tfvars/powervs" + vspheretfvars "github.com/openshift/installer/pkg/tfvars/vsphere" + "github.com/openshift/installer/pkg/types" + "github.com/openshift/installer/pkg/types/alibabacloud" + "github.com/openshift/installer/pkg/types/aws" + "github.com/openshift/installer/pkg/types/azure" + "github.com/openshift/installer/pkg/types/baremetal" + "github.com/openshift/installer/pkg/types/external" + "github.com/openshift/installer/pkg/types/gcp" + "github.com/openshift/installer/pkg/types/ibmcloud" + "github.com/openshift/installer/pkg/types/libvirt" + "github.com/openshift/installer/pkg/types/none" + "github.com/openshift/installer/pkg/types/nutanix" + "github.com/openshift/installer/pkg/types/openstack" + "github.com/openshift/installer/pkg/types/ovirt" + "github.com/openshift/installer/pkg/types/powervs" + "github.com/openshift/installer/pkg/types/vsphere" + ibmcloudprovider "github.com/openshift/machine-api-provider-ibmcloud/pkg/apis/ibmcloudprovider/v1" +) + +const ( + // GCPFirewallPermission is the role/permission to create or skip the creation of + // firewall rules for GCP during an xpn installation. + GCPFirewallPermission = "compute.firewalls.create" + + // TfVarsFileName is the filename for Terraform variables. + TfVarsFileName = "terraform.tfvars.json" + + // TfPlatformVarsFileName is the name for platform-specific + // Terraform variable files. 
+ // + // https://www.terraform.io/docs/configuration/variables.html#variable-files + TfPlatformVarsFileName = "terraform.platform.auto.tfvars.json" + + tfvarsAssetName = "Terraform Variables" +) + +// TerraformVariables depends on InstallConfig, Manifests, +// and Ignition to generate the terrafor.tfvars. +type TerraformVariables struct { + FileList []*asset.File +} + +var _ asset.WritableAsset = (*TerraformVariables)(nil) + +// Name returns the human-friendly name of the asset. +func (t *TerraformVariables) Name() string { + return tfvarsAssetName +} + +// Dependencies returns the dependency of the TerraformVariable +func (t *TerraformVariables) Dependencies() []asset.Asset { + return []asset.Asset{ + &installconfig.ClusterID{}, + &installconfig.InstallConfig{}, + new(rhcos.Image), + new(rhcos.Release), + new(rhcos.BootstrapImage), + &bootstrap.Bootstrap{}, + &machine.Master{}, + &machines.Master{}, + &machines.Worker{}, + &baremetalbootstrap.IronicCreds{}, + &installconfig.PlatformProvisionCheck{}, + &manifests.Manifests{}, + } +} + +// Generate generates the terraform.tfvars file. 
+func (t *TerraformVariables) Generate(parents asset.Parents) error { + ctx := context.TODO() + clusterID := &installconfig.ClusterID{} + installConfig := &installconfig.InstallConfig{} + bootstrapIgnAsset := &bootstrap.Bootstrap{} + masterIgnAsset := &machine.Master{} + mastersAsset := &machines.Master{} + workersAsset := &machines.Worker{} + manifestsAsset := &manifests.Manifests{} + rhcosImage := new(rhcos.Image) + rhcosRelease := new(rhcos.Release) + rhcosBootstrapImage := new(rhcos.BootstrapImage) + ironicCreds := &baremetalbootstrap.IronicCreds{} + parents.Get(clusterID, installConfig, bootstrapIgnAsset, masterIgnAsset, mastersAsset, workersAsset, manifestsAsset, rhcosImage, rhcosRelease, rhcosBootstrapImage, ironicCreds) + + platform := installConfig.Config.Platform.Name() + switch platform { + case external.Name, none.Name: + return errors.Errorf("cannot create the cluster because %q is a UPI platform", platform) + } + + masterIgn := string(masterIgnAsset.Files()[0].Data) + bootstrapIgn, err := injectInstallInfo(bootstrapIgnAsset.Files()[0].Data) + if err != nil { + return errors.Wrap(err, "unable to inject installation info") + } + + var useIPv4, useIPv6 bool + for _, network := range installConfig.Config.Networking.ServiceNetwork { + if network.IP.To4() != nil { + useIPv4 = true + } else { + useIPv6 = true + } + } + + machineV4CIDRs, machineV6CIDRs := []string{}, []string{} + for _, network := range installConfig.Config.Networking.MachineNetwork { + if network.CIDR.IPNet.IP.To4() != nil { + machineV4CIDRs = append(machineV4CIDRs, network.CIDR.IPNet.String()) + } else { + machineV6CIDRs = append(machineV6CIDRs, network.CIDR.IPNet.String()) + } + } + + masterCount := len(mastersAsset.MachineFiles) + mastersSchedulable := false + for _, f := range manifestsAsset.Files() { + if f.Filename == manifests.SchedulerCfgFilename { + schedulerConfig := configv1.Scheduler{} + err = yaml.Unmarshal(f.Data, &schedulerConfig) + if err != nil { + return errors.Wrapf(err, 
"failed to unmarshall %s", manifests.SchedulerCfgFilename) + } + mastersSchedulable = schedulerConfig.Spec.MastersSchedulable + break + } + } + + data, err := tfvars.TFVars( + clusterID.InfraID, + installConfig.Config.ClusterDomain(), + installConfig.Config.BaseDomain, + machineV4CIDRs, + machineV6CIDRs, + useIPv4, + useIPv6, + bootstrapIgn, + masterIgn, + masterCount, + mastersSchedulable, + ) + if err != nil { + return errors.Wrap(err, "failed to get Terraform variables") + } + t.FileList = []*asset.File{ + { + Filename: TfVarsFileName, + Data: data, + }, + } + + if masterCount == 0 { + return errors.Errorf("master slice cannot be empty") + } + + switch platform { + case aws.Name: + var vpc string + var privateSubnets []string + var publicSubnets []string + + if len(installConfig.Config.Platform.AWS.Subnets) > 0 { + subnets, err := installConfig.AWS.PrivateSubnets(ctx) + if err != nil { + return err + } + + for id := range subnets { + privateSubnets = append(privateSubnets, id) + } + + subnets, err = installConfig.AWS.PublicSubnets(ctx) + if err != nil { + return err + } + + for id := range subnets { + publicSubnets = append(publicSubnets, id) + } + + vpc, err = installConfig.AWS.VPC(ctx) + if err != nil { + return err + } + } + + sess, err := installConfig.AWS.Session(ctx) + if err != nil { + return err + } + object := "bootstrap.ign" + bucket := fmt.Sprintf("%s-bootstrap", clusterID.InfraID) + url, err := awsconfig.PresignedS3URL(sess, installConfig.Config.Platform.AWS.Region, bucket, object) + if err != nil { + return err + } + masters, err := mastersAsset.Machines() + if err != nil { + return err + } + masterConfigs := make([]*machinev1beta1.AWSMachineProviderConfig, len(masters)) + for i, m := range masters { + masterConfigs[i] = m.Spec.ProviderSpec.Value.Object.(*machinev1beta1.AWSMachineProviderConfig) + } + workers, err := workersAsset.MachineSets() + if err != nil { + return err + } + workerConfigs := make([]*machinev1beta1.AWSMachineProviderConfig, 
len(workers)) + for i, m := range workers { + workerConfigs[i] = m.Spec.Template.Spec.ProviderSpec.Value.Object.(*machinev1beta1.AWSMachineProviderConfig) + } + osImage := strings.SplitN(string(*rhcosImage), ",", 2) + osImageID := osImage[0] + osImageRegion := installConfig.Config.AWS.Region + if len(osImage) == 2 { + osImageRegion = osImage[1] + } + + workerIAMRoleName := "" + if mp := installConfig.Config.WorkerMachinePool(); mp != nil { + awsMP := &aws.MachinePool{} + awsMP.Set(installConfig.Config.AWS.DefaultMachinePlatform) + awsMP.Set(mp.Platform.AWS) + workerIAMRoleName = awsMP.IAMRole + } + + var securityGroups []string + if mp := installConfig.Config.AWS.DefaultMachinePlatform; mp != nil { + securityGroups = mp.AdditionalSecurityGroupIDs + } + masterIAMRoleName := "" + if mp := installConfig.Config.ControlPlane; mp != nil { + awsMP := &aws.MachinePool{} + awsMP.Set(installConfig.Config.AWS.DefaultMachinePlatform) + awsMP.Set(mp.Platform.AWS) + masterIAMRoleName = awsMP.IAMRole + if len(awsMP.AdditionalSecurityGroupIDs) > 0 { + securityGroups = awsMP.AdditionalSecurityGroupIDs + } + } + + // AWS Zones is used to determine which route table the edge zone will be associated. 
+ allZones, err := installConfig.AWS.AllZones(ctx) + if err != nil { + return err + } + + data, err := awstfvars.TFVars(awstfvars.TFVarsSources{ + VPC: vpc, + PrivateSubnets: privateSubnets, + PublicSubnets: publicSubnets, + AvailabilityZones: allZones, + InternalZone: installConfig.Config.AWS.HostedZone, + InternalZoneRole: installConfig.Config.AWS.HostedZoneRole, + Services: installConfig.Config.AWS.ServiceEndpoints, + Publish: installConfig.Config.Publish, + MasterConfigs: masterConfigs, + WorkerConfigs: workerConfigs, + AMIID: osImageID, + AMIRegion: osImageRegion, + IgnitionBucket: bucket, + IgnitionPresignedURL: url, + AdditionalTrustBundle: installConfig.Config.AdditionalTrustBundle, + MasterIAMRoleName: masterIAMRoleName, + WorkerIAMRoleName: workerIAMRoleName, + Architecture: installConfig.Config.ControlPlane.Architecture, + Proxy: installConfig.Config.Proxy, + PreserveBootstrapIgnition: installConfig.Config.AWS.PreserveBootstrapIgnition, + MasterSecurityGroups: securityGroups, + }) + if err != nil { + return errors.Wrapf(err, "failed to get %s Terraform variables", platform) + } + t.FileList = append(t.FileList, &asset.File{ + Filename: TfPlatformVarsFileName, + Data: data, + }) + case azure.Name: + session, err := installConfig.Azure.Session() + if err != nil { + return err + } + + auth := azuretfvars.Auth{ + SubscriptionID: session.Credentials.SubscriptionID, + ClientID: session.Credentials.ClientID, + ClientSecret: session.Credentials.ClientSecret, + TenantID: session.Credentials.TenantID, + ClientCertificatePath: session.Credentials.ClientCertificatePath, + ClientCertificatePassword: session.Credentials.ClientCertificatePassword, + UseMSI: session.AuthType == aztypes.ManagedIdentityAuth, + } + masters, err := mastersAsset.Machines() + if err != nil { + return err + } + masterConfigs := make([]*machinev1beta1.AzureMachineProviderSpec, len(masters)) + for i, m := range masters { + masterConfigs[i] = 
m.Spec.ProviderSpec.Value.Object.(*machinev1beta1.AzureMachineProviderSpec) + } + workers, err := workersAsset.MachineSets() + if err != nil { + return err + } + workerConfigs := make([]*machinev1beta1.AzureMachineProviderSpec, len(workers)) + for i, w := range workers { + workerConfigs[i] = w.Spec.Template.Spec.ProviderSpec.Value.Object.(*machinev1beta1.AzureMachineProviderSpec) + } + client := aztypes.NewClient(session) + hyperVGeneration, err := client.GetHyperVGenerationVersion(context.TODO(), masterConfigs[0].VMSize, masterConfigs[0].Location, "") + if err != nil { + return err + } + + preexistingnetwork := installConfig.Config.Azure.VirtualNetwork != "" + + var bootstrapIgnStub, bootstrapIgnURLPlaceholder string + if installConfig.Azure.CloudName == azure.StackCloud { + // Due to the SAS created in Terraform to limit access to bootstrap ignition, we cannot know the URL in advance. + // Instead, we will pass a placeholder string in the ignition to be replaced in TF once the value is known. 
+ bootstrapIgnURLPlaceholder = "BOOTSTRAP_IGNITION_URL_PLACEHOLDER" + shim, err := bootstrap.GenerateIgnitionShimWithCertBundleAndProxy(bootstrapIgnURLPlaceholder, installConfig.Config.AdditionalTrustBundle, installConfig.Config.Proxy) + if err != nil { + return errors.Wrap(err, "failed to create stub Ignition config for bootstrap") + } + bootstrapIgnStub = string(shim) + } + + data, err := azuretfvars.TFVars( + azuretfvars.TFVarsSources{ + Auth: auth, + CloudName: installConfig.Config.Azure.CloudName, + ARMEndpoint: installConfig.Config.Azure.ARMEndpoint, + ResourceGroupName: installConfig.Config.Azure.ResourceGroupName, + BaseDomainResourceGroupName: installConfig.Config.Azure.BaseDomainResourceGroupName, + MasterConfigs: masterConfigs, + WorkerConfigs: workerConfigs, + ImageURL: string(*rhcosImage), + ImageRelease: rhcosRelease.GetAzureReleaseVersion(), + PreexistingNetwork: preexistingnetwork, + Publish: installConfig.Config.Publish, + OutboundType: installConfig.Config.Azure.OutboundType, + BootstrapIgnStub: bootstrapIgnStub, + BootstrapIgnitionURLPlaceholder: bootstrapIgnURLPlaceholder, + HyperVGeneration: hyperVGeneration, + VMArchitecture: installConfig.Config.ControlPlane.Architecture, + InfrastructureName: clusterID.InfraID, + }, + ) + if err != nil { + return errors.Wrapf(err, "failed to get %s Terraform variables", platform) + } + t.FileList = append(t.FileList, &asset.File{ + Filename: TfPlatformVarsFileName, + Data: data, + }) + case gcp.Name: + sess, err := gcpconfig.GetSession(ctx) + if err != nil { + return err + } + + auth := gcptfvars.Auth{ + ProjectID: installConfig.Config.GCP.ProjectID, + NetworkProjectID: installConfig.Config.GCP.NetworkProjectID, + ServiceAccount: string(sess.Credentials.JSON), + } + + client, err := gcpconfig.NewClient(context.Background()) + if err != nil { + return err + } + + // In the case of a shared vpn, the firewall rules should only be created if the user has permissions to do so + createFirewallRules := true + if 
installConfig.Config.GCP.NetworkProjectID != "" { + permissions, err := client.GetProjectPermissions(context.Background(), installConfig.Config.GCP.NetworkProjectID, []string{ + GCPFirewallPermission, + }) + if err != nil { + return err + } + createFirewallRules = permissions.Has(GCPFirewallPermission) + } + + masters, err := mastersAsset.Machines() + if err != nil { + return err + } + masterConfigs := make([]*machinev1beta1.GCPMachineProviderSpec, len(masters)) + for i, m := range masters { + masterConfigs[i] = m.Spec.ProviderSpec.Value.Object.(*machinev1beta1.GCPMachineProviderSpec) + } + workers, err := workersAsset.MachineSets() + if err != nil { + return err + } + workerConfigs := make([]*machinev1beta1.GCPMachineProviderSpec, len(workers)) + for i, w := range workers { + workerConfigs[i] = w.Spec.Template.Spec.ProviderSpec.Value.Object.(*machinev1beta1.GCPMachineProviderSpec) + } + preexistingnetwork := installConfig.Config.GCP.Network != "" + + // Search the project for a dns zone with the specified base domain. 
+ publicZoneName := "" + if installConfig.Config.Publish == types.ExternalPublishingStrategy { + publicZone, err := client.GetDNSZone(ctx, installConfig.Config.GCP.ProjectID, installConfig.Config.BaseDomain, true) + if err != nil { + return errors.Wrapf(err, "failed to get GCP public zone") + } + publicZoneName = publicZone.Name + } + + privateZoneName := "" + if installConfig.Config.GCP.NetworkProjectID != "" { + privateZone, err := client.GetDNSZone(ctx, installConfig.Config.GCP.ProjectID, installConfig.Config.ClusterDomain(), false) + if err != nil { + return errors.Wrapf(err, "failed to get GCP private zone") + } + if privateZone != nil { + privateZoneName = privateZone.Name + } + } + + archName := coreosarch.RpmArch(string(installConfig.Config.ControlPlane.Architecture)) + st, err := rhcospkg.FetchCoreOSBuild(ctx) + if err != nil { + return err + } + streamArch, err := st.GetArchitecture(archName) + if err != nil { + return err + } + + img := streamArch.Images.Gcp + if img == nil { + return fmt.Errorf("%s: No GCP build found", st.FormatPrefix(archName)) + } + + data, err := gcptfvars.TFVars( + gcptfvars.TFVarsSources{ + Auth: auth, + MasterConfigs: masterConfigs, + WorkerConfigs: workerConfigs, + CreateFirewallRules: createFirewallRules, + PreexistingNetwork: preexistingnetwork, + PublicZoneName: publicZoneName, + PrivateZoneName: privateZoneName, + PublishStrategy: installConfig.Config.Publish, + InfrastructureName: clusterID.InfraID, + }, + ) + if err != nil { + return errors.Wrapf(err, "failed to get %s Terraform variables", platform) + } + t.FileList = append(t.FileList, &asset.File{ + Filename: TfPlatformVarsFileName, + Data: data, + }) + case ibmcloud.Name: + client, err := installConfig.IBMCloud.Client() + if err != nil { + return err + } + auth := ibmcloudtfvars.Auth{ + APIKey: client.GetAPIKey(), + } + + // Get master and worker machine info + masters, err := mastersAsset.Machines() + if err != nil { + return err + } + masterConfigs := 
make([]*ibmcloudprovider.IBMCloudMachineProviderSpec, len(masters)) + for i, m := range masters { + masterConfigs[i] = m.Spec.ProviderSpec.Value.Object.(*ibmcloudprovider.IBMCloudMachineProviderSpec) + } + workers, err := workersAsset.MachineSets() + if err != nil { + return err + } + workerConfigs := make([]*ibmcloudprovider.IBMCloudMachineProviderSpec, len(workers)) + for i, w := range workers { + workerConfigs[i] = w.Spec.Template.Spec.ProviderSpec.Value.Object.(*ibmcloudprovider.IBMCloudMachineProviderSpec) + } + + // Set existing network (boolean of whether one is being used) + preexistingVPC := installConfig.Config.Platform.IBMCloud.GetVPCName() != "" + + // Set machine pool info + var masterMachinePool ibmcloud.MachinePool + var workerMachinePool ibmcloud.MachinePool + if installConfig.Config.Platform.IBMCloud.DefaultMachinePlatform != nil { + masterMachinePool.Set(installConfig.Config.Platform.IBMCloud.DefaultMachinePlatform) + workerMachinePool.Set(installConfig.Config.Platform.IBMCloud.DefaultMachinePlatform) + } + if installConfig.Config.ControlPlane.Platform.IBMCloud != nil { + masterMachinePool.Set(installConfig.Config.ControlPlane.Platform.IBMCloud) + } + if worker := installConfig.Config.WorkerMachinePool(); worker != nil { + workerMachinePool.Set(worker.Platform.IBMCloud) + } + + // Get master dedicated host info + var masterDedicatedHosts []ibmcloudtfvars.DedicatedHost + for _, dhost := range masterMachinePool.DedicatedHosts { + if dhost.Name != "" { + dh, err := client.GetDedicatedHostByName(ctx, dhost.Name, installConfig.Config.Platform.IBMCloud.Region) + if err != nil { + return err + } + masterDedicatedHosts = append(masterDedicatedHosts, ibmcloudtfvars.DedicatedHost{ + ID: *dh.ID, + }) + } else { + masterDedicatedHosts = append(masterDedicatedHosts, ibmcloudtfvars.DedicatedHost{ + Profile: dhost.Profile, + }) + } + } + + // Get worker dedicated host info + var workerDedicatedHosts []ibmcloudtfvars.DedicatedHost + for _, dhost := range 
workerMachinePool.DedicatedHosts { + if dhost.Name != "" { + dh, err := client.GetDedicatedHostByName(ctx, dhost.Name, installConfig.Config.Platform.IBMCloud.Region) + if err != nil { + return err + } + workerDedicatedHosts = append(workerDedicatedHosts, ibmcloudtfvars.DedicatedHost{ + ID: *dh.ID, + }) + } else { + workerDedicatedHosts = append(workerDedicatedHosts, ibmcloudtfvars.DedicatedHost{ + Profile: dhost.Profile, + }) + } + } + + var cisCRN, dnsID string + vpcPermitted := false + + if installConfig.Config.Publish == types.InternalPublishingStrategy { + // Get DNSInstanceCRN from InstallConfig metadata + dnsInstance, err := installConfig.IBMCloud.DNSInstance(ctx) + if err != nil { + return err + } + if dnsInstance != nil { + dnsID = dnsInstance.ID + } + // If the VPC already exists and the cluster is Private, check if the VPC is already a Permitted Network on DNS Instance + if preexistingVPC { + vpcPermitted, err = installConfig.IBMCloud.IsVPCPermittedNetwork(ctx, installConfig.Config.Platform.IBMCloud.VPCName) + if err != nil { + return err + } + } + } else { + // Get CISInstanceCRN from InstallConfig metadata + cisCRN, err = installConfig.IBMCloud.CISInstanceCRN(ctx) + if err != nil { + return err + } + } + + data, err = ibmcloudtfvars.TFVars( + ibmcloudtfvars.TFVarsSources{ + Auth: auth, + CISInstanceCRN: cisCRN, + DNSInstanceID: dnsID, + ImageURL: string(*rhcosImage), + MasterConfigs: masterConfigs, + MasterDedicatedHosts: masterDedicatedHosts, + NetworkResourceGroupName: installConfig.Config.Platform.IBMCloud.NetworkResourceGroupName, + PreexistingVPC: preexistingVPC, + PublishStrategy: installConfig.Config.Publish, + ResourceGroupName: installConfig.Config.Platform.IBMCloud.ResourceGroupName, + VPCPermitted: vpcPermitted, + WorkerConfigs: workerConfigs, + WorkerDedicatedHosts: workerDedicatedHosts, + }, + ) + if err != nil { + return errors.Wrapf(err, "failed to get %s Terraform variables", platform) + } + t.FileList = append(t.FileList, &asset.File{ + 
Filename: TfPlatformVarsFileName, + Data: data, + }) + case libvirt.Name: + masters, err := mastersAsset.Machines() + if err != nil { + return err + } + // convert options list to a list of mappings which can be consumed by terraform + var dnsmasqoptions []map[string]string + for _, option := range installConfig.Config.Platform.Libvirt.Network.DnsmasqOptions { + dnsmasqoptions = append(dnsmasqoptions, + map[string]string{ + "option_name": option.Name, + "option_value": option.Value}) + } + + data, err = libvirttfvars.TFVars( + libvirttfvars.TFVarsSources{ + MasterConfig: masters[0].Spec.ProviderSpec.Value.Object.(*libvirtprovider.LibvirtMachineProviderConfig), + OsImage: string(*rhcosImage), + MachineCIDR: &installConfig.Config.Networking.MachineNetwork[0].CIDR.IPNet, + Bridge: installConfig.Config.Platform.Libvirt.Network.IfName, + MasterCount: masterCount, + Architecture: installConfig.Config.ControlPlane.Architecture, + DnsmasqOptions: dnsmasqoptions, + }, + ) + if err != nil { + return errors.Wrapf(err, "failed to get %s Terraform variables", platform) + } + t.FileList = append(t.FileList, &asset.File{ + Filename: TfPlatformVarsFileName, + Data: data, + }) + case openstack.Name: + data, err = openstacktfvars.TFVars( + installConfig, + mastersAsset, + workersAsset, + string(*rhcosImage), + clusterID, + bootstrapIgn, + ) + if err != nil { + return errors.Wrapf(err, "failed to get %s Terraform variables", platform) + } + t.FileList = append(t.FileList, &asset.File{ + Filename: TfPlatformVarsFileName, + Data: data, + }) + case baremetal.Name: + var imageCacheIP string + if installConfig.Config.Platform.BareMetal.ProvisioningNetwork == baremetal.DisabledProvisioningNetwork { + imageCacheIP = installConfig.Config.Platform.BareMetal.APIVIPs[0] + } else { + imageCacheIP = installConfig.Config.Platform.BareMetal.BootstrapProvisioningIP + } + + data, err = baremetaltfvars.TFVars( + *installConfig.Config.ControlPlane.Replicas, + 
installConfig.Config.Platform.BareMetal.LibvirtURI, + installConfig.Config.Platform.BareMetal.APIVIPs, + imageCacheIP, + string(*rhcosBootstrapImage), + installConfig.Config.Platform.BareMetal.ExternalBridge, + installConfig.Config.Platform.BareMetal.ExternalMACAddress, + installConfig.Config.Platform.BareMetal.ProvisioningBridge, + installConfig.Config.Platform.BareMetal.ProvisioningMACAddress, + installConfig.Config.Platform.BareMetal.Hosts, + mastersAsset.HostFiles, + string(*rhcosImage), + ironicCreds.Username, + ironicCreds.Password, + masterIgn, + ) + if err != nil { + return errors.Wrapf(err, "failed to get %s Terraform variables", platform) + } + t.FileList = append(t.FileList, &asset.File{ + Filename: TfPlatformVarsFileName, + Data: data, + }) + case ovirt.Name: + config, err := ovirtconfig.NewConfig() + if err != nil { + return err + } + con, err := ovirtconfig.NewConnection() + if err != nil { + return err + } + defer con.Close() + + if installConfig.Config.Platform.Ovirt.VNICProfileID == "" { + profiles, err := ovirtconfig.FetchVNICProfileByClusterNetwork( + con, + installConfig.Config.Platform.Ovirt.ClusterID, + installConfig.Config.Platform.Ovirt.NetworkName) + if err != nil { + return errors.Wrapf(err, "failed to compute values for Engine platform") + } + if len(profiles) != 1 { + return fmt.Errorf("failed to compute values for Engine platform, "+ + "there are multiple vNIC profiles. 
found %v vNIC profiles for network %s", + len(profiles), installConfig.Config.Platform.Ovirt.NetworkName) + } + installConfig.Config.Platform.Ovirt.VNICProfileID = profiles[0].MustId() + } + + masters, err := mastersAsset.Machines() + if err != nil { + return err + } + + data, err := ovirttfvars.TFVars( + ovirttfvars.Auth{ + URL: config.URL, + Username: config.Username, + Password: config.Password, + Cafile: config.CAFile, + Cabundle: config.CABundle, + Insecure: config.Insecure, + }, + installConfig.Config.Platform.Ovirt.ClusterID, + installConfig.Config.Platform.Ovirt.StorageDomainID, + installConfig.Config.Platform.Ovirt.NetworkName, + installConfig.Config.Platform.Ovirt.VNICProfileID, + string(*rhcosImage), + clusterID.InfraID, + masters[0].Spec.ProviderSpec.Value.Object.(*ovirtprovider.OvirtMachineProviderSpec), + installConfig.Config.Platform.Ovirt.AffinityGroups, + ) + if err != nil { + return errors.Wrapf(err, "failed to get %s Terraform variables", platform) + } + t.FileList = append(t.FileList, &asset.File{ + Filename: TfPlatformVarsFileName, + Data: data, + }) + case powervs.Name: + APIKey, err := installConfig.PowerVS.APIKey(ctx) + if err != nil { + return err + } + + masters, err := mastersAsset.Machines() + if err != nil { + return err + } + + var ( + cisCRN, dnsCRN, vpcGatewayName, vpcSubnet string + vpcPermitted, vpcGatewayAttached bool + ) + if len(installConfig.Config.PowerVS.VPCSubnets) > 0 { + vpcSubnet = installConfig.Config.PowerVS.VPCSubnets[0] + } + switch installConfig.Config.Publish { + case types.InternalPublishingStrategy: + // Get DNSInstanceCRN from InstallConfig metadata + dnsCRN, err = installConfig.PowerVS.DNSInstanceCRN(ctx) + if err != nil { + return err + } + + // If the VPC already exists and the cluster is Private, check if the VPC is already a Permitted Network on DNS Instance + if installConfig.Config.PowerVS.VPCName != "" { + vpcPermitted, err = installConfig.PowerVS.IsVPCPermittedNetwork(ctx, 
installConfig.Config.Platform.PowerVS.VPCName, installConfig.Config.BaseDomain) + if err != nil { + return err + } + vpcGatewayName, vpcGatewayAttached, err = installConfig.PowerVS.GetExistingVPCGateway(ctx, installConfig.Config.Platform.PowerVS.VPCName, vpcSubnet) + if err != nil { + return err + } + } + case types.ExternalPublishingStrategy: + // Get CISInstanceCRN from InstallConfig metadata + cisCRN, err = installConfig.PowerVS.CISInstanceCRN(ctx) + if err != nil { + return err + } + default: + return errors.New("unknown publishing strategy") + } + + masterConfigs := make([]*machinev1.PowerVSMachineProviderConfig, len(masters)) + for i, m := range masters { + masterConfigs[i] = m.Spec.ProviderSpec.Value.Object.(*machinev1.PowerVSMachineProviderConfig) + } + + client, err := powervsconfig.NewClient() + if err != nil { + return err + } + var ( + vpcRegion, vpcZone string + ) + vpcName := installConfig.Config.PowerVS.VPCName + if vpcName != "" { + var vpc *vpcv1.VPC + vpc, err = client.GetVPCByName(ctx, vpcName) + if err != nil { + return err + } + var crnElems = strings.SplitN(*vpc.CRN, ":", 8) + vpcRegion = crnElems[5] + } else { + specified := installConfig.Config.PowerVS.VPCRegion + if specified != "" { + if powervs.ValidateVPCRegion(specified) { + vpcRegion = specified + } else { + return errors.New("unknown VPC region") + } + } else if vpcRegion, err = powervs.VPCRegionForPowerVSRegion(installConfig.Config.PowerVS.Region); err != nil { + return err + } + } + if vpcSubnet != "" { + var sn *vpcv1.Subnet + sn, err = client.GetSubnetByName(ctx, vpcSubnet, vpcRegion) + if err != nil { + return err + } + vpcZone = *sn.Zone.Name + } else { + rand.Seed(time.Now().UnixNano()) + vpcZone = fmt.Sprintf("%s-%d", vpcRegion, rand.Intn(2)+1) //nolint:gosec // we don't need a crypto secure number + } + + err = powervsconfig.ValidatePERAvailability(client, installConfig.Config) + transitGatewayEnabled := err == nil + cpStanza := installConfig.Config.ControlPlane + if cpStanza 
== nil || cpStanza.Platform.PowerVS == nil || cpStanza.Platform.PowerVS.SysType == "" { + sysTypes, err := powervs.AvailableSysTypes(installConfig.Config.PowerVS.Region, installConfig.Config.PowerVS.Zone) + if err != nil { + return err + } + for i := range masters { + masterConfigs[i].SystemType = sysTypes[0] + } + } + + serviceInstanceCRN, err := client.ServiceInstanceIDToCRN(ctx, installConfig.Config.PowerVS.ServiceInstanceID) + if err != nil { + return err + } + if serviceInstanceCRN == "" { + return fmt.Errorf("the service instance CRN is empty for the given ID") + } + + osImage := strings.SplitN(string(*rhcosImage), "/", 2) + data, err = powervstfvars.TFVars( + powervstfvars.TFVarsSources{ + MasterConfigs: masterConfigs, + Region: installConfig.Config.Platform.PowerVS.Region, + Zone: installConfig.Config.Platform.PowerVS.Zone, + APIKey: APIKey, + SSHKey: installConfig.Config.SSHKey, + PowerVSResourceGroup: installConfig.Config.PowerVS.PowerVSResourceGroup, + ImageBucketName: osImage[0], + ImageBucketFileName: osImage[1], + NetworkName: installConfig.Config.PowerVS.PVSNetworkName, + VPCRegion: vpcRegion, + VPCZone: vpcZone, + VPCName: vpcName, + VPCSubnetName: vpcSubnet, + VPCPermitted: vpcPermitted, + VPCGatewayName: vpcGatewayName, + VPCGatewayAttached: vpcGatewayAttached, + CloudConnectionName: installConfig.Config.PowerVS.CloudConnectionName, + CISInstanceCRN: cisCRN, + DNSInstanceCRN: dnsCRN, + PublishStrategy: installConfig.Config.Publish, + EnableSNAT: len(installConfig.Config.DeprecatedImageContentSources) == 0 && len(installConfig.Config.ImageDigestSources) == 0, + TransitGatewayEnabled: transitGatewayEnabled, + ServiceInstanceCRN: serviceInstanceCRN, + }, + ) + if err != nil { + return errors.Wrapf(err, "failed to get %s Terraform variables", platform) + } + t.FileList = append(t.FileList, &asset.File{ + Filename: TfPlatformVarsFileName, + Data: data, + }) + + case vsphere.Name: + networkFailureDomainMap := make(map[string]string) + ctx, cancel := 
context.WithTimeout(context.TODO(), 60*time.Second) + defer cancel() + + vim25Client, _, cleanup, err := vsphereconfig.CreateVSphereClients(context.TODO(), + installConfig.Config.VSphere.VCenters[0].Server, + installConfig.Config.VSphere.VCenters[0].Username, + installConfig.Config.VSphere.VCenters[0].Password) + if err != nil { + return errors.Wrapf(err, "unable to connect to vCenter %s. Ensure provided information is correct and client certs have been added to system trust", installConfig.Config.VSphere.VCenters[0].Server) + } + defer cleanup() + + finder := vsphereconfig.NewFinder(vim25Client) + + controlPlanes, err := mastersAsset.Machines() + if err != nil { + return err + } + controlPlaneConfigs := make([]*machinev1beta1.VSphereMachineProviderSpec, len(controlPlanes)) + for i, c := range controlPlanes { + var clusterMo mo.ClusterComputeResource + controlPlaneConfigs[i] = c.Spec.ProviderSpec.Value.Object.(*machinev1beta1.VSphereMachineProviderSpec) + + rpObj, err := finder.ResourcePool(ctx, controlPlaneConfigs[i].Workspace.ResourcePool) + if err != nil { + return err + } + + clusterRef, err := rpObj.Owner(ctx) + if err != nil { + return err + } + + // When using finder.ObjectReference the InventoryPath is defined + // NewClusterComputeResource I don't believe assigns that value. 
+ clusterObjRef, err := finder.ObjectReference(ctx, clusterRef.Reference()) + if err != nil { + return err + } + + clusterObj, ok := clusterObjRef.(*object.ClusterComputeResource) + if !ok { + return errors.New("unable to convert cluster object reference to object cluster compute resource") + } + err = clusterObj.Properties(ctx, clusterRef.Reference(), []string{"name", "summary"}, &clusterMo) + if err != nil { + return err + } + + networkPath := path.Join(clusterObj.InventoryPath, controlPlaneConfigs[i].Network.Devices[0].NetworkName) + netObj, err := finder.Network(ctx, networkPath) + if err != nil { + return err + } + + controlPlaneConfigs[i].Network.Devices[0].NetworkName = netObj.Reference().Value + } + + for _, fd := range installConfig.Config.VSphere.FailureDomains { + // Must use the Managed Object ID for a port group (e.g. dvportgroup-5258) + // instead of the name since port group names aren't always unique in vSphere. + // https://bugzilla.redhat.com/show_bug.cgi?id=1918005 + + networkPath := path.Join(fd.Topology.ComputeCluster, fd.Topology.Networks[0]) + netObj, err := finder.Network(ctx, networkPath) + if err != nil { + return errors.Wrap(err, "failed to get vSphere network ID") + } + + networkFailureDomainMap[fd.Name] = netObj.Reference().Value + } + + data, err = vspheretfvars.TFVars( + vspheretfvars.TFVarsSources{ + ControlPlaneConfigs: controlPlaneConfigs, + ImageURL: string(*rhcosImage), + DiskType: installConfig.Config.Platform.VSphere.DiskType, + NetworksInFailureDomain: networkFailureDomainMap, + InfraID: clusterID.InfraID, + InstallConfig: installConfig, + ControlPlaneMachines: controlPlanes, + }, + ) + if err != nil { + return errors.Wrapf(err, "failed to get %s Terraform variables", platform) + } + t.FileList = append(t.FileList, &asset.File{ + Filename: TfPlatformVarsFileName, + Data: data, + }) + + case alibabacloud.Name: + client, err := installConfig.AlibabaCloud.Client() + if err != nil { + return errors.Wrapf(err, "failed to create new 
client use region %s", installConfig.Config.Platform.AlibabaCloud.Region) + } + bucket := fmt.Sprintf("%s-bootstrap", clusterID.InfraID) + object := "bootstrap.ign" + signURL, err := client.GetOSSObjectSignURL(bucket, object) + if err != nil { + return errors.Wrapf(err, "failed to get a presigned URL for OSS object %s", object) + } + + auth := alibabacloudtfvars.Auth{ + AccessKey: client.AccessKeyID, + SecretKey: client.AccessKeySecret, + } + + masters, err := mastersAsset.Machines() + if err != nil { + return errors.Wrapf(err, "failed to get master machine info") + } + masterConfigs := make([]*machinev1.AlibabaCloudMachineProviderConfig, len(masters)) + for i, m := range masters { + masterConfigs[i] = m.Spec.ProviderSpec.Value.Object.(*machinev1.AlibabaCloudMachineProviderConfig) + } + workers, err := workersAsset.MachineSets() + if err != nil { + return errors.Wrapf(err, "failed to get worker machine info") + } + workerConfigs := make([]*machinev1.AlibabaCloudMachineProviderConfig, len(workers)) + for i, w := range workers { + workerConfigs[i] = w.Spec.Template.Spec.ProviderSpec.Value.Object.(*machinev1.AlibabaCloudMachineProviderConfig) + } + + natGatewayZones, err := client.ListEnhanhcedNatGatewayAvailableZones() + if err != nil { + return errors.Wrapf(err, "failed to list available zones for NAT gateway") + } + natGatewayZoneID := natGatewayZones.Zones[0].ZoneId + + vswitchIDs := []string{} + if len(installConfig.Config.AlibabaCloud.VSwitchIDs) > 0 { + vswitchIDs = installConfig.Config.AlibabaCloud.VSwitchIDs + } + data, err := alibabacloudtfvars.TFVars( + alibabacloudtfvars.TFVarsSources{ + Auth: auth, + VpcID: installConfig.Config.AlibabaCloud.VpcID, + VSwitchIDs: vswitchIDs, + PrivateZoneID: installConfig.Config.AlibabaCloud.PrivateZoneID, + ResourceGroupID: installConfig.Config.AlibabaCloud.ResourceGroupID, + BaseDomain: installConfig.Config.BaseDomain, + NatGatewayZoneID: natGatewayZoneID, + MasterConfigs: masterConfigs, + WorkerConfigs: workerConfigs, + 
IgnitionBucket: bucket, + IgnitionPresignedURL: signURL, + AdditionalTrustBundle: installConfig.Config.AdditionalTrustBundle, + Architecture: installConfig.Config.ControlPlane.Architecture, + Publish: installConfig.Config.Publish, + Proxy: installConfig.Config.Proxy, + }, + ) + if err != nil { + return errors.Wrapf(err, "failed to get %s Terraform variables", platform) + } + t.FileList = append(t.FileList, &asset.File{ + Filename: TfPlatformVarsFileName, + Data: data, + }) + case nutanix.Name: + if rhcosImage == nil { + return errors.New("unable to retrieve rhcos image") + } + controlPlanes, err := mastersAsset.Machines() + if err != nil { + return errors.Wrapf(err, "error getting control plane machines") + } + controlPlaneConfigs := make([]*machinev1.NutanixMachineProviderConfig, len(controlPlanes)) + for i, c := range controlPlanes { + controlPlaneConfigs[i] = c.Spec.ProviderSpec.Value.Object.(*machinev1.NutanixMachineProviderConfig) + } + + imgURI := string(*rhcosImage) + if installConfig.Config.Nutanix.ClusterOSImage != "" { + imgURI = installConfig.Config.Nutanix.ClusterOSImage + } + data, err = nutanixtfvars.TFVars( + nutanixtfvars.TFVarsSources{ + PrismCentralAddress: installConfig.Config.Nutanix.PrismCentral.Endpoint.Address, + Port: strconv.Itoa(int(installConfig.Config.Nutanix.PrismCentral.Endpoint.Port)), + Username: installConfig.Config.Nutanix.PrismCentral.Username, + Password: installConfig.Config.Nutanix.PrismCentral.Password, + ImageURI: imgURI, + BootstrapIgnitionData: bootstrapIgn, + ClusterID: clusterID.InfraID, + ControlPlaneConfigs: controlPlaneConfigs, + }, + ) + if err != nil { + return errors.Wrapf(err, "failed to get %s Terraform variables", platform) + } + t.FileList = append(t.FileList, &asset.File{ + Filename: TfPlatformVarsFileName, + Data: data, + }) + default: + logrus.Warnf("unrecognized platform %s", platform) + } + + return nil +} + +// Files returns the files generated by the asset. 
+func (t *TerraformVariables) Files() []*asset.File { + return t.FileList +} + +// Load reads the terraform.tfvars from disk. +func (t *TerraformVariables) Load(f asset.FileFetcher) (found bool, err error) { + file, err := f.FetchByName(TfVarsFileName) + if err != nil { + if os.IsNotExist(err) { + return false, nil + } + return false, err + } + t.FileList = []*asset.File{file} + + switch file, err := f.FetchByName(TfPlatformVarsFileName); { + case err == nil: + t.FileList = append(t.FileList, file) + case !os.IsNotExist(err): + return false, err + } + + return true, nil +} + +// injectInstallInfo adds information about the installer and its invoker as a +// ConfigMap to the provided bootstrap Ignition config. +func injectInstallInfo(bootstrap []byte) (string, error) { + config := &igntypes.Config{} + if err := json.Unmarshal(bootstrap, &config); err != nil { + return "", errors.Wrap(err, "failed to unmarshal bootstrap Ignition config") + } + + cm, err := openshiftinstall.CreateInstallConfigMap("openshift-install") + if err != nil { + return "", errors.Wrap(err, "failed to generate openshift-install config") + } + + config.Storage.Files = append(config.Storage.Files, ignition.FileFromString("/opt/openshift/manifests/openshift-install.yaml", "root", 0644, cm)) + + ign, err := ignition.Marshal(config) + if err != nil { + return "", errors.Wrap(err, "failed to marshal bootstrap Ignition config") + } + + return string(ign), nil +} diff --git a/.history/pkg/asset/installconfig/platformprovisioncheck_20241219120242.go b/.history/pkg/asset/installconfig/platformprovisioncheck_20241219120242.go new file mode 100644 index 00000000000..58fe4a44e38 --- /dev/null +++ b/.history/pkg/asset/installconfig/platformprovisioncheck_20241219120242.go @@ -0,0 +1,206 @@ +package installconfig + +import ( + "context" + "errors" + "fmt" + + "k8s.io/apimachinery/pkg/util/sets" + + configv1 "github.com/openshift/api/config/v1" + "github.com/openshift/installer/pkg/asset" + alibabacloudconfig 
"github.com/openshift/installer/pkg/asset/installconfig/alibabacloud" + awsconfig "github.com/openshift/installer/pkg/asset/installconfig/aws" + azconfig "github.com/openshift/installer/pkg/asset/installconfig/azure" + bmconfig "github.com/openshift/installer/pkg/asset/installconfig/baremetal" + gcpconfig "github.com/openshift/installer/pkg/asset/installconfig/gcp" + ibmcloudconfig "github.com/openshift/installer/pkg/asset/installconfig/ibmcloud" + nutanixconfig "github.com/openshift/installer/pkg/asset/installconfig/nutanix" + osconfig "github.com/openshift/installer/pkg/asset/installconfig/openstack" + ovirtconfig "github.com/openshift/installer/pkg/asset/installconfig/ovirt" + powervsconfig "github.com/openshift/installer/pkg/asset/installconfig/powervs" + vsconfig "github.com/openshift/installer/pkg/asset/installconfig/vsphere" + "github.com/openshift/installer/pkg/types/alibabacloud" + "github.com/openshift/installer/pkg/types/aws" + "github.com/openshift/installer/pkg/types/azure" + "github.com/openshift/installer/pkg/types/baremetal" + "github.com/openshift/installer/pkg/types/external" + "github.com/openshift/installer/pkg/types/gcp" + "github.com/openshift/installer/pkg/types/ibmcloud" + "github.com/openshift/installer/pkg/types/libvirt" + "github.com/openshift/installer/pkg/types/none" + "github.com/openshift/installer/pkg/types/nutanix" + "github.com/openshift/installer/pkg/types/openstack" + "github.com/openshift/installer/pkg/types/ovirt" + "github.com/openshift/installer/pkg/types/powervs" + "github.com/openshift/installer/pkg/types/vsphere" +) + +// PlatformProvisionCheck is an asset that validates the install-config platform for +// any requirements specific for provisioning infrastructure. 
+type PlatformProvisionCheck struct { +} + +var _ asset.Asset = (*PlatformProvisionCheck)(nil) + +// Dependencies returns the dependencies for PlatformProvisionCheck +func (a *PlatformProvisionCheck) Dependencies() []asset.Asset { + return []asset.Asset{ + &InstallConfig{}, + } +} + +// Generate queries for input from the user. +func (a *PlatformProvisionCheck) Generate(dependencies asset.Parents) error { + ic := &InstallConfig{} + dependencies.Get(ic) + platform := ic.Config.Platform.Name() + + // IPI requires MachineAPI capability + enabledCaps := sets.NewString() + if ic.Config.Capabilities == nil || ic.Config.Capabilities.BaselineCapabilitySet == "" { + // when Capabilities and/or BaselineCapabilitySet is not specified, default is vCurrent + baseSet := configv1.ClusterVersionCapabilitySets[configv1.ClusterVersionCapabilitySetCurrent] + for _, cap := range baseSet { + enabledCaps.Insert(string(cap)) + } + } + if ic.Config.Capabilities != nil { + if ic.Config.Capabilities.BaselineCapabilitySet != "" { + baseSet := configv1.ClusterVersionCapabilitySets[ic.Config.Capabilities.BaselineCapabilitySet] + for _, cap := range baseSet { + enabledCaps.Insert(string(cap)) + } + } + if ic.Config.Capabilities.AdditionalEnabledCapabilities != nil { + for _, cap := range ic.Config.Capabilities.AdditionalEnabledCapabilities { + enabledCaps.Insert(string(cap)) + } + } + } + if !enabledCaps.Has(string(configv1.ClusterVersionCapabilityMachineAPI)) { + return errors.New("IPI requires MachineAPI capability") + } + + switch platform { + case aws.Name: + session, err := ic.AWS.Session(context.TODO()) + if err != nil { + return err + } + client := awsconfig.NewClient(session) + return awsconfig.ValidateForProvisioning(client, ic.Config, ic.AWS) + case azure.Name: + dnsConfig, err := ic.Azure.DNSConfig() + if err != nil { + return err + } + err = azconfig.ValidatePublicDNS(ic.Config, dnsConfig) + if err != nil { + return err + } + client, err := ic.Azure.Client() + if err != nil { + 
return err + } + return azconfig.ValidateForProvisioning(client, ic.Config) + case baremetal.Name: + err := bmconfig.ValidateBaremetalPlatformSet(ic.Config) + if err != nil { + return err + } + err = bmconfig.ValidateProvisioning(ic.Config) + if err != nil { + return err + } + err = bmconfig.ValidateStaticBootstrapNetworking(ic.Config) + if err != nil { + return err + } + case gcp.Name: + err := gcpconfig.ValidateForProvisioning(ic.Config) + if err != nil { + return err + } + case ibmcloud.Name: + client, err := ibmcloudconfig.NewClient() + if err != nil { + return err + } + err = ibmcloudconfig.ValidatePreExistingPublicDNS(client, ic.Config, ic.IBMCloud) + if err != nil { + return err + } + case openstack.Name: + err := osconfig.ValidateForProvisioning(ic.Config) + if err != nil { + return err + } + case vsphere.Name: + if err := vsconfig.ValidateForProvisioning(ic.Config); err != nil { + return err + } + case ovirt.Name: + err := ovirtconfig.ValidateForProvisioning(ic.Config) + if err != nil { + return err + } + case alibabacloud.Name: + client, err := ic.AlibabaCloud.Client() + if err != nil { + return err + } + err = alibabacloudconfig.ValidateForProvisioning(client, ic.Config, ic.AlibabaCloud) + if err != nil { + return err + } + case powervs.Name: + client, err := powervsconfig.NewClient() + if err != nil { + return err + } + + err = powervsconfig.ValidatePreExistingDNS(client, ic.Config, ic.PowerVS) + if err != nil { + return err + } + + err = powervsconfig.ValidateCustomVPCSetup(client, ic.Config) + if err != nil { + return err + } + + err = powervsconfig.ValidateResourceGroup(client, ic.Config) + if err != nil { + return err + } + + err = powervsconfig.ValidateSystemTypeForZone(client, ic.Config) + if err != nil { + return err + } + + err = powervsconfig.ValidateServiceInstance(client, ic.Config) + if err != nil { + return err + } + case external.Name, libvirt.Name, 
none.Name: + // no special provisioning requirements to check + case nutanix.Name: + err := nutanixconfig.ValidateForProvisioning(ic.Config) + if err != nil { + return err + } + default: + return fmt.Errorf("unknown platform type %q", platform) + } + return nil +} + +// Name returns the human-friendly name of the asset. +func (a *PlatformProvisionCheck) Name() string { + return "Platform Provisioning Check" +} diff --git a/.history/pkg/asset/installconfig/platformprovisioncheck_20241219121357.go b/.history/pkg/asset/installconfig/platformprovisioncheck_20241219121357.go new file mode 100644 index 00000000000..2e2b5510f28 --- /dev/null +++ b/.history/pkg/asset/installconfig/platformprovisioncheck_20241219121357.go @@ -0,0 +1,203 @@ +package installconfig + +import ( + "context" + "errors" + "fmt" + + "k8s.io/apimachinery/pkg/util/sets" + + configv1 "github.com/openshift/api/config/v1" + "github.com/openshift/installer/pkg/asset" + alibabacloudconfig "github.com/openshift/installer/pkg/asset/installconfig/alibabacloud" + awsconfig "github.com/openshift/installer/pkg/asset/installconfig/aws" + azconfig "github.com/openshift/installer/pkg/asset/installconfig/azure" + bmconfig "github.com/openshift/installer/pkg/asset/installconfig/baremetal" + gcpconfig "github.com/openshift/installer/pkg/asset/installconfig/gcp" + ibmcloudconfig "github.com/openshift/installer/pkg/asset/installconfig/ibmcloud" + nutanixconfig "github.com/openshift/installer/pkg/asset/installconfig/nutanix" + osconfig "github.com/openshift/installer/pkg/asset/installconfig/openstack" + ovirtconfig "github.com/openshift/installer/pkg/asset/installconfig/ovirt" + powervsconfig "github.com/openshift/installer/pkg/asset/installconfig/powervs" + vsconfig "github.com/openshift/installer/pkg/asset/installconfig/vsphere" + "github.com/openshift/installer/pkg/types/alibabacloud" + "github.com/openshift/installer/pkg/types/aws" + "github.com/openshift/installer/pkg/types/azure" + 
"github.com/openshift/installer/pkg/types/baremetal" + "github.com/openshift/installer/pkg/types/external" + "github.com/openshift/installer/pkg/types/gcp" + "github.com/openshift/installer/pkg/types/ibmcloud" + "github.com/openshift/installer/pkg/types/libvirt" + "github.com/openshift/installer/pkg/types/none" + "github.com/openshift/installer/pkg/types/nutanix" + "github.com/openshift/installer/pkg/types/openstack" + "github.com/openshift/installer/pkg/types/ovirt" + "github.com/openshift/installer/pkg/types/powervs" + "github.com/openshift/installer/pkg/types/vsphere" +) + +// PlatformProvisionCheck is an asset that validates the install-config platform for +// any requirements specific for provisioning infrastructure. +type PlatformProvisionCheck struct { +} + +var _ asset.Asset = (*PlatformProvisionCheck)(nil) + +// Dependencies returns the dependencies for PlatformProvisionCheck +func (a *PlatformProvisionCheck) Dependencies() []asset.Asset { + return []asset.Asset{ + &InstallConfig{}, + } +} + +// Generate queries for input from the user. 
+func (a *PlatformProvisionCheck) Generate(dependencies asset.Parents) error { + ic := &InstallConfig{} + dependencies.Get(ic) + platform := ic.Config.Platform.Name() + + // IPI requires MachineAPI capability + enabledCaps := sets.NewString() + if ic.Config.Capabilities == nil || ic.Config.Capabilities.BaselineCapabilitySet == "" { + // when Capabilities and/or BaselineCapabilitySet is not specified, default is vCurrent + baseSet := configv1.ClusterVersionCapabilitySets[configv1.ClusterVersionCapabilitySetCurrent] + for _, cap := range baseSet { + enabledCaps.Insert(string(cap)) + } + } + if ic.Config.Capabilities != nil { + if ic.Config.Capabilities.BaselineCapabilitySet != "" { + baseSet := configv1.ClusterVersionCapabilitySets[ic.Config.Capabilities.BaselineCapabilitySet] + for _, cap := range baseSet { + enabledCaps.Insert(string(cap)) + } + } + if ic.Config.Capabilities.AdditionalEnabledCapabilities != nil { + for _, cap := range ic.Config.Capabilities.AdditionalEnabledCapabilities { + enabledCaps.Insert(string(cap)) + } + } + } + if !enabledCaps.Has(string(configv1.ClusterVersionCapabilityMachineAPI)) { + return errors.New("IPI requires MachineAPI capability") + } + + switch platform { + case aws.Name: + session, err := ic.AWS.Session(context.TODO()) + if err != nil { + return err + } + client := awsconfig.NewClient(session) + return awsconfig.ValidateForProvisioning(client, ic.Config, ic.AWS) + case azure.Name: + dnsConfig, err := ic.Azure.DNSConfig() + if err != nil { + return err + } + err = azconfig.ValidatePublicDNS(ic.Config, dnsConfig) + if err != nil { + return err + } + client, err := ic.Azure.Client() + if err != nil { + return err + } + return azconfig.ValidateForProvisioning(client, ic.Config) + case baremetal.Name: + err := bmconfig.ValidateBaremetalPlatformSet(ic.Config) + if err != nil { + return err + } + err = bmconfig.ValidateProvisioning(ic.Config) + if err != nil { + return err + } + err = 
bmconfig.ValidateStaticBootstrapNetworking(ic.Config) + if err != nil { + return err + } + case gcp.Name: + err := gcpconfig.ValidateForProvisioning(ic.Config) + if err != nil { + return err + } + case ibmcloud.Name: + client, err := ibmcloudconfig.NewClient() + if err != nil { + return err + } + err = ibmcloudconfig.ValidatePreExistingPublicDNS(client, ic.Config, ic.IBMCloud) + if err != nil { + return err + } + case openstack.Name: + err := osconfig.ValidateForProvisioning(ic.Config) + if err != nil { + return err + } + case vsphere.Name: + if err := vsconfig.ValidateForProvisioning(ic.Config); err != nil { + return err + } + case ovirt.Name: + err := ovirtconfig.ValidateForProvisioning(ic.Config) + if err != nil { + return err + } + case alibabacloud.Name: + client, err := ic.AlibabaCloud.Client() + if err != nil { + return err + } + err = alibabacloudconfig.ValidateForProvisioning(client, ic.Config, ic.AlibabaCloud) + if err != nil { + return err + } + case powervs.Name: + client, err := powervsconfig.NewClient() + if err != nil { + return err + } + + err = powervsconfig.ValidatePreExistingDNS(client, ic.Config, ic.PowerVS) + if err != nil { + return err + } + + err = powervsconfig.ValidateCustomVPCSetup(client, ic.Config) + if err != nil { + return err + } + + err = powervsconfig.ValidateResourceGroup(client, ic.Config) + if err != nil { + return err + } + + err = powervsconfig.ValidateSystemTypeForZone(client, ic.Config) + if err != nil { + return err + } + + err = powervsconfig.ValidateServiceInstance(client, ic.Config) + if err != nil { + return err + } + case external.Name, libvirt.Name, none.Name: + // no special provisioning requirements to check + case nutanix.Name: + err := nutanixconfig.ValidateForProvisioning(ic.Config) + if err != nil { + return err + } + default: + return fmt.Errorf("unknown platform type %q", platform) + } + return nil +} + +// Name returns the human-friendly name of the asset. 
+func (a *PlatformProvisionCheck) Name() string { + return "Platform Provisioning Check" +} diff --git a/.history/pkg/asset/installconfig/powervs/validation_20241219120242.go b/.history/pkg/asset/installconfig/powervs/validation_20241219120242.go new file mode 100644 index 00000000000..da8bdf51af5 --- /dev/null +++ b/.history/pkg/asset/installconfig/powervs/validation_20241219120242.go @@ -0,0 +1,321 @@ +package powervs + +import ( + "context" + "errors" + "fmt" + "strings" + "time" + + "k8s.io/apimachinery/pkg/util/validation/field" + + "github.com/openshift/installer/pkg/types" + powervstypes "github.com/openshift/installer/pkg/types/powervs" +) + +// Validate executes platform specific validation/ +func Validate(config *types.InstallConfig) error { + allErrs := field.ErrorList{} + + if config.Platform.PowerVS == nil { + allErrs = append(allErrs, field.Required(field.NewPath("platform", "powervs"), "Power VS Validation requires a Power VS platform configuration.")) + } else { + if config.ControlPlane != nil { + fldPath := field.NewPath("controlPlane") + allErrs = append(allErrs, validateMachinePool(fldPath, config.ControlPlane)...) + } + for idx, compute := range config.Compute { + fldPath := field.NewPath("compute").Index(idx) + allErrs = append(allErrs, validateMachinePool(fldPath, &compute)...) 
+ } + // Machine pool CIDR check + for i := range config.Networking.MachineNetwork { + // Each machine pool CIDR must have 24 significant bits (/24) + if bits, _ := config.Networking.MachineNetwork[i].CIDR.Mask.Size(); bits != 24 { + // If not, create an error displaying the CIDR in the install config vs the expectation (/24) + fldPath := field.NewPath("Networking") + allErrs = append(allErrs, field.Invalid(fldPath.Child("MachineNetwork").Child("CIDR"), (&config.Networking.MachineNetwork[i].CIDR).String(), "Machine Pool CIDR must be /24.")) + } + } + } + return allErrs.ToAggregate() +} + +func validateMachinePool(fldPath *field.Path, machinePool *types.MachinePool) field.ErrorList { + allErrs := field.ErrorList{} + if machinePool.Architecture != "ppc64le" { + allErrs = append(allErrs, field.NotSupported(fldPath.Child("architecture"), machinePool.Architecture, []string{"ppc64le"})) + } + return allErrs +} + +// ValidatePERAvailability ensures the target datacenter has PER enabled. +func ValidatePERAvailability(client API, ic *types.InstallConfig) error { + capabilities, err := client.GetDatacenterCapabilities(context.TODO(), ic.PowerVS.Zone) + if err != nil { + return err + } + const per = "power-edge-router" + perAvail, ok := capabilities[per] + if !ok { + return fmt.Errorf("%s capability unknown at: %s", per, ic.PowerVS.Zone) + } + if !perAvail { + return fmt.Errorf("%s is not available at: %s", per, ic.PowerVS.Zone) + } + + capabilities, err = client.GetWorkspaceCapabilities(context.TODO(), ic.PowerVS.ServiceInstanceID) + if err != nil { + return err + } + if !capabilities[per] { + return fmt.Errorf("%s is not available in workspace: %s", per, ic.PowerVS.ServiceInstanceID) + } + + return nil +} + +// ValidatePreExistingDNS ensures no pre-existing DNS record exists in the CIS +// DNS zone or IBM DNS zone for cluster's Kubernetes API. 
+func ValidatePreExistingDNS(client API, ic *types.InstallConfig, metadata MetadataAPI) error { + allErrs := field.ErrorList{} + + fldPath := field.NewPath("baseDomain") + if ic.Publish == types.ExternalPublishingStrategy { + allErrs = append(allErrs, validatePreExistingPublicDNS(fldPath, client, ic, metadata)...) + } else { + allErrs = append(allErrs, validatePreExistingPrivateDNS(fldPath, client, ic, metadata)...) + } + + return allErrs.ToAggregate() +} + +func validatePreExistingPublicDNS(fldPath *field.Path, client API, ic *types.InstallConfig, metadata MetadataAPI) field.ErrorList { + allErrs := field.ErrorList{} + // Get CIS CRN + crn, err := metadata.CISInstanceCRN(context.TODO()) + if err != nil { + return append(allErrs, field.InternalError(fldPath, err)) + } + + // Get CIS zone ID by name + zoneID, err := client.GetDNSZoneIDByName(context.TODO(), ic.BaseDomain, types.ExternalPublishingStrategy) + if err != nil { + return append(allErrs, field.InternalError(fldPath, err)) + } + + // Search for existing records + recordNames := [...]string{fmt.Sprintf("api.%s", ic.ClusterDomain()), fmt.Sprintf("api-int.%s", ic.ClusterDomain())} + for _, recordName := range recordNames { + records, err := client.GetDNSRecordsByName(context.TODO(), crn, zoneID, recordName, types.ExternalPublishingStrategy) + if err != nil { + allErrs = append(allErrs, field.InternalError(fldPath, err)) + } + + // DNS record exists + if len(records) != 0 { + allErrs = append(allErrs, field.Duplicate(fldPath, fmt.Sprintf("record %s already exists in CIS zone (%s) and might be in use by another cluster, please remove it to continue", recordName, zoneID))) + } + } + return allErrs +} + +func validatePreExistingPrivateDNS(fldPath *field.Path, client API, ic *types.InstallConfig, metadata MetadataAPI) field.ErrorList { + allErrs := field.ErrorList{} + // Get DNS CRN + crn, err := metadata.DNSInstanceCRN(context.TODO()) + if err != nil { + return append(allErrs, field.InternalError(fldPath, err)) + 
} + + // Get CIS zone ID by name + zoneID, err := client.GetDNSZoneIDByName(context.TODO(), ic.BaseDomain, types.InternalPublishingStrategy) + if err != nil { + return append(allErrs, field.InternalError(fldPath, err)) + } + + // Search for existing records + recordNames := [...]string{fmt.Sprintf("api-int.%s", ic.ClusterDomain())} + for _, recordName := range recordNames { + records, err := client.GetDNSRecordsByName(context.TODO(), crn, zoneID, recordName, types.InternalPublishingStrategy) + if err != nil { + allErrs = append(allErrs, field.InternalError(fldPath, err)) + } + + // DNS record exists + if len(records) != 0 { + allErrs = append(allErrs, field.Duplicate(fldPath, fmt.Sprintf("record %s already exists in DNS zone (%s) and might be in use by another cluster, please remove it to continue", recordName, zoneID))) + } + } + return allErrs +} + +// ValidateCustomVPCSetup ensures optional VPC settings, if specified, are all legit. +func ValidateCustomVPCSetup(client API, ic *types.InstallConfig) error { + allErrs := field.ErrorList{} + var vpcRegion = ic.PowerVS.VPCRegion + var vpcName = ic.PowerVS.VPCName + var err error + fldPath := field.NewPath("VPC") + + if vpcRegion != "" { + if !powervstypes.ValidateVPCRegion(vpcRegion) { + allErrs = append(allErrs, field.NotFound(fldPath.Child("vpcRegion"), vpcRegion)) + } + } else { + vpcRegion, err = powervstypes.VPCRegionForPowerVSRegion(ic.PowerVS.Region) + if err != nil { + allErrs = append(allErrs, field.Invalid(fldPath.Child("region"), nil, ic.PowerVS.Region)) + } + } + + if vpcName != "" { + allErrs = append(allErrs, findVPCInRegion(client, vpcName, vpcRegion, fldPath)...) + allErrs = append(allErrs, findSubnetInVPC(client, ic.PowerVS.VPCSubnets, vpcRegion, vpcName, fldPath)...) 
+ } else if len(ic.PowerVS.VPCSubnets) != 0 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("vpcSubnets"), nil, "invalid without vpcName")) + } + + return allErrs.ToAggregate() +} + +func findVPCInRegion(client API, name string, region string, path *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + if name == "" { + return allErrs + } + + vpcs, err := client.GetVPCs(context.TODO(), region) + if err != nil { + return append(allErrs, field.InternalError(path.Child("vpcRegion"), err)) + } + + found := false + for _, vpc := range vpcs { + if *vpc.Name == name { + found = true + break + } + } + if !found { + allErrs = append(allErrs, field.NotFound(path.Child("vpcName"), name)) + } + + return allErrs +} + +func findSubnetInVPC(client API, subnets []string, region string, name string, path *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + if len(subnets) == 0 { + return allErrs + } + + subnet, err := client.GetSubnetByName(context.TODO(), subnets[0], region) + if err != nil { + allErrs = append(allErrs, field.InternalError(path.Child("vpcSubnets"), err)) + } else if *subnet.VPC.Name != name { + allErrs = append(allErrs, field.Invalid(path.Child("vpcSubnets"), nil, "not attached to VPC")) + } + + return allErrs +} + +// ValidateResourceGroup validates the resource group in our install config. 
+func ValidateResourceGroup(client API, ic *types.InstallConfig) error {
+ ctx, cancel := context.WithTimeout(context.TODO(), 5*time.Minute)
+ defer cancel()
+
+ resourceGroups, err := client.ListResourceGroups(ctx)
+ if err != nil {
+ return fmt.Errorf("failed to list resourceGroups: %w", err)
+ }
+
+ switch ic.PowerVS.PowerVSResourceGroup {
+ case "":
+ return errors.New("platform:powervs:powervsresourcegroup is empty")
+ case "Default":
+ found := false
+ for _, resourceGroup := range resourceGroups.Resources {
+ if resourceGroup.Default != nil && *resourceGroup.Default {
+ found = true
+ ic.PowerVS.PowerVSResourceGroup = *resourceGroup.Name
+ break
+ }
+ }
+ if !found {
+ return errors.New("platform:powervs:powervsresourcegroup is default but no default exists")
+ }
+ default:
+ found := false
+ for _, resourceGroup := range resourceGroups.Resources {
+ if *resourceGroup.Name == ic.PowerVS.PowerVSResourceGroup {
+ found = true
+ break
+ }
+ }
+ if !found {
+ return errors.New("platform:powervs:powervsresourcegroup has an invalid name")
+ }
+ }
+
+ return nil
+}
+
+// ValidateSystemTypeForZone checks if the specified sysType is available in the target zone.
+func ValidateSystemTypeForZone(client API, ic *types.InstallConfig) error {
+ if ic.ControlPlane == nil || ic.ControlPlane.Platform.PowerVS == nil || ic.ControlPlane.Platform.PowerVS.SysType == "" {
+ return nil
+ }
+ availableOnes, err := powervstypes.AvailableSysTypes(ic.PowerVS.Region, ic.PowerVS.Zone)
+ if err != nil {
+ return fmt.Errorf("failed to obtain available SysTypes for: %s", ic.PowerVS.Zone)
+ }
+ requested := ic.ControlPlane.Platform.PowerVS.SysType
+ found := false
+ for i := range availableOnes {
+ if requested == availableOnes[i] {
+ found = true
+ break
+ }
+ }
+ if found {
+ return nil
+ }
+ return fmt.Errorf("%s is not available in: %s", requested, ic.PowerVS.Zone)
+}
+
+// ValidateServiceInstance validates the optional service instance GUID in our install config.
+func ValidateServiceInstance(client API, ic *types.InstallConfig) error {
+ ctx, cancel := context.WithTimeout(context.TODO(), 5*time.Minute)
+ defer cancel()
+
+ serviceInstances, err := client.ListServiceInstances(ctx)
+ if err != nil {
+ return err
+ }
+
+ switch ic.PowerVS.ServiceInstanceID {
+ case "":
+ return errors.New("platform:powervs:serviceinstance is empty")
+ default:
+ found := false
+ for _, serviceInstance := range serviceInstances {
+ guid := strings.SplitN(serviceInstance, " ", 2)[1]
+ if guid == ic.PowerVS.ServiceInstanceID {
+ found = true
+ break
+ }
+ }
+ if !found {
+ return errors.New("platform:powervs:serviceinstance has an invalid guid")
+ }
+ }
+
+ return nil
+}
diff --git a/.history/pkg/asset/installconfig/powervs/validation_20241219121130.go b/.history/pkg/asset/installconfig/powervs/validation_20241219121130.go
new file mode 100644
index 00000000000..79df6c89948
--- /dev/null
+++ b/.history/pkg/asset/installconfig/powervs/validation_20241219121130.go
@@ -0,0 +1,317 @@
+package powervs
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "strings"
+ "time"
+
+ 
"k8s.io/apimachinery/pkg/util/validation/field" + + "github.com/openshift/installer/pkg/types" + powervstypes "github.com/openshift/installer/pkg/types/powervs" +) + +// Validate executes platform specific validation/ +func Validate(config *types.InstallConfig) error { + allErrs := field.ErrorList{} + + if config.Platform.PowerVS == nil { + allErrs = append(allErrs, field.Required(field.NewPath("platform", "powervs"), "Power VS Validation requires a Power VS platform configuration.")) + } else { + if config.ControlPlane != nil { + fldPath := field.NewPath("controlPlane") + allErrs = append(allErrs, validateMachinePool(fldPath, config.ControlPlane)...) + } + for idx, compute := range config.Compute { + fldPath := field.NewPath("compute").Index(idx) + allErrs = append(allErrs, validateMachinePool(fldPath, &compute)...) + } + // Machine pool CIDR check + for i := range config.Networking.MachineNetwork { + // Each machine pool CIDR must have 24 significant bits (/24) + if bits, _ := config.Networking.MachineNetwork[i].CIDR.Mask.Size(); bits != 24 { + // If not, create an error displaying the CIDR in the install config vs the expectation (/24) + fldPath := field.NewPath("Networking") + allErrs = append(allErrs, field.Invalid(fldPath.Child("MachineNetwork").Child("CIDR"), (&config.Networking.MachineNetwork[i].CIDR).String(), "Machine Pool CIDR must be /24.")) + } + } + } + return allErrs.ToAggregate() +} + +func validateMachinePool(fldPath *field.Path, machinePool *types.MachinePool) field.ErrorList { + allErrs := field.ErrorList{} + if machinePool.Architecture != "ppc64le" { + allErrs = append(allErrs, field.NotSupported(fldPath.Child("architecture"), machinePool.Architecture, []string{"ppc64le"})) + } + return allErrs +} + +// ValidatePERAvailability ensures the target datacenter has PER enabled. 
+func ValidatePERAvailability(client API, ic *types.InstallConfig) error { + capabilities, err := client.GetDatacenterCapabilities(context.TODO(), ic.PowerVS.Zone) + if err != nil { + return err + } + const per = "power-edge-router" + perAvail, ok := capabilities[per] + if !ok { + return fmt.Errorf("%s capability unknown at: %s", per, ic.PowerVS.Zone) + } + if !perAvail { + return fmt.Errorf("%s is not available at: %s", per, ic.PowerVS.Zone) + } + + capabilities, err = client.GetWorkspaceCapabilities(context.TODO(), ic.PowerVS.ServiceInstanceID) + if err != nil { + return err + } + if !capabilities[per] { + return fmt.Errorf("%s is not available in workspace: %s", per, ic.PowerVS.ServiceInstanceID) + } + + return nil +} + +// ValidatePreExistingDNS ensures no pre-existing DNS record exists in the CIS +// DNS zone or IBM DNS zone for cluster's Kubernetes API. +func ValidatePreExistingDNS(client API, ic *types.InstallConfig, metadata MetadataAPI) error { + allErrs := field.ErrorList{} + + fldPath := field.NewPath("baseDomain") + if ic.Publish == types.ExternalPublishingStrategy { + allErrs = append(allErrs, validatePreExistingPublicDNS(fldPath, client, ic, metadata)...) + } else { + allErrs = append(allErrs, validatePreExistingPrivateDNS(fldPath, client, ic, metadata)...) 
+ } + + return allErrs.ToAggregate() +} + +func validatePreExistingPublicDNS(fldPath *field.Path, client API, ic *types.InstallConfig, metadata MetadataAPI) field.ErrorList { + allErrs := field.ErrorList{} + // Get CIS CRN + crn, err := metadata.CISInstanceCRN(context.TODO()) + if err != nil { + return append(allErrs, field.InternalError(fldPath, err)) + } + + // Get CIS zone ID by name + zoneID, err := client.GetDNSZoneIDByName(context.TODO(), ic.BaseDomain, types.ExternalPublishingStrategy) + if err != nil { + return append(allErrs, field.InternalError(fldPath, err)) + } + + // Search for existing records + recordNames := [...]string{fmt.Sprintf("api.%s", ic.ClusterDomain()), fmt.Sprintf("api-int.%s", ic.ClusterDomain())} + for _, recordName := range recordNames { + records, err := client.GetDNSRecordsByName(context.TODO(), crn, zoneID, recordName, types.ExternalPublishingStrategy) + if err != nil { + allErrs = append(allErrs, field.InternalError(fldPath, err)) + } + + // DNS record exists + if len(records) != 0 { + allErrs = append(allErrs, field.Duplicate(fldPath, fmt.Sprintf("record %s already exists in CIS zone (%s) and might be in use by another cluster, please remove it to continue", recordName, zoneID))) + } + } + return allErrs +} + +func validatePreExistingPrivateDNS(fldPath *field.Path, client API, ic *types.InstallConfig, metadata MetadataAPI) field.ErrorList { + allErrs := field.ErrorList{} + // Get DNS CRN + crn, err := metadata.DNSInstanceCRN(context.TODO()) + if err != nil { + return append(allErrs, field.InternalError(fldPath, err)) + } + + // Get CIS zone ID by name + zoneID, err := client.GetDNSZoneIDByName(context.TODO(), ic.BaseDomain, types.InternalPublishingStrategy) + if err != nil { + return append(allErrs, field.InternalError(fldPath, err)) + } + + // Search for existing records + recordNames := [...]string{fmt.Sprintf("api-int.%s", ic.ClusterDomain())} + for _, recordName := range recordNames { + records, err := 
client.GetDNSRecordsByName(context.TODO(), crn, zoneID, recordName, types.InternalPublishingStrategy) + if err != nil { + allErrs = append(allErrs, field.InternalError(fldPath, err)) + } + + // DNS record exists + if len(records) != 0 { + allErrs = append(allErrs, field.Duplicate(fldPath, fmt.Sprintf("record %s already exists in DNS zone (%s) and might be in use by another cluster, please remove it to continue", recordName, zoneID))) + } + } + return allErrs +} + +// ValidateCustomVPCSetup ensures optional VPC settings, if specified, are all legit. +func ValidateCustomVPCSetup(client API, ic *types.InstallConfig) error { + allErrs := field.ErrorList{} + var vpcRegion = ic.PowerVS.VPCRegion + var vpcName = ic.PowerVS.VPCName + var err error + fldPath := field.NewPath("VPC") + + if vpcRegion != "" { + if !powervstypes.ValidateVPCRegion(vpcRegion) { + allErrs = append(allErrs, field.NotFound(fldPath.Child("vpcRegion"), vpcRegion)) + } + } else { + vpcRegion, err = powervstypes.VPCRegionForPowerVSRegion(ic.PowerVS.Region) + if err != nil { + allErrs = append(allErrs, field.Invalid(fldPath.Child("region"), nil, ic.PowerVS.Region)) + } + } + + if vpcName != "" { + allErrs = append(allErrs, findVPCInRegion(client, vpcName, vpcRegion, fldPath)...) + allErrs = append(allErrs, findSubnetInVPC(client, ic.PowerVS.VPCSubnets, vpcRegion, vpcName, fldPath)...) 
+ } else if len(ic.PowerVS.VPCSubnets) != 0 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("vpcSubnets"), nil, "invalid without vpcName")) + } + + return allErrs.ToAggregate() +} + +func findVPCInRegion(client API, name string, region string, path *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + if name == "" { + return allErrs + } + + vpcs, err := client.GetVPCs(context.TODO(), region) + if err != nil { + return append(allErrs, field.InternalError(path.Child("vpcRegion"), err)) + } + + found := false + for _, vpc := range vpcs { + if *vpc.Name == name { + found = true + break + } + } + if !found { + allErrs = append(allErrs, field.NotFound(path.Child("vpcName"), name)) + } + + return allErrs +} + +func findSubnetInVPC(client API, subnets []string, region string, name string, path *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + if len(subnets) == 0 { + return allErrs + } + + subnet, err := client.GetSubnetByName(context.TODO(), subnets[0], region) + if err != nil { + allErrs = append(allErrs, field.InternalError(path.Child("vpcSubnets"), err)) + } else if *subnet.VPC.Name != name { + allErrs = append(allErrs, field.Invalid(path.Child("vpcSubnets"), nil, "not attached to VPC")) + } + + return allErrs +} + +// ValidateResourceGroup validates the resource group in our install config. 
+func ValidateResourceGroup(client API, ic *types.InstallConfig) error { + ctx, cancel := context.WithTimeout(context.TODO(), 5*time.Minute) + defer cancel() + + resourceGroups, err := client.ListResourceGroups(ctx) + if err != nil { + return fmt.Errorf("failed to list resourceGroups: %w", err) + } + + switch ic.PowerVS.PowerVSResourceGroup { + case "": + return errors.New("platform:powervs:powervsresourcegroup is empty") + case "Default": + found := false + for _, resourceGroup := range resourceGroups.Resources { + if resourceGroup.Default != nil && *resourceGroup.Default { + found = true + ic.PowerVS.PowerVSResourceGroup = *resourceGroup.Name + break + } + } + if !found { + return errors.New("platform:powervs:powervsresourcegroup is default but no default exists") + } + default: + found := false + for _, resourceGroup := range resourceGroups.Resources { + if *resourceGroup.Name == ic.PowerVS.PowerVSResourceGroup { + found = true + break + } + } + if !found { + return errors.New("platform:powervs:powervsresourcegroup has an invalid name") + } + } + + return nil +} + +// ValidateSystemTypeForZone checks if the specified sysType is available in the target zone. +func ValidateSystemTypeForZone(client API, ic *types.InstallConfig) error { + if ic.ControlPlane == nil || ic.ControlPlane.Platform.PowerVS == nil || ic.ControlPlane.Platform.PowerVS.SysType == "" { + return nil + } + availableOnes, err := powervstypes.AvailableSysTypes(ic.PowerVS.Region, ic.PowerVS.Zone) + if err != nil { + return fmt.Errorf("failed to obtain available SysTypes for: %s", ic.PowerVS.Zone) + } + requested := ic.ControlPlane.Platform.PowerVS.SysType + found := false + for i := range availableOnes { + if requested == availableOnes[i] { + found = true + break + } + } + if found { + return nil + } + return fmt.Errorf("%s is not available in: %s", requested, ic.PowerVS.Zone) +} + +// ValidateServiceInstance validates the optional service instance GUID in our install config. 
+func ValidateServiceInstance(client API, ic *types.InstallConfig) error { + ctx, cancel := context.WithTimeout(context.TODO(), 5*time.Minute) + defer cancel() + + serviceInstances, err := client.ListServiceInstances(ctx) + if err != nil { + return err + } + + switch ic.PowerVS.ServiceInstanceID { + case "": + return errors.New("platform:powervs:serviceinstance is empty") + default: + found := false + for _, serviceInstance := range serviceInstances { + guid := strings.SplitN(serviceInstance, " ", 2)[1] + if guid == ic.PowerVS.ServiceInstanceID { + found = true + break + } + } + if !found { + return errors.New("platform:powervs:serviceinstance has an invalid guid") + } + } + + return nil +} diff --git a/.history/pkg/asset/installconfig/powervs/validation_test_20241219120242.go b/.history/pkg/asset/installconfig/powervs/validation_test_20241219120242.go new file mode 100644 index 00000000000..ad5d39eb80e --- /dev/null +++ b/.history/pkg/asset/installconfig/powervs/validation_test_20241219120242.go @@ -0,0 +1,887 @@ +package powervs_test + +import ( + "fmt" + "os" + "testing" + + "github.com/IBM-Cloud/power-go-client/power/models" + "github.com/IBM/vpc-go-sdk/vpcv1" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/intstr" + + machinev1 "github.com/openshift/api/machine/v1" + machinev1beta1 "github.com/openshift/api/machine/v1beta1" + "github.com/openshift/installer/pkg/asset/installconfig/powervs" + "github.com/openshift/installer/pkg/asset/installconfig/powervs/mock" + "github.com/openshift/installer/pkg/ipnet" + "github.com/openshift/installer/pkg/types" + powervstypes "github.com/openshift/installer/pkg/types/powervs" +) + +type editFunctions []func(ic *types.InstallConfig) + +var ( + validRegion = "dal" + validCIDR = "192.168.0.0/24" + validCISInstanceCRN = 
"crn:v1:bluemix:public:internet-svcs:global:a/valid-account-id:valid-instance-id::" + validClusterName = "valid-cluster-name" + validDNSZoneID = "valid-zone-id" + validBaseDomain = "valid.base.domain" + validPowerVSResourceGroup = "valid-resource-group" + validPublicSubnetUSSouth1ID = "public-subnet-us-south-1-id" + validPublicSubnetUSSouth2ID = "public-subnet-us-south-2-id" + validPrivateSubnetUSSouth1ID = "private-subnet-us-south-1-id" + validPrivateSubnetUSSouth2ID = "private-subnet-us-south-2-id" + validServiceInstanceID = "aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee" + invalidServiceInstanceID = "bogus-service-instance-id" + validSubnets = []string{ + validPublicSubnetUSSouth1ID, + validPublicSubnetUSSouth2ID, + validPrivateSubnetUSSouth1ID, + validPrivateSubnetUSSouth2ID, + } + validUserID = "valid-user@example.com" + validZone = "dal12" + + existingDNSRecordsResponse = []powervs.DNSRecordResponse{ + { + Name: "valid-dns-record-name-1", + Type: "valid-dns-record-type", + }, + { + Name: "valid-dns-record-name-2", + Type: "valid-dns-record-type", + }, + } + noDNSRecordsResponse = []powervs.DNSRecordResponse{} + invalidArchitecture = func(ic *types.InstallConfig) { ic.ControlPlane.Architecture = "ppc64" } + cidrInvalid, _ = ipnet.ParseCIDR("192.168.0.0/16") + invalidMachinePoolCIDR = func(ic *types.InstallConfig) { ic.Networking.MachineNetwork[0].CIDR = *cidrInvalid } + cidrValid, _ = ipnet.ParseCIDR("192.168.0.0/24") + validMachinePoolCIDR = func(ic *types.InstallConfig) { ic.Networking.MachineNetwork[0].CIDR = *cidrValid } + validVPCRegion = "us-south" + invalidVPCRegion = "foo-bah" + setValidVPCRegion = func(ic *types.InstallConfig) { ic.Platform.PowerVS.VPCRegion = validVPCRegion } + validRG = "valid-resource-group" + anotherValidRG = "another-valid-resource-group" + validVPCID = "valid-id" + anotherValidVPCID = "another-valid-id" + validVPC = "valid-vpc" + setValidVPCName = func(ic *types.InstallConfig) { ic.Platform.PowerVS.VPCName = validVPC } + anotherValidVPC 
= "another-valid-vpc"
+ invalidVPC = "bogus-vpc"
+ validVPCs = []vpcv1.VPC{
+ {
+ Name: &validVPC,
+ ID: &validVPCID,
+ ResourceGroup: &vpcv1.ResourceGroupReference{
+ Name: &validRG,
+ ID: &validRG,
+ },
+ },
+ {
+ Name: &anotherValidVPC,
+ ID: &anotherValidVPCID,
+ ResourceGroup: &vpcv1.ResourceGroupReference{
+ Name: &anotherValidRG,
+ ID: &anotherValidRG,
+ },
+ },
+ }
+ validVPCSubnet = "valid-vpc-subnet"
+ invalidVPCSubnet = "invalid-vpc-subnet"
+ wrongVPCSubnet = "wrong-vpc-subnet"
+ validSubnet = &vpcv1.Subnet{
+ Name: &validRG,
+ VPC: &vpcv1.VPCReference{
+ Name: &validVPC,
+ ID: &validVPCID,
+ },
+ ResourceGroup: &vpcv1.ResourceGroupReference{
+ Name: &validRG,
+ ID: &validRG,
+ },
+ }
+ wrongSubnet = &vpcv1.Subnet{
+ Name: &validRG,
+ VPC: &vpcv1.VPCReference{
+ Name: &anotherValidVPC,
+ ID: &anotherValidVPCID,
+ },
+ ResourceGroup: &vpcv1.ResourceGroupReference{
+ Name: &validRG,
+ ID: &validRG,
+ },
+ }
+ regionWithPER = "dal10"
+ regionWithoutPER = "foo99"
+ regionPERUnknown = "bah77"
+ mapWithPERFalse = map[string]bool{
+ "disaster-recover-site": true,
+ "power-edge-router": false,
+ "vpn-connections": true,
+ }
+ mapWithPERTrue = map[string]bool{
+ "disaster-recover-site": true,
+ "power-edge-router": true,
+ "vpn-connections": true,
+ }
+ mapPERUnknown = map[string]bool{
+ "disaster-recover-site": true,
+ "power-vpn-connections": false,
+ }
+ defaultSysType = "s922"
+ newSysType = "s1022"
+ invalidZone = "dal11"
+ validServiceInstanceGUID = ""
+)
+
+func validInstallConfig() *types.InstallConfig {
+ return &types.InstallConfig{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: validClusterName,
+ },
+ BaseDomain: validBaseDomain,
+ Networking: &types.Networking{
+ MachineNetwork: []types.MachineNetworkEntry{
+ {CIDR: *ipnet.MustParseCIDR(validCIDR)},
+ },
+ },
+ Publish: types.ExternalPublishingStrategy,
+ Platform: types.Platform{
+ PowerVS: validMinimalPlatform(),
+ },
+ 
ControlPlane: &types.MachinePool{ + Architecture: "ppc64le", + }, + Compute: []types.MachinePool{{ + Architecture: "ppc64le", + }}, + } +} + +func validMinimalPlatform() *powervstypes.Platform { + return &powervstypes.Platform{ + PowerVSResourceGroup: validPowerVSResourceGroup, + Region: validRegion, + ServiceInstanceID: validServiceInstanceID, + UserID: validUserID, + Zone: validZone, + } +} + +func validMachinePool() *powervstypes.MachinePool { + return &powervstypes.MachinePool{} +} + +func TestValidate(t *testing.T) { + cases := []struct { + name string + edits editFunctions + errorMsg string + }{ + { + name: "valid install config", + edits: editFunctions{}, + errorMsg: "", + }, + { + name: "invalid architecture", + edits: editFunctions{invalidArchitecture}, + errorMsg: `^controlPlane.architecture\: Unsupported value\: \"ppc64\"\: supported values: \"ppc64le\"`, + }, + { + name: "invalid machine pool CIDR", + edits: editFunctions{invalidMachinePoolCIDR}, + errorMsg: `Networking.MachineNetwork.CIDR: Invalid value: "192.168.0.0/16": Machine Pool CIDR must be /24.`, + }, + { + name: "valid machine pool CIDR", + edits: editFunctions{validMachinePoolCIDR}, + errorMsg: "", + }, + } + + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + editedInstallConfig := validInstallConfig() + for _, edit := range tc.edits { + edit(editedInstallConfig) + } + + aggregatedErrors := powervs.Validate(editedInstallConfig) + if tc.errorMsg != "" { + assert.Regexp(t, tc.errorMsg, aggregatedErrors) + } else { + assert.NoError(t, aggregatedErrors) + } + }) + } +} + +func TestValidatePreExistingPublicDNS(t *testing.T) { + cases := []struct { + name string + edits editFunctions + errorMsg string + }{ + { + name: "no pre-existing DNS records", + errorMsg: "", + }, + { + name: "pre-existing DNS records", + errorMsg: `^\[baseDomain\: Duplicate value\: \"record api\.valid-cluster-name\.valid\.base\.domain already 
exists in CIS zone \(valid-zone-id\) and might be in use by another cluster, please remove it to continue\", baseDomain\: Duplicate value\: \"record api-int\.valid-cluster-name\.valid\.base\.domain already exists in CIS zone \(valid-zone-id\) and might be in use by another cluster, please remove it to continue\"\]$`, + }, + { + name: "cannot get zone ID", + errorMsg: `^baseDomain: Internal error$`, + }, + { + name: "cannot get DNS records", + errorMsg: `^baseDomain: Internal error$`, + }, + } + setMockEnvVars() + + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + + powervsClient := mock.NewMockAPI(mockCtrl) + metadata := mock.NewMockMetadataAPI(mockCtrl) + + dnsRecordNames := [...]string{fmt.Sprintf("api.%s.%s", validClusterName, validBaseDomain), fmt.Sprintf("api-int.%s.%s", validClusterName, validBaseDomain)} + + // Mock common to all tests + metadata.EXPECT().CISInstanceCRN(gomock.Any()).Return(validCISInstanceCRN, nil).AnyTimes() + + // Mocks: no pre-existing DNS records + powervsClient.EXPECT().GetDNSZoneIDByName(gomock.Any(), validBaseDomain, types.ExternalPublishingStrategy).Return(validDNSZoneID, nil) + for _, dnsRecordName := range dnsRecordNames { + powervsClient.EXPECT().GetDNSRecordsByName(gomock.Any(), validCISInstanceCRN, validDNSZoneID, dnsRecordName, types.ExternalPublishingStrategy).Return(noDNSRecordsResponse, nil) + } + + // Mocks: pre-existing DNS records + powervsClient.EXPECT().GetDNSZoneIDByName(gomock.Any(), validBaseDomain, types.ExternalPublishingStrategy).Return(validDNSZoneID, nil) + for _, dnsRecordName := range dnsRecordNames { + powervsClient.EXPECT().GetDNSRecordsByName(gomock.Any(), validCISInstanceCRN, validDNSZoneID, dnsRecordName, types.ExternalPublishingStrategy).Return(existingDNSRecordsResponse, nil) + } + + // Mocks: cannot get zone ID + powervsClient.EXPECT().GetDNSZoneIDByName(gomock.Any(), validBaseDomain, types.ExternalPublishingStrategy).Return("", fmt.Errorf("")) + + // Mocks: cannot get DNS records + 
powervsClient.EXPECT().GetDNSZoneIDByName(gomock.Any(), validBaseDomain, types.ExternalPublishingStrategy).Return(validDNSZoneID, nil) + for _, dnsRecordName := range dnsRecordNames { + powervsClient.EXPECT().GetDNSRecordsByName(gomock.Any(), validCISInstanceCRN, validDNSZoneID, dnsRecordName, types.ExternalPublishingStrategy).Return(nil, fmt.Errorf("")) + } + + // Run tests + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + aggregatedErrors := powervs.ValidatePreExistingDNS(powervsClient, validInstallConfig(), metadata) + if tc.errorMsg != "" { + assert.Regexp(t, tc.errorMsg, aggregatedErrors) + } else { + assert.NoError(t, aggregatedErrors) + } + }) + } +} + +func TestValidateCustomVPCSettings(t *testing.T) { + cases := []struct { + name string + edits editFunctions + errorMsg string + }{ + { + name: "invalid VPC region supplied alone", + edits: editFunctions{ + func(ic *types.InstallConfig) { + ic.Platform.PowerVS.VPCRegion = invalidVPCRegion + }, + }, + errorMsg: fmt.Sprintf(`VPC.vpcRegion: Not found: "%s"`, invalidVPCRegion), + }, + { + name: "valid VPC region supplied alone", + edits: editFunctions{ + func(ic *types.InstallConfig) { + ic.Platform.PowerVS.VPCRegion = validVPCRegion + }, + }, + errorMsg: "", + }, + { + name: "invalid VPC name supplied, without VPC region, not found near PowerVS region", + edits: editFunctions{ + func(ic *types.InstallConfig) { + ic.Platform.PowerVS.VPCName = invalidVPC + }, + }, + errorMsg: fmt.Sprintf(`VPC.vpcName: Not found: "%s"`, invalidVPC), + }, + { + name: "valid VPC name supplied, without VPC region, but found close to PowerVS region", + edits: editFunctions{ + setValidVPCName, + }, + errorMsg: "", + }, + { + name: "valid VPC name, with invalid VPC region", + edits: editFunctions{ + setValidVPCName, + func(ic *types.InstallConfig) { + ic.Platform.PowerVS.VPCRegion = invalidVPCRegion + }, + }, + errorMsg: "VPC.vpcRegion: Internal error: unknown region", + }, + { + name: "valid VPC name, valid VPC 
region", + edits: editFunctions{ + setValidVPCName, + setValidVPCRegion, + }, + errorMsg: "", + }, + { + name: "VPC subnet supplied, without vpcName", + edits: editFunctions{ + func(ic *types.InstallConfig) { + ic.Platform.PowerVS.VPCSubnets = []string{validVPCSubnet} + }, + }, + errorMsg: `VPC.vpcSubnets: Invalid value: "null": invalid without vpcName`, + }, + { + name: "VPC found, but not subnet", + edits: editFunctions{ + setValidVPCName, + func(ic *types.InstallConfig) { + ic.Platform.PowerVS.VPCSubnets = []string{invalidVPCSubnet} + }, + }, + errorMsg: "VPC.vpcSubnets: Internal error", + }, + { + name: "VPC found, subnet found as well, but not attached to the VPC", + edits: editFunctions{ + setValidVPCName, + func(ic *types.InstallConfig) { + ic.Platform.PowerVS.VPCSubnets = []string{wrongVPCSubnet} + }, + }, + errorMsg: `VPC.vpcSubnets: Invalid value: "null": not attached to VPC`, + }, + { + name: "region specified, VPC found, subnet found, and properly attached", + edits: editFunctions{ + setValidVPCName, + setValidVPCRegion, + func(ic *types.InstallConfig) { + ic.Platform.PowerVS.VPCSubnets = []string{validVPCSubnet} + }, + }, + errorMsg: "", + }, + } + setMockEnvVars() + + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + + powervsClient := mock.NewMockAPI(mockCtrl) + + // Mocks: invalid VPC region only + // nothing to mock + + // Mocks: valid VPC region only + // nothing to mock + + // Mocks: invalid VPC name results in error + powervsClient.EXPECT().GetVPCs(gomock.Any(), validVPCRegion).Return(validVPCs, nil) + + // Mocks: valid VPC name only, no issues + powervsClient.EXPECT().GetVPCs(gomock.Any(), validVPCRegion).Return(validVPCs, nil) + + // Mocks: valid VPC name, invalid VPC region + powervsClient.EXPECT().GetVPCs(gomock.Any(), invalidVPCRegion).Return(nil, fmt.Errorf("unknown region")) + + // Mocks: valid VPC name, valid VPC region, all good + powervsClient.EXPECT().GetVPCs(gomock.Any(), validVPCRegion).Return(validVPCs, nil) + + // 
Mocks: subnet specified, without vpcName, invalid + // nothing to mock + + // Mocks: valid VPC name, but Subnet not found + powervsClient.EXPECT().GetVPCs(gomock.Any(), validVPCRegion).Return(validVPCs, nil) + powervsClient.EXPECT().GetSubnetByName(gomock.Any(), invalidVPCSubnet, validVPCRegion).Return(nil, fmt.Errorf("")) + + // Mocks: valid VPC name, but wrong Subnet (present, but not attached) + powervsClient.EXPECT().GetVPCs(gomock.Any(), validVPCRegion).Return(validVPCs, nil) + powervsClient.EXPECT().GetSubnetByName(gomock.Any(), wrongVPCSubnet, validVPCRegion).Return(wrongSubnet, nil) + + // Mocks: region specified, valid VPC, valid region, valid Subnet, all good + powervsClient.EXPECT().GetVPCs(gomock.Any(), validVPCRegion).Return(validVPCs, nil) + powervsClient.EXPECT().GetSubnetByName(gomock.Any(), validVPCSubnet, validVPCRegion).Return(validSubnet, nil) + + // Run tests + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + editedInstallConfig := validInstallConfig() + for _, edit := range tc.edits { + edit(editedInstallConfig) + } + + aggregatedErrors := powervs.ValidateCustomVPCSetup(powervsClient, editedInstallConfig) + if tc.errorMsg != "" { + assert.Regexp(t, tc.errorMsg, aggregatedErrors) + } else { + assert.NoError(t, aggregatedErrors) + } + }) + } +} + +func createControlPlanes(numControlPlanes int, controlPlane *machinev1.PowerVSMachineProviderConfig) []machinev1beta1.Machine { + controlPlanes := make([]machinev1beta1.Machine, numControlPlanes) + + for i := range controlPlanes { + masterName := fmt.Sprintf("rdr-hamzy-test3-syd04-zwmgs-master-%d", i) + controlPlanes[i].TypeMeta = metav1.TypeMeta{ + Kind: "Machine", + APIVersion: "machine.openshift.io/v1beta1", + } + controlPlanes[i].ObjectMeta = metav1.ObjectMeta{ + Name: masterName, + Namespace: "openshift-machine-api", + Labels: make(map[string]string), + } + controlPlanes[i].Labels["machine.openshift.io/cluster-api-cluster"] = "rdr-hamzy-test3-syd04-zwmgs" + 
controlPlanes[i].Labels["machine.openshift.io/cluster-api-machine-role"] = "master" + controlPlanes[i].Labels["machine.openshift.io/cluster-api-machine-type"] = "master" + + controlPlanes[i].Spec.ProviderSpec = machinev1beta1.ProviderSpec{ + Value: &runtime.RawExtension{ + Raw: nil, + Object: controlPlane, + }, + } + } + + return controlPlanes +} + +func createComputes(numComputes int32, compute *machinev1.PowerVSMachineProviderConfig) []machinev1beta1.MachineSet { + computes := make([]machinev1beta1.MachineSet, 1) + + computes[0].Spec.Replicas = &numComputes + + computes[0].Spec.Template.Spec.ProviderSpec = machinev1beta1.ProviderSpec{ + Value: &runtime.RawExtension{ + Raw: nil, + Object: compute, + }, + } + + return computes +} + +func TestSystemPool(t *testing.T) { + setMockEnvVars() + + dedicatedControlPlane := machinev1.PowerVSMachineProviderConfig{ + TypeMeta: metav1.TypeMeta{Kind: "PowerVSMachineProviderConfig", APIVersion: "machine.openshift.io/v1"}, + KeyPairName: "rdr-hamzy-test3-syd04-vcwtz-key", + SystemType: "e980", + ProcessorType: "Dedicated", + Processors: intstr.IntOrString{Type: intstr.Int, IntVal: 1}, + MemoryGiB: 32, + } + + dedicatedControlPlanes := createControlPlanes(5, &dedicatedControlPlane) + + dedicatedCompute := machinev1.PowerVSMachineProviderConfig{ + TypeMeta: metav1.TypeMeta{Kind: "PowerVSMachineProviderConfig", APIVersion: "machine.openshift.io/v1"}, + KeyPairName: "rdr-hamzy-test3-syd04-vcwtz-key", + SystemType: "e980", + ProcessorType: "Dedicated", + Processors: intstr.IntOrString{Type: intstr.Int, IntVal: 1}, + MemoryGiB: 32, + } + + dedicatedComputes := createComputes(3, &dedicatedCompute) + + systemPoolNEComputeCores := &models.System{ + Cores: func(f float64) *float64 { return &f }(2), + ID: 1, + Memory: func(i int64) *int64 { return &i }(256), + } + systemPoolsNEComputeCores := models.SystemPools{ + "NotEnoughComputeCores": models.SystemPool{ + Capacity: systemPoolNEComputeCores, + CoreMemoryRatio: float64(1.0), + 
MaxAvailable: systemPoolNEComputeCores, + MaxCoresAvailable: systemPoolNEComputeCores, + MaxMemoryAvailable: systemPoolNEComputeCores, + SharedCoreRatio: &models.MinMaxDefault{ + Default: func(f float64) *float64 { return &f }(4), + Max: func(f float64) *float64 { return &f }(4), + Min: func(f float64) *float64 { return &f }(1), + }, + Systems: []*models.System{ + systemPoolNEComputeCores, + }, + Type: "e980", + }, + } + systemPoolNEWorkerCores := &models.System{ + Cores: func(f float64) *float64 { return &f }(6), + ID: 1, + Memory: func(i int64) *int64 { return &i }(256), + } + systemPoolsNEWorkerCores := models.SystemPools{ + "NotEnoughWorkerCores": models.SystemPool{ + Capacity: systemPoolNEWorkerCores, + CoreMemoryRatio: float64(1.0), + MaxAvailable: systemPoolNEWorkerCores, + MaxCoresAvailable: systemPoolNEWorkerCores, + MaxMemoryAvailable: systemPoolNEWorkerCores, + SharedCoreRatio: &models.MinMaxDefault{ + Default: func(f float64) *float64 { return &f }(4), + Max: func(f float64) *float64 { return &f }(4), + Min: func(f float64) *float64 { return &f }(1), + }, + Systems: []*models.System{ + systemPoolNEWorkerCores, + }, + Type: "e980", + }, + } + systemPoolNEComputeMemory := &models.System{ + Cores: func(f float64) *float64 { return &f }(8), + ID: 1, + Memory: func(i int64) *int64 { return &i }(32), + } + systemPoolsNEComputeMemory := models.SystemPools{ + "NotEnoughComputeMemory": models.SystemPool{ + Capacity: systemPoolNEComputeMemory, + CoreMemoryRatio: float64(1.0), + MaxAvailable: systemPoolNEComputeMemory, + MaxCoresAvailable: systemPoolNEComputeMemory, + MaxMemoryAvailable: systemPoolNEComputeMemory, + SharedCoreRatio: &models.MinMaxDefault{ + Default: func(f float64) *float64 { return &f }(4), + Max: func(f float64) *float64 { return &f }(4), + Min: func(f float64) *float64 { return &f }(1), + }, + Systems: []*models.System{ + systemPoolNEComputeMemory, + }, + Type: "e980", + }, + } + systemPoolNEWorkerMemory := &models.System{ + Cores: func(f 
float64) *float64 { return &f }(8), + ID: 1, + Memory: func(i int64) *int64 { return &i }(192), + } + systemPoolsNEWorkerMemory := models.SystemPools{ + "NotEnoughWorkerMemory": models.SystemPool{ + Capacity: systemPoolNEWorkerMemory, + CoreMemoryRatio: float64(1.0), + MaxAvailable: systemPoolNEWorkerMemory, + MaxCoresAvailable: systemPoolNEWorkerMemory, + MaxMemoryAvailable: systemPoolNEWorkerMemory, + SharedCoreRatio: &models.MinMaxDefault{ + Default: func(f float64) *float64 { return &f }(4), + Max: func(f float64) *float64 { return &f }(4), + Min: func(f float64) *float64 { return &f }(1), + }, + Systems: []*models.System{ + systemPoolNEWorkerMemory, + }, + Type: "e980", + }, + } + systemPoolGood := &models.System{ + Cores: func(f float64) *float64 { return &f }(8), + ID: 1, + Memory: func(i int64) *int64 { return &i }(256), + } + systemPoolsGood := models.SystemPools{ + "Enough": models.SystemPool{ + Capacity: systemPoolGood, + CoreMemoryRatio: float64(1.0), + MaxAvailable: systemPoolGood, + MaxCoresAvailable: systemPoolGood, + MaxMemoryAvailable: systemPoolGood, + SharedCoreRatio: &models.MinMaxDefault{ + Default: func(f float64) *float64 { return &f }(4), + Max: func(f float64) *float64 { return &f }(4), + Min: func(f float64) *float64 { return &f }(1), + }, + Systems: []*models.System{ + systemPoolGood, + }, + Type: "e980", + }, + } + + err := powervs.ValidateCapacityWithPools(dedicatedControlPlanes, dedicatedComputes, systemPoolsNEComputeCores) + assert.EqualError(t, err, "Not enough cores available (2) for the compute nodes (need 5)") + + err = powervs.ValidateCapacityWithPools(dedicatedControlPlanes, dedicatedComputes, systemPoolsNEWorkerCores) + assert.EqualError(t, err, "Not enough cores available (1) for the worker nodes (need 3)") + + err = powervs.ValidateCapacityWithPools(dedicatedControlPlanes, dedicatedComputes, systemPoolsNEComputeMemory) + assert.EqualError(t, err, "Not enough memory available (32) for the compute nodes (need 160)") + + err = 
powervs.ValidateCapacityWithPools(dedicatedControlPlanes, dedicatedComputes, systemPoolsNEWorkerMemory) + assert.EqualError(t, err, "Not enough memory available (32) for the worker nodes (need 96)") + + err = powervs.ValidateCapacityWithPools(dedicatedControlPlanes, dedicatedComputes, systemPoolsGood) + assert.Empty(t, err) +} + +func TestValidatePERAvailability(t *testing.T) { + cases := []struct { + name string + edits editFunctions + errorMsg string + }{ + { + name: "Region without PER", + edits: editFunctions{ + func(ic *types.InstallConfig) { + ic.Platform.PowerVS.Zone = regionWithoutPER + }, + }, + errorMsg: fmt.Sprintf("power-edge-router is not available at: %s", regionWithoutPER), + }, + { + name: "Region with PER", + edits: editFunctions{ + func(ic *types.InstallConfig) { + ic.Platform.PowerVS.Zone = regionWithPER + ic.Platform.PowerVS.ServiceInstanceID = validServiceInstanceID + }, + }, + errorMsg: "", + }, + { + name: "Region with no PER availability info", + edits: editFunctions{ + func(ic *types.InstallConfig) { + ic.Platform.PowerVS.Zone = regionPERUnknown + }, + }, + errorMsg: fmt.Sprintf("power-edge-router capability unknown at: %s", regionPERUnknown), + }, + { + name: "Region with PER, but with invalid Workspace ID", + edits: editFunctions{ + func(ic *types.InstallConfig) { + ic.Platform.PowerVS.Zone = regionWithPER + ic.Platform.PowerVS.ServiceInstanceID = invalidServiceInstanceID + }, + }, + errorMsg: fmt.Sprintf("power-edge-router is not available in workspace: %s", invalidServiceInstanceID), + }, + } + setMockEnvVars() + + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + + powervsClient := mock.NewMockAPI(mockCtrl) + + // Mocks: PER-absent region results in false + powervsClient.EXPECT().GetDatacenterCapabilities(gomock.Any(), regionWithoutPER).Return(mapWithPERFalse, nil) + + // Mocks: PER-enabled region results in true + powervsClient.EXPECT().GetDatacenterCapabilities(gomock.Any(), regionWithPER).Return(mapWithPERTrue, nil) + 
powervsClient.EXPECT().GetWorkspaceCapabilities(gomock.Any(), validServiceInstanceID).Return(mapWithPERTrue, nil) + + // Mocks: PER-unknown region results in false + powervsClient.EXPECT().GetDatacenterCapabilities(gomock.Any(), regionPERUnknown).Return(mapPERUnknown, nil) + + // Mocks: PER-enabled region, but bogus Service Instance results in false + powervsClient.EXPECT().GetDatacenterCapabilities(gomock.Any(), regionWithPER).Return(mapWithPERTrue, nil) + powervsClient.EXPECT().GetWorkspaceCapabilities(gomock.Any(), invalidServiceInstanceID).Return(mapWithPERFalse, nil) + + // Run tests + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + editedInstallConfig := validInstallConfig() + for _, edit := range tc.edits { + edit(editedInstallConfig) + } + + aggregatedErrors := powervs.ValidatePERAvailability(powervsClient, editedInstallConfig) + if tc.errorMsg != "" { + assert.Regexp(t, tc.errorMsg, aggregatedErrors) + } else { + assert.NoError(t, aggregatedErrors) + } + }) + } +} + +func TestValidateSystemTypeForZone(t *testing.T) { + cases := []struct { + name string + edits editFunctions + errorMsg string + }{ + { + name: "Unknown Zone specified", + edits: editFunctions{ + func(ic *types.InstallConfig) { + ic.Platform.PowerVS.Zone = invalidZone + ic.ControlPlane.Platform.PowerVS = validMachinePool() + ic.ControlPlane.Platform.PowerVS.SysType = defaultSysType + }, + }, + errorMsg: fmt.Sprintf("failed to obtain available SysTypes for: %s", invalidZone), + }, + { + name: "No Platform block", + edits: editFunctions{ + func(ic *types.InstallConfig) { + ic.ControlPlane.Platform.PowerVS = nil + }, + }, + errorMsg: "", + }, + { + name: "Structure present, but no SysType specified", + edits: editFunctions{ + func(ic *types.InstallConfig) { + ic.ControlPlane.Platform.PowerVS = validMachinePool() + }, + }, + errorMsg: "", + }, + { + name: "Unavailable SysType specified for dal12 zone", + edits: editFunctions{ + func(ic 
*types.InstallConfig) { + ic.Platform.PowerVS.Region = validRegion + ic.Platform.PowerVS.Zone = validZone + ic.ControlPlane.Platform.PowerVS = validMachinePool() + ic.ControlPlane.Platform.PowerVS.SysType = newSysType + }, + }, + errorMsg: fmt.Sprintf("%s is not available in: %s", newSysType, validZone), + }, + { + name: "Good Zone/SysType combo specified", + edits: editFunctions{ + func(ic *types.InstallConfig) { + ic.Platform.PowerVS.Region = validRegion + ic.Platform.PowerVS.Zone = validZone + ic.ControlPlane.Platform.PowerVS = validMachinePool() + ic.ControlPlane.Platform.PowerVS.SysType = defaultSysType + }, + }, + errorMsg: "", + }, + } + setMockEnvVars() + + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + + powervsClient := mock.NewMockAPI(mockCtrl) + + // Run tests + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + editedInstallConfig := validInstallConfig() + for _, edit := range tc.edits { + edit(editedInstallConfig) + } + + aggregatedErrors := powervs.ValidateSystemTypeForZone(powervsClient, editedInstallConfig) + if tc.errorMsg != "" { + assert.Regexp(t, tc.errorMsg, aggregatedErrors) + } else { + assert.NoError(t, aggregatedErrors) + } + }) + } +} + +func TestValidateServiceInstance(t *testing.T) { + cases := []struct { + name string + edits editFunctions + errorMsg string + }{ + { + name: "valid install config", + edits: editFunctions{}, + errorMsg: "", + }, + { + name: "invalid install config", + edits: editFunctions{ + func(ic *types.InstallConfig) { + ic.Platform.PowerVS.ServiceInstanceGUID = "invalid-uuid" + }, + }, + errorMsg: "platform:powervs:serviceInstanceGUID has an invalid guid", + }, + } + setMockEnvVars() + + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + + powervsClient := mock.NewMockAPI(mockCtrl) + + // FIX: Unexpected call to *mock.MockAPI.ListServiceInstances([context.TODO.WithDeadline(2023-12-02 08:38:15.542340268 -0600 CST m=+300.012357408 [4m59.999979046s])]) at validation.go:289 
because: there are no expected calls of the method "ListServiceInstances" for that receiver + powervsClient.EXPECT().ListServiceInstances(gomock.Any()).AnyTimes() + + // Run tests + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + editedInstallConfig := validInstallConfig() + for _, edit := range tc.edits { + edit(editedInstallConfig) + } + + aggregatedErrors := powervs.ValidateServiceInstance(powervsClient, editedInstallConfig) + if tc.errorMsg != "" { + assert.Regexp(t, tc.errorMsg, aggregatedErrors) + } else { + assert.NoError(t, aggregatedErrors) + } + }) + } +} + +func setMockEnvVars() { + os.Setenv("POWERVS_AUTH_FILEPATH", "./tmp/powervs/config.json") + os.Setenv("IBMID", "foo") + os.Setenv("IC_API_KEY", "foo") + os.Setenv("IBMCLOUD_REGION", "foo") + os.Setenv("IBMCLOUD_ZONE", "foo") +} diff --git a/.history/pkg/asset/installconfig/powervs/validation_test_20241219121335.go b/.history/pkg/asset/installconfig/powervs/validation_test_20241219121335.go new file mode 100644 index 00000000000..6d5563f0535 --- /dev/null +++ b/.history/pkg/asset/installconfig/powervs/validation_test_20241219121335.go @@ -0,0 +1,881 @@ +package powervs_test + +import ( + "fmt" + "os" + "testing" + + "github.com/IBM-Cloud/power-go-client/power/models" + "github.com/IBM/vpc-go-sdk/vpcv1" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/intstr" + + machinev1 "github.com/openshift/api/machine/v1" + machinev1beta1 "github.com/openshift/api/machine/v1beta1" + "github.com/openshift/installer/pkg/asset/installconfig/powervs" + "github.com/openshift/installer/pkg/asset/installconfig/powervs/mock" + "github.com/openshift/installer/pkg/ipnet" + "github.com/openshift/installer/pkg/types" + powervstypes "github.com/openshift/installer/pkg/types/powervs" +) + +type editFunctions 
[]func(ic *types.InstallConfig) + +var ( + validRegion = "dal" + validCIDR = "192.168.0.0/24" + validCISInstanceCRN = "crn:v1:bluemix:public:internet-svcs:global:a/valid-account-id:valid-instance-id::" + validClusterName = "valid-cluster-name" + validDNSZoneID = "valid-zone-id" + validBaseDomain = "valid.base.domain" + validPowerVSResourceGroup = "valid-resource-group" + validPublicSubnetUSSouth1ID = "public-subnet-us-south-1-id" + validPublicSubnetUSSouth2ID = "public-subnet-us-south-2-id" + validPrivateSubnetUSSouth1ID = "private-subnet-us-south-1-id" + validPrivateSubnetUSSouth2ID = "private-subnet-us-south-2-id" + validServiceInstanceID = "aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee" + invalidServiceInstanceID = "bogus-service-instance-id" + validSubnets = []string{ + validPublicSubnetUSSouth1ID, + validPublicSubnetUSSouth2ID, + validPrivateSubnetUSSouth1ID, + validPrivateSubnetUSSouth2ID, + } + validUserID = "valid-user@example.com" + validZone = "dal12" + + existingDNSRecordsResponse = []powervs.DNSRecordResponse{ + { + Name: "valid-dns-record-name-1", + Type: "valid-dns-record-type", + }, + { + Name: "valid-dns-record-name-2", + Type: "valid-dns-record-type", + }, + } + noDNSRecordsResponse = []powervs.DNSRecordResponse{} + invalidArchitecture = func(ic *types.InstallConfig) { ic.ControlPlane.Architecture = "ppc64" } + cidrInvalid, _ = ipnet.ParseCIDR("192.168.0.0/16") + invalidMachinePoolCIDR = func(ic *types.InstallConfig) { ic.Networking.MachineNetwork[0].CIDR = *cidrInvalid } + cidrValid, _ = ipnet.ParseCIDR("192.168.0.0/24") + validMachinePoolCIDR = func(ic *types.InstallConfig) { ic.Networking.MachineNetwork[0].CIDR = *cidrValid } + validVPCRegion = "us-south" + invalidVPCRegion = "foo-bah" + setValidVPCRegion = func(ic *types.InstallConfig) { ic.Platform.PowerVS.VPCRegion = validVPCRegion } + validRG = "valid-resource-group" + anotherValidRG = "another-valid-resource-group" + validVPCID = "valid-id" + anotherValidVPCID = "another-valid-id" + validVPC = 
"valid-vpc" + setValidVPCName = func(ic *types.InstallConfig) { ic.Platform.PowerVS.VPCName = validVPC } + anotherValidVPC = "another-valid-vpc" + invalidVPC = "bogus-vpc" + validVPCs = []vpcv1.VPC{ + { + Name: &validVPC, + ID: &validVPCID, + ResourceGroup: &vpcv1.ResourceGroupReference{ + Name: &validRG, + ID: &validRG, + }, + }, + { + Name: &anotherValidVPC, + ID: &anotherValidVPCID, + ResourceGroup: &vpcv1.ResourceGroupReference{ + Name: &anotherValidRG, + ID: &anotherValidRG, + }, + }, + } + validVPCSubnet = "valid-vpc-subnet" + invalidVPCSubnet = "invalid-vpc-subnet" + wrongVPCSubnet = "wrong-vpc-subnet" + validSubnet = &vpcv1.Subnet{ + Name: &validRG, + VPC: &vpcv1.VPCReference{ + Name: &validVPC, + ID: &validVPCID, + }, + ResourceGroup: &vpcv1.ResourceGroupReference{ + Name: &validRG, + ID: &validRG, + }, + } + wrongSubnet = &vpcv1.Subnet{ + Name: &validRG, + VPC: &vpcv1.VPCReference{ + Name: &anotherValidVPC, + ID: &anotherValidVPCID, + }, + ResourceGroup: &vpcv1.ResourceGroupReference{ + Name: &validRG, + ID: &validRG, + }, + } + regionWithPER = "dal10" + regionWithoutPER = "foo99" + regionPERUnknown = "bah77" + mapWithPERFalse = map[string]bool{ + "disaster-recover-site": true, + "power-edge-router": false, + "vpn-connections": true, + } + mapWithPERTrue = map[string]bool{ + "disaster-recover-site": true, + "power-edge-router": true, + "vpn-connections": true, + } + mapPERUnknown = map[string]bool{ + "disaster-recover-site": true, + "power-vpn-connections": false, + } + defaultSysType = "s922" + newSysType = "s1022" + invalidZone = "dal11" + validServiceInstanceGUID = "" +) + +func validInstallConfig() *types.InstallConfig { + return &types.InstallConfig{ + ObjectMeta: metav1.ObjectMeta{ + Name: validClusterName, + }, + BaseDomain: validBaseDomain, + Networking: &types.Networking{ + MachineNetwork: []types.MachineNetworkEntry{ + {CIDR: *ipnet.MustParseCIDR(validCIDR)}, + }, + }, + Publish: types.ExternalPublishingStrategy, + Platform: types.Platform{ + 
PowerVS: validMinimalPlatform(), + }, + ControlPlane: &types.MachinePool{ + Architecture: "ppc64le", + }, + Compute: []types.MachinePool{{ + Architecture: "ppc64le", + }}, + } +} + +func validMinimalPlatform() *powervstypes.Platform { + return &powervstypes.Platform{ + PowerVSResourceGroup: validPowerVSResourceGroup, + Region: validRegion, + ServiceInstanceID: validServiceInstanceID, + UserID: validUserID, + Zone: validZone, + } +} + +func validMachinePool() *powervstypes.MachinePool { + return &powervstypes.MachinePool{} +} + +func TestValidate(t *testing.T) { + cases := []struct { + name string + edits editFunctions + errorMsg string + }{ + { + name: "valid install config", + edits: editFunctions{}, + errorMsg: "", + }, + { + name: "invalid architecture", + edits: editFunctions{invalidArchitecture}, + errorMsg: `^controlPlane.architecture\: Unsupported value\: \"ppc64\"\: supported values: \"ppc64le\"`, + }, + { + name: "invalid machine pool CIDR", + edits: editFunctions{invalidMachinePoolCIDR}, + errorMsg: `Networking.MachineNetwork.CIDR: Invalid value: "192.168.0.0/16": Machine Pool CIDR must be /24.`, + }, + { + name: "valid machine pool CIDR", + edits: editFunctions{validMachinePoolCIDR}, + errorMsg: "", + }, + } + + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + editedInstallConfig := validInstallConfig() + for _, edit := range tc.edits { + edit(editedInstallConfig) + } + + aggregatedErrors := powervs.Validate(editedInstallConfig) + if tc.errorMsg != "" { + assert.Regexp(t, tc.errorMsg, aggregatedErrors) + } else { + assert.NoError(t, aggregatedErrors) + } + }) + } +} + +func TestValidatePreExistingPublicDNS(t *testing.T) { + cases := []struct { + name string + edits editFunctions + errorMsg string + }{ + { + name: "no pre-existing DNS records", + errorMsg: "", + }, + { + name: "pre-existing DNS records", + errorMsg: `^\[baseDomain\: Duplicate value\: \"record 
api\.valid-cluster-name\.valid\.base\.domain already exists in CIS zone \(valid-zone-id\) and might be in use by another cluster, please remove it to continue\", baseDomain\: Duplicate value\: \"record api-int\.valid-cluster-name\.valid\.base\.domain already exists in CIS zone \(valid-zone-id\) and might be in use by another cluster, please remove it to continue\"\]$`, + }, + { + name: "cannot get zone ID", + errorMsg: `^baseDomain: Internal error$`, + }, + { + name: "cannot get DNS records", + errorMsg: `^baseDomain: Internal error$`, + }, + } + setMockEnvVars() + + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + + powervsClient := mock.NewMockAPI(mockCtrl) + metadata := mock.NewMockMetadataAPI(mockCtrl) + + dnsRecordNames := [...]string{fmt.Sprintf("api.%s.%s", validClusterName, validBaseDomain), fmt.Sprintf("api-int.%s.%s", validClusterName, validBaseDomain)} + + // Mock common to all tests + metadata.EXPECT().CISInstanceCRN(gomock.Any()).Return(validCISInstanceCRN, nil).AnyTimes() + + // Mocks: no pre-existing DNS records + powervsClient.EXPECT().GetDNSZoneIDByName(gomock.Any(), validBaseDomain, types.ExternalPublishingStrategy).Return(validDNSZoneID, nil) + for _, dnsRecordName := range dnsRecordNames { + powervsClient.EXPECT().GetDNSRecordsByName(gomock.Any(), validCISInstanceCRN, validDNSZoneID, dnsRecordName, types.ExternalPublishingStrategy).Return(noDNSRecordsResponse, nil) + } + + // Mocks: pre-existing DNS records + powervsClient.EXPECT().GetDNSZoneIDByName(gomock.Any(), validBaseDomain, types.ExternalPublishingStrategy).Return(validDNSZoneID, nil) + for _, dnsRecordName := range dnsRecordNames { + powervsClient.EXPECT().GetDNSRecordsByName(gomock.Any(), validCISInstanceCRN, validDNSZoneID, dnsRecordName, types.ExternalPublishingStrategy).Return(existingDNSRecordsResponse, nil) + } + + // Mocks: cannot get zone ID + powervsClient.EXPECT().GetDNSZoneIDByName(gomock.Any(), validBaseDomain, types.ExternalPublishingStrategy).Return("", 
fmt.Errorf("")) + + // Mocks: cannot get DNS records + powervsClient.EXPECT().GetDNSZoneIDByName(gomock.Any(), validBaseDomain, types.ExternalPublishingStrategy).Return(validDNSZoneID, nil) + for _, dnsRecordName := range dnsRecordNames { + powervsClient.EXPECT().GetDNSRecordsByName(gomock.Any(), validCISInstanceCRN, validDNSZoneID, dnsRecordName, types.ExternalPublishingStrategy).Return(nil, fmt.Errorf("")) + } + + // Run tests + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + aggregatedErrors := powervs.ValidatePreExistingDNS(powervsClient, validInstallConfig(), metadata) + if tc.errorMsg != "" { + assert.Regexp(t, tc.errorMsg, aggregatedErrors) + } else { + assert.NoError(t, aggregatedErrors) + } + }) + } +} + +func TestValidateCustomVPCSettings(t *testing.T) { + cases := []struct { + name string + edits editFunctions + errorMsg string + }{ + { + name: "invalid VPC region supplied alone", + edits: editFunctions{ + func(ic *types.InstallConfig) { + ic.Platform.PowerVS.VPCRegion = invalidVPCRegion + }, + }, + errorMsg: fmt.Sprintf(`VPC.vpcRegion: Not found: "%s"`, invalidVPCRegion), + }, + { + name: "valid VPC region supplied alone", + edits: editFunctions{ + func(ic *types.InstallConfig) { + ic.Platform.PowerVS.VPCRegion = validVPCRegion + }, + }, + errorMsg: "", + }, + { + name: "invalid VPC name supplied, without VPC region, not found near PowerVS region", + edits: editFunctions{ + func(ic *types.InstallConfig) { + ic.Platform.PowerVS.VPCName = invalidVPC + }, + }, + errorMsg: fmt.Sprintf(`VPC.vpcName: Not found: "%s"`, invalidVPC), + }, + { + name: "valid VPC name supplied, without VPC region, but found close to PowerVS region", + edits: editFunctions{ + setValidVPCName, + }, + errorMsg: "", + }, + { + name: "valid VPC name, with invalid VPC region", + edits: editFunctions{ + setValidVPCName, + func(ic *types.InstallConfig) { + ic.Platform.PowerVS.VPCRegion = invalidVPCRegion + }, + }, + errorMsg: "VPC.vpcRegion: Internal error: unknown 
region", + }, + { + name: "valid VPC name, valid VPC region", + edits: editFunctions{ + setValidVPCName, + setValidVPCRegion, + }, + errorMsg: "", + }, + { + name: "VPC subnet supplied, without vpcName", + edits: editFunctions{ + func(ic *types.InstallConfig) { + ic.Platform.PowerVS.VPCSubnets = []string{validVPCSubnet} + }, + }, + errorMsg: `VPC.vpcSubnets: Invalid value: "null": invalid without vpcName`, + }, + { + name: "VPC found, but not subnet", + edits: editFunctions{ + setValidVPCName, + func(ic *types.InstallConfig) { + ic.Platform.PowerVS.VPCSubnets = []string{invalidVPCSubnet} + }, + }, + errorMsg: "VPC.vpcSubnets: Internal error", + }, + { + name: "VPC found, subnet found as well, but not attached to the VPC", + edits: editFunctions{ + setValidVPCName, + func(ic *types.InstallConfig) { + ic.Platform.PowerVS.VPCSubnets = []string{wrongVPCSubnet} + }, + }, + errorMsg: `VPC.vpcSubnets: Invalid value: "null": not attached to VPC`, + }, + { + name: "region specified, VPC found, subnet found, and properly attached", + edits: editFunctions{ + setValidVPCName, + setValidVPCRegion, + func(ic *types.InstallConfig) { + ic.Platform.PowerVS.VPCSubnets = []string{validVPCSubnet} + }, + }, + errorMsg: "", + }, + } + setMockEnvVars() + + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + + powervsClient := mock.NewMockAPI(mockCtrl) + + // Mocks: invalid VPC region only + // nothing to mock + + // Mocks: valid VPC region only + // nothing to mock + + // Mocks: invalid VPC name results in error + powervsClient.EXPECT().GetVPCs(gomock.Any(), validVPCRegion).Return(validVPCs, nil) + + // Mocks: valid VPC name only, no issues + powervsClient.EXPECT().GetVPCs(gomock.Any(), validVPCRegion).Return(validVPCs, nil) + + // Mocks: valid VPC name, invalid VPC region + powervsClient.EXPECT().GetVPCs(gomock.Any(), invalidVPCRegion).Return(nil, fmt.Errorf("unknown region")) + + // Mocks: valid VPC name, valid VPC region, all good + 
powervsClient.EXPECT().GetVPCs(gomock.Any(), validVPCRegion).Return(validVPCs, nil) + + // Mocks: subnet specified, without vpcName, invalid + // nothing to mock + + // Mocks: valid VPC name, but Subnet not found + powervsClient.EXPECT().GetVPCs(gomock.Any(), validVPCRegion).Return(validVPCs, nil) + powervsClient.EXPECT().GetSubnetByName(gomock.Any(), invalidVPCSubnet, validVPCRegion).Return(nil, fmt.Errorf("")) + + // Mocks: valid VPC name, but wrong Subnet (present, but not attached) + powervsClient.EXPECT().GetVPCs(gomock.Any(), validVPCRegion).Return(validVPCs, nil) + powervsClient.EXPECT().GetSubnetByName(gomock.Any(), wrongVPCSubnet, validVPCRegion).Return(wrongSubnet, nil) + + // Mocks: region specified, valid VPC, valid region, valid Subnet, all good + powervsClient.EXPECT().GetVPCs(gomock.Any(), validVPCRegion).Return(validVPCs, nil) + powervsClient.EXPECT().GetSubnetByName(gomock.Any(), validVPCSubnet, validVPCRegion).Return(validSubnet, nil) + + // Run tests + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + editedInstallConfig := validInstallConfig() + for _, edit := range tc.edits { + edit(editedInstallConfig) + } + + aggregatedErrors := powervs.ValidateCustomVPCSetup(powervsClient, editedInstallConfig) + if tc.errorMsg != "" { + assert.Regexp(t, tc.errorMsg, aggregatedErrors) + } else { + assert.NoError(t, aggregatedErrors) + } + }) + } +} + +func createControlPlanes(numControlPlanes int, controlPlane *machinev1.PowerVSMachineProviderConfig) []machinev1beta1.Machine { + controlPlanes := make([]machinev1beta1.Machine, numControlPlanes) + + for i := range controlPlanes { + masterName := fmt.Sprintf("rdr-hamzy-test3-syd04-zwmgs-master-%d", i) + controlPlanes[i].TypeMeta = metav1.TypeMeta{ + Kind: "Machine", + APIVersion: "machine.openshift.io/v1beta1", + } + controlPlanes[i].ObjectMeta = metav1.ObjectMeta{ + Name: masterName, + Namespace: "openshift-machine-api", + Labels: make(map[string]string), + } + 
controlPlanes[i].Labels["machine.openshift.io/cluster-api-cluster"] = "rdr-hamzy-test3-syd04-zwmgs" + controlPlanes[i].Labels["machine.openshift.io/cluster-api-machine-role"] = "master" + controlPlanes[i].Labels["machine.openshift.io/cluster-api-machine-type"] = "master" + + controlPlanes[i].Spec.ProviderSpec = machinev1beta1.ProviderSpec{ + Value: &runtime.RawExtension{ + Raw: nil, + Object: controlPlane, + }, + } + } + + return controlPlanes +} + +func createComputes(numComputes int32, compute *machinev1.PowerVSMachineProviderConfig) []machinev1beta1.MachineSet { + computes := make([]machinev1beta1.MachineSet, 1) + + computes[0].Spec.Replicas = &numComputes + + computes[0].Spec.Template.Spec.ProviderSpec = machinev1beta1.ProviderSpec{ + Value: &runtime.RawExtension{ + Raw: nil, + Object: compute, + }, + } + + return computes +} + +func TestSystemPool(t *testing.T) { + setMockEnvVars() + + dedicatedControlPlane := machinev1.PowerVSMachineProviderConfig{ + TypeMeta: metav1.TypeMeta{Kind: "PowerVSMachineProviderConfig", APIVersion: "machine.openshift.io/v1"}, + KeyPairName: "rdr-hamzy-test3-syd04-vcwtz-key", + SystemType: "e980", + ProcessorType: "Dedicated", + Processors: intstr.IntOrString{Type: intstr.Int, IntVal: 1}, + MemoryGiB: 32, + } + + dedicatedControlPlanes := createControlPlanes(5, &dedicatedControlPlane) + + dedicatedCompute := machinev1.PowerVSMachineProviderConfig{ + TypeMeta: metav1.TypeMeta{Kind: "PowerVSMachineProviderConfig", APIVersion: "machine.openshift.io/v1"}, + KeyPairName: "rdr-hamzy-test3-syd04-vcwtz-key", + SystemType: "e980", + ProcessorType: "Dedicated", + Processors: intstr.IntOrString{Type: intstr.Int, IntVal: 1}, + MemoryGiB: 32, + } + + dedicatedComputes := createComputes(3, &dedicatedCompute) + + systemPoolNEComputeCores := &models.System{ + Cores: func(f float64) *float64 { return &f }(2), + ID: 1, + Memory: func(i int64) *int64 { return &i }(256), + } + systemPoolsNEComputeCores := models.SystemPools{ + "NotEnoughComputeCores": 
models.SystemPool{ + Capacity: systemPoolNEComputeCores, + CoreMemoryRatio: float64(1.0), + MaxAvailable: systemPoolNEComputeCores, + MaxCoresAvailable: systemPoolNEComputeCores, + MaxMemoryAvailable: systemPoolNEComputeCores, + SharedCoreRatio: &models.MinMaxDefault{ + Default: func(f float64) *float64 { return &f }(4), + Max: func(f float64) *float64 { return &f }(4), + Min: func(f float64) *float64 { return &f }(1), + }, + Systems: []*models.System{ + systemPoolNEComputeCores, + }, + Type: "e980", + }, + } + systemPoolNEWorkerCores := &models.System{ + Cores: func(f float64) *float64 { return &f }(6), + ID: 1, + Memory: func(i int64) *int64 { return &i }(256), + } + systemPoolsNEWorkerCores := models.SystemPools{ + "NotEnoughWorkerCores": models.SystemPool{ + Capacity: systemPoolNEWorkerCores, + CoreMemoryRatio: float64(1.0), + MaxAvailable: systemPoolNEWorkerCores, + MaxCoresAvailable: systemPoolNEWorkerCores, + MaxMemoryAvailable: systemPoolNEWorkerCores, + SharedCoreRatio: &models.MinMaxDefault{ + Default: func(f float64) *float64 { return &f }(4), + Max: func(f float64) *float64 { return &f }(4), + Min: func(f float64) *float64 { return &f }(1), + }, + Systems: []*models.System{ + systemPoolNEWorkerCores, + }, + Type: "e980", + }, + } + systemPoolNEComputeMemory := &models.System{ + Cores: func(f float64) *float64 { return &f }(8), + ID: 1, + Memory: func(i int64) *int64 { return &i }(32), + } + systemPoolsNEComputeMemory := models.SystemPools{ + "NotEnoughComputeMemory": models.SystemPool{ + Capacity: systemPoolNEComputeMemory, + CoreMemoryRatio: float64(1.0), + MaxAvailable: systemPoolNEComputeMemory, + MaxCoresAvailable: systemPoolNEComputeMemory, + MaxMemoryAvailable: systemPoolNEComputeMemory, + SharedCoreRatio: &models.MinMaxDefault{ + Default: func(f float64) *float64 { return &f }(4), + Max: func(f float64) *float64 { return &f }(4), + Min: func(f float64) *float64 { return &f }(1), + }, + Systems: []*models.System{ + systemPoolNEComputeMemory, + }, 
+ Type: "e980", + }, + } + systemPoolNEWorkerMemory := &models.System{ + Cores: func(f float64) *float64 { return &f }(8), + ID: 1, + Memory: func(i int64) *int64 { return &i }(192), + } + systemPoolsNEWorkerMemory := models.SystemPools{ + "NotEnoughWorkerMemory": models.SystemPool{ + Capacity: systemPoolNEWorkerMemory, + CoreMemoryRatio: float64(1.0), + MaxAvailable: systemPoolNEWorkerMemory, + MaxCoresAvailable: systemPoolNEWorkerMemory, + MaxMemoryAvailable: systemPoolNEWorkerMemory, + SharedCoreRatio: &models.MinMaxDefault{ + Default: func(f float64) *float64 { return &f }(4), + Max: func(f float64) *float64 { return &f }(4), + Min: func(f float64) *float64 { return &f }(1), + }, + Systems: []*models.System{ + systemPoolNEWorkerMemory, + }, + Type: "e980", + }, + } + systemPoolGood := &models.System{ + Cores: func(f float64) *float64 { return &f }(8), + ID: 1, + Memory: func(i int64) *int64 { return &i }(256), + } + systemPoolsGood := models.SystemPools{ + "Enough": models.SystemPool{ + Capacity: systemPoolGood, + CoreMemoryRatio: float64(1.0), + MaxAvailable: systemPoolGood, + MaxCoresAvailable: systemPoolGood, + MaxMemoryAvailable: systemPoolGood, + SharedCoreRatio: &models.MinMaxDefault{ + Default: func(f float64) *float64 { return &f }(4), + Max: func(f float64) *float64 { return &f }(4), + Min: func(f float64) *float64 { return &f }(1), + }, + Systems: []*models.System{ + systemPoolGood, + }, + Type: "e980", + }, + } + + err := powervs.ValidateCapacityWithPools(dedicatedControlPlanes, dedicatedComputes, systemPoolsNEComputeCores) + assert.EqualError(t, err, "Not enough cores available (2) for the compute nodes (need 5)") + + err = powervs.ValidateCapacityWithPools(dedicatedControlPlanes, dedicatedComputes, systemPoolsNEWorkerCores) + assert.EqualError(t, err, "Not enough cores available (1) for the worker nodes (need 3)") + + err = powervs.ValidateCapacityWithPools(dedicatedControlPlanes, dedicatedComputes, systemPoolsNEComputeMemory) + 
assert.EqualError(t, err, "Not enough memory available (32) for the compute nodes (need 160)") + + err = powervs.ValidateCapacityWithPools(dedicatedControlPlanes, dedicatedComputes, systemPoolsNEWorkerMemory) + assert.EqualError(t, err, "Not enough memory available (32) for the worker nodes (need 96)") + + err = powervs.ValidateCapacityWithPools(dedicatedControlPlanes, dedicatedComputes, systemPoolsGood) + assert.Empty(t, err) +} + +func TestValidatePERAvailability(t *testing.T) { + cases := []struct { + name string + edits editFunctions + errorMsg string + }{ + { + name: "Region without PER", + edits: editFunctions{ + func(ic *types.InstallConfig) { + ic.Platform.PowerVS.Zone = regionWithoutPER + }, + }, + errorMsg: fmt.Sprintf("power-edge-router is not available at: %s", regionWithoutPER), + }, + { + name: "Region with PER", + edits: editFunctions{ + func(ic *types.InstallConfig) { + ic.Platform.PowerVS.Zone = regionWithPER + ic.Platform.PowerVS.ServiceInstanceID = validServiceInstanceID + }, + }, + errorMsg: "", + }, + { + name: "Region with no PER availability info", + edits: editFunctions{ + func(ic *types.InstallConfig) { + ic.Platform.PowerVS.Zone = regionPERUnknown + }, + }, + errorMsg: fmt.Sprintf("power-edge-router capability unknown at: %s", regionPERUnknown), + }, + { + name: "Region with PER, but with invalid Workspace ID", + edits: editFunctions{ + func(ic *types.InstallConfig) { + ic.Platform.PowerVS.Zone = regionWithPER + ic.Platform.PowerVS.ServiceInstanceID = invalidServiceInstanceID + }, + }, + errorMsg: fmt.Sprintf("power-edge-router is not available in workspace: %s", invalidServiceInstanceID), + }, + } + setMockEnvVars() + + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + + powervsClient := mock.NewMockAPI(mockCtrl) + + // Mocks: PER-absent region results in false + powervsClient.EXPECT().GetDatacenterCapabilities(gomock.Any(), regionWithoutPER).Return(mapWithPERFalse, nil) + + // Mocks: PER-enabled region results in true + 
powervsClient.EXPECT().GetDatacenterCapabilities(gomock.Any(), regionWithPER).Return(mapWithPERTrue, nil) + powervsClient.EXPECT().GetWorkspaceCapabilities(gomock.Any(), validServiceInstanceID).Return(mapWithPERTrue, nil) + + // Mocks: PER-unknown region results in false + powervsClient.EXPECT().GetDatacenterCapabilities(gomock.Any(), regionPERUnknown).Return(mapPERUnknown, nil) + + // Mocks: PER-enabled region, but bogus Service Instance results in false + powervsClient.EXPECT().GetDatacenterCapabilities(gomock.Any(), regionWithPER).Return(mapWithPERTrue, nil) + powervsClient.EXPECT().GetWorkspaceCapabilities(gomock.Any(), invalidServiceInstanceID).Return(mapWithPERFalse, nil) + + // Run tests + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + editedInstallConfig := validInstallConfig() + for _, edit := range tc.edits { + edit(editedInstallConfig) + } + + aggregatedErrors := powervs.ValidatePERAvailability(powervsClient, editedInstallConfig) + if tc.errorMsg != "" { + assert.Regexp(t, tc.errorMsg, aggregatedErrors) + } else { + assert.NoError(t, aggregatedErrors) + } + }) + } +} + +func TestValidateSystemTypeForZone(t *testing.T) { + cases := []struct { + name string + edits editFunctions + errorMsg string + }{ + { + name: "Unknown Zone specified", + edits: editFunctions{ + func(ic *types.InstallConfig) { + ic.Platform.PowerVS.Zone = invalidZone + ic.ControlPlane.Platform.PowerVS = validMachinePool() + ic.ControlPlane.Platform.PowerVS.SysType = defaultSysType + }, + }, + errorMsg: fmt.Sprintf("failed to obtain available SysTypes for: %s", invalidZone), + }, + { + name: "No Platform block", + edits: editFunctions{ + func(ic *types.InstallConfig) { + ic.ControlPlane.Platform.PowerVS = nil + }, + }, + errorMsg: "", + }, + { + name: "Structure present, but no SysType specified", + edits: editFunctions{ + func(ic *types.InstallConfig) { + ic.ControlPlane.Platform.PowerVS = validMachinePool() + }, + }, + errorMsg: "", + }, + { + name: "Unavailable 
SysType specified for dal12 zone", + edits: editFunctions{ + func(ic *types.InstallConfig) { + ic.Platform.PowerVS.Region = validRegion + ic.Platform.PowerVS.Zone = validZone + ic.ControlPlane.Platform.PowerVS = validMachinePool() + ic.ControlPlane.Platform.PowerVS.SysType = newSysType + }, + }, + errorMsg: fmt.Sprintf("%s is not available in: %s", newSysType, validZone), + }, + { + name: "Good Zone/SysType combo specified", + edits: editFunctions{ + func(ic *types.InstallConfig) { + ic.Platform.PowerVS.Region = validRegion + ic.Platform.PowerVS.Zone = validZone + ic.ControlPlane.Platform.PowerVS = validMachinePool() + ic.ControlPlane.Platform.PowerVS.SysType = defaultSysType + }, + }, + errorMsg: "", + }, + } + setMockEnvVars() + + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + + powervsClient := mock.NewMockAPI(mockCtrl) + + // Run tests + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + editedInstallConfig := validInstallConfig() + for _, edit := range tc.edits { + edit(editedInstallConfig) + } + + aggregatedErrors := powervs.ValidateSystemTypeForZone(powervsClient, editedInstallConfig) + if tc.errorMsg != "" { + assert.Regexp(t, tc.errorMsg, aggregatedErrors) + } else { + assert.NoError(t, aggregatedErrors) + } + }) + } +} + +func TestValidateServiceInstance(t *testing.T) { + cases := []struct { + name string + edits editFunctions + errorMsg string + }{ + { + name: "valid install config", + edits: editFunctions{}, + errorMsg: "", + }, + { + name: "invalid install config", + edits: editFunctions{ + func(ic *types.InstallConfig) { + ic.Platform.PowerVS.ServiceInstanceGUID = "invalid-uuid" + }, + }, + errorMsg: "platform:powervs:serviceInstanceGUID has an invalid guid", + }, + } + setMockEnvVars() + + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + + powervsClient := mock.NewMockAPI(mockCtrl) + + // FIX: Unexpected call to *mock.MockAPI.ListServiceInstances([context.TODO.WithDeadline(2023-12-02 08:38:15.542340268 
-0600 CST m=+300.012357408 [4m59.999979046s])]) at validation.go:289 because: there are no expected calls of the method "ListServiceInstances" for that receiver + powervsClient.EXPECT().ListServiceInstances(gomock.Any()).AnyTimes() + + // Run tests + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + editedInstallConfig := validInstallConfig() + for _, edit := range tc.edits { + edit(editedInstallConfig) + } + + aggregatedErrors := powervs.ValidateServiceInstance(powervsClient, editedInstallConfig) + if tc.errorMsg != "" { + assert.Regexp(t, tc.errorMsg, aggregatedErrors) + } else { + assert.NoError(t, aggregatedErrors) + } + }) + } +} + +func setMockEnvVars() { + os.Setenv("POWERVS_AUTH_FILEPATH", "./tmp/powervs/config.json") + os.Setenv("IBMID", "foo") + os.Setenv("IC_API_KEY", "foo") + os.Setenv("IBMCLOUD_REGION", "foo") + os.Setenv("IBMCLOUD_ZONE", "foo") +} diff --git a/.history/pkg/asset/machines/worker_20241219120242.go b/.history/pkg/asset/machines/worker_20241219120242.go new file mode 100644 index 00000000000..7398a22e1af --- /dev/null +++ b/.history/pkg/asset/machines/worker_20241219120242.go @@ -0,0 +1,842 @@ +package machines + +import ( + "context" + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/utils/pointer" + "sigs.k8s.io/yaml" + + configv1 "github.com/openshift/api/config/v1" + machinev1 "github.com/openshift/api/machine/v1" + machinev1alpha1 "github.com/openshift/api/machine/v1alpha1" + machinev1beta1 "github.com/openshift/api/machine/v1beta1" + baremetalapi "github.com/openshift/cluster-api-provider-baremetal/pkg/apis" + baremetalprovider "github.com/openshift/cluster-api-provider-baremetal/pkg/apis/baremetal/v1alpha1" + libvirtapi "github.com/openshift/cluster-api-provider-libvirt/pkg/apis" + libvirtprovider 
"github.com/openshift/cluster-api-provider-libvirt/pkg/apis/libvirtproviderconfig/v1beta1" + ovirtproviderapi "github.com/openshift/cluster-api-provider-ovirt/pkg/apis" + ovirtprovider "github.com/openshift/cluster-api-provider-ovirt/pkg/apis/ovirtprovider/v1beta1" + "github.com/openshift/installer/pkg/asset" + "github.com/openshift/installer/pkg/asset/ignition/machine" + "github.com/openshift/installer/pkg/asset/installconfig" + icaws "github.com/openshift/installer/pkg/asset/installconfig/aws" + icazure "github.com/openshift/installer/pkg/asset/installconfig/azure" + icgcp "github.com/openshift/installer/pkg/asset/installconfig/gcp" + "github.com/openshift/installer/pkg/asset/machines/alibabacloud" + "github.com/openshift/installer/pkg/asset/machines/aws" + "github.com/openshift/installer/pkg/asset/machines/azure" + "github.com/openshift/installer/pkg/asset/machines/baremetal" + "github.com/openshift/installer/pkg/asset/machines/gcp" + "github.com/openshift/installer/pkg/asset/machines/ibmcloud" + "github.com/openshift/installer/pkg/asset/machines/libvirt" + "github.com/openshift/installer/pkg/asset/machines/machineconfig" + "github.com/openshift/installer/pkg/asset/machines/nutanix" + "github.com/openshift/installer/pkg/asset/machines/openstack" + "github.com/openshift/installer/pkg/asset/machines/ovirt" + "github.com/openshift/installer/pkg/asset/machines/powervs" + "github.com/openshift/installer/pkg/asset/machines/vsphere" + "github.com/openshift/installer/pkg/asset/rhcos" + rhcosutils "github.com/openshift/installer/pkg/rhcos" + "github.com/openshift/installer/pkg/types" + alibabacloudtypes "github.com/openshift/installer/pkg/types/alibabacloud" + awstypes "github.com/openshift/installer/pkg/types/aws" + awsdefaults "github.com/openshift/installer/pkg/types/aws/defaults" + azuretypes "github.com/openshift/installer/pkg/types/azure" + azuredefaults "github.com/openshift/installer/pkg/types/azure/defaults" + baremetaltypes 
"github.com/openshift/installer/pkg/types/baremetal" + externaltypes "github.com/openshift/installer/pkg/types/external" + gcptypes "github.com/openshift/installer/pkg/types/gcp" + ibmcloudtypes "github.com/openshift/installer/pkg/types/ibmcloud" + libvirttypes "github.com/openshift/installer/pkg/types/libvirt" + nonetypes "github.com/openshift/installer/pkg/types/none" + nutanixtypes "github.com/openshift/installer/pkg/types/nutanix" + openstacktypes "github.com/openshift/installer/pkg/types/openstack" + ovirttypes "github.com/openshift/installer/pkg/types/ovirt" + powervstypes "github.com/openshift/installer/pkg/types/powervs" + vspheretypes "github.com/openshift/installer/pkg/types/vsphere" + ibmcloudapi "github.com/openshift/machine-api-provider-ibmcloud/pkg/apis" + ibmcloudprovider "github.com/openshift/machine-api-provider-ibmcloud/pkg/apis/ibmcloudprovider/v1" + mcfgv1 "github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1" +) + +const ( + // workerMachineSetFileName is the format string for constructing the worker MachineSet filenames. + workerMachineSetFileName = "99_openshift-cluster-api_worker-machineset-%s.yaml" + + // workerMachineFileName is the format string for constructing the worker Machine filenames. + workerMachineFileName = "99_openshift-cluster-api_worker-machines-%s.yaml" + + // workerUserDataFileName is the filename used for the worker user-data secret. + workerUserDataFileName = "99_openshift-cluster-api_worker-user-data-secret.yaml" + + // decimalRootVolumeSize is the size in GB we use for some platforms. + // See below. + decimalRootVolumeSize = 120 + + // powerOfTwoRootVolumeSize is the size in GB we use for other platforms. + // The reasons for the specific choices between these two may boil down + // to which section of code the person adding a platform was copy-pasting from. 
+ // https://github.com/openshift/openshift-docs/blob/main/modules/installation-requirements-user-infra.adoc#minimum-resource-requirements + powerOfTwoRootVolumeSize = 128 +) + +var ( + workerMachineSetFileNamePattern = fmt.Sprintf(workerMachineSetFileName, "*") + workerMachineFileNamePattern = fmt.Sprintf(workerMachineFileName, "*") + + _ asset.WritableAsset = (*Worker)(nil) +) + +func defaultAWSMachinePoolPlatform(poolName string) awstypes.MachinePool { + defaultEBSType := awstypes.VolumeTypeGp3 + + // gp3 is not offered in all local-zones locations used by Edge Pools. + // Once it is available, it can be used as default for all machine pools. + // https://aws.amazon.com/about-aws/global-infrastructure/localzones/features + if poolName == types.MachinePoolEdgeRoleName { + defaultEBSType = awstypes.VolumeTypeGp2 + } + return awstypes.MachinePool{ + EC2RootVolume: awstypes.EC2RootVolume{ + Type: defaultEBSType, + Size: decimalRootVolumeSize, + }, + } +} + +func defaultLibvirtMachinePoolPlatform() libvirttypes.MachinePool { + return libvirttypes.MachinePool{} +} + +func defaultAzureMachinePoolPlatform() azuretypes.MachinePool { + return azuretypes.MachinePool{ + OSDisk: azuretypes.OSDisk{ + DiskSizeGB: powerOfTwoRootVolumeSize, + DiskType: azuretypes.DefaultDiskType, + }, + } +} + +func defaultGCPMachinePoolPlatform(arch types.Architecture) gcptypes.MachinePool { + return gcptypes.MachinePool{ + InstanceType: icgcp.DefaultInstanceTypeForArch(arch), + OSDisk: gcptypes.OSDisk{ + DiskSizeGB: powerOfTwoRootVolumeSize, + DiskType: "pd-ssd", + }, + } +} + +func defaultIBMCloudMachinePoolPlatform() ibmcloudtypes.MachinePool { + return ibmcloudtypes.MachinePool{ + InstanceType: "bx2-4x16", + } +} + +func defaultOpenStackMachinePoolPlatform() openstacktypes.MachinePool { + return openstacktypes.MachinePool{ + Zones: []string{""}, + } +} + +func defaultBareMetalMachinePoolPlatform() baremetaltypes.MachinePool { + return baremetaltypes.MachinePool{} +} + +func 
defaultOvirtMachinePoolPlatform() ovirttypes.MachinePool { + return ovirttypes.MachinePool{ + CPU: &ovirttypes.CPU{ + Cores: 4, + Sockets: 1, + Threads: 1, + }, + MemoryMB: 16348, + OSDisk: &ovirttypes.Disk{ + SizeGB: decimalRootVolumeSize, + }, + VMType: ovirttypes.VMTypeServer, + AutoPinningPolicy: ovirttypes.AutoPinningNone, + } +} + +func defaultVSphereMachinePoolPlatform() vspheretypes.MachinePool { + return vspheretypes.MachinePool{ + NumCPUs: 4, + NumCoresPerSocket: 4, + MemoryMiB: 16384, + OSDisk: vspheretypes.OSDisk{ + DiskSizeGB: decimalRootVolumeSize, + }, + } +} + +func defaultPowerVSMachinePoolPlatform() powervstypes.MachinePool { + return powervstypes.MachinePool{ + MemoryGiB: 32, + Processors: intstr.FromString("0.5"), + ProcType: machinev1.PowerVSProcessorTypeShared, + SysType: "s922", + } +<<<<<<< HEAD +======= + + sysTypes, err = powervstypes.AvailableSysTypes(ic.PowerVS.Region, ic.PowerVS.Zone) + if err == nil { + defaultMp.SysType = sysTypes[0] + } else { + logrus.Warnf("For given zone %v, AvailableSysTypes returns %v", ic.PowerVS.Zone, err) + } + + return defaultMp +>>>>>>> 07c96aff2b (Create region-zone-sysType hierarchy) +} + +func defaultNutanixMachinePoolPlatform() nutanixtypes.MachinePool { + return nutanixtypes.MachinePool{ + NumCPUs: 4, + NumCoresPerSocket: 1, + MemoryMiB: 16384, + OSDisk: nutanixtypes.OSDisk{ + DiskSizeGiB: decimalRootVolumeSize, + }, + } +} + +// awsSetPreferredInstanceByEdgeZone discovers supported instanceType for each edge pool +// using the existing preferred instance list used by worker compute pool. +// Each machine set in the edge pool, created for each zone, can use different instance +// types depending on the instance offerings in the location (Local Zones). 
+func awsSetPreferredInstanceByEdgeZone(ctx context.Context, defaultTypes []string, meta *icaws.Metadata, zones icaws.Zones) (ok bool, err error) { + for zone := range zones { + preferredType, err := aws.PreferredInstanceType(ctx, meta, defaultTypes, []string{zone}) + if err != nil { + logrus.Warn(errors.Wrap(err, fmt.Sprintf("unable to select instanceType on the zone[%v] from the preferred list: %v. You must update the MachineSet manifest", zone, defaultTypes))) + continue + } + if _, ok := zones[zone]; !ok { + zones[zone] = &icaws.Zone{Name: zone} + } + zones[zone].PreferredInstanceType = preferredType + } + return true, nil +} + +// Worker generates the machinesets for `worker` machine pool. +type Worker struct { + UserDataFile *asset.File + MachineConfigFiles []*asset.File + MachineSetFiles []*asset.File + MachineFiles []*asset.File +} + +// Name returns a human friendly name for the Worker Asset. +func (w *Worker) Name() string { + return "Worker Machines" +} + +// Dependencies returns all of the dependencies directly needed by the +// Worker asset +func (w *Worker) Dependencies() []asset.Asset { + return []asset.Asset{ + &installconfig.ClusterID{}, + // PlatformCredsCheck just checks the creds (and asks, if needed) + // We do not actually use it in this asset directly, hence + // it is put in the dependencies but not fetched in Generate + &installconfig.PlatformCredsCheck{}, + &installconfig.InstallConfig{}, + new(rhcos.Image), + new(rhcos.Release), + &machine.Worker{}, + } +} + +// Generate generates the Worker asset. 
+func (w *Worker) Generate(dependencies asset.Parents) error { + ctx := context.TODO() + clusterID := &installconfig.ClusterID{} + installConfig := &installconfig.InstallConfig{} + rhcosImage := new(rhcos.Image) + rhcosRelease := new(rhcos.Release) + wign := &machine.Worker{} + dependencies.Get(clusterID, installConfig, rhcosImage, rhcosRelease, wign) + + workerUserDataSecretName := "worker-user-data" + + machines := []machinev1beta1.Machine{} + machineConfigs := []*mcfgv1.MachineConfig{} + machineSets := []runtime.Object{} + var err error + ic := installConfig.Config + for _, pool := range ic.Compute { + pool := pool // this makes golint happy... G601: Implicit memory aliasing in for loop. (gosec) + if pool.Hyperthreading == types.HyperthreadingDisabled { + ignHT, err := machineconfig.ForHyperthreadingDisabled("worker") + if err != nil { + return errors.Wrap(err, "failed to create ignition for hyperthreading disabled for worker machines") + } + machineConfigs = append(machineConfigs, ignHT) + } + if ic.SSHKey != "" { + ignSSH, err := machineconfig.ForAuthorizedKeys(ic.SSHKey, "worker") + if err != nil { + return errors.Wrap(err, "failed to create ignition for authorized SSH keys for worker machines") + } + machineConfigs = append(machineConfigs, ignSSH) + } + if ic.FIPS { + ignFIPS, err := machineconfig.ForFIPSEnabled("worker") + if err != nil { + return errors.Wrap(err, "failed to create ignition for FIPS enabled for worker machines") + } + machineConfigs = append(machineConfigs, ignFIPS) + } + if ic.Platform.Name() == powervstypes.Name { + // always enable multipath for powervs. + ignMultipath, err := machineconfig.ForMultipathEnabled("worker") + if err != nil { + return errors.Wrap(err, "failed to create ignition for multipath enabled for worker machines") + } + machineConfigs = append(machineConfigs, ignMultipath) + } + // The maximum number of networks supported on ServiceNetwork is two, one IPv4 and one IPv6 network. 
+ // The cluster-network-operator handles the validation of this field. + // Reference: https://github.com/openshift/cluster-network-operator/blob/fc3e0e25b4cfa43e14122bdcdd6d7f2585017d75/pkg/network/cluster_config.go#L45-L52 + if ic.Networking != nil && len(ic.Networking.ServiceNetwork) == 2 && + (ic.Platform.Name() == openstacktypes.Name || ic.Platform.Name() == vspheretypes.Name) { + // Only configure kernel args for dual-stack clusters. + ignIPv6, err := machineconfig.ForDualStackAddresses("worker") + if err != nil { + return errors.Wrap(err, "failed to create ignition to configure IPv6 for worker machines") + } + machineConfigs = append(machineConfigs, ignIPv6) + } + + switch ic.Platform.Name() { + case alibabacloudtypes.Name: + client, err := installConfig.AlibabaCloud.Client() + if err != nil { + return err + } + vswitchMaps, err := installConfig.AlibabaCloud.VSwitchMaps() + if err != nil { + return errors.Wrap(err, "failed to get VSwitchs map") + } + + mpool := alibabacloudtypes.DefaultWorkerMachinePoolPlatform() + mpool.ImageID = string(*rhcosImage) + mpool.Set(ic.Platform.AlibabaCloud.DefaultMachinePlatform) + mpool.Set(pool.Platform.AlibabaCloud) + if len(mpool.Zones) == 0 { + if len(vswitchMaps) > 0 { + for zone := range vswitchMaps { + mpool.Zones = append(mpool.Zones, zone) + } + } else { + azs, err := client.GetAvailableZonesByInstanceType(mpool.InstanceType) + if err != nil || len(azs) == 0 { + return errors.Wrap(err, "failed to fetch availability zones") + } + mpool.Zones = azs + } + } + + pool.Platform.AlibabaCloud = &mpool + sets, err := alibabacloud.MachineSets( + clusterID.InfraID, + ic, + &pool, + "worker", + workerUserDataSecretName, + installConfig.Config.Platform.AlibabaCloud.Tags, + vswitchMaps, + ) + if err != nil { + return errors.Wrap(err, "failed to create worker machine objects") + } + for _, set := range sets { + machineSets = append(machineSets, set) + } + case awstypes.Name: + subnets := icaws.Subnets{} + zones := icaws.Zones{} + 
if len(ic.Platform.AWS.Subnets) > 0 { + var subnetsMeta icaws.Subnets + switch pool.Name { + case types.MachinePoolEdgeRoleName: + subnetsMeta, err = installConfig.AWS.EdgeSubnets(ctx) + if err != nil { + return err + } + default: + subnetsMeta, err = installConfig.AWS.PrivateSubnets(ctx) + if err != nil { + return err + } + } + for _, subnet := range subnetsMeta { + subnets[subnet.Zone.Name] = subnet + } + } + mpool := defaultAWSMachinePoolPlatform(pool.Name) + + osImage := strings.SplitN(string(*rhcosImage), ",", 2) + osImageID := osImage[0] + if len(osImage) == 2 { + osImageID = "" // the AMI will be generated later on + } + mpool.AMIID = osImageID + + mpool.Set(ic.Platform.AWS.DefaultMachinePlatform) + mpool.Set(pool.Platform.AWS) + zoneDefaults := false + if len(mpool.Zones) == 0 { + if len(subnets) > 0 { + for _, subnet := range subnets { + if subnet.Zone == nil { + return errors.Wrapf(err, "failed to find zone attributes for subnet %s", subnet.ID) + } + mpool.Zones = append(mpool.Zones, subnet.Zone.Name) + zones[subnet.Zone.Name] = subnets[subnet.Zone.Name].Zone + } + } else { + mpool.Zones, err = installConfig.AWS.AvailabilityZones(ctx) + if err != nil { + return err + } + zoneDefaults = true + } + } + + // Requirements when using edge compute pools to populate machine sets. 
+ if pool.Name == types.MachinePoolEdgeRoleName { + err = installConfig.AWS.SetZoneAttributes(ctx, mpool.Zones, zones) + if err != nil { + return errors.Wrap(err, "failed to retrieve zone attributes for edge compute pool") + } + + if pool.Replicas == nil || *pool.Replicas == 0 { + pool.Replicas = pointer.Int64(int64(len(mpool.Zones))) + } + } + + if mpool.InstanceType == "" { + instanceTypes := awsdefaults.InstanceTypes(installConfig.Config.Platform.AWS.Region, installConfig.Config.ControlPlane.Architecture, configv1.HighlyAvailableTopologyMode) + switch pool.Name { + case types.MachinePoolEdgeRoleName: + ok, err := awsSetPreferredInstanceByEdgeZone(ctx, instanceTypes, installConfig.AWS, zones) + if err != nil { + return errors.Wrap(err, "failed to find default instance type for edge pool, you must define on the compute pool") + } + if !ok { + logrus.Warn(errors.Wrap(err, "failed to find preferred instance type for edge pool, using default")) + mpool.InstanceType = instanceTypes[0] + } + default: + mpool.InstanceType, err = aws.PreferredInstanceType(ctx, installConfig.AWS, instanceTypes, mpool.Zones) + if err != nil { + logrus.Warn(errors.Wrapf(err, "failed to find default instance type for %s pool", pool.Name)) + mpool.InstanceType = instanceTypes[0] + } + } + } + // if the list of zones is the default we need to try to filter the list in case there are some zones where the instance might not be available + if zoneDefaults { + mpool.Zones, err = aws.FilterZonesBasedOnInstanceType(ctx, installConfig.AWS, mpool.InstanceType, mpool.Zones) + if err != nil { + logrus.Warn(errors.Wrap(err, "failed to filter zone list")) + } + } + + pool.Platform.AWS = &mpool + sets, err := aws.MachineSets(&aws.MachineSetInput{ + ClusterID: clusterID.InfraID, + InstallConfigPlatformAWS: installConfig.Config.Platform.AWS, + Subnets: subnets, + Zones: zones, + Pool: &pool, + Role: pool.Name, + UserDataSecret: workerUserDataSecretName, + }) + if err != nil { + return errors.Wrap(err, 
"failed to create worker machine objects") + } + for _, set := range sets { + machineSets = append(machineSets, set) + } + case azuretypes.Name: + mpool := defaultAzureMachinePoolPlatform() + mpool.InstanceType = azuredefaults.ComputeInstanceType( + installConfig.Config.Platform.Azure.CloudName, + installConfig.Config.Platform.Azure.Region, + pool.Architecture, + ) + mpool.Set(ic.Platform.Azure.DefaultMachinePlatform) + mpool.Set(pool.Platform.Azure) + + session, err := installConfig.Azure.Session() + if err != nil { + return errors.Wrap(err, "failed to fetch session") + } + + client := icazure.NewClient(session) + if len(mpool.Zones) == 0 { + azs, err := client.GetAvailabilityZones(context.TODO(), ic.Platform.Azure.Region, mpool.InstanceType) + if err != nil { + return errors.Wrap(err, "failed to fetch availability zones") + } + mpool.Zones = azs + if len(azs) == 0 { + // if no azs are given we set to []string{""} for convenience over later operations. + // It means no-zoned for the machine API + mpool.Zones = []string{""} + } + } + + if mpool.OSImage.Publisher != "" { + img, ierr := client.GetMarketplaceImage(context.TODO(), ic.Platform.Azure.Region, mpool.OSImage.Publisher, mpool.OSImage.Offer, mpool.OSImage.SKU, mpool.OSImage.Version) + if ierr != nil { + return fmt.Errorf("failed to fetch marketplace image: %w", ierr) + } + // Publisher is case-sensitive and matched against exactly. 
Also + // the Plan's publisher might not be exactly the same as the + // Image's publisher + if img.Plan != nil && img.Plan.Publisher != nil { + mpool.OSImage.Publisher = *img.Plan.Publisher + } + } + pool.Platform.Azure = &mpool + + capabilities, err := client.GetVMCapabilities(context.TODO(), mpool.InstanceType, installConfig.Config.Platform.Azure.Region) + if err != nil { + return err + } + + useImageGallery := ic.Platform.Azure.CloudName != azuretypes.StackCloud + sets, err := azure.MachineSets(clusterID.InfraID, ic, &pool, string(*rhcosImage), "worker", workerUserDataSecretName, capabilities, useImageGallery) + if err != nil { + return errors.Wrap(err, "failed to create worker machine objects") + } + for _, set := range sets { + machineSets = append(machineSets, set) + } + case baremetaltypes.Name: + mpool := defaultBareMetalMachinePoolPlatform() + mpool.Set(ic.Platform.BareMetal.DefaultMachinePlatform) + mpool.Set(pool.Platform.BareMetal) + pool.Platform.BareMetal = &mpool + + // Use managed user data secret, since images used by MachineSet + // are always up to date + workerUserDataSecretName = "worker-user-data-managed" + sets, err := baremetal.MachineSets(clusterID.InfraID, ic, &pool, "", "worker", workerUserDataSecretName) + if err != nil { + return errors.Wrap(err, "failed to create worker machine objects") + } + for _, set := range sets { + machineSets = append(machineSets, set) + } + case gcptypes.Name: + mpool := defaultGCPMachinePoolPlatform(pool.Architecture) + mpool.Set(ic.Platform.GCP.DefaultMachinePlatform) + mpool.Set(pool.Platform.GCP) + if len(mpool.Zones) == 0 { + azs, err := gcp.ZonesForInstanceType(ic.Platform.GCP.ProjectID, ic.Platform.GCP.Region, mpool.InstanceType) + if err != nil { + return errors.Wrap(err, "failed to fetch availability zones") + } + mpool.Zones = azs + } + pool.Platform.GCP = &mpool + sets, err := gcp.MachineSets(clusterID.InfraID, ic, &pool, string(*rhcosImage), "worker", workerUserDataSecretName) + if err != nil { + 
return errors.Wrap(err, "failed to create worker machine objects") + } + for _, set := range sets { + machineSets = append(machineSets, set) + } + case ibmcloudtypes.Name: + subnets := map[string]string{} + if len(ic.Platform.IBMCloud.ComputeSubnets) > 0 { + subnetMetas, err := installConfig.IBMCloud.ComputeSubnets(ctx) + if err != nil { + return err + } + for _, subnet := range subnetMetas { + subnets[subnet.Zone] = subnet.Name + } + } + mpool := defaultIBMCloudMachinePoolPlatform() + mpool.Set(ic.Platform.IBMCloud.DefaultMachinePlatform) + mpool.Set(pool.Platform.IBMCloud) + if len(mpool.Zones) == 0 { + azs, err := ibmcloud.AvailabilityZones(ic.Platform.IBMCloud.Region) + if err != nil { + return errors.Wrap(err, "failed to fetch availability zones") + } + mpool.Zones = azs + } + pool.Platform.IBMCloud = &mpool + sets, err := ibmcloud.MachineSets(clusterID.InfraID, ic, subnets, &pool, "worker", workerUserDataSecretName) + if err != nil { + return errors.Wrap(err, "failed to create worker machine objects") + } + for _, set := range sets { + machineSets = append(machineSets, set) + } + case libvirttypes.Name: + mpool := defaultLibvirtMachinePoolPlatform() + mpool.Set(ic.Platform.Libvirt.DefaultMachinePlatform) + mpool.Set(pool.Platform.Libvirt) + pool.Platform.Libvirt = &mpool + sets, err := libvirt.MachineSets(clusterID.InfraID, ic, &pool, "worker", workerUserDataSecretName) + if err != nil { + return errors.Wrap(err, "failed to create worker machine objects") + } + for _, set := range sets { + machineSets = append(machineSets, set) + } + case openstacktypes.Name: + mpool := defaultOpenStackMachinePoolPlatform() + mpool.Set(ic.Platform.OpenStack.DefaultMachinePlatform) + mpool.Set(pool.Platform.OpenStack) + pool.Platform.OpenStack = &mpool + + imageName, _ := rhcosutils.GenerateOpenStackImageName(string(*rhcosImage), clusterID.InfraID) + + sets, err := openstack.MachineSets(clusterID.InfraID, ic, &pool, imageName, "worker", workerUserDataSecretName, nil) + if err 
!= nil { + return errors.Wrap(err, "failed to create worker machine objects") + } + for _, set := range sets { + machineSets = append(machineSets, set) + } + case vspheretypes.Name: + mpool := defaultVSphereMachinePoolPlatform() + mpool.Set(ic.Platform.VSphere.DefaultMachinePlatform) + mpool.Set(pool.Platform.VSphere) + pool.Platform.VSphere = &mpool + templateName := clusterID.InfraID + "-rhcos" + + sets, err := vsphere.MachineSets(clusterID.InfraID, ic, &pool, templateName, "worker", workerUserDataSecretName) + if err != nil { + return errors.Wrap(err, "failed to create worker machine objects") + } + for _, set := range sets { + machineSets = append(machineSets, set) + } + + // If static IPs are configured, we must generate worker machines and scale the machinesets to 0. + if ic.Platform.VSphere.Hosts != nil { + logrus.Debug("Generating worker machines with static IPs.") + templateName := clusterID.InfraID + "-rhcos" + + machines, err = vsphere.Machines(clusterID.InfraID, ic, &pool, templateName, "worker", workerUserDataSecretName) + if err != nil { + return errors.Wrap(err, "failed to create worker machine objects") + } + logrus.Debugf("Generated %v worker machines.", len(machines)) + + for _, ms := range sets { + ms.Spec.Replicas = pointer.Int32(0) + } + } + case ovirttypes.Name: + mpool := defaultOvirtMachinePoolPlatform() + mpool.Set(ic.Platform.Ovirt.DefaultMachinePlatform) + mpool.Set(pool.Platform.Ovirt) + pool.Platform.Ovirt = &mpool + + imageName, _ := rhcosutils.GenerateOpenStackImageName(string(*rhcosImage), clusterID.InfraID) + + sets, err := ovirt.MachineSets(clusterID.InfraID, ic, &pool, imageName, "worker", workerUserDataSecretName) + if err != nil { + return errors.Wrap(err, "failed to create worker machine objects for ovirt provider") + } + for _, set := range sets { + machineSets = append(machineSets, set) + } + case powervstypes.Name: + mpool := defaultPowerVSMachinePoolPlatform() + mpool.Set(ic.Platform.PowerVS.DefaultMachinePlatform) + 
mpool.Set(pool.Platform.PowerVS) + pool.Platform.PowerVS = &mpool + sets, err := powervs.MachineSets(clusterID.InfraID, ic, &pool, "worker", "worker-user-data") + if err != nil { + return errors.Wrap(err, "failed to create worker machine objects for powervs provider") + } + for _, set := range sets { + machineSets = append(machineSets, set) + } + case externaltypes.Name, nonetypes.Name: + case nutanixtypes.Name: + mpool := defaultNutanixMachinePoolPlatform() + mpool.Set(ic.Platform.Nutanix.DefaultMachinePlatform) + mpool.Set(pool.Platform.Nutanix) + if err = mpool.ValidateConfig(ic.Platform.Nutanix); err != nil { + return errors.Wrap(err, "failed to create master machine objects") + } + pool.Platform.Nutanix = &mpool + imageName := nutanixtypes.RHCOSImageName(clusterID.InfraID) + + sets, err := nutanix.MachineSets(clusterID.InfraID, ic, &pool, imageName, "worker", workerUserDataSecretName) + if err != nil { + return errors.Wrap(err, "failed to create worker machine objects") + } + for _, set := range sets { + machineSets = append(machineSets, set) + } + default: + return fmt.Errorf("invalid Platform") + } + } + + data, err := userDataSecret(workerUserDataSecretName, wign.File.Data) + if err != nil { + return errors.Wrap(err, "failed to create user-data secret for worker machines") + } + w.UserDataFile = &asset.File{ + Filename: filepath.Join(directory, workerUserDataFileName), + Data: data, + } + + w.MachineConfigFiles, err = machineconfig.Manifests(machineConfigs, "worker", directory) + if err != nil { + return errors.Wrap(err, "failed to create MachineConfig manifests for worker machines") + } + + w.MachineSetFiles = make([]*asset.File, len(machineSets)) + padFormat := fmt.Sprintf("%%0%dd", len(fmt.Sprintf("%d", len(machineSets)))) + for i, machineSet := range machineSets { + data, err := yaml.Marshal(machineSet) + if err != nil { + return errors.Wrapf(err, "marshal worker %d", i) + } + + padded := fmt.Sprintf(padFormat, i) + w.MachineSetFiles[i] = &asset.File{ + 
Filename: filepath.Join(directory, fmt.Sprintf(workerMachineSetFileName, padded)), + Data: data, + } + } + + w.MachineFiles = make([]*asset.File, len(machines)) + for i, machineDef := range machines { + data, err := yaml.Marshal(machineDef) + if err != nil { + return errors.Wrapf(err, "marshal master %d", i) + } + + padded := fmt.Sprintf(padFormat, i) + w.MachineFiles[i] = &asset.File{ + Filename: filepath.Join(directory, fmt.Sprintf(workerMachineFileName, padded)), + Data: data, + } + } + return nil +} + +// Files returns the files generated by the asset. +func (w *Worker) Files() []*asset.File { + files := make([]*asset.File, 0, 1+len(w.MachineConfigFiles)+len(w.MachineSetFiles)) + if w.UserDataFile != nil { + files = append(files, w.UserDataFile) + } + files = append(files, w.MachineConfigFiles...) + files = append(files, w.MachineSetFiles...) + files = append(files, w.MachineFiles...) + return files +} + +// Load reads the asset files from disk. +func (w *Worker) Load(f asset.FileFetcher) (found bool, err error) { + file, err := f.FetchByName(filepath.Join(directory, workerUserDataFileName)) + if err != nil { + if os.IsNotExist(err) { + return false, nil + } + return false, err + } + w.UserDataFile = file + + w.MachineConfigFiles, err = machineconfig.Load(f, "worker", directory) + if err != nil { + return true, err + } + + fileList, err := f.FetchByPattern(filepath.Join(directory, workerMachineSetFileNamePattern)) + if err != nil { + return true, err + } + + w.MachineSetFiles = fileList + + fileList, err = f.FetchByPattern(filepath.Join(directory, workerMachineFileNamePattern)) + if err != nil { + return true, err + } + w.MachineFiles = fileList + + return true, nil +} + +// MachineSets returns MachineSet manifest structures. 
+func (w *Worker) MachineSets() ([]machinev1beta1.MachineSet, error) { + scheme := runtime.NewScheme() + baremetalapi.AddToScheme(scheme) + ibmcloudapi.AddToScheme(scheme) + libvirtapi.AddToScheme(scheme) + ovirtproviderapi.AddToScheme(scheme) + scheme.AddKnownTypes(machinev1alpha1.GroupVersion, + &machinev1alpha1.OpenstackProviderSpec{}, + ) + scheme.AddKnownTypes(machinev1beta1.SchemeGroupVersion, + &machinev1beta1.AWSMachineProviderConfig{}, + &machinev1beta1.VSphereMachineProviderSpec{}, + &machinev1beta1.AzureMachineProviderSpec{}, + &machinev1beta1.GCPMachineProviderSpec{}, + ) + machinev1.Install(scheme) + scheme.AddKnownTypes(machinev1.GroupVersion, + &machinev1.AlibabaCloudMachineProviderConfig{}, + &machinev1.NutanixMachineProviderConfig{}, + &machinev1.PowerVSMachineProviderConfig{}, + ) + machinev1beta1.AddToScheme(scheme) + decoder := serializer.NewCodecFactory(scheme).UniversalDecoder( + baremetalprovider.SchemeGroupVersion, + ibmcloudprovider.SchemeGroupVersion, + libvirtprovider.SchemeGroupVersion, + machinev1.GroupVersion, + machinev1alpha1.GroupVersion, + ovirtprovider.SchemeGroupVersion, + machinev1beta1.SchemeGroupVersion, + ) + + machineSets := []machinev1beta1.MachineSet{} + for i, file := range w.MachineSetFiles { + machineSet := &machinev1beta1.MachineSet{} + err := yaml.Unmarshal(file.Data, &machineSet) + if err != nil { + return machineSets, errors.Wrapf(err, "unmarshal worker %d", i) + } + + obj, _, err := decoder.Decode(machineSet.Spec.Template.Spec.ProviderSpec.Value.Raw, nil, nil) + if err != nil { + return machineSets, errors.Wrapf(err, "unmarshal worker %d", i) + } + + machineSet.Spec.Template.Spec.ProviderSpec.Value = &runtime.RawExtension{Object: obj} + machineSets = append(machineSets, *machineSet) + } + + return machineSets, nil +} diff --git a/.history/pkg/asset/machines/worker_20241219120647.go b/.history/pkg/asset/machines/worker_20241219120647.go new file mode 100644 index 00000000000..9de23262736 --- /dev/null +++ 
b/.history/pkg/asset/machines/worker_20241219120647.go @@ -0,0 +1,839 @@ +package machines + +import ( + "context" + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/utils/pointer" + "sigs.k8s.io/yaml" + + configv1 "github.com/openshift/api/config/v1" + machinev1 "github.com/openshift/api/machine/v1" + machinev1alpha1 "github.com/openshift/api/machine/v1alpha1" + machinev1beta1 "github.com/openshift/api/machine/v1beta1" + baremetalapi "github.com/openshift/cluster-api-provider-baremetal/pkg/apis" + baremetalprovider "github.com/openshift/cluster-api-provider-baremetal/pkg/apis/baremetal/v1alpha1" + libvirtapi "github.com/openshift/cluster-api-provider-libvirt/pkg/apis" + libvirtprovider "github.com/openshift/cluster-api-provider-libvirt/pkg/apis/libvirtproviderconfig/v1beta1" + ovirtproviderapi "github.com/openshift/cluster-api-provider-ovirt/pkg/apis" + ovirtprovider "github.com/openshift/cluster-api-provider-ovirt/pkg/apis/ovirtprovider/v1beta1" + "github.com/openshift/installer/pkg/asset" + "github.com/openshift/installer/pkg/asset/ignition/machine" + "github.com/openshift/installer/pkg/asset/installconfig" + icaws "github.com/openshift/installer/pkg/asset/installconfig/aws" + icazure "github.com/openshift/installer/pkg/asset/installconfig/azure" + icgcp "github.com/openshift/installer/pkg/asset/installconfig/gcp" + "github.com/openshift/installer/pkg/asset/machines/alibabacloud" + "github.com/openshift/installer/pkg/asset/machines/aws" + "github.com/openshift/installer/pkg/asset/machines/azure" + "github.com/openshift/installer/pkg/asset/machines/baremetal" + "github.com/openshift/installer/pkg/asset/machines/gcp" + "github.com/openshift/installer/pkg/asset/machines/ibmcloud" + "github.com/openshift/installer/pkg/asset/machines/libvirt" + 
"github.com/openshift/installer/pkg/asset/machines/machineconfig" + "github.com/openshift/installer/pkg/asset/machines/nutanix" + "github.com/openshift/installer/pkg/asset/machines/openstack" + "github.com/openshift/installer/pkg/asset/machines/ovirt" + "github.com/openshift/installer/pkg/asset/machines/powervs" + "github.com/openshift/installer/pkg/asset/machines/vsphere" + "github.com/openshift/installer/pkg/asset/rhcos" + rhcosutils "github.com/openshift/installer/pkg/rhcos" + "github.com/openshift/installer/pkg/types" + alibabacloudtypes "github.com/openshift/installer/pkg/types/alibabacloud" + awstypes "github.com/openshift/installer/pkg/types/aws" + awsdefaults "github.com/openshift/installer/pkg/types/aws/defaults" + azuretypes "github.com/openshift/installer/pkg/types/azure" + azuredefaults "github.com/openshift/installer/pkg/types/azure/defaults" + baremetaltypes "github.com/openshift/installer/pkg/types/baremetal" + externaltypes "github.com/openshift/installer/pkg/types/external" + gcptypes "github.com/openshift/installer/pkg/types/gcp" + ibmcloudtypes "github.com/openshift/installer/pkg/types/ibmcloud" + libvirttypes "github.com/openshift/installer/pkg/types/libvirt" + nonetypes "github.com/openshift/installer/pkg/types/none" + nutanixtypes "github.com/openshift/installer/pkg/types/nutanix" + openstacktypes "github.com/openshift/installer/pkg/types/openstack" + ovirttypes "github.com/openshift/installer/pkg/types/ovirt" + powervstypes "github.com/openshift/installer/pkg/types/powervs" + vspheretypes "github.com/openshift/installer/pkg/types/vsphere" + ibmcloudapi "github.com/openshift/machine-api-provider-ibmcloud/pkg/apis" + ibmcloudprovider "github.com/openshift/machine-api-provider-ibmcloud/pkg/apis/ibmcloudprovider/v1" + mcfgv1 "github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1" +) + +const ( + // workerMachineSetFileName is the format string for constructing the worker MachineSet filenames. 
+ workerMachineSetFileName = "99_openshift-cluster-api_worker-machineset-%s.yaml" + + // workerMachineFileName is the format string for constructing the worker Machine filenames. + workerMachineFileName = "99_openshift-cluster-api_worker-machines-%s.yaml" + + // workerUserDataFileName is the filename used for the worker user-data secret. + workerUserDataFileName = "99_openshift-cluster-api_worker-user-data-secret.yaml" + + // decimalRootVolumeSize is the size in GB we use for some platforms. + // See below. + decimalRootVolumeSize = 120 + + // powerOfTwoRootVolumeSize is the size in GB we use for other platforms. + // The reasons for the specific choices between these two may boil down + // to which section of code the person adding a platform was copy-pasting from. + // https://github.com/openshift/openshift-docs/blob/main/modules/installation-requirements-user-infra.adoc#minimum-resource-requirements + powerOfTwoRootVolumeSize = 128 +) + +var ( + workerMachineSetFileNamePattern = fmt.Sprintf(workerMachineSetFileName, "*") + workerMachineFileNamePattern = fmt.Sprintf(workerMachineFileName, "*") + + _ asset.WritableAsset = (*Worker)(nil) +) + +func defaultAWSMachinePoolPlatform(poolName string) awstypes.MachinePool { + defaultEBSType := awstypes.VolumeTypeGp3 + + // gp3 is not offered in all local-zones locations used by Edge Pools. + // Once it is available, it can be used as default for all machine pools. 
+ // https://aws.amazon.com/about-aws/global-infrastructure/localzones/features + if poolName == types.MachinePoolEdgeRoleName { + defaultEBSType = awstypes.VolumeTypeGp2 + } + return awstypes.MachinePool{ + EC2RootVolume: awstypes.EC2RootVolume{ + Type: defaultEBSType, + Size: decimalRootVolumeSize, + }, + } +} + +func defaultLibvirtMachinePoolPlatform() libvirttypes.MachinePool { + return libvirttypes.MachinePool{} +} + +func defaultAzureMachinePoolPlatform() azuretypes.MachinePool { + return azuretypes.MachinePool{ + OSDisk: azuretypes.OSDisk{ + DiskSizeGB: powerOfTwoRootVolumeSize, + DiskType: azuretypes.DefaultDiskType, + }, + } +} + +func defaultGCPMachinePoolPlatform(arch types.Architecture) gcptypes.MachinePool { + return gcptypes.MachinePool{ + InstanceType: icgcp.DefaultInstanceTypeForArch(arch), + OSDisk: gcptypes.OSDisk{ + DiskSizeGB: powerOfTwoRootVolumeSize, + DiskType: "pd-ssd", + }, + } +} + +func defaultIBMCloudMachinePoolPlatform() ibmcloudtypes.MachinePool { + return ibmcloudtypes.MachinePool{ + InstanceType: "bx2-4x16", + } +} + +func defaultOpenStackMachinePoolPlatform() openstacktypes.MachinePool { + return openstacktypes.MachinePool{ + Zones: []string{""}, + } +} + +func defaultBareMetalMachinePoolPlatform() baremetaltypes.MachinePool { + return baremetaltypes.MachinePool{} +} + +func defaultOvirtMachinePoolPlatform() ovirttypes.MachinePool { + return ovirttypes.MachinePool{ + CPU: &ovirttypes.CPU{ + Cores: 4, + Sockets: 1, + Threads: 1, + }, + MemoryMB: 16348, + OSDisk: &ovirttypes.Disk{ + SizeGB: decimalRootVolumeSize, + }, + VMType: ovirttypes.VMTypeServer, + AutoPinningPolicy: ovirttypes.AutoPinningNone, + } +} + +func defaultVSphereMachinePoolPlatform() vspheretypes.MachinePool { + return vspheretypes.MachinePool{ + NumCPUs: 4, + NumCoresPerSocket: 4, + MemoryMiB: 16384, + OSDisk: vspheretypes.OSDisk{ + DiskSizeGB: decimalRootVolumeSize, + }, + } +} + +func defaultPowerVSMachinePoolPlatform() powervstypes.MachinePool { + return 
powervstypes.MachinePool{ + MemoryGiB: 32, + Processors: intstr.FromString("0.5"), + ProcType: machinev1.PowerVSProcessorTypeShared, + SysType: "s922", + } + + sysTypes, err = powervstypes.AvailableSysTypes(ic.PowerVS.Region, ic.PowerVS.Zone) + if err == nil { + defaultMp.SysType = sysTypes[0] + } else { + logrus.Warnf("For given zone %v, AvailableSysTypes returns %v", ic.PowerVS.Zone, err) + } + + return defaultMp +} + +func defaultNutanixMachinePoolPlatform() nutanixtypes.MachinePool { + return nutanixtypes.MachinePool{ + NumCPUs: 4, + NumCoresPerSocket: 1, + MemoryMiB: 16384, + OSDisk: nutanixtypes.OSDisk{ + DiskSizeGiB: decimalRootVolumeSize, + }, + } +} + +// awsSetPreferredInstanceByEdgeZone discovers supported instanceType for each edge pool +// using the existing preferred instance list used by worker compute pool. +// Each machine set in the edge pool, created for each zone, can use different instance +// types depending on the instance offerings in the location (Local Zones). +func awsSetPreferredInstanceByEdgeZone(ctx context.Context, defaultTypes []string, meta *icaws.Metadata, zones icaws.Zones) (ok bool, err error) { + for zone := range zones { + preferredType, err := aws.PreferredInstanceType(ctx, meta, defaultTypes, []string{zone}) + if err != nil { + logrus.Warn(errors.Wrap(err, fmt.Sprintf("unable to select instanceType on the zone[%v] from the preferred list: %v. You must update the MachineSet manifest", zone, defaultTypes))) + continue + } + if _, ok := zones[zone]; !ok { + zones[zone] = &icaws.Zone{Name: zone} + } + zones[zone].PreferredInstanceType = preferredType + } + return true, nil +} + +// Worker generates the machinesets for `worker` machine pool. +type Worker struct { + UserDataFile *asset.File + MachineConfigFiles []*asset.File + MachineSetFiles []*asset.File + MachineFiles []*asset.File +} + +// Name returns a human friendly name for the Worker Asset. 
+func (w *Worker) Name() string { + return "Worker Machines" +} + +// Dependencies returns all of the dependencies directly needed by the +// Worker asset +func (w *Worker) Dependencies() []asset.Asset { + return []asset.Asset{ + &installconfig.ClusterID{}, + // PlatformCredsCheck just checks the creds (and asks, if needed) + // We do not actually use it in this asset directly, hence + // it is put in the dependencies but not fetched in Generate + &installconfig.PlatformCredsCheck{}, + &installconfig.InstallConfig{}, + new(rhcos.Image), + new(rhcos.Release), + &machine.Worker{}, + } +} + +// Generate generates the Worker asset. +func (w *Worker) Generate(dependencies asset.Parents) error { + ctx := context.TODO() + clusterID := &installconfig.ClusterID{} + installConfig := &installconfig.InstallConfig{} + rhcosImage := new(rhcos.Image) + rhcosRelease := new(rhcos.Release) + wign := &machine.Worker{} + dependencies.Get(clusterID, installConfig, rhcosImage, rhcosRelease, wign) + + workerUserDataSecretName := "worker-user-data" + + machines := []machinev1beta1.Machine{} + machineConfigs := []*mcfgv1.MachineConfig{} + machineSets := []runtime.Object{} + var err error + ic := installConfig.Config + for _, pool := range ic.Compute { + pool := pool // this makes golint happy... G601: Implicit memory aliasing in for loop. 
(gosec) + if pool.Hyperthreading == types.HyperthreadingDisabled { + ignHT, err := machineconfig.ForHyperthreadingDisabled("worker") + if err != nil { + return errors.Wrap(err, "failed to create ignition for hyperthreading disabled for worker machines") + } + machineConfigs = append(machineConfigs, ignHT) + } + if ic.SSHKey != "" { + ignSSH, err := machineconfig.ForAuthorizedKeys(ic.SSHKey, "worker") + if err != nil { + return errors.Wrap(err, "failed to create ignition for authorized SSH keys for worker machines") + } + machineConfigs = append(machineConfigs, ignSSH) + } + if ic.FIPS { + ignFIPS, err := machineconfig.ForFIPSEnabled("worker") + if err != nil { + return errors.Wrap(err, "failed to create ignition for FIPS enabled for worker machines") + } + machineConfigs = append(machineConfigs, ignFIPS) + } + if ic.Platform.Name() == powervstypes.Name { + // always enable multipath for powervs. + ignMultipath, err := machineconfig.ForMultipathEnabled("worker") + if err != nil { + return errors.Wrap(err, "failed to create ignition for multipath enabled for worker machines") + } + machineConfigs = append(machineConfigs, ignMultipath) + } + // The maximum number of networks supported on ServiceNetwork is two, one IPv4 and one IPv6 network. + // The cluster-network-operator handles the validation of this field. + // Reference: https://github.com/openshift/cluster-network-operator/blob/fc3e0e25b4cfa43e14122bdcdd6d7f2585017d75/pkg/network/cluster_config.go#L45-L52 + if ic.Networking != nil && len(ic.Networking.ServiceNetwork) == 2 && + (ic.Platform.Name() == openstacktypes.Name || ic.Platform.Name() == vspheretypes.Name) { + // Only configure kernel args for dual-stack clusters. 
+ ignIPv6, err := machineconfig.ForDualStackAddresses("worker") + if err != nil { + return errors.Wrap(err, "failed to create ignition to configure IPv6 for worker machines") + } + machineConfigs = append(machineConfigs, ignIPv6) + } + + switch ic.Platform.Name() { + case alibabacloudtypes.Name: + client, err := installConfig.AlibabaCloud.Client() + if err != nil { + return err + } + vswitchMaps, err := installConfig.AlibabaCloud.VSwitchMaps() + if err != nil { + return errors.Wrap(err, "failed to get VSwitchs map") + } + + mpool := alibabacloudtypes.DefaultWorkerMachinePoolPlatform() + mpool.ImageID = string(*rhcosImage) + mpool.Set(ic.Platform.AlibabaCloud.DefaultMachinePlatform) + mpool.Set(pool.Platform.AlibabaCloud) + if len(mpool.Zones) == 0 { + if len(vswitchMaps) > 0 { + for zone := range vswitchMaps { + mpool.Zones = append(mpool.Zones, zone) + } + } else { + azs, err := client.GetAvailableZonesByInstanceType(mpool.InstanceType) + if err != nil || len(azs) == 0 { + return errors.Wrap(err, "failed to fetch availability zones") + } + mpool.Zones = azs + } + } + + pool.Platform.AlibabaCloud = &mpool + sets, err := alibabacloud.MachineSets( + clusterID.InfraID, + ic, + &pool, + "worker", + workerUserDataSecretName, + installConfig.Config.Platform.AlibabaCloud.Tags, + vswitchMaps, + ) + if err != nil { + return errors.Wrap(err, "failed to create worker machine objects") + } + for _, set := range sets { + machineSets = append(machineSets, set) + } + case awstypes.Name: + subnets := icaws.Subnets{} + zones := icaws.Zones{} + if len(ic.Platform.AWS.Subnets) > 0 { + var subnetsMeta icaws.Subnets + switch pool.Name { + case types.MachinePoolEdgeRoleName: + subnetsMeta, err = installConfig.AWS.EdgeSubnets(ctx) + if err != nil { + return err + } + default: + subnetsMeta, err = installConfig.AWS.PrivateSubnets(ctx) + if err != nil { + return err + } + } + for _, subnet := range subnetsMeta { + subnets[subnet.Zone.Name] = subnet + } + } + mpool := 
defaultAWSMachinePoolPlatform(pool.Name) + + osImage := strings.SplitN(string(*rhcosImage), ",", 2) + osImageID := osImage[0] + if len(osImage) == 2 { + osImageID = "" // the AMI will be generated later on + } + mpool.AMIID = osImageID + + mpool.Set(ic.Platform.AWS.DefaultMachinePlatform) + mpool.Set(pool.Platform.AWS) + zoneDefaults := false + if len(mpool.Zones) == 0 { + if len(subnets) > 0 { + for _, subnet := range subnets { + if subnet.Zone == nil { + return errors.Wrapf(err, "failed to find zone attributes for subnet %s", subnet.ID) + } + mpool.Zones = append(mpool.Zones, subnet.Zone.Name) + zones[subnet.Zone.Name] = subnets[subnet.Zone.Name].Zone + } + } else { + mpool.Zones, err = installConfig.AWS.AvailabilityZones(ctx) + if err != nil { + return err + } + zoneDefaults = true + } + } + + // Requirements when using edge compute pools to populate machine sets. + if pool.Name == types.MachinePoolEdgeRoleName { + err = installConfig.AWS.SetZoneAttributes(ctx, mpool.Zones, zones) + if err != nil { + return errors.Wrap(err, "failed to retrieve zone attributes for edge compute pool") + } + + if pool.Replicas == nil || *pool.Replicas == 0 { + pool.Replicas = pointer.Int64(int64(len(mpool.Zones))) + } + } + + if mpool.InstanceType == "" { + instanceTypes := awsdefaults.InstanceTypes(installConfig.Config.Platform.AWS.Region, installConfig.Config.ControlPlane.Architecture, configv1.HighlyAvailableTopologyMode) + switch pool.Name { + case types.MachinePoolEdgeRoleName: + ok, err := awsSetPreferredInstanceByEdgeZone(ctx, instanceTypes, installConfig.AWS, zones) + if err != nil { + return errors.Wrap(err, "failed to find default instance type for edge pool, you must define on the compute pool") + } + if !ok { + logrus.Warn(errors.Wrap(err, "failed to find preferred instance type for edge pool, using default")) + mpool.InstanceType = instanceTypes[0] + } + default: + mpool.InstanceType, err = aws.PreferredInstanceType(ctx, installConfig.AWS, instanceTypes, mpool.Zones) + 
if err != nil { + logrus.Warn(errors.Wrapf(err, "failed to find default instance type for %s pool", pool.Name)) + mpool.InstanceType = instanceTypes[0] + } + } + } + // if the list of zones is the default we need to try to filter the list in case there are some zones where the instance might not be available + if zoneDefaults { + mpool.Zones, err = aws.FilterZonesBasedOnInstanceType(ctx, installConfig.AWS, mpool.InstanceType, mpool.Zones) + if err != nil { + logrus.Warn(errors.Wrap(err, "failed to filter zone list")) + } + } + + pool.Platform.AWS = &mpool + sets, err := aws.MachineSets(&aws.MachineSetInput{ + ClusterID: clusterID.InfraID, + InstallConfigPlatformAWS: installConfig.Config.Platform.AWS, + Subnets: subnets, + Zones: zones, + Pool: &pool, + Role: pool.Name, + UserDataSecret: workerUserDataSecretName, + }) + if err != nil { + return errors.Wrap(err, "failed to create worker machine objects") + } + for _, set := range sets { + machineSets = append(machineSets, set) + } + case azuretypes.Name: + mpool := defaultAzureMachinePoolPlatform() + mpool.InstanceType = azuredefaults.ComputeInstanceType( + installConfig.Config.Platform.Azure.CloudName, + installConfig.Config.Platform.Azure.Region, + pool.Architecture, + ) + mpool.Set(ic.Platform.Azure.DefaultMachinePlatform) + mpool.Set(pool.Platform.Azure) + + session, err := installConfig.Azure.Session() + if err != nil { + return errors.Wrap(err, "failed to fetch session") + } + + client := icazure.NewClient(session) + if len(mpool.Zones) == 0 { + azs, err := client.GetAvailabilityZones(context.TODO(), ic.Platform.Azure.Region, mpool.InstanceType) + if err != nil { + return errors.Wrap(err, "failed to fetch availability zones") + } + mpool.Zones = azs + if len(azs) == 0 { + // if no azs are given we set to []string{""} for convenience over later operations. 
+ // It means no-zoned for the machine API + mpool.Zones = []string{""} + } + } + + if mpool.OSImage.Publisher != "" { + img, ierr := client.GetMarketplaceImage(context.TODO(), ic.Platform.Azure.Region, mpool.OSImage.Publisher, mpool.OSImage.Offer, mpool.OSImage.SKU, mpool.OSImage.Version) + if ierr != nil { + return fmt.Errorf("failed to fetch marketplace image: %w", ierr) + } + // Publisher is case-sensitive and matched against exactly. Also + // the Plan's publisher might not be exactly the same as the + // Image's publisher + if img.Plan != nil && img.Plan.Publisher != nil { + mpool.OSImage.Publisher = *img.Plan.Publisher + } + } + pool.Platform.Azure = &mpool + + capabilities, err := client.GetVMCapabilities(context.TODO(), mpool.InstanceType, installConfig.Config.Platform.Azure.Region) + if err != nil { + return err + } + + useImageGallery := ic.Platform.Azure.CloudName != azuretypes.StackCloud + sets, err := azure.MachineSets(clusterID.InfraID, ic, &pool, string(*rhcosImage), "worker", workerUserDataSecretName, capabilities, useImageGallery) + if err != nil { + return errors.Wrap(err, "failed to create worker machine objects") + } + for _, set := range sets { + machineSets = append(machineSets, set) + } + case baremetaltypes.Name: + mpool := defaultBareMetalMachinePoolPlatform() + mpool.Set(ic.Platform.BareMetal.DefaultMachinePlatform) + mpool.Set(pool.Platform.BareMetal) + pool.Platform.BareMetal = &mpool + + // Use managed user data secret, since images used by MachineSet + // are always up to date + workerUserDataSecretName = "worker-user-data-managed" + sets, err := baremetal.MachineSets(clusterID.InfraID, ic, &pool, "", "worker", workerUserDataSecretName) + if err != nil { + return errors.Wrap(err, "failed to create worker machine objects") + } + for _, set := range sets { + machineSets = append(machineSets, set) + } + case gcptypes.Name: + mpool := defaultGCPMachinePoolPlatform(pool.Architecture) + mpool.Set(ic.Platform.GCP.DefaultMachinePlatform) + 
mpool.Set(pool.Platform.GCP) + if len(mpool.Zones) == 0 { + azs, err := gcp.ZonesForInstanceType(ic.Platform.GCP.ProjectID, ic.Platform.GCP.Region, mpool.InstanceType) + if err != nil { + return errors.Wrap(err, "failed to fetch availability zones") + } + mpool.Zones = azs + } + pool.Platform.GCP = &mpool + sets, err := gcp.MachineSets(clusterID.InfraID, ic, &pool, string(*rhcosImage), "worker", workerUserDataSecretName) + if err != nil { + return errors.Wrap(err, "failed to create worker machine objects") + } + for _, set := range sets { + machineSets = append(machineSets, set) + } + case ibmcloudtypes.Name: + subnets := map[string]string{} + if len(ic.Platform.IBMCloud.ComputeSubnets) > 0 { + subnetMetas, err := installConfig.IBMCloud.ComputeSubnets(ctx) + if err != nil { + return err + } + for _, subnet := range subnetMetas { + subnets[subnet.Zone] = subnet.Name + } + } + mpool := defaultIBMCloudMachinePoolPlatform() + mpool.Set(ic.Platform.IBMCloud.DefaultMachinePlatform) + mpool.Set(pool.Platform.IBMCloud) + if len(mpool.Zones) == 0 { + azs, err := ibmcloud.AvailabilityZones(ic.Platform.IBMCloud.Region) + if err != nil { + return errors.Wrap(err, "failed to fetch availability zones") + } + mpool.Zones = azs + } + pool.Platform.IBMCloud = &mpool + sets, err := ibmcloud.MachineSets(clusterID.InfraID, ic, subnets, &pool, "worker", workerUserDataSecretName) + if err != nil { + return errors.Wrap(err, "failed to create worker machine objects") + } + for _, set := range sets { + machineSets = append(machineSets, set) + } + case libvirttypes.Name: + mpool := defaultLibvirtMachinePoolPlatform() + mpool.Set(ic.Platform.Libvirt.DefaultMachinePlatform) + mpool.Set(pool.Platform.Libvirt) + pool.Platform.Libvirt = &mpool + sets, err := libvirt.MachineSets(clusterID.InfraID, ic, &pool, "worker", workerUserDataSecretName) + if err != nil { + return errors.Wrap(err, "failed to create worker machine objects") + } + for _, set := range sets { + machineSets = append(machineSets, 
set) + } + case openstacktypes.Name: + mpool := defaultOpenStackMachinePoolPlatform() + mpool.Set(ic.Platform.OpenStack.DefaultMachinePlatform) + mpool.Set(pool.Platform.OpenStack) + pool.Platform.OpenStack = &mpool + + imageName, _ := rhcosutils.GenerateOpenStackImageName(string(*rhcosImage), clusterID.InfraID) + + sets, err := openstack.MachineSets(clusterID.InfraID, ic, &pool, imageName, "worker", workerUserDataSecretName, nil) + if err != nil { + return errors.Wrap(err, "failed to create worker machine objects") + } + for _, set := range sets { + machineSets = append(machineSets, set) + } + case vspheretypes.Name: + mpool := defaultVSphereMachinePoolPlatform() + mpool.Set(ic.Platform.VSphere.DefaultMachinePlatform) + mpool.Set(pool.Platform.VSphere) + pool.Platform.VSphere = &mpool + templateName := clusterID.InfraID + "-rhcos" + + sets, err := vsphere.MachineSets(clusterID.InfraID, ic, &pool, templateName, "worker", workerUserDataSecretName) + if err != nil { + return errors.Wrap(err, "failed to create worker machine objects") + } + for _, set := range sets { + machineSets = append(machineSets, set) + } + + // If static IPs are configured, we must generate worker machines and scale the machinesets to 0. 
+ if ic.Platform.VSphere.Hosts != nil { + logrus.Debug("Generating worker machines with static IPs.") + templateName := clusterID.InfraID + "-rhcos" + + machines, err = vsphere.Machines(clusterID.InfraID, ic, &pool, templateName, "worker", workerUserDataSecretName) + if err != nil { + return errors.Wrap(err, "failed to create worker machine objects") + } + logrus.Debugf("Generated %v worker machines.", len(machines)) + + for _, ms := range sets { + ms.Spec.Replicas = pointer.Int32(0) + } + } + case ovirttypes.Name: + mpool := defaultOvirtMachinePoolPlatform() + mpool.Set(ic.Platform.Ovirt.DefaultMachinePlatform) + mpool.Set(pool.Platform.Ovirt) + pool.Platform.Ovirt = &mpool + + imageName, _ := rhcosutils.GenerateOpenStackImageName(string(*rhcosImage), clusterID.InfraID) + + sets, err := ovirt.MachineSets(clusterID.InfraID, ic, &pool, imageName, "worker", workerUserDataSecretName) + if err != nil { + return errors.Wrap(err, "failed to create worker machine objects for ovirt provider") + } + for _, set := range sets { + machineSets = append(machineSets, set) + } + case powervstypes.Name: + mpool := defaultPowerVSMachinePoolPlatform() + mpool.Set(ic.Platform.PowerVS.DefaultMachinePlatform) + mpool.Set(pool.Platform.PowerVS) + pool.Platform.PowerVS = &mpool + sets, err := powervs.MachineSets(clusterID.InfraID, ic, &pool, "worker", "worker-user-data") + if err != nil { + return errors.Wrap(err, "failed to create worker machine objects for powervs provider") + } + for _, set := range sets { + machineSets = append(machineSets, set) + } + case externaltypes.Name, nonetypes.Name: + case nutanixtypes.Name: + mpool := defaultNutanixMachinePoolPlatform() + mpool.Set(ic.Platform.Nutanix.DefaultMachinePlatform) + mpool.Set(pool.Platform.Nutanix) + if err = mpool.ValidateConfig(ic.Platform.Nutanix); err != nil { + return errors.Wrap(err, "failed to create master machine objects") + } + pool.Platform.Nutanix = &mpool + imageName := nutanixtypes.RHCOSImageName(clusterID.InfraID) + 
+ sets, err := nutanix.MachineSets(clusterID.InfraID, ic, &pool, imageName, "worker", workerUserDataSecretName) + if err != nil { + return errors.Wrap(err, "failed to create worker machine objects") + } + for _, set := range sets { + machineSets = append(machineSets, set) + } + default: + return fmt.Errorf("invalid Platform") + } + } + + data, err := userDataSecret(workerUserDataSecretName, wign.File.Data) + if err != nil { + return errors.Wrap(err, "failed to create user-data secret for worker machines") + } + w.UserDataFile = &asset.File{ + Filename: filepath.Join(directory, workerUserDataFileName), + Data: data, + } + + w.MachineConfigFiles, err = machineconfig.Manifests(machineConfigs, "worker", directory) + if err != nil { + return errors.Wrap(err, "failed to create MachineConfig manifests for worker machines") + } + + w.MachineSetFiles = make([]*asset.File, len(machineSets)) + padFormat := fmt.Sprintf("%%0%dd", len(fmt.Sprintf("%d", len(machineSets)))) + for i, machineSet := range machineSets { + data, err := yaml.Marshal(machineSet) + if err != nil { + return errors.Wrapf(err, "marshal worker %d", i) + } + + padded := fmt.Sprintf(padFormat, i) + w.MachineSetFiles[i] = &asset.File{ + Filename: filepath.Join(directory, fmt.Sprintf(workerMachineSetFileName, padded)), + Data: data, + } + } + + w.MachineFiles = make([]*asset.File, len(machines)) + for i, machineDef := range machines { + data, err := yaml.Marshal(machineDef) + if err != nil { + return errors.Wrapf(err, "marshal master %d", i) + } + + padded := fmt.Sprintf(padFormat, i) + w.MachineFiles[i] = &asset.File{ + Filename: filepath.Join(directory, fmt.Sprintf(workerMachineFileName, padded)), + Data: data, + } + } + return nil +} + +// Files returns the files generated by the asset. 
+func (w *Worker) Files() []*asset.File { + files := make([]*asset.File, 0, 1+len(w.MachineConfigFiles)+len(w.MachineSetFiles)) + if w.UserDataFile != nil { + files = append(files, w.UserDataFile) + } + files = append(files, w.MachineConfigFiles...) + files = append(files, w.MachineSetFiles...) + files = append(files, w.MachineFiles...) + return files +} + +// Load reads the asset files from disk. +func (w *Worker) Load(f asset.FileFetcher) (found bool, err error) { + file, err := f.FetchByName(filepath.Join(directory, workerUserDataFileName)) + if err != nil { + if os.IsNotExist(err) { + return false, nil + } + return false, err + } + w.UserDataFile = file + + w.MachineConfigFiles, err = machineconfig.Load(f, "worker", directory) + if err != nil { + return true, err + } + + fileList, err := f.FetchByPattern(filepath.Join(directory, workerMachineSetFileNamePattern)) + if err != nil { + return true, err + } + + w.MachineSetFiles = fileList + + fileList, err = f.FetchByPattern(filepath.Join(directory, workerMachineFileNamePattern)) + if err != nil { + return true, err + } + w.MachineFiles = fileList + + return true, nil +} + +// MachineSets returns MachineSet manifest structures. 
+func (w *Worker) MachineSets() ([]machinev1beta1.MachineSet, error) { + scheme := runtime.NewScheme() + baremetalapi.AddToScheme(scheme) + ibmcloudapi.AddToScheme(scheme) + libvirtapi.AddToScheme(scheme) + ovirtproviderapi.AddToScheme(scheme) + scheme.AddKnownTypes(machinev1alpha1.GroupVersion, + &machinev1alpha1.OpenstackProviderSpec{}, + ) + scheme.AddKnownTypes(machinev1beta1.SchemeGroupVersion, + &machinev1beta1.AWSMachineProviderConfig{}, + &machinev1beta1.VSphereMachineProviderSpec{}, + &machinev1beta1.AzureMachineProviderSpec{}, + &machinev1beta1.GCPMachineProviderSpec{}, + ) + machinev1.Install(scheme) + scheme.AddKnownTypes(machinev1.GroupVersion, + &machinev1.AlibabaCloudMachineProviderConfig{}, + &machinev1.NutanixMachineProviderConfig{}, + &machinev1.PowerVSMachineProviderConfig{}, + ) + machinev1beta1.AddToScheme(scheme) + decoder := serializer.NewCodecFactory(scheme).UniversalDecoder( + baremetalprovider.SchemeGroupVersion, + ibmcloudprovider.SchemeGroupVersion, + libvirtprovider.SchemeGroupVersion, + machinev1.GroupVersion, + machinev1alpha1.GroupVersion, + ovirtprovider.SchemeGroupVersion, + machinev1beta1.SchemeGroupVersion, + ) + + machineSets := []machinev1beta1.MachineSet{} + for i, file := range w.MachineSetFiles { + machineSet := &machinev1beta1.MachineSet{} + err := yaml.Unmarshal(file.Data, &machineSet) + if err != nil { + return machineSets, errors.Wrapf(err, "unmarshal worker %d", i) + } + + obj, _, err := decoder.Decode(machineSet.Spec.Template.Spec.ProviderSpec.Value.Raw, nil, nil) + if err != nil { + return machineSets, errors.Wrapf(err, "unmarshal worker %d", i) + } + + machineSet.Spec.Template.Spec.ProviderSpec.Value = &runtime.RawExtension{Object: obj} + machineSets = append(machineSets, *machineSet) + } + + return machineSets, nil +} diff --git a/.history/pkg/types/powervs/powervs_regions_20241219120242.go b/.history/pkg/types/powervs/powervs_regions_20241219120242.go new file mode 100644 index 00000000000..501717489f6 --- 
/dev/null +++ b/.history/pkg/types/powervs/powervs_regions_20241219120242.go @@ -0,0 +1,324 @@ +package powervs + +import ( + "fmt" +) + +// Since there is no API to query these, we have to hard-code them here. + +// Region describes resources associated with a region in Power VS. +// We're using a few items from the IBM Cloud VPC offering. The region names +// for VPC are different so another function of this is to correlate those. +type Region struct { + Description string + VPCRegion string +<<<<<<< HEAD + Zones []string +======= + COSRegion string + Zones map[string]Zone + VPCZones []string +>>>>>>> 07c96aff2b (Create region-zone-sysType hierarchy) +} + +// Zone holds the sysTypes for a zone in a IBM Power VS region. +type Zone struct { + SysTypes []string +} + +// Regions holds the regions for IBM Power VS, and descriptions used during the survey. +var Regions = map[string]Region{ + "dal": { + Description: "Dallas, USA", + VPCRegion: "us-south", +<<<<<<< HEAD + Zones: []string{ + "dal10", + "dal12", + }, +======= + COSRegion: "us-south", + Zones: map[string]Zone{ + "dal10": { + SysTypes: []string{"s922", "s1022", "e980", "e1080"}, + }, + "dal12": { + SysTypes: []string{"s922", "e980"}, + }, + }, + VPCZones: []string{"us-south-1", "us-south-2", "us-south-3"}, +>>>>>>> 07c96aff2b (Create region-zone-sysType hierarchy) + }, + "eu-de": { + Description: "Frankfurt, Germany", + VPCRegion: "eu-de", +<<<<<<< HEAD + Zones: []string{ + "eu-de-1", + "eu-de-2", + }, +======= + COSRegion: "eu-de", + Zones: map[string]Zone{ + "eu-de-1": { + SysTypes: []string{"s922", "s1022", "e980"}, + }, + "eu-de-2": { + SysTypes: []string{"s922", "e980"}, + }, + }, + VPCZones: []string{"eu-de-1", "eu-de-2", "eu-de-3"}, +>>>>>>> 07c96aff2b (Create region-zone-sysType hierarchy) + }, + "lon": { + Description: "London, UK.", + VPCRegion: "eu-gb", +<<<<<<< HEAD + Zones: []string{ + "lon04", + "lon06", + }, + }, + "mon": { + Description: "Montreal, Canada", + VPCRegion: "ca-tor", + Zones: 
[]string{"mon01"}, +======= + COSRegion: "eu-gb", + Zones: map[string]Zone{ + "lon06": { + SysTypes: []string{"s922", "e980"}, + }, + }, + VPCZones: []string{"eu-gb-1", "eu-gb-2", "eu-gb-3"}, + }, + "mad": { + Description: "Madrid, Spain", + VPCRegion: "eu-es", + COSRegion: "eu-de", // @HACK - PowerVS says COS not supported in this region + Zones: map[string]Zone{ + "mad02": { + SysTypes: []string{"s922", "s1022", "e980"}, + }, + "mad04": { + SysTypes: []string{"s1022", "e980", "e1080"}, + }, + }, + VPCZones: []string{"eu-es-1", "eu-es-2"}, +>>>>>>> 07c96aff2b (Create region-zone-sysType hierarchy) + }, + "osa": { + Description: "Osaka, Japan", + VPCRegion: "jp-osa", +<<<<<<< HEAD + Zones: []string{"osa21"}, +======= + COSRegion: "jp-osa", + Zones: map[string]Zone{ + "osa21": { + SysTypes: []string{"s922", "s1022", "e980"}, + }, + }, + VPCZones: []string{"jp-osa-1", "jp-osa-2", "jp-osa-3"}, + }, + "sao": { + Description: "São Paulo, Brazil", + VPCRegion: "br-sao", + COSRegion: "br-sao", + Zones: map[string]Zone{ + "sao01": { + SysTypes: []string{"s922", "e980"}, + }, + "sao04": { + SysTypes: []string{"s922", "e980"}, + }, + }, + VPCZones: []string{"br-sao-1", "br-sao-2", "br-sao-3"}, +>>>>>>> 07c96aff2b (Create region-zone-sysType hierarchy) + }, + "syd": { + Description: "Sydney, Australia", + VPCRegion: "au-syd", +<<<<<<< HEAD + Zones: []string{ + "syd04", + "syd05", + }, +======= + COSRegion: "au-syd", + Zones: map[string]Zone{ + "syd04": { + SysTypes: []string{"s922", "e980"}, + }, + }, + VPCZones: []string{"au-syd-1", "au-syd-2", "au-syd-3"}, +>>>>>>> 07c96aff2b (Create region-zone-sysType hierarchy) + }, + "sao": { + Description: "São Paulo, Brazil", + VPCRegion: "br-sao", + Zones: []string{"sao01"}, + }, + "tor": { + Description: "Toronto, Canada", + VPCRegion: "ca-tor", + Zones: []string{"tor01"}, + }, + "tok": { + Description: "Tokyo, Japan", + VPCRegion: "jp-tok", + Zones: []string{"tok04"}, + }, + "us-east": { + Description: "Washington DC, USA", + 
VPCRegion: "us-east", +<<<<<<< HEAD + Zones: []string{"us-east"}, +======= + COSRegion: "us-east", + Zones: map[string]Zone{ + "wdc06": { + SysTypes: []string{"s922", "e980"}, + }, + "wdc07": { + SysTypes: []string{"s922", "s1022", "e980", "e1080"}, + }, + }, + VPCZones: []string{"us-east-1", "us-east-2", "us-east-3"}, +>>>>>>> 07c96aff2b (Create region-zone-sysType hierarchy) + }, +} + +// VPCRegionForPowerVSRegion returns the VPC region for the specified PowerVS region. +func VPCRegionForPowerVSRegion(region string) (string, error) { + if r, ok := Regions[region]; ok { + return r.VPCRegion, nil + } + + return "", fmt.Errorf("VPC region corresponding to a PowerVS region %s not found ", region) +} + +// RegionShortNames returns the list of region names +func RegionShortNames() []string { + keys := make([]string, len(Regions)) + i := 0 + for r := range Regions { + keys[i] = r + i++ + } + return keys +} + +// ValidateVPCRegion validates that given VPC region is known/tested. +func ValidateVPCRegion(region string) bool { + found := false + for r := range Regions { + if region == Regions[r].VPCRegion { + found = true + break + } + } + return found +} + +// ValidateZone validates that the given zone is known/tested. +func ValidateZone(zone string) bool { + for r := range Regions { + for z := range Regions[r].Zones { + if zone == z { + return true + } + } + } + return false +} + +// ZoneNames returns the list of zone names. +func ZoneNames() []string { + zones := []string{} + for r := range Regions { + for z := range Regions[r].Zones { + zones = append(zones, z) + } + } + return zones +} + +// RegionFromZone returns the region name for a given zone name. +func RegionFromZone(zone string) string { + for r := range Regions { + for z := range Regions[r].Zones { + if zone == z { + return r + } + } + } + return "" +} +<<<<<<< HEAD +======= + +// AvailableSysTypes returns the default system type for the zone. 
+func AvailableSysTypes(region string, zone string) ([]string, error) { + knownRegion, ok := Regions[region] + if !ok { + return nil, fmt.Errorf("unknown region name provided") + } + var knownZone Zone + knownZone, ok = knownRegion.Zones[zone] + if !ok { + return nil, fmt.Errorf("unknown zone name provided") + } + return knownZone.SysTypes, nil +} + +// AllKnownSysTypes returns aggregated known system types from all regions. +func AllKnownSysTypes() sets.Set[string] { + sysTypes := sets.New[string]() + for region := range Regions { + for _, zones := range Regions[region].Zones { + sysTypes.Insert(zones.SysTypes...) + } + } + return sysTypes +} + +// AvailableVPCZones returns the known VPC zones for a specified region. +func AvailableVPCZones(region string) ([]string, error) { + knownRegion, ok := Regions[region] + if !ok { + return nil, fmt.Errorf("unknown region name provided") + } + return knownRegion.VPCZones, nil +} + +// COSRegionForVPCRegion returns the corresponding COS region for the given VPC region. +func COSRegionForVPCRegion(vpcRegion string) (string, error) { + for r := range Regions { + if vpcRegion == Regions[r].VPCRegion { + return Regions[r].COSRegion, nil + } + } + + return "", fmt.Errorf("COS region corresponding to a VPC region %s not found ", vpcRegion) +} + +// COSRegionForPowerVSRegion returns the IBM COS region for the specified PowerVS region. +func COSRegionForPowerVSRegion(region string) (string, error) { + if r, ok := Regions[region]; ok { + return r.COSRegion, nil + } + + return "", fmt.Errorf("COS region corresponding to a PowerVS region %s not found ", region) +} + +// ValidateCOSRegion validates that given COS region is known/tested. 
+func ValidateCOSRegion(region string) bool { + for r := range Regions { + if region == Regions[r].COSRegion { + return true + } + } + return false +} +>>>>>>> 07c96aff2b (Create region-zone-sysType hierarchy) diff --git a/.history/pkg/types/powervs/powervs_regions_20241219120532.go b/.history/pkg/types/powervs/powervs_regions_20241219120532.go new file mode 100644 index 00000000000..63fe809bdd6 --- /dev/null +++ b/.history/pkg/types/powervs/powervs_regions_20241219120532.go @@ -0,0 +1,261 @@ +package powervs + +import ( + "fmt" +) + +// Since there is no API to query these, we have to hard-code them here. + +// Region describes resources associated with a region in Power VS. +// We're using a few items from the IBM Cloud VPC offering. The region names +// for VPC are different so another function of this is to correlate those. +type Region struct { + Description string + VPCRegion string + COSRegion string + Zones map[string]Zone + VPCZones []string +} + +// Zone holds the sysTypes for a zone in a IBM Power VS region. +type Zone struct { + SysTypes []string +} + +// Regions holds the regions for IBM Power VS, and descriptions used during the survey. 
+var Regions = map[string]Region{ + "dal": { + Description: "Dallas, USA", + VPCRegion: "us-south", + COSRegion: "us-south", + Zones: map[string]Zone{ + "dal10": { + SysTypes: []string{"s922", "s1022", "e980", "e1080"}, + }, + "dal12": { + SysTypes: []string{"s922", "e980"}, + }, + }, + VPCZones: []string{"us-south-1", "us-south-2", "us-south-3"}, + }, + "eu-de": { + Description: "Frankfurt, Germany", + VPCRegion: "eu-de", + COSRegion: "eu-de", + Zones: map[string]Zone{ + "eu-de-1": { + SysTypes: []string{"s922", "s1022", "e980"}, + }, + "eu-de-2": { + SysTypes: []string{"s922", "e980"}, + }, + }, + VPCZones: []string{"eu-de-1", "eu-de-2", "eu-de-3"}, + }, + "lon": { + Description: "London, UK.", + VPCRegion: "eu-gb", + COSRegion: "eu-gb", + Zones: map[string]Zone{ + "lon06": { + SysTypes: []string{"s922", "e980"}, + }, + }, + VPCZones: []string{"eu-gb-1", "eu-gb-2", "eu-gb-3"}, + }, + "mad": { + Description: "Madrid, Spain", + VPCRegion: "eu-es", + COSRegion: "eu-de", // @HACK - PowerVS says COS not supported in this region + Zones: map[string]Zone{ + "mad02": { + SysTypes: []string{"s922", "s1022", "e980"}, + }, + "mad04": { + SysTypes: []string{"s1022", "e980", "e1080"}, + }, + }, + VPCZones: []string{"eu-es-1", "eu-es-2"}, + }, + "osa": { + Description: "Osaka, Japan", + VPCRegion: "jp-osa", + COSRegion: "jp-osa", + Zones: map[string]Zone{ + "osa21": { + SysTypes: []string{"s922", "s1022", "e980"}, + }, + }, + VPCZones: []string{"jp-osa-1", "jp-osa-2", "jp-osa-3"}, + }, + "sao": { + Description: "São Paulo, Brazil", + VPCRegion: "br-sao", + COSRegion: "br-sao", + Zones: map[string]Zone{ + "sao01": { + SysTypes: []string{"s922", "e980"}, + }, + "sao04": { + SysTypes: []string{"s922", "e980"}, + }, + }, + VPCZones: []string{"br-sao-1", "br-sao-2", "br-sao-3"}, + }, + "syd": { + Description: "Sydney, Australia", + VPCRegion: "au-syd", + COSRegion: "au-syd", + Zones: map[string]Zone{ + "syd04": { + SysTypes: []string{"s922", "e980"}, + }, + }, + VPCZones: 
[]string{"au-syd-1", "au-syd-2", "au-syd-3"}, + }, + "wdc": { + Description: "Washington DC, USA", + VPCRegion: "us-east", + COSRegion: "us-east", + Zones: map[string]Zone{ + "wdc06": { + SysTypes: []string{"s922", "e980"}, + }, + "wdc07": { + SysTypes: []string{"s922", "s1022", "e980", "e1080"}, + }, + }, + VPCZones: []string{"us-east-1", "us-east-2", "us-east-3"}, + }, +} + +// VPCRegionForPowerVSRegion returns the VPC region for the specified PowerVS region. +func VPCRegionForPowerVSRegion(region string) (string, error) { + if r, ok := Regions[region]; ok { + return r.VPCRegion, nil + } + + return "", fmt.Errorf("VPC region corresponding to a PowerVS region %s not found ", region) +} + +// RegionShortNames returns the list of region names +func RegionShortNames() []string { + keys := make([]string, len(Regions)) + i := 0 + for r := range Regions { + keys[i] = r + i++ + } + return keys +} + +// ValidateVPCRegion validates that given VPC region is known/tested. +func ValidateVPCRegion(region string) bool { + found := false + for r := range Regions { + if region == Regions[r].VPCRegion { + found = true + break + } + } + return found +} + +// ValidateZone validates that the given zone is known/tested. +func ValidateZone(zone string) bool { + for r := range Regions { + for z := range Regions[r].Zones { + if zone == z { + return true + } + } + } + return false +} + +// ZoneNames returns the list of zone names. +func ZoneNames() []string { + zones := []string{} + for r := range Regions { + for z := range Regions[r].Zones { + zones = append(zones, z) + } + } + return zones +} + +// RegionFromZone returns the region name for a given zone name. +func RegionFromZone(zone string) string { + for r := range Regions { + for z := range Regions[r].Zones { + if zone == z { + return r + } + } + } + return "" +} + +// AvailableSysTypes returns the default system type for the zone. 
+func AvailableSysTypes(region string, zone string) ([]string, error) { + knownRegion, ok := Regions[region] + if !ok { + return nil, fmt.Errorf("unknown region name provided") + } + var knownZone Zone + knownZone, ok = knownRegion.Zones[zone] + if !ok { + return nil, fmt.Errorf("unknown zone name provided") + } + return knownZone.SysTypes, nil +} + +// AllKnownSysTypes returns aggregated known system types from all regions. +func AllKnownSysTypes() sets.Set[string] { + sysTypes := sets.New[string]() + for region := range Regions { + for _, zones := range Regions[region].Zones { + sysTypes.Insert(zones.SysTypes...) + } + } + return sysTypes +} + +// AvailableVPCZones returns the known VPC zones for a specified region. +func AvailableVPCZones(region string) ([]string, error) { + knownRegion, ok := Regions[region] + if !ok { + return nil, fmt.Errorf("unknown region name provided") + } + return knownRegion.VPCZones, nil +} + +// COSRegionForVPCRegion returns the corresponding COS region for the given VPC region. +func COSRegionForVPCRegion(vpcRegion string) (string, error) { + for r := range Regions { + if vpcRegion == Regions[r].VPCRegion { + return Regions[r].COSRegion, nil + } + } + + return "", fmt.Errorf("COS region corresponding to a VPC region %s not found ", vpcRegion) +} + +// COSRegionForPowerVSRegion returns the IBM COS region for the specified PowerVS region. +func COSRegionForPowerVSRegion(region string) (string, error) { + if r, ok := Regions[region]; ok { + return r.COSRegion, nil + } + + return "", fmt.Errorf("COS region corresponding to a PowerVS region %s not found ", region) +} + +// ValidateCOSRegion validates that given COS region is known/tested. 
+func ValidateCOSRegion(region string) bool { + for r := range Regions { + if region == Regions[r].COSRegion { + return true + } + } + return false +} diff --git a/.history/pkg/types/powervs/powervs_regions_20241219162541.go b/.history/pkg/types/powervs/powervs_regions_20241219162541.go new file mode 100644 index 00000000000..d8bc1ac5562 --- /dev/null +++ b/.history/pkg/types/powervs/powervs_regions_20241219162541.go @@ -0,0 +1,263 @@ +package powervs + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/util/sets" +) + +// Since there is no API to query these, we have to hard-code them here. + +// Region describes resources associated with a region in Power VS. +// We're using a few items from the IBM Cloud VPC offering. The region names +// for VPC are different so another function of this is to correlate those. +type Region struct { + Description string + VPCRegion string + COSRegion string + Zones map[string]Zone + VPCZones []string +} + +// Zone holds the sysTypes for a zone in a IBM Power VS region. +type Zone struct { + SysTypes []string +} + +// Regions holds the regions for IBM Power VS, and descriptions used during the survey. 
+var Regions = map[string]Region{ + "dal": { + Description: "Dallas, USA", + VPCRegion: "us-south", + COSRegion: "us-south", + Zones: map[string]Zone{ + "dal10": { + SysTypes: []string{"s922", "s1022", "e980", "e1080"}, + }, + "dal12": { + SysTypes: []string{"s922", "e980"}, + }, + }, + VPCZones: []string{"us-south-1", "us-south-2", "us-south-3"}, + }, + "eu-de": { + Description: "Frankfurt, Germany", + VPCRegion: "eu-de", + COSRegion: "eu-de", + Zones: map[string]Zone{ + "eu-de-1": { + SysTypes: []string{"s922", "s1022", "e980"}, + }, + "eu-de-2": { + SysTypes: []string{"s922", "e980"}, + }, + }, + VPCZones: []string{"eu-de-1", "eu-de-2", "eu-de-3"}, + }, + "lon": { + Description: "London, UK.", + VPCRegion: "eu-gb", + COSRegion: "eu-gb", + Zones: map[string]Zone{ + "lon06": { + SysTypes: []string{"s922", "e980"}, + }, + }, + VPCZones: []string{"eu-gb-1", "eu-gb-2", "eu-gb-3"}, + }, + "mad": { + Description: "Madrid, Spain", + VPCRegion: "eu-es", + COSRegion: "eu-de", // @HACK - PowerVS says COS not supported in this region + Zones: map[string]Zone{ + "mad02": { + SysTypes: []string{"s922", "s1022", "e980"}, + }, + "mad04": { + SysTypes: []string{"s1022", "e980", "e1080"}, + }, + }, + VPCZones: []string{"eu-es-1", "eu-es-2"}, + }, + "osa": { + Description: "Osaka, Japan", + VPCRegion: "jp-osa", + COSRegion: "jp-osa", + Zones: map[string]Zone{ + "osa21": { + SysTypes: []string{"s922", "s1022", "e980"}, + }, + }, + VPCZones: []string{"jp-osa-1", "jp-osa-2", "jp-osa-3"}, + }, + "sao": { + Description: "São Paulo, Brazil", + VPCRegion: "br-sao", + COSRegion: "br-sao", + Zones: map[string]Zone{ + "sao01": { + SysTypes: []string{"s922", "e980"}, + }, + "sao04": { + SysTypes: []string{"s922", "e980"}, + }, + }, + VPCZones: []string{"br-sao-1", "br-sao-2", "br-sao-3"}, + }, + "syd": { + Description: "Sydney, Australia", + VPCRegion: "au-syd", + COSRegion: "au-syd", + Zones: map[string]Zone{ + "syd04": { + SysTypes: []string{"s922", "e980"}, + }, + }, + VPCZones: 
[]string{"au-syd-1", "au-syd-2", "au-syd-3"}, + }, + "wdc": { + Description: "Washington DC, USA", + VPCRegion: "us-east", + COSRegion: "us-east", + Zones: map[string]Zone{ + "wdc06": { + SysTypes: []string{"s922", "e980"}, + }, + "wdc07": { + SysTypes: []string{"s922", "s1022", "e980", "e1080"}, + }, + }, + VPCZones: []string{"us-east-1", "us-east-2", "us-east-3"}, + }, +} + +// VPCRegionForPowerVSRegion returns the VPC region for the specified PowerVS region. +func VPCRegionForPowerVSRegion(region string) (string, error) { + if r, ok := Regions[region]; ok { + return r.VPCRegion, nil + } + + return "", fmt.Errorf("VPC region corresponding to a PowerVS region %s not found ", region) +} + +// RegionShortNames returns the list of region names +func RegionShortNames() []string { + keys := make([]string, len(Regions)) + i := 0 + for r := range Regions { + keys[i] = r + i++ + } + return keys +} + +// ValidateVPCRegion validates that given VPC region is known/tested. +func ValidateVPCRegion(region string) bool { + found := false + for r := range Regions { + if region == Regions[r].VPCRegion { + found = true + break + } + } + return found +} + +// ValidateZone validates that the given zone is known/tested. +func ValidateZone(zone string) bool { + for r := range Regions { + for z := range Regions[r].Zones { + if zone == z { + return true + } + } + } + return false +} + +// ZoneNames returns the list of zone names. +func ZoneNames() []string { + zones := []string{} + for r := range Regions { + for z := range Regions[r].Zones { + zones = append(zones, z) + } + } + return zones +} + +// RegionFromZone returns the region name for a given zone name. +func RegionFromZone(zone string) string { + for r := range Regions { + for z := range Regions[r].Zones { + if zone == z { + return r + } + } + } + return "" +} + +// AvailableSysTypes returns the default system type for the zone. 
+func AvailableSysTypes(region string, zone string) ([]string, error) { + knownRegion, ok := Regions[region] + if !ok { + return nil, fmt.Errorf("unknown region name provided") + } + var knownZone Zone + knownZone, ok = knownRegion.Zones[zone] + if !ok { + return nil, fmt.Errorf("unknown zone name provided") + } + return knownZone.SysTypes, nil +} + +// AllKnownSysTypes returns aggregated known system types from all regions. +func AllKnownSysTypes() sets.Set[string] { + sysTypes := sets.New[string]() + for region := range Regions { + for _, zones := range Regions[region].Zones { + sysTypes.Insert(zones.SysTypes...) + } + } + return sysTypes +} + +// AvailableVPCZones returns the known VPC zones for a specified region. +func AvailableVPCZones(region string) ([]string, error) { + knownRegion, ok := Regions[region] + if !ok { + return nil, fmt.Errorf("unknown region name provided") + } + return knownRegion.VPCZones, nil +} + +// COSRegionForVPCRegion returns the corresponding COS region for the given VPC region. +func COSRegionForVPCRegion(vpcRegion string) (string, error) { + for r := range Regions { + if vpcRegion == Regions[r].VPCRegion { + return Regions[r].COSRegion, nil + } + } + + return "", fmt.Errorf("COS region corresponding to a VPC region %s not found ", vpcRegion) +} + +// COSRegionForPowerVSRegion returns the IBM COS region for the specified PowerVS region. +func COSRegionForPowerVSRegion(region string) (string, error) { + if r, ok := Regions[region]; ok { + return r.COSRegion, nil + } + + return "", fmt.Errorf("COS region corresponding to a PowerVS region %s not found ", region) +} + +// ValidateCOSRegion validates that given COS region is known/tested. 
+func ValidateCOSRegion(region string) bool { + for r := range Regions { + if region == Regions[r].COSRegion { + return true + } + } + return false +} diff --git a/.history/pkg/types/powervs/powervs_regions_20241219162543.go b/.history/pkg/types/powervs/powervs_regions_20241219162543.go new file mode 100644 index 00000000000..d8bc1ac5562 --- /dev/null +++ b/.history/pkg/types/powervs/powervs_regions_20241219162543.go @@ -0,0 +1,263 @@ +package powervs + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/util/sets" +) + +// Since there is no API to query these, we have to hard-code them here. + +// Region describes resources associated with a region in Power VS. +// We're using a few items from the IBM Cloud VPC offering. The region names +// for VPC are different so another function of this is to correlate those. +type Region struct { + Description string + VPCRegion string + COSRegion string + Zones map[string]Zone + VPCZones []string +} + +// Zone holds the sysTypes for a zone in a IBM Power VS region. +type Zone struct { + SysTypes []string +} + +// Regions holds the regions for IBM Power VS, and descriptions used during the survey. 
+var Regions = map[string]Region{ + "dal": { + Description: "Dallas, USA", + VPCRegion: "us-south", + COSRegion: "us-south", + Zones: map[string]Zone{ + "dal10": { + SysTypes: []string{"s922", "s1022", "e980", "e1080"}, + }, + "dal12": { + SysTypes: []string{"s922", "e980"}, + }, + }, + VPCZones: []string{"us-south-1", "us-south-2", "us-south-3"}, + }, + "eu-de": { + Description: "Frankfurt, Germany", + VPCRegion: "eu-de", + COSRegion: "eu-de", + Zones: map[string]Zone{ + "eu-de-1": { + SysTypes: []string{"s922", "s1022", "e980"}, + }, + "eu-de-2": { + SysTypes: []string{"s922", "e980"}, + }, + }, + VPCZones: []string{"eu-de-1", "eu-de-2", "eu-de-3"}, + }, + "lon": { + Description: "London, UK.", + VPCRegion: "eu-gb", + COSRegion: "eu-gb", + Zones: map[string]Zone{ + "lon06": { + SysTypes: []string{"s922", "e980"}, + }, + }, + VPCZones: []string{"eu-gb-1", "eu-gb-2", "eu-gb-3"}, + }, + "mad": { + Description: "Madrid, Spain", + VPCRegion: "eu-es", + COSRegion: "eu-de", // @HACK - PowerVS says COS not supported in this region + Zones: map[string]Zone{ + "mad02": { + SysTypes: []string{"s922", "s1022", "e980"}, + }, + "mad04": { + SysTypes: []string{"s1022", "e980", "e1080"}, + }, + }, + VPCZones: []string{"eu-es-1", "eu-es-2"}, + }, + "osa": { + Description: "Osaka, Japan", + VPCRegion: "jp-osa", + COSRegion: "jp-osa", + Zones: map[string]Zone{ + "osa21": { + SysTypes: []string{"s922", "s1022", "e980"}, + }, + }, + VPCZones: []string{"jp-osa-1", "jp-osa-2", "jp-osa-3"}, + }, + "sao": { + Description: "São Paulo, Brazil", + VPCRegion: "br-sao", + COSRegion: "br-sao", + Zones: map[string]Zone{ + "sao01": { + SysTypes: []string{"s922", "e980"}, + }, + "sao04": { + SysTypes: []string{"s922", "e980"}, + }, + }, + VPCZones: []string{"br-sao-1", "br-sao-2", "br-sao-3"}, + }, + "syd": { + Description: "Sydney, Australia", + VPCRegion: "au-syd", + COSRegion: "au-syd", + Zones: map[string]Zone{ + "syd04": { + SysTypes: []string{"s922", "e980"}, + }, + }, + VPCZones: 
[]string{"au-syd-1", "au-syd-2", "au-syd-3"}, + }, + "wdc": { + Description: "Washington DC, USA", + VPCRegion: "us-east", + COSRegion: "us-east", + Zones: map[string]Zone{ + "wdc06": { + SysTypes: []string{"s922", "e980"}, + }, + "wdc07": { + SysTypes: []string{"s922", "s1022", "e980", "e1080"}, + }, + }, + VPCZones: []string{"us-east-1", "us-east-2", "us-east-3"}, + }, +} + +// VPCRegionForPowerVSRegion returns the VPC region for the specified PowerVS region. +func VPCRegionForPowerVSRegion(region string) (string, error) { + if r, ok := Regions[region]; ok { + return r.VPCRegion, nil + } + + return "", fmt.Errorf("VPC region corresponding to a PowerVS region %s not found ", region) +} + +// RegionShortNames returns the list of region names +func RegionShortNames() []string { + keys := make([]string, len(Regions)) + i := 0 + for r := range Regions { + keys[i] = r + i++ + } + return keys +} + +// ValidateVPCRegion validates that given VPC region is known/tested. +func ValidateVPCRegion(region string) bool { + found := false + for r := range Regions { + if region == Regions[r].VPCRegion { + found = true + break + } + } + return found +} + +// ValidateZone validates that the given zone is known/tested. +func ValidateZone(zone string) bool { + for r := range Regions { + for z := range Regions[r].Zones { + if zone == z { + return true + } + } + } + return false +} + +// ZoneNames returns the list of zone names. +func ZoneNames() []string { + zones := []string{} + for r := range Regions { + for z := range Regions[r].Zones { + zones = append(zones, z) + } + } + return zones +} + +// RegionFromZone returns the region name for a given zone name. +func RegionFromZone(zone string) string { + for r := range Regions { + for z := range Regions[r].Zones { + if zone == z { + return r + } + } + } + return "" +} + +// AvailableSysTypes returns the default system type for the zone. 
+func AvailableSysTypes(region string, zone string) ([]string, error) { + knownRegion, ok := Regions[region] + if !ok { + return nil, fmt.Errorf("unknown region name provided") + } + var knownZone Zone + knownZone, ok = knownRegion.Zones[zone] + if !ok { + return nil, fmt.Errorf("unknown zone name provided") + } + return knownZone.SysTypes, nil +} + +// AllKnownSysTypes returns aggregated known system types from all regions. +func AllKnownSysTypes() sets.Set[string] { + sysTypes := sets.New[string]() + for region := range Regions { + for _, zones := range Regions[region].Zones { + sysTypes.Insert(zones.SysTypes...) + } + } + return sysTypes +} + +// AvailableVPCZones returns the known VPC zones for a specified region. +func AvailableVPCZones(region string) ([]string, error) { + knownRegion, ok := Regions[region] + if !ok { + return nil, fmt.Errorf("unknown region name provided") + } + return knownRegion.VPCZones, nil +} + +// COSRegionForVPCRegion returns the corresponding COS region for the given VPC region. +func COSRegionForVPCRegion(vpcRegion string) (string, error) { + for r := range Regions { + if vpcRegion == Regions[r].VPCRegion { + return Regions[r].COSRegion, nil + } + } + + return "", fmt.Errorf("COS region corresponding to a VPC region %s not found ", vpcRegion) +} + +// COSRegionForPowerVSRegion returns the IBM COS region for the specified PowerVS region. +func COSRegionForPowerVSRegion(region string) (string, error) { + if r, ok := Regions[region]; ok { + return r.COSRegion, nil + } + + return "", fmt.Errorf("COS region corresponding to a PowerVS region %s not found ", region) +} + +// ValidateCOSRegion validates that given COS region is known/tested. 
+func ValidateCOSRegion(region string) bool { + for r := range Regions { + if region == Regions[r].COSRegion { + return true + } + } + return false +} diff --git a/pkg/asset/cluster/tfvars.go b/pkg/asset/cluster/tfvars.go index f1b066373ab..c2cc6b4f819 100644 --- a/pkg/asset/cluster/tfvars.go +++ b/pkg/asset/cluster/tfvars.go @@ -875,6 +875,16 @@ func (t *TerraformVariables) Generate(parents asset.Parents) error { err = powervsconfig.ValidatePERAvailability(client, installConfig.Config) transitGatewayEnabled := err == nil + cpStanza := installConfig.Config.ControlPlane + if cpStanza == nil || cpStanza.Platform.PowerVS == nil || cpStanza.Platform.PowerVS.SysType == "" { + sysTypes, err := powervs.AvailableSysTypes(installConfig.Config.PowerVS.Region, installConfig.Config.PowerVS.Zone) + if err != nil { + return err + } + for i := range masters { + masterConfigs[i].SystemType = sysTypes[0] + } + } serviceInstanceCRN, err := client.ServiceInstanceIDToCRN(ctx, installConfig.Config.PowerVS.ServiceInstanceID) if err != nil { diff --git a/pkg/asset/installconfig/platformprovisioncheck.go b/pkg/asset/installconfig/platformprovisioncheck.go index 8d34b3b7c5b..2e2b5510f28 100644 --- a/pkg/asset/installconfig/platformprovisioncheck.go +++ b/pkg/asset/installconfig/platformprovisioncheck.go @@ -175,6 +175,11 @@ func (a *PlatformProvisionCheck) Generate(dependencies asset.Parents) error { return err } + err = powervsconfig.ValidateSystemTypeForZone(client, ic.Config) + if err != nil { + return err + } + err = powervsconfig.ValidateServiceInstance(client, ic.Config) if err != nil { return err diff --git a/pkg/asset/installconfig/powervs/regions.go b/pkg/asset/installconfig/powervs/regions.go index 26d9426eef4..91873252894 100644 --- a/pkg/asset/installconfig/powervs/regions.go +++ b/pkg/asset/installconfig/powervs/regions.go @@ -31,7 +31,11 @@ func IsKnownRegion(region string) bool { } func knownZones(region string) []string { - return powervs.Regions[region].Zones + zones := 
make([]string, 0, len(powervs.Regions[region].Zones)) + for z := range powervs.Regions[region].Zones { + zones = append(zones, z) + } + return zones } // IsKnownZone return true is a specified zone is Known to the installer. diff --git a/pkg/asset/installconfig/powervs/validation.go b/pkg/asset/installconfig/powervs/validation.go index c6c0534fcb3..79df6c89948 100644 --- a/pkg/asset/installconfig/powervs/validation.go +++ b/pkg/asset/installconfig/powervs/validation.go @@ -263,7 +263,30 @@ func ValidateResourceGroup(client API, ic *types.InstallConfig) error { return nil } -// ValidateServiceInstance validates the service instance in our install config. +// ValidateSystemTypeForZone checks if the specified sysType is available in the target zone. +func ValidateSystemTypeForZone(client API, ic *types.InstallConfig) error { + if ic.ControlPlane == nil || ic.ControlPlane.Platform.PowerVS == nil || ic.ControlPlane.Platform.PowerVS.SysType == "" { + return nil + } + availableOnes, err := powervstypes.AvailableSysTypes(ic.PowerVS.Region, ic.PowerVS.Zone) + if err != nil { + return fmt.Errorf("failed to obtain available SysTypes for: %s", ic.PowerVS.Zone) + } + requested := ic.ControlPlane.Platform.PowerVS.SysType + found := false + for i := range availableOnes { + if requested == availableOnes[i] { + found = true + break + } + } + if found { + return nil + } + return fmt.Errorf("%s is not available in: %s", requested, ic.PowerVS.Zone) +} + +// ValidateServiceInstance validates the optional service instance GUID in our install config. 
func ValidateServiceInstance(client API, ic *types.InstallConfig) error { ctx, cancel := context.WithTimeout(context.TODO(), 5*time.Minute) defer cancel() diff --git a/pkg/asset/installconfig/powervs/validation_test.go b/pkg/asset/installconfig/powervs/validation_test.go index 34b9c488817..6d5563f0535 100644 --- a/pkg/asset/installconfig/powervs/validation_test.go +++ b/pkg/asset/installconfig/powervs/validation_test.go @@ -45,7 +45,7 @@ var ( validPrivateSubnetUSSouth2ID, } validUserID = "valid-user@example.com" - validZone = "dal10" + validZone = "dal12" existingDNSRecordsResponse = []powervs.DNSRecordResponse{ { @@ -134,6 +134,10 @@ var ( "disaster-recover-site": true, "power-vpn-connections": false, } + defaultSysType = "s922" + newSysType = "s1022" + invalidZone = "dal11" + validServiceInstanceGUID = "" ) func validInstallConfig() *types.InstallConfig { @@ -734,6 +738,140 @@ func TestValidatePERAvailability(t *testing.T) { } } +func TestValidateSystemTypeForZone(t *testing.T) { + cases := []struct { + name string + edits editFunctions + errorMsg string + }{ + { + name: "Unknown Zone specified", + edits: editFunctions{ + func(ic *types.InstallConfig) { + ic.Platform.PowerVS.Zone = invalidZone + ic.ControlPlane.Platform.PowerVS = validMachinePool() + ic.ControlPlane.Platform.PowerVS.SysType = defaultSysType + }, + }, + errorMsg: fmt.Sprintf("failed to obtain available SysTypes for: %s", invalidZone), + }, + { + name: "No Platform block", + edits: editFunctions{ + func(ic *types.InstallConfig) { + ic.ControlPlane.Platform.PowerVS = nil + }, + }, + errorMsg: "", + }, + { + name: "Structure present, but no SysType specified", + edits: editFunctions{ + func(ic *types.InstallConfig) { + ic.ControlPlane.Platform.PowerVS = validMachinePool() + }, + }, + errorMsg: "", + }, + { + name: "Unavailable SysType specified for dal12 zone", + edits: editFunctions{ + func(ic *types.InstallConfig) { + ic.Platform.PowerVS.Region = validRegion + ic.Platform.PowerVS.Zone = validZone 
+ ic.ControlPlane.Platform.PowerVS = validMachinePool() + ic.ControlPlane.Platform.PowerVS.SysType = newSysType + }, + }, + errorMsg: fmt.Sprintf("%s is not available in: %s", newSysType, validZone), + }, + { + name: "Good Zone/SysType combo specified", + edits: editFunctions{ + func(ic *types.InstallConfig) { + ic.Platform.PowerVS.Region = validRegion + ic.Platform.PowerVS.Zone = validZone + ic.ControlPlane.Platform.PowerVS = validMachinePool() + ic.ControlPlane.Platform.PowerVS.SysType = defaultSysType + }, + }, + errorMsg: "", + }, + } + setMockEnvVars() + + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + + powervsClient := mock.NewMockAPI(mockCtrl) + + // Run tests + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + editedInstallConfig := validInstallConfig() + for _, edit := range tc.edits { + edit(editedInstallConfig) + } + + aggregatedErrors := powervs.ValidateSystemTypeForZone(powervsClient, editedInstallConfig) + if tc.errorMsg != "" { + assert.Regexp(t, tc.errorMsg, aggregatedErrors) + } else { + assert.NoError(t, aggregatedErrors) + } + }) + } +} + +func TestValidateServiceInstance(t *testing.T) { + cases := []struct { + name string + edits editFunctions + errorMsg string + }{ + { + name: "valid install config", + edits: editFunctions{}, + errorMsg: "", + }, + { + name: "invalid install config", + edits: editFunctions{ + func(ic *types.InstallConfig) { + ic.Platform.PowerVS.ServiceInstanceGUID = "invalid-uuid" + }, + }, + errorMsg: "platform:powervs:serviceInstanceGUID has an invalid guid", + }, + } + setMockEnvVars() + + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + + powervsClient := mock.NewMockAPI(mockCtrl) + + // FIX: Unexpected call to *mock.MockAPI.ListServiceInstances([context.TODO.WithDeadline(2023-12-02 08:38:15.542340268 -0600 CST m=+300.012357408 [4m59.999979046s])]) at validation.go:289 because: there are no expected calls of the method "ListServiceInstances" for that receiver + 
powervsClient.EXPECT().ListServiceInstances(gomock.Any()).AnyTimes() + + // Run tests + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + editedInstallConfig := validInstallConfig() + for _, edit := range tc.edits { + edit(editedInstallConfig) + } + + aggregatedErrors := powervs.ValidateServiceInstance(powervsClient, editedInstallConfig) + if tc.errorMsg != "" { + assert.Regexp(t, tc.errorMsg, aggregatedErrors) + } else { + assert.NoError(t, aggregatedErrors) + } + }) + } +} + func setMockEnvVars() { os.Setenv("POWERVS_AUTH_FILEPATH", "./tmp/powervs/config.json") os.Setenv("IBMID", "foo") diff --git a/pkg/asset/machines/worker.go b/pkg/asset/machines/worker.go index 39ed23ae15d..9de23262736 100644 --- a/pkg/asset/machines/worker.go +++ b/pkg/asset/machines/worker.go @@ -186,6 +186,15 @@ func defaultPowerVSMachinePoolPlatform() powervstypes.MachinePool { ProcType: machinev1.PowerVSProcessorTypeShared, SysType: "s922", } + + sysTypes, err = powervstypes.AvailableSysTypes(ic.PowerVS.Region, ic.PowerVS.Zone) + if err == nil { + defaultMp.SysType = sysTypes[0] + } else { + logrus.Warnf("For given zone %v, AvailableSysTypes returns %v", ic.PowerVS.Zone, err) + } + + return defaultMp } func defaultNutanixMachinePoolPlatform() nutanixtypes.MachinePool { diff --git a/pkg/types/powervs/powervs_regions.go b/pkg/types/powervs/powervs_regions.go index b7fe244cde7..d8bc1ac5562 100644 --- a/pkg/types/powervs/powervs_regions.go +++ b/pkg/types/powervs/powervs_regions.go @@ -2,6 +2,8 @@ package powervs import ( "fmt" + + "k8s.io/apimachinery/pkg/util/sets" ) // Since there is no API to query these, we have to hard-code them here. @@ -12,7 +14,14 @@ import ( type Region struct { Description string VPCRegion string - Zones []string + COSRegion string + Zones map[string]Zone + VPCZones []string +} + +// Zone holds the sysTypes for a zone in a IBM Power VS region. 
+type Zone struct { + SysTypes []string } // Regions holds the regions for IBM Power VS, and descriptions used during the survey. @@ -20,64 +29,105 @@ var Regions = map[string]Region{ "dal": { Description: "Dallas, USA", VPCRegion: "us-south", - Zones: []string{ - "dal10", - "dal12", + COSRegion: "us-south", + Zones: map[string]Zone{ + "dal10": { + SysTypes: []string{"s922", "s1022", "e980", "e1080"}, + }, + "dal12": { + SysTypes: []string{"s922", "e980"}, + }, }, + VPCZones: []string{"us-south-1", "us-south-2", "us-south-3"}, }, "eu-de": { Description: "Frankfurt, Germany", VPCRegion: "eu-de", - Zones: []string{ - "eu-de-1", - "eu-de-2", + COSRegion: "eu-de", + Zones: map[string]Zone{ + "eu-de-1": { + SysTypes: []string{"s922", "s1022", "e980"}, + }, + "eu-de-2": { + SysTypes: []string{"s922", "e980"}, + }, }, + VPCZones: []string{"eu-de-1", "eu-de-2", "eu-de-3"}, }, "lon": { Description: "London, UK.", VPCRegion: "eu-gb", - Zones: []string{ - "lon04", - "lon06", + COSRegion: "eu-gb", + Zones: map[string]Zone{ + "lon06": { + SysTypes: []string{"s922", "e980"}, + }, }, + VPCZones: []string{"eu-gb-1", "eu-gb-2", "eu-gb-3"}, }, - "mon": { - Description: "Montreal, Canada", - VPCRegion: "ca-tor", - Zones: []string{"mon01"}, + "mad": { + Description: "Madrid, Spain", + VPCRegion: "eu-es", + COSRegion: "eu-de", // @HACK - PowerVS says COS not supported in this region + Zones: map[string]Zone{ + "mad02": { + SysTypes: []string{"s922", "s1022", "e980"}, + }, + "mad04": { + SysTypes: []string{"s1022", "e980", "e1080"}, + }, + }, + VPCZones: []string{"eu-es-1", "eu-es-2"}, }, "osa": { Description: "Osaka, Japan", VPCRegion: "jp-osa", - Zones: []string{"osa21"}, - }, - "syd": { - Description: "Sydney, Australia", - VPCRegion: "au-syd", - Zones: []string{ - "syd04", - "syd05", + COSRegion: "jp-osa", + Zones: map[string]Zone{ + "osa21": { + SysTypes: []string{"s922", "s1022", "e980"}, + }, }, + VPCZones: []string{"jp-osa-1", "jp-osa-2", "jp-osa-3"}, }, "sao": { Description: 
"São Paulo, Brazil", VPCRegion: "br-sao", - Zones: []string{"sao01"}, - }, - "tor": { - Description: "Toronto, Canada", - VPCRegion: "ca-tor", - Zones: []string{"tor01"}, + COSRegion: "br-sao", + Zones: map[string]Zone{ + "sao01": { + SysTypes: []string{"s922", "e980"}, + }, + "sao04": { + SysTypes: []string{"s922", "e980"}, + }, + }, + VPCZones: []string{"br-sao-1", "br-sao-2", "br-sao-3"}, }, - "tok": { - Description: "Tokyo, Japan", - VPCRegion: "jp-tok", - Zones: []string{"tok04"}, + "syd": { + Description: "Sydney, Australia", + VPCRegion: "au-syd", + COSRegion: "au-syd", + Zones: map[string]Zone{ + "syd04": { + SysTypes: []string{"s922", "e980"}, + }, + }, + VPCZones: []string{"au-syd-1", "au-syd-2", "au-syd-3"}, }, - "us-east": { + "wdc": { Description: "Washington DC, USA", VPCRegion: "us-east", - Zones: []string{"us-east"}, + COSRegion: "us-east", + Zones: map[string]Zone{ + "wdc06": { + SysTypes: []string{"s922", "e980"}, + }, + "wdc07": { + SysTypes: []string{"s922", "s1022", "e980", "e1080"}, + }, + }, + VPCZones: []string{"us-east-1", "us-east-2", "us-east-3"}, }, } @@ -117,7 +167,7 @@ func ValidateVPCRegion(region string) bool { func ValidateZone(zone string) bool { for r := range Regions { for z := range Regions[r].Zones { - if zone == Regions[r].Zones[z] { + if zone == z { return true } } @@ -130,7 +180,7 @@ func ZoneNames() []string { zones := []string{} for r := range Regions { for z := range Regions[r].Zones { - zones = append(zones, Regions[r].Zones[z]) + zones = append(zones, z) } } return zones @@ -140,10 +190,74 @@ func ZoneNames() []string { func RegionFromZone(zone string) string { for r := range Regions { for z := range Regions[r].Zones { - if zone == Regions[r].Zones[z] { + if zone == z { return r } } } return "" } + +// AvailableSysTypes returns the default system type for the zone. 
+func AvailableSysTypes(region string, zone string) ([]string, error) { + knownRegion, ok := Regions[region] + if !ok { + return nil, fmt.Errorf("unknown region name provided") + } + var knownZone Zone + knownZone, ok = knownRegion.Zones[zone] + if !ok { + return nil, fmt.Errorf("unknown zone name provided") + } + return knownZone.SysTypes, nil +} + +// AllKnownSysTypes returns aggregated known system types from all regions. +func AllKnownSysTypes() sets.Set[string] { + sysTypes := sets.New[string]() + for region := range Regions { + for _, zones := range Regions[region].Zones { + sysTypes.Insert(zones.SysTypes...) + } + } + return sysTypes +} + +// AvailableVPCZones returns the known VPC zones for a specified region. +func AvailableVPCZones(region string) ([]string, error) { + knownRegion, ok := Regions[region] + if !ok { + return nil, fmt.Errorf("unknown region name provided") + } + return knownRegion.VPCZones, nil +} + +// COSRegionForVPCRegion returns the corresponding COS region for the given VPC region. +func COSRegionForVPCRegion(vpcRegion string) (string, error) { + for r := range Regions { + if vpcRegion == Regions[r].VPCRegion { + return Regions[r].COSRegion, nil + } + } + + return "", fmt.Errorf("COS region corresponding to a VPC region %s not found ", vpcRegion) +} + +// COSRegionForPowerVSRegion returns the IBM COS region for the specified PowerVS region. +func COSRegionForPowerVSRegion(region string) (string, error) { + if r, ok := Regions[region]; ok { + return r.COSRegion, nil + } + + return "", fmt.Errorf("COS region corresponding to a PowerVS region %s not found ", region) +} + +// ValidateCOSRegion validates that given COS region is known/tested. +func ValidateCOSRegion(region string) bool { + for r := range Regions { + if region == Regions[r].COSRegion { + return true + } + } + return false +}