diff --git a/CHANGELOG.md b/CHANGELOG.md index 868288cda..39ca949b2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,9 @@ ## Unreleased +## 1.207.0 (January, 14 2025) +ENHANCEMENTS: +* resource/spotinst_ocean_aks_np: Added support for `vng_template_scheduling`, `logging` and `suspension_hours` objects. + ## 1.206.0 (January, 10 2025) ENHANCEMENTS: * resource/spotinst_ocean_gke_import: Added support for `auto_update` object. diff --git a/docs/resources/ocean_aks_np.md b/docs/resources/ocean_aks_np.md index 22c96a972..ee52a11e3 100644 --- a/docs/resources/ocean_aks_np.md +++ b/docs/resources/ocean_aks_np.md @@ -104,6 +104,18 @@ resource "spotinst_ocean_aks_np" "example" { grace_period = 600 } + // ---------------------------------------------------------------------- + + // ---- Logging --------------------------------------------------------- + + logging { + export { + azure_blob { + id = "di-abcd123" + } + } + } + // ---------------------------------------------------------------------- // --- virtualNodeGroupTemplate ----------------------------------------- @@ -164,6 +176,16 @@ resource "spotinst_ocean_aks_np" "example" { value = "taintValue" effect = "NoSchedule" } + + vng_template_scheduling { + vng_template_shutdown_hours { + is_enabled = true + time_windows = [ + "Fri:15:30-Sat:13:30", + "Sun:15:30-Mon:13:30", + ] + } + } tags = { tagKey = "env" @@ -208,19 +230,19 @@ The following arguments are supported: * `aks_region` - (Required) The cluster's region. * `aks_resource_group_name` - (Required) The name of the cluster's resource group. * `autoscaler` - (Optional) The Ocean Kubernetes Autoscaler object. - * `autoscale_is_enabled` - (Optional) Enable the Ocean Kubernetes Autoscaler. - * `autoscale_down` - (Optional) Auto Scaling scale down operations. - * `max_scale_down_percentage` - (Optional) The maximum percentage allowed to scale down in a single scaling action. - * `resource_limits` - (Optional) Optionally set upper and lower bounds on the resource usage of the cluster. - * `max_vcpu` - (Optional) The maximum cpu in vCpu units that can be allocated to the cluster. - * `max_memory_gib` - (Optional) The maximum memory in GiB units that can be allocated to the cluster. - * `autoscale_headroom` - (Optional) Spare resource capacity management enabling fast assignment of pods without waiting for new resources to launch. - * `automatic` - (Optional) [Automatic headroom](https://docs.spot.io/ocean/features/headroom?id=automatic-headroom) configuration. - * `is_enabled` - (Optional, Default - false) Enable automatic headroom. When set to `true`, Ocean configures and optimizes headroom automatically. - * `percentage` - (Optional) Optionally set a number between 0-100 to control the percentage of total cluster resources dedicated to headroom. + * `autoscale_is_enabled` - (Optional) Enable the Ocean Kubernetes Autoscaler. + * `autoscale_down` - (Optional) Auto Scaling scale down operations. + * `max_scale_down_percentage` - (Optional) The maximum percentage allowed to scale down in a single scaling action. + * `resource_limits` - (Optional) Optionally set upper and lower bounds on the resource usage of the cluster. + * `max_vcpu` - (Optional) The maximum cpu in vCpu units that can be allocated to the cluster. + * `max_memory_gib` - (Optional) The maximum memory in GiB units that can be allocated to the cluster. + * `autoscale_headroom` - (Optional) Spare resource capacity management enabling fast assignment of pods without waiting for new resources to launch. 
+ * `automatic` - (Optional) [Automatic headroom](https://docs.spot.io/ocean/features/headroom?id=automatic-headroom) configuration. + * `is_enabled` - (Optional, Default - false) Enable automatic headroom. When set to `true`, Ocean configures and optimizes headroom automatically. + * `percentage` - (Optional) Optionally set a number between 0-100 to control the percentage of total cluster resources dedicated to headroom. * `controller_cluster_id` - (Required) Enter a unique Ocean cluster identifier. Cannot be updated. This needs to match with string that was used to install the controller in the cluster, typically clusterName + 8 digit string. * `health` - (Optional) The Ocean AKS Health object. - * `grace_period` - (Optional, Default: `600`) The amount of time to wait, in seconds, from the moment the instance has launched until monitoring of its health checks begins. + * `grace_period` - (Optional, Default: `600`) The amount of time to wait, in seconds, from the moment the instance has launched until monitoring of its health checks begins. * `name` - (Required) Add a name for the Ocean cluster. * `headrooms` - (Optional) Specify the custom headroom per VNG. Provide a list of headroom objects. * `cpu_per_unit` - (Optional) Configure the number of CPUs to allocate the headroom. CPUs are denoted in millicores, where 1000 millicores = 1 vCPU. @@ -229,8 +251,8 @@ The following arguments are supported: * `num_of_units` - (Optional) The number of units to retain as headroom, where each unit has the defined headroom CPU and memory. * `availability_zones` - (Required) An Array holding Availability Zones, this configures the availability zones the Ocean may launch instances in per VNG. * `labels` - (Optional) An array of labels to add to the virtual node group. Only custom user labels are allowed, and not [Kubernetes well-known labels](https://kubernetes.io/docs/reference/labels-annotations-taints/) or [ Azure AKS labels](https://learn.microsoft.com/en-us/azure/aks/use-labels) or [Spot labels](https://docs.spot.io/ocean/features/labels-and-taints?id=spot-labels). - * `key` - (Required) Set label key [spot labels](https://docs.spot.io/ocean/features/labels-and-taints?id=spotinstionode-lifecycle) and [Azure labels](https://learn.microsoft.com/en-us/azure/aks/use-labels). The following are not allowed: ["kubernetes.azure.com/agentpool","kubernetes.io/arch","kubernetes.io/os","node.kubernetes.io/instance-type", "topology.kubernetes.io/region", "topology.kubernetes.io/zone", "kubernetes.azure.com/cluster", "kubernetes.azure.com/mode", "kubernetes.azure.com/role", "kubernetes.azure.com/scalesetpriority", "kubernetes.io/hostname", "kubernetes.azure.com/storageprofile", "kubernetes.azure.com/storagetier", "kubernetes.azure.com/instance-sku", "kubernetes.azure.com/node-image-version", "kubernetes.azure.com/subnet", "kubernetes.azure.com/vnet", "kubernetes.azure.com/ppg", "kubernetes.azure.com/encrypted-set", "kubernetes.azure.com/accelerator", "kubernetes.azure.com/fips_enabled", "kubernetes.azure.com/os-sku"] - * `value` - (Required) Set label value. + * `key` - (Required) Set label key [spot labels](https://docs.spot.io/ocean/features/labels-and-taints?id=spotinstionode-lifecycle) and [Azure labels](https://learn.microsoft.com/en-us/azure/aks/use-labels). 
The following are not allowed: ["kubernetes.azure.com/agentpool","kubernetes.io/arch","kubernetes.io/os","node.kubernetes.io/instance-type", "topology.kubernetes.io/region", "topology.kubernetes.io/zone", "kubernetes.azure.com/cluster", "kubernetes.azure.com/mode", "kubernetes.azure.com/role", "kubernetes.azure.com/scalesetpriority", "kubernetes.io/hostname", "kubernetes.azure.com/storageprofile", "kubernetes.azure.com/storagetier", "kubernetes.azure.com/instance-sku", "kubernetes.azure.com/node-image-version", "kubernetes.azure.com/subnet", "kubernetes.azure.com/vnet", "kubernetes.azure.com/ppg", "kubernetes.azure.com/encrypted-set", "kubernetes.azure.com/accelerator", "kubernetes.azure.com/fips_enabled", "kubernetes.azure.com/os-sku"] + * `value` - (Required) Set label value. * `max_count` - (Optional, Default: 1000) Maximum node count limit. * `min_count` - (Optional, Default: 0) Minimum node count limit. * `enable_node_public_ip` - (Optional) Enable node public IP. @@ -248,28 +270,36 @@ The following arguments are supported: * `fallback_to_ondemand` - (Optional) If no spot VM markets are available, enable Ocean to launch regular (pay-as-you-go) nodes instead. * `spot_percentage` - (Optional) Percentage of spot VMs to maintain. * `tag` - (Optional) A maximum of 10 unique key-value pairs for VM tags in the virtual node group. - * `key` - (Optional) Tag key for VMs in the cluster. - * `value` - (Optional) Tag value for VMs in the cluster. + * `key` - (Optional) Tag key for VMs in the cluster. + * `value` - (Optional) Tag value for VMs in the cluster. * `taints` - (Optional) Add taints to a virtual node group. Only custom user taints are allowed, and not [Kubernetes well-known taints](https://kubernetes.io/docs/reference/labels-annotations-taints/) or Azure AKS [ScaleSetPrioirty (Spot VM) taint](https://learn.microsoft.com/en-us/azure/aks/spot-node-pool). For all Spot VMs, AKS injects a taint kubernetes.azure.com/scalesetpriority=spot:NoSchedule, to ensure that only workloads that can handle interruptions are scheduled on Spot nodes. To [schedule a pod to run on Spot node](https://learn.microsoft.com/en-us/azure/aks/spot-node-pool#schedule-a-pod-to-run-on-the-spot-node), add a toleration but dont include the nodeAffinity (not supported for Spot Ocean), this will prevent the pod from being scheduled using Spot Ocean. - * `key` - (Optional) Set taint key. The following taint keys are not allowed: ["node.kubernetes.io/not-ready", "node.kubernetes.io/unreachable", "node.kubernetes.io/unschedulable", "node.kubernetes.io/memory-pressure", "node.kubernetes.io/disk-pressure", "node.kubernetes.io/network-unavailable", "node.kubernetes.io/pid-pressure", "node.kubernetes.io/out-of-service", "node.cloudprovider.kubernetes.io/uninitialized", "node.cloudprovider.kubernetes.io/shutdown", "kubernetes.azure.com/scalesetpriority"] - * `value` - (Optional) Set taint value. - * `effect` - (Optional, Enum: `"NoSchedule", "PreferNoSchedule", "NoExecute", "PreferNoExecute"`) Set taint effect. + * `key` - (Optional) Set taint key. 
The following taint keys are not allowed: ["node.kubernetes.io/not-ready", "node.kubernetes.io/unreachable", "node.kubernetes.io/unschedulable", "node.kubernetes.io/memory-pressure", "node.kubernetes.io/disk-pressure", "node.kubernetes.io/network-unavailable", "node.kubernetes.io/pid-pressure", "node.kubernetes.io/out-of-service", "node.cloudprovider.kubernetes.io/uninitialized", "node.cloudprovider.kubernetes.io/shutdown", "kubernetes.azure.com/scalesetpriority"] + * `value` - (Optional) Set taint value. + * `effect` - (Optional, Enum: `"NoSchedule", "PreferNoSchedule", "NoExecute", "PreferNoExecute"`) Set taint effect. * `filters` - (Optional) Filters for the VM sizes that can be launched from the virtual node group. - * `architectures` - (Optional, Enum `"x86_64", "intel64", "amd64", "arm64"`) The filtered vm sizes will support at least one of the architectures from this list. x86_64 includes both intel64 and amd64. - * `max_memory_gib` - (Optional) Maximum amount of Memory (GiB). - * `max_vcpu` - (Optional) Maximum number of vcpus available. - * `min_memory_gib` - (Optional) Minimum amount of Memory (GiB). - * `min_vcpu` - (Optional) Minimum number of vcpus available. - * `series` - (Optional) Vm sizes belonging to a series from the list will be available for scaling. We can specify include list and series can be specified with capital or small letters, with space, without space or with underscore '_' . For example all of these "DSv2", "Ds v2", "ds_v2" refer to same DS_v2 series. - * `exclude_series` - (Optional) Vm sizes belonging to a series from the list will not be available for scaling - * `accelerated_networking` - (Optional, Enum `"Enabled", "Disabled"`) In case acceleratedNetworking is set to Enabled, accelerated networking applies only to the VM that enables it. - * `disk_performance` - (Optional, Enum `"Standard", "Premium"`) The filtered vm sizes will support at least one of the classes from this list. - * `min_gpu` - (Optional) Minimum number of GPUs available. - * `max_gpu` - (Optional) Maximum number of GPUs available. - * `min_nics` - (Optional) Minimum number of network interfaces. - * `min_disk` - (Optional) Minimum number of data disks available. - * `vm_types` - (Optional, Enum `"generalPurpose", "memoryOptimized", "computeOptimized", "highPerformanceCompute", "storageOptimized", "GPU"`) The filtered vm types will belong to one of the vm types from this list. - * `gpu_types` - (Optional, Enum `"nvidia-tesla-v100", "amd-radeon-instinct-mi25", "nvidia-a10", "nvidia-tesla-a100", "nvidia-tesla-k80", "nvidia-tesla-m60", "nvidia-tesla-p100", "nvidia-tesla-p40", "nvidia-tesla-t4", "nvidia-tesla-h100"`) The filtered gpu types will belong to one of the gpu types from this list. + * `architectures` - (Optional, Enum `"x86_64", "intel64", "amd64", "arm64"`) The filtered vm sizes will support at least one of the architectures from this list. x86_64 includes both intel64 and amd64. + * `max_memory_gib` - (Optional) Maximum amount of Memory (GiB). + * `max_vcpu` - (Optional) Maximum number of vcpus available. + * `min_memory_gib` - (Optional) Minimum amount of Memory (GiB). + * `min_vcpu` - (Optional) Minimum number of vcpus available. + * `series` - (Optional) Vm sizes belonging to a series from the list will be available for scaling. We can specify include list and series can be specified with capital or small letters, with space, without space or with underscore '_' . For example all of these "DSv2", "Ds v2", "ds_v2" refer to same DS_v2 series. 
+ * `exclude_series` - (Optional) Vm sizes belonging to a series from the list will not be available for scaling. + * `accelerated_networking` - (Optional, Enum `"Enabled", "Disabled"`) In case acceleratedNetworking is set to Enabled, accelerated networking applies only to the VM that enables it. + * `disk_performance` - (Optional, Enum `"Standard", "Premium"`) The filtered vm sizes will support at least one of the classes from this list. + * `min_gpu` - (Optional) Minimum number of GPUs available. + * `max_gpu` - (Optional) Maximum number of GPUs available. + * `min_nics` - (Optional) Minimum number of network interfaces. + * `min_disk` - (Optional) Minimum number of data disks available. + * `vm_types` - (Optional, Enum `"generalPurpose", "memoryOptimized", "computeOptimized", "highPerformanceCompute", "storageOptimized", "GPU"`) The filtered vm types will belong to one of the vm types from this list. + * `gpu_types` - (Optional, Enum `"nvidia-tesla-v100", "amd-radeon-instinct-mi25", "nvidia-a10", "nvidia-tesla-a100", "nvidia-tesla-k80", "nvidia-tesla-m60", "nvidia-tesla-p100", "nvidia-tesla-p40", "nvidia-tesla-t4", "nvidia-tesla-h100"`) The filtered gpu types will belong to one of the gpu types from this list. +* `logging` - (Optional) The Ocean AKS Logging object. + * `export` - (Optional) The Ocean AKS Logging Export object. + * `azure_blob` - Exports your cluster's logs to the storage account and container configured on the given storage account [data integration](https://docs.spot.io/#operation/DataIntegrationCreate). Each file contains 3 minutes of logs, where each log entry is separated by a new line and saved as JSON. The file format is `container`/`accountId``oceanId``oceanName`_`startTime`.log + * `id` - (Required) The identifier of the Azure Blob data integration to export the logs to. +* `vng_template_scheduling` - (Optional) An object used to specify times when the virtual node group will turn off all its node pools. Once the shutdown time is over, the virtual node group returns to its previous state. + * `vng_template_shutdown_hours` - (Optional) An object used to specify times that the nodes in the virtual node group will be stopped. + * `is_enabled` - (Optional) Flag to enable or disable the shutdown hours mechanism. When `false`, the mechanism is deactivated, and the virtual node group remains in its current state. + * `time_windows` - (Optional) The times that the shutdown hours will apply. Required if `is_enabled` is true. ## Update Policy @@ -305,10 +335,13 @@ update_policy { * `shutdown_hours` - (Optional) An object used to specify times that the nodes in the cluster will be taken down. * `is_enabled` - (Optional) Flag to enable or disable the shutdown hours mechanism. When `false`, the mechanism is deactivated, and the cluster remains in its current state. * `time_windows` - (Optional) The times that the shutdown hours will apply. Required if isEnabled is true. + * `suspension_hours` - (Optional) An object used to specify times that the cluster should be exempted from Ocean's scaling-down activities to ensure uninterrupted operations during critical periods. + * `is_enabled` - (Optional) Flag to enable or disable the suspension hours mechanism. When `false`, the mechanism is deactivated, and the cluster remains in its current state. + * `time_windows` - (Optional) The times that the suspension hours will apply. Required if `is_enabled` is true. * `tasks` - (Optional) A list of scheduling tasks to preform on the cluster at a specific cron time. 
* `is_enabled` - (Required) Describes whether the task is enabled. When true the task should run when false it should not run. Required for `cluster.scheduling.tasks` object. * `cron_expression` - (Required) A valid cron expression. The cron is running in UTC time zone and is in Unix cron format Cron Expression Validator Script. Only one of `frequency` or `cronExpression` should be used at a time. Required for `cluster.scheduling.tasks` object. (Example: `0 1 * * *`). - * `task_type` - (Required) Valid values: `clusterRoll` The type of the scheduling task. + * `task_type` - (Required) The type of the scheduling task. Valid values: `clusterRoll`, `autoUpgradeVersion`. * `parameters` - (Optional) The parameters of the scheduling task. Each task type will have properties relevant only to it. * `parameters_cluster_roll` - (Optional) The parameters of the cluster roll scheduling task. * `batch_min_healthy_percentage` - (Optional) The minimum percentage of the scaled nodes that should be healthy at each batch. Valid values are 1-100. @@ -317,6 +350,14 @@ update_policy { * `respect_pdb` - (Optional) During the roll, if the parameter is set to true we honor PDB during the instance replacement. * `respect_restrict_scale_down` - (Optional) During the roll, if the parameter is set to true we honor Restrict Scale Down label during the nodes replacement. * `vng_ids` - (Optional) List of Virtual Node Group IDs to be rolled. If not set or set to null, cluster roll will be applied. + * `parameters_upgrade_config` - (Optional) The parameters of the upgrade config scheduling task. + * `apply_roll` - (Optional) When set to `true`, a cluster roll will be initiated if a new version is available to upgrade in the dedicated virtual node groups. + * `scope_version` - (Optional) The version scope of the upgrade (for example, `patch`, as used in the examples below). + * `roll_parameters` - (Optional) The parameters of the cluster roll that will be initiated. + * `batch_min_healthy_percentage` - (Optional) The minimum percentage of the scaled nodes that should be healthy at each batch. Valid values are 1-100. + * `batch_size_percentage` - (Optional) The percentage of the cluster that will be rolled at each batch. Valid values are 1-100. + * `comment` - (Optional) A comment to be added to the cluster roll. + * `respect_pdb` - (Optional) During the roll, if the parameter is set to true we honor PDB during the instance replacement. + * `respect_restrict_scale_down` - (Optional) During the roll, if the parameter is set to true we honor Restrict Scale Down label during the nodes replacement. 
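The following example configures cluster-level `scheduling` with `shutdown_hours`, the new `suspension_hours` block, and both a `clusterRoll` task and the new `autoUpgradeVersion` task: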
```hcl scheduling { @@ -327,12 +368,20 @@ scheduling { "Sun:15:30-Mon:13:30", ] } + suspension_hours { + is_enabled = true + time_windows = [ + "Fri:15:30-Sun:13:30", + "Mon:15:30-Tue:13:30", + ] + } + #task for clusterRoll tasks { is_enabled = true cron_expression = "* 1 * * *" task_type = "clusterRoll" parameters { - parameters_cluster_roll{ + parameters_cluster_roll { batch_min_healthy_percentage = 50 batch_size_percentage = 20 comment = "Scheduled cluster roll" @@ -342,5 +391,24 @@ scheduling { } } } + #task for autoUpgradeVersion + tasks { + is_enabled = true + cron_expression = "* 10 * * *" + task_type = "autoUpgradeVersion" + parameters { + parameters_upgrade_config { + apply_roll = true + scope_version = "patch" + roll_parameters { + batch_min_healthy_percentage = 75 + batch_size_percentage = 50 + comment = "Scheduled upgrade roll" + respect_pdb = false + respect_restrict_scale_down = false + } + } + } + } } ``` \ No newline at end of file diff --git a/go.mod b/go.mod index cdb27515c..33ccdd384 100644 --- a/go.mod +++ b/go.mod @@ -10,7 +10,7 @@ require ( github.com/hashicorp/terraform-plugin-docs v0.5.1 github.com/hashicorp/terraform-plugin-sdk/v2 v2.5.0 github.com/sethvargo/go-password v0.3.1 - github.com/spotinst/spotinst-sdk-go v1.382.0 + github.com/spotinst/spotinst-sdk-go v1.384.0 golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 ) diff --git a/go.sum b/go.sum index b8d8d644a..98091eb76 100644 --- a/go.sum +++ b/go.sum @@ -587,8 +587,8 @@ github.com/skeema/knownhosts v1.3.0 h1:AM+y0rI04VksttfwjkSTNQorvGqmwATnvnAHpSgc0 github.com/skeema/knownhosts v1.3.0/go.mod h1:sPINvnADmT/qYH1kfv+ePMmOBTH6Tbl7b5LvTDjFK7M= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/pflag v1.0.2/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spotinst/spotinst-sdk-go v1.382.0 h1:EbRLMORlFdN27urjc0QyThjlKUEmpYp+89FvZWZAK80= -github.com/spotinst/spotinst-sdk-go v1.382.0/go.mod h1:Tn4/eb0SFY6IXmxz71CClujvbD/PuT+EO6Ta8v6AML4= +github.com/spotinst/spotinst-sdk-go v1.384.0 h1:tp5vq/Kxn2wfloiMAgv7HR5JJyy/tyIXt1N9n00Od0M= +github.com/spotinst/spotinst-sdk-go v1.384.0/go.mod h1:Tn4/eb0SFY6IXmxz71CClujvbD/PuT+EO6Ta8v6AML4= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= diff --git a/spotinst/commons/consts.go b/spotinst/commons/consts.go index 043fea3e4..c7aa98bb6 100644 --- a/spotinst/commons/consts.go +++ b/spotinst/commons/consts.go @@ -166,6 +166,7 @@ const ( OceanAKSNPGroupAutoScale ResourceAffinity = "Ocean_AKS_NP_Auto_Scale" OceanAKSNPScheduling ResourceAffinity = "Ocean_AKS_NP_Scheduling" OceanAKSNPVmSizes ResourceAffinity = "Ocean_AKS_NP_Vm_Sizes" + OceanAKSNPLogging ResourceAffinity = "Ocean_AKS_NP_Logging" OceanAKSNPVirtualNodeGroup ResourceAffinity = "Ocean_AKS_NP_Virtual_Node_Group" OceanAKSNPVirtualNodeGroupStrategy ResourceAffinity = "Ocean_AKS_NP_Virtual_Node_Group_Strategy" diff --git a/spotinst/ocean_aks_np/consts.go b/spotinst/ocean_aks_np/consts.go index f321a2c94..98f0b7d70 100644 --- a/spotinst/ocean_aks_np/consts.go +++ b/spotinst/ocean_aks_np/consts.go @@ -25,3 +25,9 @@ const ( RespectRestrictScaleDown commons.FieldName = "respect_restrict_scale_down" NodeNames commons.FieldName = "node_names" ) +const ( + VNG_Template_Scheduling commons.FieldName = "vng_template_scheduling" 
+ ShutdownHours commons.FieldName = "vng_template_shutdown_hours" + TimeWindows commons.FieldName = "time_windows" + ShutdownHoursIsEnabled commons.FieldName = "is_enabled" +) diff --git a/spotinst/ocean_aks_np/fields_spotinst_ocean_aks_np.go b/spotinst/ocean_aks_np/fields_spotinst_ocean_aks_np.go index 60e10a54f..93793511a 100644 --- a/spotinst/ocean_aks_np/fields_spotinst_ocean_aks_np.go +++ b/spotinst/ocean_aks_np/fields_spotinst_ocean_aks_np.go @@ -2,6 +2,7 @@ package ocean_aks_np import ( "fmt" + "github.com/spotinst/spotinst-sdk-go/service/ocean/providers/azure_np" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/spotinst/spotinst-sdk-go/spotinst" @@ -322,6 +323,83 @@ func Setup(fieldsMap map[commons.FieldName]*commons.GenericField) { }, nil, nil, nil, nil, ) + + fieldsMap[VNG_Template_Scheduling] = commons.NewGenericField( + commons.OceanAKSNP, + VNG_Template_Scheduling, + &schema.Schema{ + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + string(ShutdownHours): { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + string(ShutdownHoursIsEnabled): { + Type: schema.TypeBool, + Optional: true, + }, + + string(TimeWindows): { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + }, + }, + }, + }, + }, + func(resourceObject interface{}, resourceData *schema.ResourceData, meta interface{}) error { + clusterWrapper := resourceObject.(*commons.AKSNPClusterWrapper) + cluster := clusterWrapper.GetNPCluster() + var result []interface{} = nil + if cluster != nil && cluster.VirtualNodeGroupTemplate != nil && cluster.VirtualNodeGroupTemplate.Scheduling != nil { + result = flattenScheduling(cluster.VirtualNodeGroupTemplate.Scheduling) + } + + if len(result) > 0 { + if err := resourceData.Set(string(VNG_Template_Scheduling), result); err != nil { + return fmt.Errorf(string(commons.FailureFieldReadPattern), string(VNG_Template_Scheduling), err) + } + } + return nil + }, + func(resourceObject interface{}, resourceData *schema.ResourceData, meta interface{}) error { + clusterWrapper := resourceObject.(*commons.AKSNPClusterWrapper) + cluster := clusterWrapper.GetNPCluster() + var value *azure_np.Scheduling = nil + if v, ok := resourceData.GetOkExists(string(VNG_Template_Scheduling)); ok { + if scheduling, err := expandScheduling(v); err != nil { + return err + } else { + value = scheduling + } + } + cluster.VirtualNodeGroupTemplate.SetScheduling(value) + return nil + }, + func(resourceObject interface{}, resourceData *schema.ResourceData, meta interface{}) error { + clusterWrapper := resourceObject.(*commons.AKSNPClusterWrapper) + cluster := clusterWrapper.GetNPCluster() + var value *azure_np.Scheduling = nil + if v, ok := resourceData.GetOk(string(VNG_Template_Scheduling)); ok { + if scheduling, err := expandScheduling(v); err != nil { + return err + } else { + value = scheduling + } + } + cluster.VirtualNodeGroupTemplate.SetScheduling(value) + return nil + }, + nil, + ) } func expandZones(data interface{}) ([]string, error) { @@ -335,3 +413,79 @@ func expandZones(data interface{}) ([]string, error) { } return result, nil } + +func flattenScheduling(scheduling *azure_np.Scheduling) []interface{} { + var out []interface{} + + if scheduling != nil { + result := make(map[string]interface{}) + if scheduling.ShutdownHours != nil { + result[string(ShutdownHours)] = 
flattenShutdownHours(scheduling.ShutdownHours) + } + if len(result) > 0 { + out = append(out, result) + } + } + return out +} + +func flattenShutdownHours(shutdownHours *azure_np.ShutdownHours) []interface{} { + result := make(map[string]interface{}) + result[string(ShutdownHoursIsEnabled)] = spotinst.BoolValue(shutdownHours.IsEnabled) + if len(shutdownHours.TimeWindows) > 0 { + result[string(TimeWindows)] = shutdownHours.TimeWindows + } + return []interface{}{result} +} + +func expandScheduling(data interface{}) (*azure_np.Scheduling, error) { + scheduling := &azure_np.Scheduling{} + if list := data.([]interface{}); len(list) > 0 { + if list[0] != nil { + m := list[0].(map[string]interface{}) + + if v, ok := m[string(ShutdownHours)]; ok { + shutdownHours, err := expandShutdownHours(v) + if err != nil { + return nil, err + } + if shutdownHours != nil { + if scheduling.ShutdownHours == nil { + scheduling.SetShutdownHours(&azure_np.ShutdownHours{}) + } + scheduling.SetShutdownHours(shutdownHours) + } + } + } + return scheduling, nil + } + return nil, nil +} + +func expandShutdownHours(data interface{}) (*azure_np.ShutdownHours, error) { + shutDownHours := &azure_np.ShutdownHours{} + if list := data.([]interface{}); len(list) > 0 && list[0] != nil { + m := list[0].(map[string]interface{}) + + var isEnabled = spotinst.Bool(false) + if v, ok := m[string(ShutdownHoursIsEnabled)].(bool); ok { + isEnabled = spotinst.Bool(v) + } + shutDownHours.SetIsEnabled(isEnabled) + + var timeWindows []string = nil + if v, ok := m[string(TimeWindows)].([]interface{}); ok && len(v) > 0 { + timeWindowList := make([]string, 0, len(v)) + for _, timeWindow := range v { + if v, ok := timeWindow.(string); ok && len(v) > 0 { + timeWindowList = append(timeWindowList, v) + } + } + timeWindows = timeWindowList + } + shutDownHours.SetTimeWindows(timeWindows) + + return shutDownHours, nil + } + return nil, nil +} diff --git a/spotinst/ocean_aks_np_logging/consts.go b/spotinst/ocean_aks_np_logging/consts.go new file mode 100644 index 000000000..a05d3cd25 --- /dev/null +++ b/spotinst/ocean_aks_np_logging/consts.go @@ -0,0 +1,10 @@ +package ocean_aks_np_logging + +import "github.com/spotinst/terraform-provider-spotinst/spotinst/commons" + +const ( + Logging commons.FieldName = "logging" + Export commons.FieldName = "export" + AzureBlob commons.FieldName = "azure_blob" + Id commons.FieldName = "id" +) diff --git a/spotinst/ocean_aks_np_logging/fields_spotinst_ocean_aks_np_logging.go b/spotinst/ocean_aks_np_logging/fields_spotinst_ocean_aks_np_logging.go new file mode 100644 index 000000000..dd929b170 --- /dev/null +++ b/spotinst/ocean_aks_np_logging/fields_spotinst_ocean_aks_np_logging.go @@ -0,0 +1,207 @@ +package ocean_aks_np_logging + +import ( + "fmt" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/spotinst/spotinst-sdk-go/service/ocean/providers/azure_np" + "github.com/spotinst/spotinst-sdk-go/spotinst" + "github.com/spotinst/terraform-provider-spotinst/spotinst/commons" +) + +func Setup(fieldsMap map[commons.FieldName]*commons.GenericField) { + + fieldsMap[Logging] = commons.NewGenericField( + commons.OceanAKSNPLogging, + Logging, + &schema.Schema{ + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + string(Export): { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + string(AzureBlob): { + Type: schema.TypeList, + Optional: true, + Elem: 
&schema.Resource{ + Schema: map[string]*schema.Schema{ + string(Id): { + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + func(resourceObject interface{}, resourceData *schema.ResourceData, meta interface{}) error { + clusterWrapper := resourceObject.(*commons.AKSNPClusterWrapper) + cluster := clusterWrapper.GetNPCluster() + var result []interface{} = nil + if cluster != nil && cluster.Logging != nil { + result = flattenLogging(cluster.Logging) + } + if len(result) > 0 { + if err := resourceData.Set(string(Logging), result); err != nil { + return fmt.Errorf(string(commons.FailureFieldReadPattern), string(Logging), err) + } + } + return nil + }, + func(resourceObject interface{}, resourceData *schema.ResourceData, meta interface{}) error { + clusterWrapper := resourceObject.(*commons.AKSNPClusterWrapper) + cluster := clusterWrapper.GetNPCluster() + if v, ok := resourceData.GetOk(string(Logging)); ok { + if logging, err := expandLogging(v); err != nil { + return err + } else { + cluster.SetLogging(logging) + } + } + return nil + }, + func(resourceObject interface{}, resourceData *schema.ResourceData, meta interface{}) error { + clusterWrapper := resourceObject.(*commons.AKSNPClusterWrapper) + cluster := clusterWrapper.GetNPCluster() + var value *azure_np.Logging = nil + + if v, ok := resourceData.GetOk(string(Logging)); ok { + if logging, err := expandLogging(v); err != nil { + return err + } else { + value = logging + } + } + cluster.SetLogging(value) + return nil + }, + nil, + ) +} + +func flattenLogging(logging *azure_np.Logging) []interface{} { + var out []interface{} + + if logging != nil { + result := make(map[string]interface{}) + + if logging.Export != nil { + result[string(Export)] = flattenExport(logging.Export) + } + + if len(result) > 0 { + out = append(out, result) + } + } + + return out +} + +func flattenExport(export *azure_np.Export) []interface{} { + var out []interface{} + + if export != nil { + result := make(map[string]interface{}) + + if export.AzureBlob != nil { + result[string(AzureBlob)] = flattenAzureBlob(export.AzureBlob) + } + + if len(result) > 0 { + out = append(out, result) + } + } + + return out +} + +func flattenAzureBlob(azureBlob *azure_np.AzureBlob) []interface{} { + var out []interface{} + + if azureBlob != nil { + result := make(map[string]interface{}) + + if azureBlob.Id != nil { + // Dereference before writing to state, consistent with the other flatteners. + result[string(Id)] = spotinst.StringValue(azureBlob.Id) + } + + if len(result) > 0 { + out = append(out, result) + } + } + + return out +} + +func expandLogging(data interface{}) (*azure_np.Logging, error) { + logging := &azure_np.Logging{} + list := data.([]interface{}) + + if len(list) == 0 || list[0] == nil { + return logging, nil + } + m := list[0].(map[string]interface{}) + + if v, ok := m[string(Export)]; ok { + export, err := expandExport(v) + if err != nil { + return nil, err + } + if export != nil { + logging.SetExport(export) + } else { + logging.Export = nil + } + } + + return logging, nil +} + +func expandExport(data interface{}) (*azure_np.Export, error) { + export := &azure_np.Export{} + list := data.([]interface{}) + + if len(list) == 0 || list[0] == nil { + return export, nil + } + m := list[0].(map[string]interface{}) + + if v, ok := m[string(AzureBlob)]; ok { + azureBlob, err := expandAzureBlob(v) + if err != nil { + return nil, err + } + if azureBlob != nil { + export.SetAzureBlob(azureBlob) + } else { + export.AzureBlob = nil + } + } + + return export, nil +} + +func expandAzureBlob(data interface{}) (*azure_np.AzureBlob, error) { + 
azureBlob := &azure_np.AzureBlob{} + list := data.([]interface{}) + + if len(list) == 0 || list[0] == nil { + return azureBlob, nil + } + m := list[0].(map[string]interface{}) + + if v, ok := m[string(Id)].(string); ok && v != "" { + azureBlob.SetId(spotinst.String(v)) + } + + return azureBlob, nil +} diff --git a/spotinst/ocean_aks_np_scheduling/consts.go b/spotinst/ocean_aks_np_scheduling/consts.go index 0fa801df0..6a37cd58b 100644 --- a/spotinst/ocean_aks_np_scheduling/consts.go +++ b/spotinst/ocean_aks_np_scheduling/consts.go @@ -3,10 +3,11 @@ package ocean_aks_np_scheduling import "github.com/spotinst/terraform-provider-spotinst/spotinst/commons" const ( - Scheduling commons.FieldName = "scheduling" - ShutdownHours commons.FieldName = "shutdown_hours" - TimeWindows commons.FieldName = "time_windows" - ShutdownHoursIsEnabled commons.FieldName = "is_enabled" + Scheduling commons.FieldName = "scheduling" + ShutdownHours commons.FieldName = "shutdown_hours" + TimeWindows commons.FieldName = "time_windows" + SchedulingIsEnabled commons.FieldName = "is_enabled" + SuspensionHours commons.FieldName = "suspension_hours" ) const ( Tasks commons.FieldName = "tasks" @@ -22,3 +23,9 @@ const ( RespectRestrictScaleDown commons.FieldName = "respect_restrict_scale_down" VngIDs commons.FieldName = "vng_ids" ) +const ( + ParametersUpgradeConfig commons.FieldName = "parameters_upgrade_config" + ApplyRoll commons.FieldName = "apply_roll" + ScopeVersion commons.FieldName = "scope_version" + RollParameters commons.FieldName = "roll_parameters" +) diff --git a/spotinst/ocean_aks_np_scheduling/fields_spotinst_ocean_aks_np_scheduling.go b/spotinst/ocean_aks_np_scheduling/fields_spotinst_ocean_aks_np_scheduling.go index 7845c44a5..03d523802 100644 --- a/spotinst/ocean_aks_np_scheduling/fields_spotinst_ocean_aks_np_scheduling.go +++ b/spotinst/ocean_aks_np_scheduling/fields_spotinst_ocean_aks_np_scheduling.go @@ -26,7 +26,26 @@ func Setup(fieldsMap map[commons.FieldName]*commons.GenericField) { MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - string(ShutdownHoursIsEnabled): { + string(SchedulingIsEnabled): { + Type: schema.TypeBool, + Optional: true, + }, + + string(TimeWindows): { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + }, + }, + string(SuspensionHours): { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + string(SchedulingIsEnabled): { Type: schema.TypeBool, Optional: true, }, @@ -101,6 +120,54 @@ func Setup(fieldsMap map[commons.FieldName]*commons.GenericField) { }, }, }, + string(ParametersUpgradeConfig): { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + string(ApplyRoll): { + Type: schema.TypeBool, + Optional: true, + }, + string(ScopeVersion): { + Type: schema.TypeString, + Optional: true, + }, + string(RollParameters): { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + string(BatchMinHealthyPercentage): { + Type: schema.TypeInt, + Optional: true, + Default: -1, + }, + string(BatchSizePercentage): { + Type: schema.TypeInt, + Optional: true, + Default: -1, + }, + string(Comment): { + Type: schema.TypeString, + Optional: true, + }, + string(RespectPdb): { + Type: schema.TypeBool, + Optional: true, + }, + string(RespectRestrictScaleDown): { + Type: schema.TypeBool, + Optional: true, + }, + }, + }, + }, + }, 
+ }, + }, }, }, }, @@ -166,6 +233,9 @@ func flattenScheduling(scheduling *azure_np.Scheduling) []interface{} { if scheduling.ShutdownHours != nil { result[string(ShutdownHours)] = flattenShutdownHours(scheduling.ShutdownHours) } + if scheduling.SuspensionHours != nil { + result[string(SuspensionHours)] = flattenSuspensionHours(scheduling.SuspensionHours) + } if scheduling.Tasks != nil { result[string(Tasks)] = flattenTasks(scheduling.Tasks) } @@ -178,13 +248,22 @@ func flattenScheduling(scheduling *azure_np.Scheduling) []interface{} { func flattenShutdownHours(shutdownHours *azure_np.ShutdownHours) []interface{} { result := make(map[string]interface{}) - result[string(ShutdownHoursIsEnabled)] = spotinst.BoolValue(shutdownHours.IsEnabled) + result[string(SchedulingIsEnabled)] = spotinst.BoolValue(shutdownHours.IsEnabled) if len(shutdownHours.TimeWindows) > 0 { result[string(TimeWindows)] = shutdownHours.TimeWindows } return []interface{}{result} } +func flattenSuspensionHours(suspensionHours *azure_np.SuspensionHours) []interface{} { + result := make(map[string]interface{}) + result[string(SchedulingIsEnabled)] = spotinst.BoolValue(suspensionHours.IsEnabled) + if len(suspensionHours.TimeWindows) > 0 { + result[string(TimeWindows)] = suspensionHours.TimeWindows + } + return []interface{}{result} +} + func expandScheduling(data interface{}) (*azure_np.Scheduling, error) { scheduling := &azure_np.Scheduling{} if list := data.([]interface{}); len(list) > 0 { @@ -203,6 +282,18 @@ func expandScheduling(data interface{}) (*azure_np.Scheduling, error) { scheduling.SetShutdownHours(shutdownHours) } } + if v, ok := m[string(SuspensionHours)]; ok { + suspensionHours, err := expandSuspensionHours(v) + if err != nil { + return nil, err + } + if suspensionHours != nil { + if scheduling.SuspensionHours == nil { + scheduling.SetSuspensionHours(&azure_np.SuspensionHours{}) + } + scheduling.SetSuspensionHours(suspensionHours) + } + } if v, ok := m[string(Tasks)]; ok { tasks, err := expandtasks(v) if err != nil { @@ -226,7 +317,7 @@ func expandShutdownHours(data interface{}) (*azure_np.ShutdownHours, error) { m := list[0].(map[string]interface{}) var isEnabled = spotinst.Bool(false) - if v, ok := m[string(ShutdownHoursIsEnabled)].(bool); ok { + if v, ok := m[string(SchedulingIsEnabled)].(bool); ok { isEnabled = spotinst.Bool(v) } shutDownHours.SetIsEnabled(isEnabled) @@ -248,6 +339,34 @@ func expandShutdownHours(data interface{}) (*azure_np.ShutdownHours, error) { return nil, nil } +func expandSuspensionHours(data interface{}) (*azure_np.SuspensionHours, error) { + suspensionHours := &azure_np.SuspensionHours{} + if list := data.([]interface{}); len(list) > 0 && list[0] != nil { + m := list[0].(map[string]interface{}) + + var isEnabled = spotinst.Bool(false) + if v, ok := m[string(SchedulingIsEnabled)].(bool); ok { + isEnabled = spotinst.Bool(v) + } + suspensionHours.SetIsEnabled(isEnabled) + + var timeWindows []string = nil + if v, ok := m[string(TimeWindows)].([]interface{}); ok && len(v) > 0 { + timeWindowList := make([]string, 0, len(v)) + for _, timeWindow := range v { + if v, ok := timeWindow.(string); ok && len(v) > 0 { + timeWindowList = append(timeWindowList, v) + } + } + timeWindows = timeWindowList + } + suspensionHours.SetTimeWindows(timeWindows) + + return suspensionHours, nil + } + return nil, nil +} + func expandtasks(data interface{}) ([]*azure_np.Tasks, error) { if list := data.([]interface{}); list != nil && len(list) > 0 && list[0] != nil { tasks := make([]*azure_np.Tasks, 0, len(list)) @@ 
-303,6 +422,18 @@ func expandParameters(data interface{}) (*azure_np.Parameters, error) { } } + if v, ok := m[string(ParametersUpgradeConfig)]; ok { + expandUpgradeConfig, err := expandParameterUpgradeConfig(v) + if err != nil { + return nil, err + } + if expandUpgradeConfig != nil { + parameter.SetUpgradeConfig(expandUpgradeConfig) + } else { + parameter.UpgradeConfig = nil + } + } + return parameter, nil } @@ -356,6 +487,85 @@ func expandParameterClusterRoll(data interface{}) (*azure_np.ParameterClusterRol return nil, nil } + +func expandParameterUpgradeConfig(data interface{}) (*azure_np.UpgradeConfig, error) { + if list := data.([]interface{}); list != nil && len(list) > 0 && list[0] != nil { + parameterUpgradeConfig := &azure_np.UpgradeConfig{} + m := list[0].(map[string]interface{}) + + if v, ok := m[string(ScopeVersion)].(string); ok && v != "" { + parameterUpgradeConfig.SetScopeVersion(spotinst.String(v)) + } else { + parameterUpgradeConfig.SetScopeVersion(nil) + } + + var isApplyRoll = spotinst.Bool(false) + if v, ok := m[string(ApplyRoll)].(bool); ok { + isApplyRoll = spotinst.Bool(v) + } + parameterUpgradeConfig.SetApplyRoll(isApplyRoll) + + if v, ok := m[string(RollParameters)]; ok { + expandRollParameters, err := expandRollParameters(v) + if err != nil { + return nil, err + } + if expandRollParameters != nil { + parameterUpgradeConfig.SetRollParameters(expandRollParameters) + } else { + parameterUpgradeConfig.RollParameters = nil + } + } + + return parameterUpgradeConfig, nil + } + + return nil, nil +} +func expandRollParameters(data interface{}) (*azure_np.ParameterClusterRoll, error) { + if list := data.([]interface{}); list != nil && len(list) > 0 && list[0] != nil { + rollParameters := &azure_np.ParameterClusterRoll{} + m := list[0].(map[string]interface{}) + + if v, ok := m[string(BatchMinHealthyPercentage)].(int); ok { + if v == -1 { + rollParameters.SetBatchMinHealthyPercentage(nil) + } else { + rollParameters.SetBatchMinHealthyPercentage(spotinst.Int(v)) + } + } + + if v, ok := m[string(BatchSizePercentage)].(int); ok { + if v == -1 { + rollParameters.SetBatchSizePercentage(nil) + } else { + rollParameters.SetBatchSizePercentage(spotinst.Int(v)) + } + } + + if v, ok := m[string(Comment)].(string); ok && v != "" { + rollParameters.SetComment(spotinst.String(v)) + } else { + rollParameters.SetComment(nil) + } + + var isRespectPdb = spotinst.Bool(false) + if v, ok := m[string(RespectPdb)].(bool); ok { + isRespectPdb = spotinst.Bool(v) + } + rollParameters.SetRespectPdb(isRespectPdb) + + var isRespectRestrictScaleDown = spotinst.Bool(false) + if v, ok := m[string(RespectRestrictScaleDown)].(bool); ok { + isRespectRestrictScaleDown = spotinst.Bool(v) + } + rollParameters.SetRespectRestrictScaleDown(isRespectRestrictScaleDown) + + return rollParameters, nil + } + + return nil, nil +} func expandListVNG(data interface{}) []string { list := data.([]interface{}) result := make([]string, 0, len(list)) @@ -391,6 +601,10 @@ func flattenParameters(parameters *azure_np.Parameters) []interface{} { result[string(ParametersClusterRoll)] = flattenParameterClusterRoll(parameters.ClusterRoll) } + if parameters.UpgradeConfig != nil { + result[string(ParametersUpgradeConfig)] = flattenParameterUpgradeConfig(parameters.UpgradeConfig) + } + return []interface{}{result} } func flattenParameterClusterRoll(clusterRoll *azure_np.ParameterClusterRoll) []interface{} { @@ -412,3 +626,32 @@ func flattenParameterClusterRoll(clusterRoll *azure_np.ParameterClusterRoll) []i return []interface{}{result} } 
+func flattenParameterUpgradeConfig(upgradeConfig *azure_np.UpgradeConfig) []interface{} { + result := make(map[string]interface{}) + + result[string(ScopeVersion)] = spotinst.StringValue(upgradeConfig.ScopeVersion) + result[string(ApplyRoll)] = spotinst.BoolValue(upgradeConfig.ApplyRoll) + if upgradeConfig.RollParameters != nil { + result[string(RollParameters)] = flattenRollParameters(upgradeConfig.RollParameters) + } + + return []interface{}{result} +} +func flattenRollParameters(clusterRoll *azure_np.ParameterClusterRoll) []interface{} { + result := make(map[string]interface{}) + value := spotinst.Int(-1) + result[string(BatchMinHealthyPercentage)] = value + result[string(BatchSizePercentage)] = value + + if clusterRoll.BatchMinHealthyPercentage != nil { + result[string(BatchMinHealthyPercentage)] = spotinst.IntValue(clusterRoll.BatchMinHealthyPercentage) + } + if clusterRoll.BatchSizePercentage != nil { + result[string(BatchSizePercentage)] = spotinst.IntValue(clusterRoll.BatchSizePercentage) + } + result[string(Comment)] = spotinst.StringValue(clusterRoll.Comment) + result[string(RespectPdb)] = spotinst.BoolValue(clusterRoll.RespectPdb) + result[string(RespectRestrictScaleDown)] = spotinst.BoolValue(clusterRoll.RespectRestrictScaleDown) + + return []interface{}{result} +} diff --git a/spotinst/resource_spotinst_ocean_aks_np.go b/spotinst/resource_spotinst_ocean_aks_np.go index 0d6c6ef2d..71f62bc40 100644 --- a/spotinst/resource_spotinst_ocean_aks_np.go +++ b/spotinst/resource_spotinst_ocean_aks_np.go @@ -11,6 +11,7 @@ import ( "github.com/spotinst/terraform-provider-spotinst/spotinst/ocean_aks_np_auto_scale" "github.com/spotinst/terraform-provider-spotinst/spotinst/ocean_aks_np_auto_scaler" "github.com/spotinst/terraform-provider-spotinst/spotinst/ocean_aks_np_health" + "github.com/spotinst/terraform-provider-spotinst/spotinst/ocean_aks_np_logging" "github.com/spotinst/terraform-provider-spotinst/spotinst/ocean_aks_np_node_count_limits" "github.com/spotinst/terraform-provider-spotinst/spotinst/ocean_aks_np_node_pool_properties" "github.com/spotinst/terraform-provider-spotinst/spotinst/ocean_aks_np_scheduling" @@ -52,6 +53,7 @@ func setupClusterAKSNPResource() { ocean_aks_np_auto_scale.Setup(fieldsMap) ocean_aks_np_scheduling.Setup(fieldsMap) ocean_aks_np_vm_sizes.Setup(fieldsMap) + ocean_aks_np_logging.Setup(fieldsMap) commons.OceanAKSNPResource = commons.NewOceanAKSNPResource(fieldsMap) } diff --git a/spotinst/resource_spotinst_ocean_aks_np_test.go b/spotinst/resource_spotinst_ocean_aks_np_test.go index b59d749aa..7bb89d483 100644 --- a/spotinst/resource_spotinst_ocean_aks_np_test.go +++ b/spotinst/resource_spotinst_ocean_aks_np_test.go @@ -538,7 +538,9 @@ func TestAccSpotinstOceanAKSNP_Scheduling(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "scheduling.#", "1"), resource.TestCheckResourceAttr(resourceName, "scheduling.0.shutdown_hours.#", "1"), resource.TestCheckResourceAttr(resourceName, "scheduling.0.shutdown_hours.0.is_enabled", "false"), - resource.TestCheckResourceAttr(resourceName, "scheduling.0.tasks.#", "1"), + resource.TestCheckResourceAttr(resourceName, "scheduling.0.suspension_hours.#", "1"), + resource.TestCheckResourceAttr(resourceName, "scheduling.0.suspension_hours.0.is_enabled", "false"), + resource.TestCheckResourceAttr(resourceName, "scheduling.0.tasks.#", "2"), resource.TestCheckResourceAttr(resourceName, "scheduling.0.tasks.0.cron_expression", "0 1 * * *"), resource.TestCheckResourceAttr(resourceName, "scheduling.0.tasks.0.is_enabled", "true"), 
resource.TestCheckResourceAttr(resourceName, "scheduling.0.tasks.0.task_type", "clusterRoll"), @@ -550,6 +552,19 @@ func TestAccSpotinstOceanAKSNP_Scheduling(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "scheduling.0.tasks.0.parameters.0.parameters_cluster_roll.0.respect_pdb", "true"), resource.TestCheckResourceAttr(resourceName, "scheduling.0.tasks.0.parameters.0.parameters_cluster_roll.0.respect_restrict_scale_down", "true"), resource.TestCheckResourceAttr(resourceName, "scheduling.0.tasks.0.parameters.0.parameters_cluster_roll.0.vng_ids.0", "vng123"), + resource.TestCheckResourceAttr(resourceName, "scheduling.0.tasks.1.is_enabled", "true"), + resource.TestCheckResourceAttr(resourceName, "scheduling.0.tasks.1.cron_expression", "0 4 * * *"), + resource.TestCheckResourceAttr(resourceName, "scheduling.0.tasks.1.task_type", "autoUpgradeVersion"), + resource.TestCheckResourceAttr(resourceName, "scheduling.0.tasks.1.parameters.#", "1"), + resource.TestCheckResourceAttr(resourceName, "scheduling.0.tasks.1.parameters.0.parameters_upgrade_config.#", "1"), + resource.TestCheckResourceAttr(resourceName, "scheduling.0.tasks.1.parameters.0.parameters_upgrade_config.0.apply_roll", "false"), + resource.TestCheckResourceAttr(resourceName, "scheduling.0.tasks.1.parameters.0.parameters_upgrade_config.0.scope_version", "patch"), + resource.TestCheckResourceAttr(resourceName, "scheduling.0.tasks.1.parameters.0.parameters_upgrade_config.0.roll_parameters.#", "1"), + resource.TestCheckResourceAttr(resourceName, "scheduling.0.tasks.1.parameters.0.parameters_upgrade_config.0.roll_parameters.0.batch_min_healthy_percentage", "70"), + resource.TestCheckResourceAttr(resourceName, "scheduling.0.tasks.1.parameters.0.parameters_upgrade_config.0.roll_parameters.0.batch_size_percentage", "10"), + resource.TestCheckResourceAttr(resourceName, "scheduling.0.tasks.1.parameters.0.parameters_upgrade_config.0.roll_parameters.0.comment", "Scheduled autoUpgrade roll"), + resource.TestCheckResourceAttr(resourceName, "scheduling.0.tasks.1.parameters.0.parameters_upgrade_config.0.roll_parameters.0.respect_pdb", "false"), + resource.TestCheckResourceAttr(resourceName, "scheduling.0.tasks.1.parameters.0.parameters_upgrade_config.0.roll_parameters.0.respect_restrict_scale_down", "false"), ), }, { @@ -566,6 +581,10 @@ func TestAccSpotinstOceanAKSNP_Scheduling(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "scheduling.0.shutdown_hours.0.is_enabled", "true"), resource.TestCheckResourceAttr(resourceName, "scheduling.0.shutdown_hours.0.time_windows.#", "1"), resource.TestCheckResourceAttr(resourceName, "scheduling.0.shutdown_hours.0.time_windows.0", "Sat:08:00-Sun:08:00"), + resource.TestCheckResourceAttr(resourceName, "scheduling.0.suspension_hours.#", "1"), + resource.TestCheckResourceAttr(resourceName, "scheduling.0.suspension_hours.0.is_enabled", "true"), + resource.TestCheckResourceAttr(resourceName, "scheduling.0.suspension_hours.0.time_windows.#", "1"), + resource.TestCheckResourceAttr(resourceName, "scheduling.0.suspension_hours.0.time_windows.0", "Mon:08:00-Tue:08:00"), resource.TestCheckResourceAttr(resourceName, "scheduling.0.tasks.0.is_enabled", "false"), resource.TestCheckResourceAttr(resourceName, "scheduling.0.tasks.0.cron_expression", "0 2 * * *"), resource.TestCheckResourceAttr(resourceName, "scheduling.0.tasks.0.task_type", "clusterRoll"), @@ -576,6 +595,19 @@ func TestAccSpotinstOceanAKSNP_Scheduling(t *testing.T) { resource.TestCheckResourceAttr(resourceName, 
"scheduling.0.tasks.0.parameters.0.parameters_cluster_roll.0.comment", "Scheduled cluster roll_updated"), resource.TestCheckResourceAttr(resourceName, "scheduling.0.tasks.0.parameters.0.parameters_cluster_roll.0.respect_pdb", "false"), resource.TestCheckResourceAttr(resourceName, "scheduling.0.tasks.0.parameters.0.parameters_cluster_roll.0.respect_restrict_scale_down", "false"), + resource.TestCheckResourceAttr(resourceName, "scheduling.0.tasks.1.is_enabled", "false"), + resource.TestCheckResourceAttr(resourceName, "scheduling.0.tasks.1.cron_expression", "0 5 * * *"), + resource.TestCheckResourceAttr(resourceName, "scheduling.0.tasks.1.task_type", "autoUpgradeVersion"), + resource.TestCheckResourceAttr(resourceName, "scheduling.0.tasks.1.parameters.#", "1"), + resource.TestCheckResourceAttr(resourceName, "scheduling.0.tasks.1.parameters.0.parameters_upgrade_config.#", "1"), + resource.TestCheckResourceAttr(resourceName, "scheduling.0.tasks.1.parameters.0.parameters_upgrade_config.0.apply_roll", "true"), + resource.TestCheckResourceAttr(resourceName, "scheduling.0.tasks.1.parameters.0.parameters_upgrade_config.0.scope_version", "patch"), + resource.TestCheckResourceAttr(resourceName, "scheduling.0.tasks.1.parameters.0.parameters_upgrade_config.0.roll_parameters.#", "1"), + resource.TestCheckResourceAttr(resourceName, "scheduling.0.tasks.1.parameters.0.parameters_upgrade_config.0.roll_parameters.0.batch_min_healthy_percentage", "80"), + resource.TestCheckResourceAttr(resourceName, "scheduling.0.tasks.1.parameters.0.parameters_upgrade_config.0.roll_parameters.0.batch_size_percentage", "5"), + resource.TestCheckResourceAttr(resourceName, "scheduling.0.tasks.1.parameters.0.parameters_upgrade_config.0.roll_parameters.0.comment", "Scheduled autoUpgrade roll_updated"), + resource.TestCheckResourceAttr(resourceName, "scheduling.0.tasks.1.parameters.0.parameters_upgrade_config.0.roll_parameters.0.respect_pdb", "true"), + resource.TestCheckResourceAttr(resourceName, "scheduling.0.tasks.1.parameters.0.parameters_upgrade_config.0.roll_parameters.0.respect_restrict_scale_down", "true"), ), }, { @@ -599,13 +631,16 @@ const testSchedulingOceanAKSNPConfig_Create = ` scheduling { shutdown_hours{ is_enabled = false + } + suspension_hours{ + is_enabled = false } tasks { is_enabled = true cron_expression = "0 1 * * *" task_type = "clusterRoll" parameters { - parameters_cluster_roll{ + parameters_cluster_roll { batch_min_healthy_percentage = 80 batch_size_percentage = 20 comment = "Scheduled cluster roll" @@ -614,6 +649,24 @@ const testSchedulingOceanAKSNPConfig_Create = ` vng_ids=["vng123"] } } + } + tasks { + is_enabled = true + cron_expression = "0 4 * * *" + task_type = "autoUpgradeVersion" + parameters { + parameters_upgrade_config { + apply_roll = false + scope_version = "patch" + roll_parameters { + batch_min_healthy_percentage = 70 + batch_size_percentage = 10 + comment = "Scheduled autoUpgrade roll" + respect_pdb = false + respect_restrict_scale_down = false + } + } + } } } // ------------------------------------------------------------------- @@ -625,6 +678,10 @@ const testSchedulingOceanAKSNPConfig_Update = ` shutdown_hours{ is_enabled = true time_windows = ["Sat:08:00-Sun:08:00"] + } + suspension_hours{ + is_enabled = true + time_windows = ["Mon:08:00-Tue:08:00"] } tasks { is_enabled = false @@ -639,6 +696,24 @@ const testSchedulingOceanAKSNPConfig_Update = ` respect_restrict_scale_down=false } } + } + tasks { + is_enabled = false + cron_expression = "0 5 * * *" + task_type = "autoUpgradeVersion" + 
parameters { + parameters_upgrade_config { + apply_roll = true + scope_version = "patch" + roll_parameters { + batch_min_healthy_percentage = 80 + batch_size_percentage = 5 + comment = "Scheduled autoUpgrade roll_updated" + respect_pdb = true + respect_restrict_scale_down = true + } + } + } } } // -------------------------------------------------------------------
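As a quick sanity check on the new expand/flatten helpers above, a package-level unit test along the following lines could round-trip the `suspension_hours` block. This is a sketch, not part of the diff: it assumes the `azure_np.SuspensionHours` accessors from spotinst-sdk-go v1.384.0 shown above, and it must live in the `ocean_aks_np_scheduling` package to reach the unexported helpers.

```go
package ocean_aks_np_scheduling

import (
	"reflect"
	"testing"

	"github.com/spotinst/spotinst-sdk-go/spotinst"
)

// TestSuspensionHoursRoundTrip feeds a suspension_hours block through
// expandSuspensionHours and back through flattenSuspensionHours, checking
// that the enabled flag and time windows survive the round trip.
func TestSuspensionHoursRoundTrip(t *testing.T) {
	in := []interface{}{map[string]interface{}{
		string(SchedulingIsEnabled): true,
		string(TimeWindows):         []interface{}{"Mon:08:00-Tue:08:00"},
	}}

	expanded, err := expandSuspensionHours(in)
	if err != nil {
		t.Fatal(err)
	}
	if !spotinst.BoolValue(expanded.IsEnabled) {
		t.Fatal("expected is_enabled to round-trip as true")
	}

	flattened := flattenSuspensionHours(expanded)
	if len(flattened) != 1 {
		t.Fatalf("expected one flattened element, got %d", len(flattened))
	}
	got := flattened[0].(map[string]interface{})
	if got[string(SchedulingIsEnabled)] != true {
		t.Fatal("is_enabled mismatch after flatten")
	}
	if !reflect.DeepEqual(got[string(TimeWindows)], []string{"Mon:08:00-Tue:08:00"}) {
		t.Fatalf("time_windows mismatch: %#v", got[string(TimeWindows)])
	}
}
```

The `vng_template_shutdown_hours` helpers in `ocean_aks_np` could be exercised the same way, since they share the expand/flatten shape.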