diff --git a/apis/apps/v1/cluster_types.go b/apis/apps/v1/cluster_types.go index 5870eb02c0d..a3f17c8f1d7 100644 --- a/apis/apps/v1/cluster_types.go +++ b/apis/apps/v1/cluster_types.go @@ -591,7 +591,7 @@ type ClusterSharding struct { // +kubebuilder:validation:Required // +kubebuilder:validation:MaxLength=15 // +kubebuilder:validation:Pattern:=`^[a-z0-9]([a-z0-9\.\-]*[a-z0-9])?$` - // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="name is immutable" + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="the name is immutable" Name string `json:"name"` // Specifies the ShardingDefinition custom resource (CR) that defines the sharding's characteristics and behavior. @@ -602,19 +602,6 @@ type ClusterSharding struct { // +optional ShardingDef string `json:"shardingDef,omitempty"` - // The template for generating Components for shards, where each shard consists of one Component. - // - // This field is of type ClusterComponentSpec, which encapsulates all the required details and - // definitions for creating and managing the Components. - // KubeBlocks uses this template to generate a set of identical Components of shards. - // All the generated Components will have the same specifications and definitions as specified in the `template` field. - // - // This allows for the creation of multiple Components with consistent configurations, - // enabling sharding and distribution of workloads across Components. - // - // +kubebuilder:validation:Required - Template ClusterComponentSpec `json:"template"` - // Specifies the desired number of shards. // // Users can declare the desired number of shards through this field. @@ -633,12 +620,124 @@ type ClusterSharding struct { // +kubebuilder:validation:Required Shards int32 `json:"shards,omitempty"` + // The default template for generating Components for shards, where each shard consists of one Component. 
+ // + // This field is of type ClusterComponentSpec, which encapsulates all the required details and + // definitions for creating and managing the Components. + // KubeBlocks uses this template to generate a set of identical Components of shards. + // All the generated Components will have the same specifications and definitions as specified in the `template` field. + // + // This allows for the creation of multiple Components with consistent configurations, + // enabling sharding and distribution of workloads across Components. + // + // +optional + Template ClusterComponentSpec `json:"template,omitempty"` + + // Specifies a list of heterogeneous shard templates, allowing different groups of shards + // to be created with distinct configurations. + // + // +optional + ShardTemplates []ShardTemplate `json:"shardTemplates,omitempty"` + // Specifies the names of shards (components) to be transitioned to offline status. // // +optional Offline []string `json:"offline,omitempty"` } +type ShardTemplate struct { + // The unique name of this ShardTemplate. + // + // The name can't be empty. + // + // +kubebuilder:validation:MaxLength=15 + // +kubebuilder:validation:Pattern:=`^[a-z0-9]([a-z0-9\.\-]*[a-z0-9])?$` + // +kubebuilder:validation:Required + Name string `json:"name"` + + // Specifies the ShardingDefinition custom resource (CR) that defines the sharding's characteristics and behavior. + // + // The full name or regular expression is supported to match the ShardingDefinition. + // + // +kubebuilder:validation:MaxLength=64 + // +optional + ShardingDef *string `json:"shardingDef,omitempty"` + + // The number of shards to create from this ShardTemplate. + // + // +kubebuilder:validation:Minimum=0 + // +kubebuilder:validation:Maximum=2048 + // +optional + Shards *int32 `json:"shards,omitempty"` + + // Specifies the shard IDs to take over from the existing shards. 
+ // + // +optional + ShardIDs []string `json:"shardIDs,omitempty"` + + // ServiceVersion specifies the version of the Service expected to be provisioned by this template. + // The version should follow the syntax and semantics of the "Semantic Versioning" specification (http://semver.org/). + // + // +kubebuilder:validation:MaxLength=32 + // +optional + ServiceVersion *string `json:"serviceVersion,omitempty"` + + // Specifies the name of the referenced ComponentDefinition. + // + // +kubebuilder:validation:MaxLength=64 + // +optional + CompDef *string `json:"compDef,omitempty"` + + // Specifies Labels to override or add for underlying Pods, PVCs, Account & TLS Secrets, Services Owned by Component. + // + // +optional + Labels map[string]string `json:"labels,omitempty"` + + // Specifies Annotations to override or add for underlying Pods, PVCs, Account & TLS Secrets, Services Owned by Component. + // + // +optional + Annotations map[string]string `json:"annotations,omitempty"` + + // Defines Env to override. + // Add new or override existing envs. + // + // +optional + Env []corev1.EnvVar `json:"env,omitempty"` + + // Specifies the desired number of replicas for the shard which are created from this template. + // + // +kubebuilder:validation:Minimum=0 + // +kubebuilder:default=1 + // +optional + Replicas *int32 `json:"replicas,omitempty"` + + // Specifies the scheduling policy for the shard. + // If defined, it will overwrite the scheduling policy defined in ClusterSpec and/or default template. + // + // +optional + SchedulingPolicy *SchedulingPolicy `json:"schedulingPolicy,omitempty"` + + // Specifies an override for the resource requirements of the shard. + // + // +optional + Resources *corev1.ResourceRequirements `json:"resources,omitempty"` + + // Specifies an override for the storage requirements of the shard. 
+ // + // +optional + VolumeClaimTemplates []PersistentVolumeClaimTemplate `json:"volumeClaimTemplates,omitempty"` + + // Specifies an override for the custom instances of the shard. + // + // +optional + Instances []InstanceTemplate `json:"instances,omitempty"` + + // Specifies an override for the instance naming of the shard. + // + // +optional + FlatInstanceOrdinal *bool `json:"flatInstanceOrdinal,omitempty"` +} + // ClusterService defines a service that is exposed externally, allowing entities outside the cluster to access it. // For example, external applications, or other Clusters. // And another Cluster managed by the same KubeBlocks operator can resolve the address exposed by a ClusterService diff --git a/apis/apps/v1/zz_generated.deepcopy.go b/apis/apps/v1/zz_generated.deepcopy.go index e4a6799af9e..7824887c54d 100644 --- a/apis/apps/v1/zz_generated.deepcopy.go +++ b/apis/apps/v1/zz_generated.deepcopy.go @@ -595,6 +595,13 @@ func (in *ClusterService) DeepCopy() *ClusterService { func (in *ClusterSharding) DeepCopyInto(out *ClusterSharding) { *out = *in in.Template.DeepCopyInto(&out.Template) + if in.ShardTemplates != nil { + in, out := &in.ShardTemplates, &out.ShardTemplates + *out = make([]ShardTemplate, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } if in.Offline != nil { in, out := &in.Offline, &out.Offline *out = make([]string, len(*in)) @@ -3042,6 +3049,101 @@ func (in *ServiceVars) DeepCopy() *ServiceVars { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ShardTemplate) DeepCopyInto(out *ShardTemplate) { + *out = *in + if in.ShardingDef != nil { + in, out := &in.ShardingDef, &out.ShardingDef + *out = new(string) + **out = **in + } + if in.Shards != nil { + in, out := &in.Shards, &out.Shards + *out = new(int32) + **out = **in + } + if in.ShardIDs != nil { + in, out := &in.ShardIDs, &out.ShardIDs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ServiceVersion != nil { + in, out := &in.ServiceVersion, &out.ServiceVersion + *out = new(string) + **out = **in + } + if in.CompDef != nil { + in, out := &in.CompDef, &out.CompDef + *out = new(string) + **out = **in + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]corev1.EnvVar, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Replicas != nil { + in, out := &in.Replicas, &out.Replicas + *out = new(int32) + **out = **in + } + if in.SchedulingPolicy != nil { + in, out := &in.SchedulingPolicy, &out.SchedulingPolicy + *out = new(SchedulingPolicy) + (*in).DeepCopyInto(*out) + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = new(corev1.ResourceRequirements) + (*in).DeepCopyInto(*out) + } + if in.VolumeClaimTemplates != nil { + in, out := &in.VolumeClaimTemplates, &out.VolumeClaimTemplates + *out = make([]PersistentVolumeClaimTemplate, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Instances != nil { + in, out := &in.Instances, &out.Instances + *out = make([]InstanceTemplate, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.FlatInstanceOrdinal != nil { + in, out 
:= &in.FlatInstanceOrdinal, &out.FlatInstanceOrdinal + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ShardTemplate. +func (in *ShardTemplate) DeepCopy() *ShardTemplate { + if in == nil { + return nil + } + out := new(ShardTemplate) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ShardingDefinition) DeepCopyInto(out *ShardingDefinition) { *out = *in diff --git a/config/crd/bases/apps.kubeblocks.io_clusters.yaml b/config/crd/bases/apps.kubeblocks.io_clusters.yaml index 21c212f0ad3..79030d37f81 100644 --- a/config/crd/bases/apps.kubeblocks.io_clusters.yaml +++ b/config/crd/bases/apps.kubeblocks.io_clusters.yaml @@ -7828,7 +7828,7 @@ spec: pattern: ^[a-z0-9]([a-z0-9\.\-]*[a-z0-9])?$ type: string x-kubernetes-validations: - - message: name is immutable + - message: the name is immutable rule: self == oldSelf offline: description: Specifies the names of shards (components) to be @@ -7836,6 +7836,3379 @@ spec: items: type: string type: array + shardTemplates: + description: |- + Specifies a list of heterogeneous shard templates, allowing different groups of shards + to be created with distinct configurations. + items: + properties: + annotations: + additionalProperties: + type: string + description: Specifies Annotations to override or add + for underlying Pods, PVCs, Account & TLS Secrets, Services + Owned by Component. + type: object + compDef: + description: Specifies the name of the referenced ComponentDefinition. + maxLength: 64 + type: string + env: + description: |- + Defines Env to override. + Add new or override existing envs. + items: + description: EnvVar represents an environment variable + present in a Container. + properties: + name: + description: Name of the environment variable. Must + be a C_IDENTIFIER. 
+ type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. 
+ type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required for + volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the + pod's namespace + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + flatInstanceOrdinal: + description: Specifies an override for the instance naming + of the shard. + type: boolean + instances: + description: Specifies an override for the custom instances + of the shard. + items: + description: InstanceTemplate allows customization of + individual replica configurations in a Component. 
+ properties: + annotations: + additionalProperties: + type: string + description: |- + Specifies a map of key-value pairs to be merged into the Pod's existing annotations. + Existing keys will have their values overwritten, while new keys will be added to the annotations. + type: object + canary: + description: Indicate whether the instances belonging + to this template are canary instances. + type: boolean + compDef: + description: Specifies the name of the referenced + ComponentDefinition. + maxLength: 64 + type: string + env: + description: |- + Defines Env to override. + Add new or override existing envs. + items: + description: EnvVar represents an environment + variable present in a Container. + properties: + name: + description: Name of the environment variable. + Must be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? 
+ type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema + the FieldPath is written in terms + of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to + select in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output + format of the exposed resources, + defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to + select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret + in the pod's namespace + properties: + key: + description: The key of the secret + to select from. Must be a valid + secret key. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? 
+ type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + labels: + additionalProperties: + type: string + description: |- + Specifies a map of key-value pairs that will be merged into the Pod's existing labels. + Values for existing keys will be overwritten, and new keys will be added. + type: object + name: + description: |- + Name specifies the unique name of the instance Pod created using this InstanceTemplate. + This name is constructed by concatenating the Component's name, the template's name, and the instance's ordinal + using the pattern: $(cluster.name)-$(component.name)-$(template.name)-$(ordinal). Ordinals start from 0. + The name can't be empty. + maxLength: 54 + pattern: ^[a-z0-9]([a-z0-9\.\-]*[a-z0-9])?$ + type: string + ordinals: + description: |- + Specifies the desired Ordinals of this InstanceTemplate. + The Ordinals used to specify the ordinal of the instance (pod) names to be generated under this InstanceTemplate. + If Ordinals are defined, their number must be equal to or more than the corresponding replicas. + + + For example, if Ordinals is {ranges: [{start: 0, end: 1}], discrete: [7]}, + then the instance names generated under this InstanceTemplate would be + $(cluster.name)-$(component.name)-$(template.name)-0、$(cluster.name)-$(component.name)-$(template.name)-1 and + $(cluster.name)-$(component.name)-$(template.name)-7 + properties: + discrete: + items: + format: int32 + type: integer + type: array + ranges: + items: + description: |- + Range represents a range with a start and an end value. Both start and end are included. + It is used to define a continuous segment. 
+ properties: + end: + format: int32 + type: integer + start: + format: int32 + type: integer + required: + - end + - start + type: object + type: array + type: object + replicas: + default: 1 + description: |- + Specifies the number of instances (Pods) to create from this InstanceTemplate. + This field allows setting how many replicated instances of the Component, + with the specific overrides in the InstanceTemplate, are created. + The default value is 1. A value of 0 disables instance creation. + format: int32 + minimum: 0 + type: integer + resources: + description: |- + Specifies an override for the resource requirements of the first container in the Pod. + This field allows for customizing resource allocation (CPU, memory, etc.) for the container. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one + entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + schedulingPolicy: + description: |- + Specifies the scheduling policy for the instance. + If defined, it will overwrite the scheduling policy defined in ClusterSpec and/or ClusterComponentSpec. + properties: + affinity: + description: Specifies a group of affinity scheduling + rules of the Cluster, including NodeAffinity, + PodAffinity, and PodAntiAffinity. + properties: + nodeAffinity: + description: Describes node affinity scheduling + rules for the pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node matches the corresponding matchExpressions; the + node(s) with the highest sum are the most preferred. 
+ items: + description: |- + An empty preferred scheduling term matches all objects with implicit weight 0 + (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, + associated with the corresponding + weight. + properties: + matchExpressions: + description: A list of node + selector requirements by + node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label + key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node + selector requirements by + node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label + key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. 
If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + x-kubernetes-map-type: atomic + weight: + description: Weight associated + with matching the corresponding + nodeSelectorTerm, in the range + 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to an update), the system + may or may not try to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of + node selector terms. The terms + are ORed. + items: + description: |- + A null or empty node selector term matches no objects. The requirements of + them are ANDed. + The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node + selector requirements by + node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label + key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. 
If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node + selector requirements by + node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label + key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + x-kubernetes-map-type: atomic + type: array + required: + - nodeSelectorTerms + type: object + x-kubernetes-map-type: atomic + type: object + podAffinity: + description: Describes pod affinity scheduling + rules (e.g. co-locate this pod in the + same node, zone, etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. 
The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of + the matched WeightedPodAffinityTerm + fields are added per-node to find + the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity + term, associated with the corresponding + weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions + is a list of label selector + requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key + is the label key + that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + Also, MatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. + Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions + is a list of label selector + requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key + is the label key + that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. 
+ null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. 
+ properties: + matchExpressions: + description: matchExpressions + is a list of label selector + requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is + the label key that + the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + Also, MatchLabelKeys cannot be set when LabelSelector isn't set. 
+ This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. + Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions + is a list of label selector + requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is + the label key that + the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. 
If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + description: Describes pod anti-affinity + scheduling rules (e.g. avoid putting this + pod in the same node, zone, etc. as some + other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the anti-affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. 
The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of + the matched WeightedPodAffinityTerm + fields are added per-node to find + the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity + term, associated with the corresponding + weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions + is a list of label selector + requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key + is the label key + that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + Also, MatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. + Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions + is a list of label selector + requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key + is the label key + that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. 
+ null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the anti-affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the anti-affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. 
+ properties: + matchExpressions: + description: matchExpressions + is a list of label selector + requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is + the label key that + the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + Also, MatchLabelKeys cannot be set when LabelSelector isn't set. 
+ This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. + Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions + is a list of label selector + requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is + the label key that + the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. 
If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + type: object + nodeName: + description: |- + NodeName is a request to schedule this Pod onto a specific node. If it is non-empty, + the scheduler simply schedules this Pod onto that node, assuming that it fits resource + requirements. + type: string + nodeSelector: + additionalProperties: + type: string + description: |- + NodeSelector is a selector which must be true for the Pod to fit on a node. 
+ Selector which must match a node's labels for the Pod to be scheduled on that node. + More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + type: object + x-kubernetes-map-type: atomic + schedulerName: + description: |- + If specified, the Pod will be dispatched by specified scheduler. + If not specified, the Pod will be dispatched by default scheduler. + type: string + tolerations: + description: |- + Allows Pods to be scheduled onto nodes with matching taints. + Each toleration in the array allows the Pod to tolerate node taints based on + specified `key`, `value`, `effect`, and `operator`. + + + - The `key`, `value`, and `effect` identify the taint that the toleration matches. + - The `operator` determines how the toleration matches the taint. + + + Pods with matching tolerations are allowed to be scheduled on tainted nodes, typically reserved for specific purposes. + items: + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple using the matching operator . + properties: + effect: + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. 
By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + topologySpreadConstraints: + description: |- + TopologySpreadConstraints describes how a group of Pods ought to spread across topology + domains. Scheduler will schedule Pods in a way which abides by the constraints. + All topologySpreadConstraints are ANDed. + items: + description: TopologySpreadConstraint specifies + how to spread matching pods among the given + topology. + properties: + labelSelector: + description: |- + LabelSelector is used to find matching pods. + Pods that match this label selector are counted to determine the number of pods + in their corresponding topology domain. + properties: + matchExpressions: + description: matchExpressions is a + list of label selector requirements. + The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select the pods over which + spreading will be calculated. The keys are used to lookup values from the + incoming pod labels, those key-value labels are ANDed with labelSelector + to select the group of existing pods over which spreading will be calculated + for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + MatchLabelKeys cannot be set when LabelSelector isn't set. + Keys that don't exist in the incoming pod labels will + be ignored. A null or empty list means only match against labelSelector. + + + This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + maxSkew: + description: |- + MaxSkew describes the degree to which pods may be unevenly distributed. + When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference + between the number of matching pods in the target topology and the global minimum. + The global minimum is the minimum number of matching pods in an eligible domain + or zero if the number of eligible domains is less than MinDomains. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 2/2/1: + In this case, the global minimum is 1. 
+ | zone1 | zone2 | zone3 | + | P P | P P | P | + - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; + scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) + violate MaxSkew(1). + - if MaxSkew is 2, incoming pod can be scheduled onto any zone. + When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence + to topologies that satisfy it. + It's a required field. Default value is 1 and 0 is not allowed. + format: int32 + type: integer + minDomains: + description: |- + MinDomains indicates a minimum number of eligible domains. + When the number of eligible domains with matching topology keys is less than minDomains, + Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed. + And when the number of eligible domains with matching topology keys equals or greater than minDomains, + this value has no effect on scheduling. + As a result, when the number of eligible domains is less than minDomains, + scheduler won't schedule more than maxSkew Pods to those domains. + If value is nil, the constraint behaves as if MinDomains is equal to 1. + Valid values are integers greater than 0. + When value is not nil, WhenUnsatisfiable must be DoNotSchedule. + + + For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same + labelSelector spread as 2/2/2: + | zone1 | zone2 | zone3 | + | P P | P P | P P | + The number of domains is less than 5(MinDomains), so "global minimum" is treated as 0. + In this situation, new pod with the same labelSelector cannot be scheduled, + because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, + it will violate MaxSkew. + + + This is a beta field and requires the MinDomainsInPodTopologySpread feature gate to be enabled (enabled by default). 
+ format: int32 + type: integer + nodeAffinityPolicy: + description: |- + NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector + when calculating pod topology spread skew. Options are: + - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. + - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. + + + If this value is nil, the behavior is equivalent to the Honor policy. + This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + type: string + nodeTaintsPolicy: + description: |- + NodeTaintsPolicy indicates how we will treat node taints when calculating + pod topology spread skew. Options are: + - Honor: nodes without taints, along with tainted nodes for which the incoming pod + has a toleration, are included. + - Ignore: node taints are ignored. All nodes are included. + + + If this value is nil, the behavior is equivalent to the Ignore policy. + This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + type: string + topologyKey: + description: |- + TopologyKey is the key of node labels. Nodes that have a label with this key + and identical values are considered to be in the same topology. + We consider each as a "bucket", and try to put balanced number + of pods into each bucket. + We define a domain as a particular instance of a topology. + Also, we define an eligible domain as a domain whose nodes meet the requirements of + nodeAffinityPolicy and nodeTaintsPolicy. + e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. + And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. + It's a required field. + type: string + whenUnsatisfiable: + description: |- + WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy + the spread constraint. 
+ - DoNotSchedule (default) tells the scheduler not to schedule it. + - ScheduleAnyway tells the scheduler to schedule the pod in any location, + but giving higher precedence to topologies that would help reduce the + skew. + A constraint is considered "Unsatisfiable" for an incoming pod + if and only if every possible node assignment for that pod would violate + "MaxSkew" on some topology. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 3/1/1: + | zone1 | zone2 | zone3 | + | P P P | P | P | + If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled + to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies + MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler + won't make it *more* imbalanced. + It's a required field. + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + type: object + serviceVersion: + description: |- + ServiceVersion specifies the version of the Service expected to be provisioned by this InstanceTemplate. + The version should follow the syntax and semantics of the "Semantic Versioning" specification (http://semver.org/). + maxLength: 32 + type: string + volumeClaimTemplates: + description: Specifies an override for the storage + requirements of the instances. + items: + properties: + annotations: + additionalProperties: + type: string + description: Specifies the annotations for + the PVC of the volume. + type: object + labels: + additionalProperties: + type: string + description: Specifies the labels for the + PVC of the volume. + type: object + name: + description: |- + Refers to the name of a volumeMount defined in either: + + + - `componentDefinition.spec.runtime.containers[*].volumeMounts` + + + The value of `name` must match the `name` field of a volumeMount specified in the corresponding `volumeMounts` array. 
+ type: string + persistentVolumeClaimName: + description: |- + Specifies the prefix of the PVC name for the volume. + + + For each replica, the final name of the PVC will be in format: - + type: string + spec: + description: |- + Defines the desired characteristics of a PersistentVolumeClaim that will be created for the volume + with the mount name specified in the `name` field. + properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + items: + type: string + type: array + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. + When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource + being referenced + type: string + name: + description: Name is the name of resource + being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. 
This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. 
+ type: string + kind: + description: Kind is the type of resource + being referenced + type: string + name: + description: Name is the name of resource + being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. 
Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query + over volumes to consider for binding. + properties: + matchExpressions: + description: matchExpressions is a + list of label selector requirements. + The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. 
+ If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. + If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource + exists. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#volumeattributesclass + (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled. + type: string + volumeMode: + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding + reference to the PersistentVolume backing + this claim. + type: string + type: object + required: + - name + type: object + type: array + required: + - name + type: object + type: array + labels: + additionalProperties: + type: string + description: Specifies Labels to override or add for underlying + Pods, PVCs, Account & TLS Secrets, Services Owned by + Component. + type: object + name: + description: |- + The unique name of this ShardTemplate. + + + The name can't be empty. + maxLength: 15 + pattern: ^[a-z0-9]([a-z0-9\.\-]*[a-z0-9])?$ + type: string + replicas: + default: 1 + description: Specifies the desired number of replicas + for the shard which are created from this template. 
+ format: int32 + minimum: 0 + type: integer + resources: + description: Specifies an override for the resource requirements + of the shard. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + schedulingPolicy: + description: |- + Specifies the scheduling policy for the shard. + If defined, it will overwrite the scheduling policy defined in ClusterSpec and/or default template. + properties: + affinity: + description: Specifies a group of affinity scheduling + rules of the Cluster, including NodeAffinity, PodAffinity, + and PodAntiAffinity. + properties: + nodeAffinity: + description: Describes node affinity scheduling + rules for the pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node matches the corresponding matchExpressions; the + node(s) with the highest sum are the most preferred. + items: + description: |- + An empty preferred scheduling term matches all objects with implicit weight 0 + (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated + with the corresponding weight. + properties: + matchExpressions: + description: A list of node selector + requirements by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key + that the selector applies + to. 
+ type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector + requirements by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key + that the selector applies + to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + x-kubernetes-map-type: atomic + weight: + description: Weight associated with + matching the corresponding nodeSelectorTerm, + in the range 1-100. 
+ format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to an update), the system + may or may not try to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node + selector terms. The terms are ORed. + items: + description: |- + A null or empty node selector term matches no objects. The requirements of + them are ANDed. + The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector + requirements by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key + that the selector applies + to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector + requirements by node's fields. 
+ items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key + that the selector applies + to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + x-kubernetes-map-type: atomic + type: array + required: + - nodeSelectorTerms + type: object + x-kubernetes-map-type: atomic + type: object + podAffinity: + description: Describes pod affinity scheduling + rules (e.g. co-locate this pod in the same node, + zone, etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. 
+ items: + description: The weights of all of the matched + WeightedPodAffinityTerm fields are added + per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity + term, associated with the corresponding + weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions + is a list of label selector + requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the + label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. 
The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + Also, MatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. + Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions + is a list of label selector + requirements. The requirements + are ANDed. 
+ items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the + label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. 
+ type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is + a list of label selector requirements. + The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. 
+ type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + Also, MatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. 
The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. + Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is + a list of label selector requirements. + The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling + rules (e.g. avoid putting this pod in the same + node, zone, etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the anti-affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. 
+ for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched + WeightedPodAffinityTerm fields are added + per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity + term, associated with the corresponding + weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions + is a list of label selector + requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the + label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + Also, MatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. + Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions + is a list of label selector + requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the + label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. 
+ null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the anti-affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the anti-affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. 
+ properties: + matchExpressions: + description: matchExpressions is + a list of label selector requirements. + The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + Also, MatchLabelKeys cannot be set when LabelSelector isn't set. 
+ This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. + Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is + a list of label selector requirements. + The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. 
If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + type: object + nodeName: + description: |- + NodeName is a request to schedule this Pod onto a specific node. If it is non-empty, + the scheduler simply schedules this Pod onto that node, assuming that it fits resource + requirements. + type: string + nodeSelector: + additionalProperties: + type: string + description: |- + NodeSelector is a selector which must be true for the Pod to fit on a node. 
+ Selector which must match a node's labels for the Pod to be scheduled on that node. + More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + type: object + x-kubernetes-map-type: atomic + schedulerName: + description: |- + If specified, the Pod will be dispatched by specified scheduler. + If not specified, the Pod will be dispatched by default scheduler. + type: string + tolerations: + description: |- + Allows Pods to be scheduled onto nodes with matching taints. + Each toleration in the array allows the Pod to tolerate node taints based on + specified `key`, `value`, `effect`, and `operator`. + + + - The `key`, `value`, and `effect` identify the taint that the toleration matches. + - The `operator` determines how the toleration matches the taint. + + + Pods with matching tolerations are allowed to be scheduled on tainted nodes, typically reserved for specific purposes. + items: + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple using the matching operator . + properties: + effect: + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. 
By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + topologySpreadConstraints: + description: |- + TopologySpreadConstraints describes how a group of Pods ought to spread across topology + domains. Scheduler will schedule Pods in a way which abides by the constraints. + All topologySpreadConstraints are ANDed. + items: + description: TopologySpreadConstraint specifies + how to spread matching pods among the given topology. + properties: + labelSelector: + description: |- + LabelSelector is used to find matching pods. + Pods that match this label selector are counted to determine the number of pods + in their corresponding topology domain. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select the pods over which + spreading will be calculated. The keys are used to lookup values from the + incoming pod labels, those key-value labels are ANDed with labelSelector + to select the group of existing pods over which spreading will be calculated + for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + MatchLabelKeys cannot be set when LabelSelector isn't set. + Keys that don't exist in the incoming pod labels will + be ignored. A null or empty list means only match against labelSelector. + + + This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + maxSkew: + description: |- + MaxSkew describes the degree to which pods may be unevenly distributed. + When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference + between the number of matching pods in the target topology and the global minimum. + The global minimum is the minimum number of matching pods in an eligible domain + or zero if the number of eligible domains is less than MinDomains. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 2/2/1: + In this case, the global minimum is 1. 
+ | zone1 | zone2 | zone3 | + | P P | P P | P | + - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; + scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) + violate MaxSkew(1). + - if MaxSkew is 2, incoming pod can be scheduled onto any zone. + When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence + to topologies that satisfy it. + It's a required field. Default value is 1 and 0 is not allowed. + format: int32 + type: integer + minDomains: + description: |- + MinDomains indicates a minimum number of eligible domains. + When the number of eligible domains with matching topology keys is less than minDomains, + Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed. + And when the number of eligible domains with matching topology keys equals or greater than minDomains, + this value has no effect on scheduling. + As a result, when the number of eligible domains is less than minDomains, + scheduler won't schedule more than maxSkew Pods to those domains. + If value is nil, the constraint behaves as if MinDomains is equal to 1. + Valid values are integers greater than 0. + When value is not nil, WhenUnsatisfiable must be DoNotSchedule. + + + For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same + labelSelector spread as 2/2/2: + | zone1 | zone2 | zone3 | + | P P | P P | P P | + The number of domains is less than 5(MinDomains), so "global minimum" is treated as 0. + In this situation, new pod with the same labelSelector cannot be scheduled, + because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, + it will violate MaxSkew. + + + This is a beta field and requires the MinDomainsInPodTopologySpread feature gate to be enabled (enabled by default). 
+ format: int32 + type: integer + nodeAffinityPolicy: + description: |- + NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector + when calculating pod topology spread skew. Options are: + - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. + - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. + + + If this value is nil, the behavior is equivalent to the Honor policy. + This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + type: string + nodeTaintsPolicy: + description: |- + NodeTaintsPolicy indicates how we will treat node taints when calculating + pod topology spread skew. Options are: + - Honor: nodes without taints, along with tainted nodes for which the incoming pod + has a toleration, are included. + - Ignore: node taints are ignored. All nodes are included. + + + If this value is nil, the behavior is equivalent to the Ignore policy. + This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + type: string + topologyKey: + description: |- + TopologyKey is the key of node labels. Nodes that have a label with this key + and identical values are considered to be in the same topology. + We consider each as a "bucket", and try to put balanced number + of pods into each bucket. + We define a domain as a particular instance of a topology. + Also, we define an eligible domain as a domain whose nodes meet the requirements of + nodeAffinityPolicy and nodeTaintsPolicy. + e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. + And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. + It's a required field. + type: string + whenUnsatisfiable: + description: |- + WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy + the spread constraint. 
+ - DoNotSchedule (default) tells the scheduler not to schedule it. + - ScheduleAnyway tells the scheduler to schedule the pod in any location, + but giving higher precedence to topologies that would help reduce the + skew. + A constraint is considered "Unsatisfiable" for an incoming pod + if and only if every possible node assignment for that pod would violate + "MaxSkew" on some topology. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 3/1/1: + | zone1 | zone2 | zone3 | + | P P P | P | P | + If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled + to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies + MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler + won't make it *more* imbalanced. + It's a required field. + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + type: object + serviceVersion: + description: |- + ServiceVersion specifies the version of the Service expected to be provisioned by this template. + The version should follow the syntax and semantics of the "Semantic Versioning" specification (http://semver.org/). + maxLength: 32 + type: string + shardIDs: + description: Specifies the shard IDs to take over from + the existing shards. + items: + type: string + type: array + shardingDef: + description: |- + Specifies the ShardingDefinition custom resource (CR) that defines the sharding's characteristics and behavior. + + + The full name or regular expression is supported to match the ShardingDefinition. + maxLength: 64 + type: string + shards: + description: The number of shards to create from this + ShardTemplate. + format: int32 + maximum: 2048 + minimum: 0 + type: integer + volumeClaimTemplates: + description: Specifies an override for the storage requirements + of the shard. 
+ items: + properties: + annotations: + additionalProperties: + type: string + description: Specifies the annotations for the PVC + of the volume. + type: object + labels: + additionalProperties: + type: string + description: Specifies the labels for the PVC of + the volume. + type: object + name: + description: |- + Refers to the name of a volumeMount defined in either: + + + - `componentDefinition.spec.runtime.containers[*].volumeMounts` + + + The value of `name` must match the `name` field of a volumeMount specified in the corresponding `volumeMounts` array. + type: string + persistentVolumeClaimName: + description: |- + Specifies the prefix of the PVC name for the volume. + + + For each replica, the final name of the PVC will be in format: - + type: string + spec: + description: |- + Defines the desired characteristics of a PersistentVolumeClaim that will be created for the volume + with the mount name specified in the `name` field. + properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + items: + type: string + type: array + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. + When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. 
+ If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource + being referenced + type: string + name: + description: Name is the name of resource + being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. 
+ (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource + being referenced + type: string + name: + description: Name is the name of resource + being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over + volumes to consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. + If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource + exists. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#volumeattributesclass + (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled. + type: string + volumeMode: + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference + to the PersistentVolume backing this claim. 
+ type: string + type: object + required: + - name + type: object + type: array + required: + - name + type: object + type: array shardingDef: description: |- Specifies the ShardingDefinition custom resource (CR) that defines the sharding's characteristics and behavior. @@ -7866,7 +11239,7 @@ spec: type: integer template: description: |- - The template for generating Components for shards, where each shard consists of one Component. + The default template for generating Components for shards, where each shard consists of one Component. This field is of type ClusterComponentSpec, which encapsulates all the required details and @@ -13961,7 +17334,6 @@ spec: type: object required: - name - - template type: object maxItems: 128 minItems: 1 diff --git a/controllers/apps/cluster/cluster_plan_builder.go b/controllers/apps/cluster/cluster_plan_builder.go index f1cf8aea48f..9a2150d0454 100644 --- a/controllers/apps/cluster/cluster_plan_builder.go +++ b/controllers/apps/cluster/cluster_plan_builder.go @@ -60,7 +60,8 @@ type clusterTransformContext struct { components []*appsv1.ClusterComponentSpec shardings []*appsv1.ClusterSharding - shardingComps map[string][]*appsv1.ClusterComponentSpec // comp specs for each sharding + shardingComps map[string][]*appsv1.ClusterComponentSpec // comp specs for each sharding + shardingCompsWithTpl map[string]map[string][]*appsv1.ClusterComponentSpec // TODO: remove this, annotations to be added to components for sharding, mapping with @allComps. 
annotations map[string]map[string]string diff --git a/controllers/apps/cluster/transformer_cluster_component.go b/controllers/apps/cluster/transformer_cluster_component.go index 16f1855f87d..564b5f84ed4 100644 --- a/controllers/apps/cluster/transformer_cluster_component.go +++ b/controllers/apps/cluster/transformer_cluster_component.go @@ -42,6 +42,7 @@ import ( "github.com/apecloud/kubeblocks/pkg/controller/component" "github.com/apecloud/kubeblocks/pkg/controller/graph" "github.com/apecloud/kubeblocks/pkg/controller/model" + "github.com/apecloud/kubeblocks/pkg/controller/sharding" ictrlutil "github.com/apecloud/kubeblocks/pkg/controllerutil" "github.com/apecloud/kubeblocks/pkg/generics" ) @@ -493,7 +494,7 @@ func (c *notExistPrecondition) compExist(transCtx *clusterTransformContext, dag func (c *notExistPrecondition) shardingExist(transCtx *clusterTransformContext, dag *graph.DAG, name string) (bool, error) { list := func() (bool, error) { - comps, err := ictrlutil.ListShardingComponents(transCtx.Context, transCtx.Client, transCtx.Cluster, name) + comps, err := sharding.ListShardingComponents(transCtx.Context, transCtx.Client, transCtx.Cluster, name) if err != nil { return false, err } @@ -608,7 +609,7 @@ func (c *phasePrecondition) shardingMatch(transCtx *clusterTransformContext, dag return false, fmt.Errorf("cluster sharding %s not found", name) } - comps, err := ictrlutil.ListShardingComponents(transCtx.Context, transCtx.Client, transCtx.Cluster, name) + comps, err := sharding.ListShardingComponents(transCtx.Context, transCtx.Client, transCtx.Cluster, name) if err != nil { return false, err } @@ -827,7 +828,7 @@ func (h *clusterShardingHandler) create(transCtx *clusterTransformContext, dag * // delete handles the sharding component deletion when cluster is Deleting func (h *clusterShardingHandler) delete(transCtx *clusterTransformContext, dag *graph.DAG, name string) error { - runningComps, err := ictrlutil.ListShardingComponents(transCtx.Context, 
transCtx.Client, transCtx.Cluster, name) + runningComps, err := sharding.ListShardingComponents(transCtx.Context, transCtx.Client, transCtx.Cluster, name) if err != nil { return err } @@ -860,7 +861,7 @@ func (h *clusterShardingHandler) deleteComp(transCtx *clusterTransformContext, } func (h *clusterShardingHandler) update(transCtx *clusterTransformContext, dag *graph.DAG, name string) error { - runningComps, err1 := ictrlutil.ListShardingComponents(transCtx.Context, transCtx.Client, transCtx.Cluster, name) + runningComps, err1 := sharding.ListShardingComponents(transCtx.Context, transCtx.Client, transCtx.Cluster, name) if err1 != nil { return err1 } @@ -934,28 +935,33 @@ func (h *clusterShardingHandler) protoComps(transCtx *clusterTransformContext, n func (h *clusterShardingHandler) buildComps(transCtx *clusterTransformContext, sharding *appsv1.ClusterSharding, running *appsv1.Component) ([]*appsv1.Component, error) { objs := make([]*appsv1.Component, 0) - shardingComps := transCtx.shardingComps[sharding.Name] - for i := range shardingComps { - spec := shardingComps[i] - labels := h.buildLabels(sharding) - annotations := h.buildAnnotations(transCtx, sharding.Name, spec.Name) - obj, err := buildComponentWrapper(transCtx, spec, labels, annotations, running) - if err != nil { - return nil, err + shardingComps := transCtx.shardingCompsWithTpl[sharding.Name] + for tplName, tplComps := range shardingComps { + for i := range tplComps { + spec := shardingComps[tplName][i] + labels := h.buildLabels(sharding, tplName) + annotations := h.buildAnnotations(transCtx, sharding.Name, spec.Name) + obj, err := buildComponentWrapper(transCtx, spec, labels, annotations, running) + if err != nil { + return nil, err + } + h.buildShardPodAntiAffinity(transCtx, sharding.Name, spec.Name, obj) + objs = append(objs, obj) } - h.buildShardPodAntiAffinity(transCtx, sharding.Name, spec.Name, obj) - objs = append(objs, obj) } return objs, nil } -func (h *clusterShardingHandler) 
buildLabels(sharding *appsv1.ClusterSharding) map[string]string { +func (h *clusterShardingHandler) buildLabels(sharding *appsv1.ClusterSharding, shardTplName string) map[string]string { labels := map[string]string{ constant.KBAppShardingNameLabelKey: sharding.Name, } if len(sharding.ShardingDef) > 0 { labels[constant.ShardingDefLabelKey] = sharding.ShardingDef } + if len(shardTplName) > 0 { + labels[constant.KBAppShardTemplateLabelKey] = shardTplName + } return labels } diff --git a/controllers/apps/cluster/transformer_cluster_component_status_test.go b/controllers/apps/cluster/transformer_cluster_component_status_test.go index ae481e329c5..e9945cd78d2 100644 --- a/controllers/apps/cluster/transformer_cluster_component_status_test.go +++ b/controllers/apps/cluster/transformer_cluster_component_status_test.go @@ -59,7 +59,7 @@ var _ = Describe("cluster component status transformer", func() { transCtx.components, transCtx.shardings, err = transformer.resolveCompsNShardingsFromSpecified(transCtx, cluster) Expect(err).Should(BeNil()) - transCtx.shardingComps, err = transformer.buildShardingComps(transCtx) + transCtx.shardingComps, transCtx.shardingCompsWithTpl, err = transformer.buildShardingComps(transCtx) Expect(err).Should(BeNil()) } diff --git a/controllers/apps/cluster/transformer_cluster_component_test.go b/controllers/apps/cluster/transformer_cluster_component_test.go index 2cb58a73865..0946e2f0509 100644 --- a/controllers/apps/cluster/transformer_cluster_component_test.go +++ b/controllers/apps/cluster/transformer_cluster_component_test.go @@ -416,7 +416,7 @@ var _ = Describe("cluster component transformer test", func() { transCtx.components, transCtx.shardings, err = transformer.resolveCompsNShardingsFromTopology(clusterDef, cluster) Expect(err).Should(BeNil()) - transCtx.shardingComps, err = transformer.buildShardingComps(transCtx) + transCtx.shardingComps, transCtx.shardingCompsWithTpl, err = transformer.buildShardingComps(transCtx) 
Expect(err).Should(BeNil()) } diff --git a/controllers/apps/cluster/transformer_cluster_normalization.go b/controllers/apps/cluster/transformer_cluster_normalization.go index 7c90d1b39c0..d7171832e78 100644 --- a/controllers/apps/cluster/transformer_cluster_normalization.go +++ b/controllers/apps/cluster/transformer_cluster_normalization.go @@ -30,6 +30,7 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/version" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" appsv1 "github.com/apecloud/kubeblocks/apis/apps/v1" @@ -37,6 +38,7 @@ import ( "github.com/apecloud/kubeblocks/pkg/constant" "github.com/apecloud/kubeblocks/pkg/controller/component" "github.com/apecloud/kubeblocks/pkg/controller/graph" + "github.com/apecloud/kubeblocks/pkg/controller/sharding" "github.com/apecloud/kubeblocks/pkg/controllerutil" ) @@ -78,7 +80,7 @@ func (t *clusterNormalizationTransformer) Transform(ctx graph.TransformContext, } // build component specs for shardings after resolving definitions - transCtx.shardingComps, err = t.buildShardingComps(transCtx) + transCtx.shardingComps, transCtx.shardingCompsWithTpl, err = t.buildShardingComps(transCtx) if err != nil { return err } @@ -218,60 +220,122 @@ func (t *clusterNormalizationTransformer) resolveCompsNShardingsFromSpecified(tr } func (t *clusterNormalizationTransformer) resolveDefinitions4Shardings(transCtx *clusterTransformContext) error { - if len(transCtx.shardings) != 0 { - transCtx.shardingDefs = make(map[string]*appsv1.ShardingDefinition) - if transCtx.componentDefs == nil { - transCtx.componentDefs = make(map[string]*appsv1.ComponentDefinition) - } - for i, sharding := range transCtx.shardings { - shardingDef, compDef, serviceVersion, err := t.resolveShardingNCompDefinition(transCtx, sharding) - if err != nil { - return err - } - if shardingDef != nil { - transCtx.shardingDefs[shardingDef.Name] = shardingDef - // set the shardingDef as resolved - 
transCtx.shardings[i].ShardingDef = shardingDef.Name - } - transCtx.componentDefs[compDef.Name] = compDef - // set the componentDef and serviceVersion of template as resolved - transCtx.shardings[i].Template.ComponentDef = compDef.Name - transCtx.shardings[i].Template.ServiceVersion = serviceVersion + transCtx.shardingDefs = make(map[string]*appsv1.ShardingDefinition) + if transCtx.componentDefs == nil { + transCtx.componentDefs = make(map[string]*appsv1.ComponentDefinition) + } + for i := range transCtx.shardings { + shardingDefs, compDefs, err := t.resolveDefinitions4Sharding(transCtx, transCtx.shardings[i]) + if err != nil { + return err + } + for j := range shardingDefs { + transCtx.shardingDefs[shardingDefs[j].Name] = shardingDefs[j] + } + for j := range compDefs { + transCtx.componentDefs[compDefs[j].Name] = compDefs[j] } } return nil } -func (t *clusterNormalizationTransformer) resolveShardingNCompDefinition(transCtx *clusterTransformContext, - sharding *appsv1.ClusterSharding) (*appsv1.ShardingDefinition, *appsv1.ComponentDefinition, string, error) { - comp, err := t.firstShardingComponent(transCtx, sharding) +func (t *clusterNormalizationTransformer) resolveDefinitions4Sharding(transCtx *clusterTransformContext, + sharding *appsv1.ClusterSharding) ([]*appsv1.ShardingDefinition, []*appsv1.ComponentDefinition, error) { + shardTemplate := func(tpl appsv1.ShardTemplate) (string, *appsv1.ClusterComponentSpec) { + shardingDefName := sharding.ShardingDef + if tpl.ShardingDef != nil { + shardingDefName = *tpl.ShardingDef + } + spec := sharding.Template.DeepCopy() + if tpl.ServiceVersion != nil || tpl.CompDef != nil { + spec.ComponentDef = ptr.Deref(tpl.CompDef, "") + spec.ServiceVersion = ptr.Deref(tpl.ServiceVersion, "") + } + for _, inst := range tpl.Instances { + spec.Instances = append(spec.Instances, appsv1.InstanceTemplate{ + Name: inst.Name, + CompDef: inst.CompDef, + ServiceVersion: inst.ServiceVersion, + }) + } + return shardingDefName, spec + } + + 
updateShardTemplate := func(shardingDef *appsv1.ShardingDefinition, spec *appsv1.ClusterComponentSpec, tpl *appsv1.ShardTemplate) { + if shardingDef != nil { + // set the shardingDef as resolved + tpl.ShardingDef = ptr.To(shardingDef.Name) + } + // set the componentDef and serviceVersion of template as resolved + tpl.CompDef = ptr.To(spec.ComponentDef) + tpl.ServiceVersion = ptr.To(spec.ServiceVersion) + for i := range spec.Instances { + tpl.Instances[i].CompDef = spec.Instances[i].CompDef + tpl.Instances[i].ServiceVersion = spec.Instances[i].ServiceVersion + } + } + + shardingDefs := make([]*appsv1.ShardingDefinition, 0) + + // default + shardingDef, compDefs, err := t.resolveDefinitions4ShardTemplate(transCtx, sharding.Name, "", sharding.ShardingDef, &sharding.Template) if err != nil { - return nil, nil, "", err + return nil, nil, err + } + if shardingDef != nil { + // set the shardingDef as resolved + sharding.ShardingDef = shardingDef.Name + shardingDefs = append(shardingDefs, shardingDef) + } + // sharding.Template has been updated + + // templates + for i, tpl := range sharding.ShardTemplates { + shardingDefName, spec := shardTemplate(tpl) + tplShardingDef, tplCompDefs, err := t.resolveDefinitions4ShardTemplate(transCtx, sharding.Name, tpl.Name, shardingDefName, spec) + if err != nil { + return nil, nil, err + } + + updateShardTemplate(tplShardingDef, spec, &sharding.ShardTemplates[i]) + + if tplShardingDef != nil { + shardingDefs = append(shardingDefs, tplShardingDef) + } + compDefs = append(compDefs, tplCompDefs...) 
+ } + + return shardingDefs, compDefs, nil +} + +func (t *clusterNormalizationTransformer) resolveDefinitions4ShardTemplate(transCtx *clusterTransformContext, + shardingName, shardTemplateName, shardingDefName string, spec *appsv1.ClusterComponentSpec) (*appsv1.ShardingDefinition, []*appsv1.ComponentDefinition, error) { + comp, err := t.firstShardingComponent(transCtx, shardingName, shardTemplateName) + if err != nil { + return nil, nil, err } var shardingDef *appsv1.ShardingDefinition - shardingDefName := t.shardingDefinitionName(sharding, comp) + shardingDefName = t.shardingDefinitionName(shardingDefName, comp) if len(shardingDefName) > 0 { shardingDef, err = resolveShardingDefinition(transCtx.Context, transCtx.Client, shardingDefName) if err != nil { - return nil, nil, "", err + return nil, nil, err } - if len(sharding.Template.ComponentDef) == 0 { - sharding.Template.ComponentDef = shardingDef.Spec.Template.CompDef + if len(spec.ComponentDef) == 0 { + spec.ComponentDef = shardingDef.Spec.Template.CompDef } } - spec := sharding.Template - compDef, serviceVersion, err := t.resolveCompDefinitionNServiceVersionWithComp(transCtx, &spec, comp) + compDefs, err := t.resolveDefinitions4ComponentWithObj(transCtx, spec, comp) if err != nil { - return nil, nil, "", err + return nil, nil, err } - return shardingDef, compDef, serviceVersion, err + return shardingDef, compDefs, nil } -func (t *clusterNormalizationTransformer) firstShardingComponent(transCtx *clusterTransformContext, - sharding *appsv1.ClusterSharding) (*appsv1.Component, error) { +func (t *clusterNormalizationTransformer) firstShardingComponent(transCtx *clusterTransformContext, shardingName, shardTemplateName string) (*appsv1.Component, error) { var ( ctx = transCtx.Context cli = transCtx.Client @@ -281,7 +345,10 @@ func (t *clusterNormalizationTransformer) firstShardingComponent(transCtx *clust compList := &appsv1.ComponentList{} ml := client.MatchingLabels{ constant.AppInstanceLabelKey: cluster.Name, - 
constant.KBAppShardingNameLabelKey: sharding.Name, + constant.KBAppShardingNameLabelKey: shardingName, + } + if len(shardTemplateName) > 0 { + ml[constant.KBAppShardTemplateLabelKey] = shardTemplateName } if err := cli.List(ctx, compList, client.InNamespace(cluster.Namespace), ml, client.Limit(1)); err != nil { return nil, err @@ -292,14 +359,14 @@ func (t *clusterNormalizationTransformer) firstShardingComponent(transCtx *clust return &compList.Items[0], nil } -func (t *clusterNormalizationTransformer) shardingDefinitionName(sharding *appsv1.ClusterSharding, comp *appsv1.Component) string { +func (t *clusterNormalizationTransformer) shardingDefinitionName(defaultShardingDefName string, comp *appsv1.Component) string { if comp != nil { shardingDefName, ok := comp.Labels[constant.ShardingDefLabelKey] if ok { return shardingDefName } } - return sharding.ShardingDef + return defaultShardingDefName } func (t *clusterNormalizationTransformer) resolveDefinitions4Components(transCtx *clusterTransformContext) error { @@ -321,10 +388,9 @@ func (t *clusterNormalizationTransformer) resolveDefinitions4Components(transCtx func (t *clusterNormalizationTransformer) resolveDefinitions4Component(transCtx *clusterTransformContext, compSpec *appsv1.ClusterComponentSpec) ([]*appsv1.ComponentDefinition, error) { var ( - ctx = transCtx.Context - cli = transCtx.Client - cluster = transCtx.Cluster - compDefs = make([]*appsv1.ComponentDefinition, 0) + ctx = transCtx.Context + cli = transCtx.Client + cluster = transCtx.Cluster ) comp := &appsv1.Component{} err := cli.Get(ctx, types.NamespacedName{Namespace: cluster.Namespace, Name: component.FullName(cluster.Name, compSpec.Name)}, comp) @@ -334,6 +400,14 @@ func (t *clusterNormalizationTransformer) resolveDefinitions4Component(transCtx } comp = nil } + return t.resolveDefinitions4ComponentWithObj(transCtx, compSpec, comp) +} + +func (t *clusterNormalizationTransformer) resolveDefinitions4ComponentWithObj(transCtx *clusterTransformContext, + 
compSpec *appsv1.ClusterComponentSpec, comp *appsv1.Component) ([]*appsv1.ComponentDefinition, error) { + var ( + compDefs = make([]*appsv1.ComponentDefinition, 0) + ) compDef, serviceVersion, err := t.resolveCompDefinitionNServiceVersionWithComp(transCtx, compSpec, comp) if err != nil { @@ -410,16 +484,23 @@ func (t *clusterNormalizationTransformer) checkTemplateUpgrade(serviceVersion, c return serviceVersion != runningTpl.ServiceVersion || compDefName != runningTpl.CompDef } -func (t *clusterNormalizationTransformer) buildShardingComps(transCtx *clusterTransformContext) (map[string][]*appsv1.ClusterComponentSpec, error) { +func (t *clusterNormalizationTransformer) buildShardingComps(transCtx *clusterTransformContext) (map[string][]*appsv1.ClusterComponentSpec, map[string]map[string][]*appsv1.ClusterComponentSpec, error) { + cluster := transCtx.Cluster shardingComps := make(map[string][]*appsv1.ClusterComponentSpec) - for _, sharding := range transCtx.shardings { - comps, err := controllerutil.GenShardingCompSpecList(transCtx.Context, transCtx.Client, transCtx.Cluster, sharding) + shardingCompsWithTpl := make(map[string]map[string][]*appsv1.ClusterComponentSpec) + for _, spec := range transCtx.shardings { + tplComps, err := sharding.BuildShardingCompSpecs(transCtx.Context, transCtx.Client, cluster.Namespace, cluster.Name, spec) if err != nil { - return nil, err + return nil, nil, err + } + shardingCompsWithTpl[spec.Name] = tplComps + for tpl, comps := range tplComps { + if len(comps) > 0 { + shardingComps[spec.Name] = append(shardingComps[spec.Name], tplComps[tpl]...) 
+ } } - shardingComps[sharding.Name] = comps } - return shardingComps, nil + return shardingComps, shardingCompsWithTpl, nil } func (t *clusterNormalizationTransformer) postcheck(transCtx *clusterTransformContext) error { diff --git a/controllers/apps/cluster/transformer_cluster_restore.go b/controllers/apps/cluster/transformer_cluster_restore.go index 94665b4f1dd..491638a0711 100644 --- a/controllers/apps/cluster/transformer_cluster_restore.go +++ b/controllers/apps/cluster/transformer_cluster_restore.go @@ -29,6 +29,7 @@ import ( "github.com/apecloud/kubeblocks/pkg/controller/graph" "github.com/apecloud/kubeblocks/pkg/controller/model" "github.com/apecloud/kubeblocks/pkg/controller/plan" + "github.com/apecloud/kubeblocks/pkg/controller/sharding" intctrlutil "github.com/apecloud/kubeblocks/pkg/controllerutil" ) @@ -67,7 +68,7 @@ func (c *clusterRestoreTransformer) Transform(ctx graph.TransformContext, dag *g `the source targets count of the backup "%s" must be equal to or greater than the count of the shard components "%s"`, backup.Name, spec.Name) } - shardComponents, err := intctrlutil.ListShardingComponents(c.Context, c.Client, c.Cluster, spec.Name) + shardComponents, err := sharding.ListShardingComponents(c.Context, c.Client, c.Cluster, spec.Name) if err != nil { return err } diff --git a/controllers/dataprotection/backuppolicydriver_controller.go b/controllers/dataprotection/backuppolicydriver_controller.go index 152b992ca4e..2112aa957cb 100644 --- a/controllers/dataprotection/backuppolicydriver_controller.go +++ b/controllers/dataprotection/backuppolicydriver_controller.go @@ -39,6 +39,7 @@ import ( "github.com/apecloud/kubeblocks/pkg/constant" "github.com/apecloud/kubeblocks/pkg/controller/component" "github.com/apecloud/kubeblocks/pkg/controller/model" + "github.com/apecloud/kubeblocks/pkg/controller/sharding" intctrlutil "github.com/apecloud/kubeblocks/pkg/controllerutil" dptypes "github.com/apecloud/kubeblocks/pkg/dataprotection/types" dputils 
"github.com/apecloud/kubeblocks/pkg/dataprotection/utils" @@ -456,7 +457,7 @@ func (r *backupPolicyAndScheduleBuilder) matchMappingName(names []string, target } func (r *backupPolicyAndScheduleBuilder) buildBackupTargets(targets []dpv1alpha1.BackupTarget) []dpv1alpha1.BackupTarget { - shardComponents, _ := intctrlutil.ListShardingComponents(r.Context, r.Client, r.Cluster, r.componentName) + shardComponents, _ := sharding.ListShardingComponents(r.Context, r.Client, r.Cluster, r.componentName) sourceTargetMap := map[string]*dpv1alpha1.BackupTarget{} for i := range targets { sourceTargetMap[targets[i].Name] = &targets[i] diff --git a/controllers/dataprotection/backuppolicydriver_controller_test.go b/controllers/dataprotection/backuppolicydriver_controller_test.go index bea02fc603c..08b55413edc 100644 --- a/controllers/dataprotection/backuppolicydriver_controller_test.go +++ b/controllers/dataprotection/backuppolicydriver_controller_test.go @@ -166,6 +166,7 @@ var _ = Describe("BackupPolicyDriver Controller test", func() { testapps.NewComponentFactory(testCtx.DefaultNamespace, clusterObj.Name+"-"+defaultShardingCompName, ""). WithRandomName(). AddAnnotations(constant.KBAppClusterUIDKey, string(clusterObj.UID)). + AddLabels(constant.AppManagedByLabelKey, constant.AppName). AddLabels(constant.AppInstanceLabelKey, clusterObj.Name). AddLabels(constant.KBAppShardingNameLabelKey, defaultShardingCompName). SetReplicas(1). @@ -174,6 +175,7 @@ var _ = Describe("BackupPolicyDriver Controller test", func() { testapps.NewComponentFactory(testCtx.DefaultNamespace, clusterObj.Name+"-"+defaultShardingCompName, ""). WithRandomName(). AddAnnotations(constant.KBAppClusterUIDKey, string(clusterObj.UID)). + AddLabels(constant.AppManagedByLabelKey, constant.AppName). AddLabels(constant.AppInstanceLabelKey, clusterObj.Name). AddLabels(constant.KBAppShardingNameLabelKey, defaultShardingCompName). SetReplicas(1). 
@@ -182,6 +184,7 @@ var _ = Describe("BackupPolicyDriver Controller test", func() { testapps.NewComponentFactory(testCtx.DefaultNamespace, clusterObj.Name+"-"+defaultShardingCompName, ""). WithRandomName(). AddAnnotations(constant.KBAppClusterUIDKey, string(clusterObj.UID)). + AddLabels(constant.AppManagedByLabelKey, constant.AppName). AddLabels(constant.AppInstanceLabelKey, clusterObj.Name). AddLabels(constant.KBAppShardingNameLabelKey, defaultShardingCompName). SetReplicas(1). diff --git a/controllers/parameters/parameter_controller.go b/controllers/parameters/parameter_controller.go index dae582b5b07..4fc6a8e61b9 100644 --- a/controllers/parameters/parameter_controller.go +++ b/controllers/parameters/parameter_controller.go @@ -39,6 +39,7 @@ import ( "github.com/apecloud/kubeblocks/pkg/constant" "github.com/apecloud/kubeblocks/pkg/controller/component" "github.com/apecloud/kubeblocks/pkg/controller/render" + "github.com/apecloud/kubeblocks/pkg/controller/sharding" intctrlutil "github.com/apecloud/kubeblocks/pkg/controllerutil" "github.com/apecloud/kubeblocks/pkg/generics" ) @@ -298,7 +299,7 @@ func resolveComponents(ctx context.Context, reader client.Reader, cluster *appsv if shardingComp == nil { return nil, intctrlutil.NewErrorf(intctrlutil.ErrorTypeFatal, `component not found: %s`, componentName) } - components, err := intctrlutil.ListShardingComponents(ctx, reader, cluster, componentName) + components, err := sharding.ListShardingComponents(ctx, reader, cluster, componentName) if err != nil { return nil, err } diff --git a/controllers/parameters/parameter_controller_test.go b/controllers/parameters/parameter_controller_test.go index bdc28dfdf9f..4f360c17c9d 100644 --- a/controllers/parameters/parameter_controller_test.go +++ b/controllers/parameters/parameter_controller_test.go @@ -34,6 +34,7 @@ import ( configcore "github.com/apecloud/kubeblocks/pkg/configuration/core" "github.com/apecloud/kubeblocks/pkg/constant" 
"github.com/apecloud/kubeblocks/pkg/controller/component" + "github.com/apecloud/kubeblocks/pkg/controller/sharding" intctrlutil "github.com/apecloud/kubeblocks/pkg/controllerutil" testapps "github.com/apecloud/kubeblocks/pkg/testutil/apps" testparameters "github.com/apecloud/kubeblocks/pkg/testutil/parameters" @@ -183,7 +184,7 @@ var _ = Describe("Parameter Controller", func() { prepareTestEnv() By("Create sharding component objs") - shardingCompSpecList, err := intctrlutil.GenShardingCompSpecList(testCtx.Ctx, k8sClient, clusterObj, &clusterObj.Spec.Shardings[0]) + shardingCompSpecList, err := sharding.GenShardingCompSpecList4Test(testCtx.Ctx, k8sClient, clusterObj, &clusterObj.Spec.Shardings[0]) Expect(err).ShouldNot(HaveOccurred()) for _, spec := range shardingCompSpecList { shardingLabels := map[string]string{ diff --git a/controllers/parameters/parametertemplateextension_controller_test.go b/controllers/parameters/parametertemplateextension_controller_test.go index ade9129fc3d..915a5855b9d 100644 --- a/controllers/parameters/parametertemplateextension_controller_test.go +++ b/controllers/parameters/parametertemplateextension_controller_test.go @@ -33,7 +33,7 @@ import ( configcore "github.com/apecloud/kubeblocks/pkg/configuration/core" "github.com/apecloud/kubeblocks/pkg/constant" "github.com/apecloud/kubeblocks/pkg/controller/component" - intctrlutil "github.com/apecloud/kubeblocks/pkg/controllerutil" + "github.com/apecloud/kubeblocks/pkg/controller/sharding" testapps "github.com/apecloud/kubeblocks/pkg/testutil/apps" ) @@ -80,7 +80,7 @@ var _ = Describe("ParameterExtension Controller", func() { _, _, clusterObj, _, _ := mockReconcileResource() By("Create sharding component objs") - shardingCompSpecList, err := intctrlutil.GenShardingCompSpecList(testCtx.Ctx, k8sClient, clusterObj, &clusterObj.Spec.Shardings[0]) + shardingCompSpecList, err := sharding.GenShardingCompSpecList4Test(testCtx.Ctx, k8sClient, clusterObj, &clusterObj.Spec.Shardings[0]) 
Expect(err).ShouldNot(HaveOccurred()) for _, spec := range shardingCompSpecList { shardingLabels := map[string]string{ diff --git a/deploy/helm/crds/apps.kubeblocks.io_clusters.yaml b/deploy/helm/crds/apps.kubeblocks.io_clusters.yaml index 21c212f0ad3..79030d37f81 100644 --- a/deploy/helm/crds/apps.kubeblocks.io_clusters.yaml +++ b/deploy/helm/crds/apps.kubeblocks.io_clusters.yaml @@ -7828,7 +7828,7 @@ spec: pattern: ^[a-z0-9]([a-z0-9\.\-]*[a-z0-9])?$ type: string x-kubernetes-validations: - - message: name is immutable + - message: the name is immutable rule: self == oldSelf offline: description: Specifies the names of shards (components) to be @@ -7836,6 +7836,3379 @@ spec: items: type: string type: array + shardTemplates: + description: |- + Specifies a list of heterogeneous shard templates, allowing different groups of shards + to be created with distinct configurations. + items: + properties: + annotations: + additionalProperties: + type: string + description: Specifies Annotations to override or add + for underlying Pods, PVCs, Account & TLS Secrets, Services + Owned by Component. + type: object + compDef: + description: Specifies the name of the referenced ComponentDefinition. + maxLength: 64 + type: string + env: + description: |- + Defines Env to override. + Add new or override existing envs. + items: + description: EnvVar represents an environment variable + present in a Container. + properties: + name: + description: Name of the environment variable. Must + be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". 
+ Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. 
+ properties: + containerName: + description: 'Container name: required for + volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the + pod's namespace + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + flatInstanceOrdinal: + description: Specifies an override for the instance naming + of the shard. + type: boolean + instances: + description: Specifies an override for the custom instances + of the shard. + items: + description: InstanceTemplate allows customization of + individual replica configurations in a Component. + properties: + annotations: + additionalProperties: + type: string + description: |- + Specifies a map of key-value pairs to be merged into the Pod's existing annotations. + Existing keys will have their values overwritten, while new keys will be added to the annotations. + type: object + canary: + description: Indicate whether the instances belonging + to this template are canary instances. 
+ type: boolean + compDef: + description: Specifies the name of the referenced + ComponentDefinition. + maxLength: 64 + type: string + env: + description: |- + Defines Env to override. + Add new or override existing envs. + items: + description: EnvVar represents an environment + variable present in a Container. + properties: + name: + description: Name of the environment variable. + Must be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema + the FieldPath is written in terms + of, defaults to "v1". 
+ type: string + fieldPath: + description: Path of the field to + select in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output + format of the exposed resources, + defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to + select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret + in the pod's namespace + properties: + key: + description: The key of the secret + to select from. Must be a valid + secret key. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + labels: + additionalProperties: + type: string + description: |- + Specifies a map of key-value pairs that will be merged into the Pod's existing labels. + Values for existing keys will be overwritten, and new keys will be added. 
+ type: object + name: + description: |- + Name specifies the unique name of the instance Pod created using this InstanceTemplate. + This name is constructed by concatenating the Component's name, the template's name, and the instance's ordinal + using the pattern: $(cluster.name)-$(component.name)-$(template.name)-$(ordinal). Ordinals start from 0. + The name can't be empty. + maxLength: 54 + pattern: ^[a-z0-9]([a-z0-9\.\-]*[a-z0-9])?$ + type: string + ordinals: + description: |- + Specifies the desired Ordinals of this InstanceTemplate. + The Ordinals used to specify the ordinal of the instance (pod) names to be generated under this InstanceTemplate. + If Ordinals are defined, their number must be equal to or more than the corresponding replicas. + + + For example, if Ordinals is {ranges: [{start: 0, end: 1}], discrete: [7]}, + then the instance names generated under this InstanceTemplate would be + $(cluster.name)-$(component.name)-$(template.name)-0、$(cluster.name)-$(component.name)-$(template.name)-1 and + $(cluster.name)-$(component.name)-$(template.name)-7 + properties: + discrete: + items: + format: int32 + type: integer + type: array + ranges: + items: + description: |- + Range represents a range with a start and an end value. Both start and end are included. + It is used to define a continuous segment. + properties: + end: + format: int32 + type: integer + start: + format: int32 + type: integer + required: + - end + - start + type: object + type: array + type: object + replicas: + default: 1 + description: |- + Specifies the number of instances (Pods) to create from this InstanceTemplate. + This field allows setting how many replicated instances of the Component, + with the specific overrides in the InstanceTemplate, are created. + The default value is 1. A value of 0 disables instance creation. 
+ format: int32 + minimum: 0 + type: integer + resources: + description: |- + Specifies an override for the resource requirements of the first container in the Pod. + This field allows for customizing resource allocation (CPU, memory, etc.) for the container. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one + entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + schedulingPolicy: + description: |- + Specifies the scheduling policy for the instance. + If defined, it will overwrite the scheduling policy defined in ClusterSpec and/or ClusterComponentSpec. + properties: + affinity: + description: Specifies a group of affinity scheduling + rules of the Cluster, including NodeAffinity, + PodAffinity, and PodAntiAffinity. + properties: + nodeAffinity: + description: Describes node affinity scheduling + rules for the pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node matches the corresponding matchExpressions; the + node(s) with the highest sum are the most preferred. + items: + description: |- + An empty preferred scheduling term matches all objects with implicit weight 0 + (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, + associated with the corresponding + weight. + properties: + matchExpressions: + description: A list of node + selector requirements by + node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label + key that the selector + applies to. 
+ type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node + selector requirements by + node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label + key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + x-kubernetes-map-type: atomic + weight: + description: Weight associated + with matching the corresponding + nodeSelectorTerm, in the range + 1-100. 
+ format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to an update), the system + may or may not try to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of + node selector terms. The terms + are ORed. + items: + description: |- + A null or empty node selector term matches no objects. The requirements of + them are ANDed. + The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node + selector requirements by + node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label + key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node + selector requirements by + node's fields. 
+ items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label + key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + x-kubernetes-map-type: atomic + type: array + required: + - nodeSelectorTerms + type: object + x-kubernetes-map-type: atomic + type: object + podAffinity: + description: Describes pod affinity scheduling + rules (e.g. co-locate this pod in the + same node, zone, etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. 
+ items: + description: The weights of all of + the matched WeightedPodAffinityTerm + fields are added per-node to find + the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity + term, associated with the corresponding + weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions + is a list of label selector + requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key + is the label key + that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. 
The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + Also, MatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. + Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions + is a list of label selector + requirements. The requirements + are ANDed. 
+ items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key + is the label key + that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. 
+ type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions + is a list of label selector + requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is + the label key that + the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. 
+ type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + Also, MatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. 
The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. + Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions + is a list of label selector + requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is + the label key that + the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + description: Describes pod anti-affinity + scheduling rules (e.g. avoid putting this + pod in the same node, zone, etc. as some + other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the anti-affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. 
+ for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of + the matched WeightedPodAffinityTerm + fields are added per-node to find + the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity + term, associated with the corresponding + weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions + is a list of label selector + requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key + is the label key + that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + Also, MatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. + Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions + is a list of label selector + requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key + is the label key + that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. 
+ null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the anti-affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the anti-affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. 
+ properties: + matchExpressions: + description: matchExpressions + is a list of label selector + requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is + the label key that + the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + Also, MatchLabelKeys cannot be set when LabelSelector isn't set. 
+ This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. + Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions + is a list of label selector + requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is + the label key that + the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. 
If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + type: object + nodeName: + description: |- + NodeName is a request to schedule this Pod onto a specific node. If it is non-empty, + the scheduler simply schedules this Pod onto that node, assuming that it fits resource + requirements. + type: string + nodeSelector: + additionalProperties: + type: string + description: |- + NodeSelector is a selector which must be true for the Pod to fit on a node. 
+ Selector which must match a node's labels for the Pod to be scheduled on that node. + More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + type: object + x-kubernetes-map-type: atomic + schedulerName: + description: |- + If specified, the Pod will be dispatched by specified scheduler. + If not specified, the Pod will be dispatched by default scheduler. + type: string + tolerations: + description: |- + Allows Pods to be scheduled onto nodes with matching taints. + Each toleration in the array allows the Pod to tolerate node taints based on + specified `key`, `value`, `effect`, and `operator`. + + + - The `key`, `value`, and `effect` identify the taint that the toleration matches. + - The `operator` determines how the toleration matches the taint. + + + Pods with matching tolerations are allowed to be scheduled on tainted nodes, typically reserved for specific purposes. + items: + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple using the matching operator . + properties: + effect: + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. 
By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + topologySpreadConstraints: + description: |- + TopologySpreadConstraints describes how a group of Pods ought to spread across topology + domains. Scheduler will schedule Pods in a way which abides by the constraints. + All topologySpreadConstraints are ANDed. + items: + description: TopologySpreadConstraint specifies + how to spread matching pods among the given + topology. + properties: + labelSelector: + description: |- + LabelSelector is used to find matching pods. + Pods that match this label selector are counted to determine the number of pods + in their corresponding topology domain. + properties: + matchExpressions: + description: matchExpressions is a + list of label selector requirements. + The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select the pods over which + spreading will be calculated. The keys are used to lookup values from the + incoming pod labels, those key-value labels are ANDed with labelSelector + to select the group of existing pods over which spreading will be calculated + for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + MatchLabelKeys cannot be set when LabelSelector isn't set. + Keys that don't exist in the incoming pod labels will + be ignored. A null or empty list means only match against labelSelector. + + + This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + maxSkew: + description: |- + MaxSkew describes the degree to which pods may be unevenly distributed. + When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference + between the number of matching pods in the target topology and the global minimum. + The global minimum is the minimum number of matching pods in an eligible domain + or zero if the number of eligible domains is less than MinDomains. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 2/2/1: + In this case, the global minimum is 1. 
+ | zone1 | zone2 | zone3 | + | P P | P P | P | + - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; + scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) + violate MaxSkew(1). + - if MaxSkew is 2, incoming pod can be scheduled onto any zone. + When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence + to topologies that satisfy it. + It's a required field. Default value is 1 and 0 is not allowed. + format: int32 + type: integer + minDomains: + description: |- + MinDomains indicates a minimum number of eligible domains. + When the number of eligible domains with matching topology keys is less than minDomains, + Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed. + And when the number of eligible domains with matching topology keys equals or greater than minDomains, + this value has no effect on scheduling. + As a result, when the number of eligible domains is less than minDomains, + scheduler won't schedule more than maxSkew Pods to those domains. + If value is nil, the constraint behaves as if MinDomains is equal to 1. + Valid values are integers greater than 0. + When value is not nil, WhenUnsatisfiable must be DoNotSchedule. + + + For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same + labelSelector spread as 2/2/2: + | zone1 | zone2 | zone3 | + | P P | P P | P P | + The number of domains is less than 5(MinDomains), so "global minimum" is treated as 0. + In this situation, new pod with the same labelSelector cannot be scheduled, + because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, + it will violate MaxSkew. + + + This is a beta field and requires the MinDomainsInPodTopologySpread feature gate to be enabled (enabled by default). 
+ format: int32 + type: integer + nodeAffinityPolicy: + description: |- + NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector + when calculating pod topology spread skew. Options are: + - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. + - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. + + + If this value is nil, the behavior is equivalent to the Honor policy. + This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + type: string + nodeTaintsPolicy: + description: |- + NodeTaintsPolicy indicates how we will treat node taints when calculating + pod topology spread skew. Options are: + - Honor: nodes without taints, along with tainted nodes for which the incoming pod + has a toleration, are included. + - Ignore: node taints are ignored. All nodes are included. + + + If this value is nil, the behavior is equivalent to the Ignore policy. + This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + type: string + topologyKey: + description: |- + TopologyKey is the key of node labels. Nodes that have a label with this key + and identical values are considered to be in the same topology. + We consider each as a "bucket", and try to put balanced number + of pods into each bucket. + We define a domain as a particular instance of a topology. + Also, we define an eligible domain as a domain whose nodes meet the requirements of + nodeAffinityPolicy and nodeTaintsPolicy. + e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. + And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. + It's a required field. + type: string + whenUnsatisfiable: + description: |- + WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy + the spread constraint. 
+ - DoNotSchedule (default) tells the scheduler not to schedule it. + - ScheduleAnyway tells the scheduler to schedule the pod in any location, + but giving higher precedence to topologies that would help reduce the + skew. + A constraint is considered "Unsatisfiable" for an incoming pod + if and only if every possible node assignment for that pod would violate + "MaxSkew" on some topology. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 3/1/1: + | zone1 | zone2 | zone3 | + | P P P | P | P | + If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled + to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies + MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler + won't make it *more* imbalanced. + It's a required field. + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + type: object + serviceVersion: + description: |- + ServiceVersion specifies the version of the Service expected to be provisioned by this InstanceTemplate. + The version should follow the syntax and semantics of the "Semantic Versioning" specification (http://semver.org/). + maxLength: 32 + type: string + volumeClaimTemplates: + description: Specifies an override for the storage + requirements of the instances. + items: + properties: + annotations: + additionalProperties: + type: string + description: Specifies the annotations for + the PVC of the volume. + type: object + labels: + additionalProperties: + type: string + description: Specifies the labels for the + PVC of the volume. + type: object + name: + description: |- + Refers to the name of a volumeMount defined in either: + + + - `componentDefinition.spec.runtime.containers[*].volumeMounts` + + + The value of `name` must match the `name` field of a volumeMount specified in the corresponding `volumeMounts` array. 
+ type: string + persistentVolumeClaimName: + description: |- + Specifies the prefix of the PVC name for the volume. + + + For each replica, the final name of the PVC will be in format: - + type: string + spec: + description: |- + Defines the desired characteristics of a PersistentVolumeClaim that will be created for the volume + with the mount name specified in the `name` field. + properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + items: + type: string + type: array + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. + When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource + being referenced + type: string + name: + description: Name is the name of resource + being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. 
This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. 
+ type: string + kind: + description: Kind is the type of resource + being referenced + type: string + name: + description: Name is the name of resource + being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. 
Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query + over volumes to consider for binding. + properties: + matchExpressions: + description: matchExpressions is a + list of label selector requirements. + The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. 
+ If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. + If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource + exists. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#volumeattributesclass + (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled. + type: string + volumeMode: + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding + reference to the PersistentVolume backing + this claim. + type: string + type: object + required: + - name + type: object + type: array + required: + - name + type: object + type: array + labels: + additionalProperties: + type: string + description: Specifies Labels to override or add for underlying + Pods, PVCs, Account & TLS Secrets, Services Owned by + Component. + type: object + name: + description: |- + The unique name of this ShardTemplate. + + + The name can't be empty. + maxLength: 15 + pattern: ^[a-z0-9]([a-z0-9\.\-]*[a-z0-9])?$ + type: string + replicas: + default: 1 + description: Specifies the desired number of replicas + for the shard which are created from this template. 
+ format: int32 + minimum: 0 + type: integer + resources: + description: Specifies an override for the resource requirements + of the shard. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + schedulingPolicy: + description: |- + Specifies the scheduling policy for the shard. + If defined, it will overwrite the scheduling policy defined in ClusterSpec and/or default template. + properties: + affinity: + description: Specifies a group of affinity scheduling + rules of the Cluster, including NodeAffinity, PodAffinity, + and PodAntiAffinity. + properties: + nodeAffinity: + description: Describes node affinity scheduling + rules for the pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node matches the corresponding matchExpressions; the + node(s) with the highest sum are the most preferred. + items: + description: |- + An empty preferred scheduling term matches all objects with implicit weight 0 + (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated + with the corresponding weight. + properties: + matchExpressions: + description: A list of node selector + requirements by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key + that the selector applies + to. 
+ type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector + requirements by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key + that the selector applies + to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + x-kubernetes-map-type: atomic + weight: + description: Weight associated with + matching the corresponding nodeSelectorTerm, + in the range 1-100. 
+ format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to an update), the system + may or may not try to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node + selector terms. The terms are ORed. + items: + description: |- + A null or empty node selector term matches no objects. The requirements of + them are ANDed. + The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector + requirements by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key + that the selector applies + to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector + requirements by node's fields. 
+ items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key + that the selector applies + to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + x-kubernetes-map-type: atomic + type: array + required: + - nodeSelectorTerms + type: object + x-kubernetes-map-type: atomic + type: object + podAffinity: + description: Describes pod affinity scheduling + rules (e.g. co-locate this pod in the same node, + zone, etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. 
+ items: + description: The weights of all of the matched + WeightedPodAffinityTerm fields are added + per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity + term, associated with the corresponding + weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions + is a list of label selector + requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the + label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. 
The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + Also, MatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. + Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions + is a list of label selector + requirements. The requirements + are ANDed. 
+ items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the + label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. 
+ type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key <topologyKey> matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is + a list of label selector requirements. + The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. 
+ type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + Also, MatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. 
The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. + Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is + a list of label selector requirements. + The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling + rules (e.g. avoid putting this pod in the same + node, zone, etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the anti-affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. 
+ for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched + WeightedPodAffinityTerm fields are added + per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity + term, associated with the corresponding + weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions + is a list of label selector + requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the + label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + Also, MatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. + Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions + is a list of label selector + requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the + label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. 
+ null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the anti-affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the anti-affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. 
+ properties: + matchExpressions: + description: matchExpressions is + a list of label selector requirements. + The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + Also, MatchLabelKeys cannot be set when LabelSelector isn't set. 
+ This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. + Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is + a list of label selector requirements. + The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. 
If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + type: object + nodeName: + description: |- + NodeName is a request to schedule this Pod onto a specific node. If it is non-empty, + the scheduler simply schedules this Pod onto that node, assuming that it fits resource + requirements. + type: string + nodeSelector: + additionalProperties: + type: string + description: |- + NodeSelector is a selector which must be true for the Pod to fit on a node. 
+ Selector which must match a node's labels for the Pod to be scheduled on that node. + More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + type: object + x-kubernetes-map-type: atomic + schedulerName: + description: |- + If specified, the Pod will be dispatched by specified scheduler. + If not specified, the Pod will be dispatched by default scheduler. + type: string + tolerations: + description: |- + Allows Pods to be scheduled onto nodes with matching taints. + Each toleration in the array allows the Pod to tolerate node taints based on + specified `key`, `value`, `effect`, and `operator`. + + + - The `key`, `value`, and `effect` identify the taint that the toleration matches. + - The `operator` determines how the toleration matches the taint. + + + Pods with matching tolerations are allowed to be scheduled on tainted nodes, typically reserved for specific purposes. + items: + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple using the matching operator . + properties: + effect: + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. 
By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + topologySpreadConstraints: + description: |- + TopologySpreadConstraints describes how a group of Pods ought to spread across topology + domains. Scheduler will schedule Pods in a way which abides by the constraints. + All topologySpreadConstraints are ANDed. + items: + description: TopologySpreadConstraint specifies + how to spread matching pods among the given topology. + properties: + labelSelector: + description: |- + LabelSelector is used to find matching pods. + Pods that match this label selector are counted to determine the number of pods + in their corresponding topology domain. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select the pods over which + spreading will be calculated. The keys are used to lookup values from the + incoming pod labels, those key-value labels are ANDed with labelSelector + to select the group of existing pods over which spreading will be calculated + for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + MatchLabelKeys cannot be set when LabelSelector isn't set. + Keys that don't exist in the incoming pod labels will + be ignored. A null or empty list means only match against labelSelector. + + + This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + maxSkew: + description: |- + MaxSkew describes the degree to which pods may be unevenly distributed. + When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference + between the number of matching pods in the target topology and the global minimum. + The global minimum is the minimum number of matching pods in an eligible domain + or zero if the number of eligible domains is less than MinDomains. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 2/2/1: + In this case, the global minimum is 1. 
+ | zone1 | zone2 | zone3 | + | P P | P P | P | + - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; + scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) + violate MaxSkew(1). + - if MaxSkew is 2, incoming pod can be scheduled onto any zone. + When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence + to topologies that satisfy it. + It's a required field. Default value is 1 and 0 is not allowed. + format: int32 + type: integer + minDomains: + description: |- + MinDomains indicates a minimum number of eligible domains. + When the number of eligible domains with matching topology keys is less than minDomains, + Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed. + And when the number of eligible domains with matching topology keys equals or greater than minDomains, + this value has no effect on scheduling. + As a result, when the number of eligible domains is less than minDomains, + scheduler won't schedule more than maxSkew Pods to those domains. + If value is nil, the constraint behaves as if MinDomains is equal to 1. + Valid values are integers greater than 0. + When value is not nil, WhenUnsatisfiable must be DoNotSchedule. + + + For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same + labelSelector spread as 2/2/2: + | zone1 | zone2 | zone3 | + | P P | P P | P P | + The number of domains is less than 5(MinDomains), so "global minimum" is treated as 0. + In this situation, new pod with the same labelSelector cannot be scheduled, + because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, + it will violate MaxSkew. + + + This is a beta field and requires the MinDomainsInPodTopologySpread feature gate to be enabled (enabled by default). 
+ format: int32 + type: integer + nodeAffinityPolicy: + description: |- + NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector + when calculating pod topology spread skew. Options are: + - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. + - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. + + + If this value is nil, the behavior is equivalent to the Honor policy. + This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + type: string + nodeTaintsPolicy: + description: |- + NodeTaintsPolicy indicates how we will treat node taints when calculating + pod topology spread skew. Options are: + - Honor: nodes without taints, along with tainted nodes for which the incoming pod + has a toleration, are included. + - Ignore: node taints are ignored. All nodes are included. + + + If this value is nil, the behavior is equivalent to the Ignore policy. + This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + type: string + topologyKey: + description: |- + TopologyKey is the key of node labels. Nodes that have a label with this key + and identical values are considered to be in the same topology. + We consider each as a "bucket", and try to put balanced number + of pods into each bucket. + We define a domain as a particular instance of a topology. + Also, we define an eligible domain as a domain whose nodes meet the requirements of + nodeAffinityPolicy and nodeTaintsPolicy. + e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. + And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. + It's a required field. + type: string + whenUnsatisfiable: + description: |- + WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy + the spread constraint. 
+ - DoNotSchedule (default) tells the scheduler not to schedule it. + - ScheduleAnyway tells the scheduler to schedule the pod in any location, + but giving higher precedence to topologies that would help reduce the + skew. + A constraint is considered "Unsatisfiable" for an incoming pod + if and only if every possible node assignment for that pod would violate + "MaxSkew" on some topology. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 3/1/1: + | zone1 | zone2 | zone3 | + | P P P | P | P | + If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled + to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies + MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler + won't make it *more* imbalanced. + It's a required field. + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + type: object + serviceVersion: + description: |- + ServiceVersion specifies the version of the Service expected to be provisioned by this template. + The version should follow the syntax and semantics of the "Semantic Versioning" specification (http://semver.org/). + maxLength: 32 + type: string + shardIDs: + description: Specifies the shard IDs to take over from + the existing shards. + items: + type: string + type: array + shardingDef: + description: |- + Specifies the ShardingDefinition custom resource (CR) that defines the sharding's characteristics and behavior. + + + The full name or regular expression is supported to match the ShardingDefinition. + maxLength: 64 + type: string + shards: + description: The number of shards to create from this + ShardTemplate. + format: int32 + maximum: 2048 + minimum: 0 + type: integer + volumeClaimTemplates: + description: Specifies an override for the storage requirements + of the shard. 
+ items: + properties: + annotations: + additionalProperties: + type: string + description: Specifies the annotations for the PVC + of the volume. + type: object + labels: + additionalProperties: + type: string + description: Specifies the labels for the PVC of + the volume. + type: object + name: + description: |- + Refers to the name of a volumeMount defined in either: + + + - `componentDefinition.spec.runtime.containers[*].volumeMounts` + + + The value of `name` must match the `name` field of a volumeMount specified in the corresponding `volumeMounts` array. + type: string + persistentVolumeClaimName: + description: |- + Specifies the prefix of the PVC name for the volume. + + + For each replica, the final name of the PVC will be in format: - + type: string + spec: + description: |- + Defines the desired characteristics of a PersistentVolumeClaim that will be created for the volume + with the mount name specified in the `name` field. + properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + items: + type: string + type: array + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. + When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. 
+ If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource + being referenced + type: string + name: + description: Name is the name of resource + being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. 
+ (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource + being referenced + type: string + name: + description: Name is the name of resource + being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over + volumes to consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. + If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource + exists. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#volumeattributesclass + (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled. + type: string + volumeMode: + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference + to the PersistentVolume backing this claim. 
+ type: string + type: object + required: + - name + type: object + type: array + required: + - name + type: object + type: array shardingDef: description: |- Specifies the ShardingDefinition custom resource (CR) that defines the sharding's characteristics and behavior. @@ -7866,7 +11239,7 @@ spec: type: integer template: description: |- - The template for generating Components for shards, where each shard consists of one Component. + The default template for generating Components for shards, where each shard consists of one Component. This field is of type ClusterComponentSpec, which encapsulates all the required details and @@ -13961,7 +17334,6 @@ spec: type: object required: - name - - template type: object maxItems: 128 minItems: 1 diff --git a/docs/developer_docs/api-reference/cluster.md b/docs/developer_docs/api-reference/cluster.md index e87e1245477..ea6b3eb65d1 100644 --- a/docs/developer_docs/api-reference/cluster.md +++ b/docs/developer_docs/api-reference/cluster.md @@ -3725,6 +3725,28 @@ string +shards
+ +int32 + + + +

Specifies the desired number of shards.

+

Users can declare the desired number of shards through this field. +KubeBlocks dynamically creates and deletes Components based on the difference +between the desired and actual number of shards. +KubeBlocks provides lifecycle management for sharding, including:

+
    +
  • Executing the shardProvision Action defined in the ShardingDefinition when the number of shards increases. +This allows for custom actions to be performed after a new shard is provisioned.
  • +
  • Executing the shardTerminate Action defined in the ShardingDefinition when the number of shards decreases. +This enables custom cleanup or data migration tasks to be executed before a shard is terminated. +Resources and data associated with the corresponding Component will also be deleted.
  • +
+ + + + template
@@ -3733,7 +3755,8 @@ ClusterComponentSpec -

The template for generating Components for shards, where each shard consists of one Component.

+(Optional) +

The default template for generating Components for shards, where each shard consists of one Component.

This field is of type ClusterComponentSpec, which encapsulates all the required details and definitions for creating and managing the Components. KubeBlocks uses this template to generate a set of identical Components of shards. @@ -3744,24 +3767,17 @@ enabling sharding and distribution of workloads across Components.

-shards
+shardTemplates
-int32 +
+[]ShardTemplate + -

Specifies the desired number of shards.

-

Users can declare the desired number of shards through this field. -KubeBlocks dynamically creates and deletes Components based on the difference -between the desired and actual number of shards. -KubeBlocks provides lifecycle management for sharding, including:

-
    -
  • Executing the shardProvision Action defined in the ShardingDefinition when the number of shards increases. -This allows for custom actions to be performed after a new shard is provisioned.
  • -
  • Executing the shardTerminate Action defined in the ShardingDefinition when the number of shards decreases. -This enables custom cleanup or data migration tasks to be executed before a shard is terminated. -Resources and data associated with the corresponding Component will also be deleted.
  • -
+(Optional) +

Specifies a list of heterogeneous shard templates, allowing different groups of shards +to be created with distinct configurations.

@@ -7887,7 +7903,7 @@ ContainerVars

InstanceTemplate

-(Appears on:ClusterComponentSpec, ComponentSpec) +(Appears on:ClusterComponentSpec, ComponentSpec, ShardTemplate)

InstanceTemplate allows customization of individual replica configurations in a Component.

@@ -8755,7 +8771,7 @@ and specifies that PersistentVolumeClaims associated with VolumeClaimTemplates w

PersistentVolumeClaimTemplate

-(Appears on:ClusterComponentSpec, ComponentSpec, InstanceTemplate) +(Appears on:ClusterComponentSpec, ComponentSpec, InstanceTemplate, ShardTemplate)

@@ -9669,7 +9685,7 @@ it will be counted towards MaxUnavailable.

SchedulingPolicy

-(Appears on:ClusterComponentSpec, ClusterSpec, ComponentSpec, InstanceTemplate, InstanceTemplate) +(Appears on:ClusterComponentSpec, ClusterSpec, ComponentSpec, InstanceTemplate, ShardTemplate, InstanceTemplate)

SchedulingPolicy defines the scheduling policy for instances.

@@ -11138,6 +11154,217 @@ and the value will be presented in the following format: service1.name:port1,ser +

ShardTemplate +

+

+(Appears on:ClusterSharding) +

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+name
+ +string + +
+

The unique name of this ShardTemplate.

+

The name can’t be empty.

+
+shardingDef
+ +string + +
+(Optional) +

Specifies the ShardingDefinition custom resource (CR) that defines the sharding’s characteristics and behavior.

+

The full name or regular expression is supported to match the ShardingDefinition.

+
+shards
+ +int32 + +
+(Optional) +

The number of shards to create from this ShardTemplate.

+
+shardIDs
+ +[]string + +
+(Optional) +

Specifies the shard IDs to take over from the existing shards.

+
+serviceVersion
+ +string + +
+(Optional) +

ServiceVersion specifies the version of the Service expected to be provisioned by this template. +The version should follow the syntax and semantics of the “Semantic Versioning” specification (http://semver.org/).

+
+compDef
+ +string + +
+(Optional) +

Specifies the name of the referenced ComponentDefinition.

+
+labels
+ +map[string]string + +
+(Optional) +

Specifies Labels to override or add for underlying Pods, PVCs, Account & TLS Secrets, Services Owned by Component.

+
+annotations
+ +map[string]string + +
+(Optional) +

Specifies Annotations to override or add for underlying Pods, PVCs, Account & TLS Secrets, Services Owned by Component.

+
+env
+ + +[]Kubernetes core/v1.EnvVar + + +
+(Optional) +

Defines Env to override. +Add new or override existing envs.

+
+replicas
+ +int32 + +
+(Optional) +

Specifies the desired number of replicas for the shards which are created from this template.

+
+schedulingPolicy
+ + +SchedulingPolicy + + +
+(Optional) +

Specifies the scheduling policy for the shard. +If defined, it will overwrite the scheduling policy defined in the ClusterSpec and/or the default template.

+
+resources
+ + +Kubernetes core/v1.ResourceRequirements + + +
+(Optional) +

Specifies an override for the resource requirements of the shard.

+
+volumeClaimTemplates
+ + +[]PersistentVolumeClaimTemplate + + +
+(Optional) +

Specifies an override for the storage requirements of the shard.

+
+instances
+ + +[]InstanceTemplate + + +
+(Optional) +

Specifies an override for the custom instances of the shard.

+
+flatInstanceOrdinal
+ +bool + +
+(Optional) +

Specifies an override for the instance naming of the shard.

+

ShardingDefinitionSpec

diff --git a/pkg/common/generate.go b/pkg/common/generate.go deleted file mode 100644 index e05f8e3f3b5..00000000000 --- a/pkg/common/generate.go +++ /dev/null @@ -1,53 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package common - -import ( - "fmt" - - utilrand "k8s.io/apimachinery/pkg/util/rand" -) - -// NameGenerator generates names for objects. Some backends may have more information -// available to guide selection of new names and this interface hides those details. -type NameGenerator interface { - // GenerateName generates a valid name from the base name, adding a random suffix to the - // the base. If base is valid, the returned name must also be valid. The generator is - // responsible for knowing the maximum valid name length. - GenerateName(base string) string -} - -// simpleNameGenerator generates random names. -type simpleNameGenerator struct{} - -// SimpleNameGenerator is a generator that returns the name plus a random suffix of five alphanumerics -// when a name is requested. 
The string is guaranteed to not exceed the length of a standard Kubernetes -// name (63 characters) -var SimpleNameGenerator NameGenerator = simpleNameGenerator{} - -const ( - maxNameLength = 63 - randomLength = 3 - MaxGeneratedNameLength = maxNameLength - randomLength -) - -func (simpleNameGenerator) GenerateName(base string) string { - if len(base) > MaxGeneratedNameLength { - base = base[:MaxGeneratedNameLength] - } - return fmt.Sprintf("%s%s", base, utilrand.String(randomLength)) -} diff --git a/pkg/constant/labels.go b/pkg/constant/labels.go index afeb6be84f1..eb550e10b1f 100644 --- a/pkg/constant/labels.go +++ b/pkg/constant/labels.go @@ -44,6 +44,7 @@ const ( KBAppComponentLabelKey = "apps.kubeblocks.io/component-name" KBAppShardingNameLabelKey = "apps.kubeblocks.io/sharding-name" + KBAppShardTemplateLabelKey = "apps.kubeblocks.io/shard-template" KBAppInstanceTemplateLabelKey = "apps.kubeblocks.io/instance-template" PVCNameLabelKey = "apps.kubeblocks.io/pvc-name" VolumeClaimTemplateNameLabelKey = "apps.kubeblocks.io/vct-name" @@ -55,14 +56,14 @@ const ( ) func GetClusterLabels(clusterName string, labels ...map[string]string) map[string]string { - return withShardingNameLabel(map[string]string{ + return withShardingLabels(map[string]string{ AppManagedByLabelKey: AppName, AppInstanceLabelKey: clusterName, }, labels...) } func GetCompLabels(clusterName, compName string, labels ...map[string]string) map[string]string { - return withShardingNameLabel(map[string]string{ + return withShardingLabels(map[string]string{ AppManagedByLabelKey: AppName, AppInstanceLabelKey: clusterName, KBAppComponentLabelKey: compName, @@ -78,15 +79,17 @@ func GetCompLabelsWithDef(clusterName, compName, compDef string, labels ...map[s if len(compDef) > 0 { m[AppComponentLabelKey] = compDef } - return withShardingNameLabel(m, labels...) + return withShardingLabels(m, labels...) 
} -func withShardingNameLabel(labels map[string]string, extraLabels ...map[string]string) map[string]string { +func withShardingLabels(labels map[string]string, extraLabels ...map[string]string) map[string]string { for _, m := range extraLabels { if m != nil { if v, ok := m[KBAppShardingNameLabelKey]; ok { labels[KBAppShardingNameLabelKey] = v - break + } + if v, ok := m[KBAppShardTemplateLabelKey]; ok { + labels[KBAppShardTemplateLabelKey] = v } } } diff --git a/pkg/constant/pattern.go b/pkg/constant/pattern.go index 511857bbe61..586cb69136e 100644 --- a/pkg/constant/pattern.go +++ b/pkg/constant/pattern.go @@ -92,8 +92,3 @@ func GenerateDefaultRoleName(cmpdName string) string { func GenerateWorkloadNamePattern(clusterName, compName string) string { return fmt.Sprintf("%s-%s", clusterName, compName) } - -// GenerateShardingNamePrefix generates sharding name prefix. -func GenerateShardingNamePrefix(shardingName string) string { - return fmt.Sprintf("%s-", shardingName) -} diff --git a/pkg/controller/configuration/resource_wrapper.go b/pkg/controller/configuration/resource_wrapper.go index 18d186b6498..c29c4d2929e 100644 --- a/pkg/controller/configuration/resource_wrapper.go +++ b/pkg/controller/configuration/resource_wrapper.go @@ -31,7 +31,7 @@ import ( cfgcore "github.com/apecloud/kubeblocks/pkg/configuration/core" "github.com/apecloud/kubeblocks/pkg/constant" "github.com/apecloud/kubeblocks/pkg/controller/render" - intctrlutil "github.com/apecloud/kubeblocks/pkg/controllerutil" + "github.com/apecloud/kubeblocks/pkg/controller/sharding" ) type ResourceFetcher[T any] struct { @@ -103,7 +103,7 @@ func (r *ResourceFetcher[T]) ComponentAndComponentDef() *T { func (r *ResourceFetcher[T]) ComponentSpec() *T { return r.Wrap(func() (err error) { - r.ClusterComObj, err = intctrlutil.GetComponentSpecByName(r.Context, r.Client, r.ClusterObj, r.ComponentName) + r.ClusterComObj, err = r.getComponentSpecByName() if err != nil { return err } @@ -111,6 +111,25 @@ func (r 
*ResourceFetcher[T]) ComponentSpec() *T { }) } +func (r *ResourceFetcher[T]) getComponentSpecByName() (*appsv1.ClusterComponentSpec, error) { + compSpec := r.ClusterObj.Spec.GetComponentByName(r.ComponentName) + if compSpec != nil { + return compSpec, nil + } + for _, spec := range r.ClusterObj.Spec.Shardings { + shardingCompList, err := sharding.ListShardingCompSpecs(r.Context, r.Client, r.ClusterObj, &spec) + if err != nil { + return nil, err + } + for i, shardingComp := range shardingCompList { + if shardingComp.Name == r.ComponentName { + return shardingCompList[i], nil + } + } + } + return nil, nil +} + func (r *ResourceFetcher[T]) ConfigMap(configSpec string) *T { cmKey := client.ObjectKey{ Name: cfgcore.GetComponentCfgName(r.ClusterName, r.ComponentName, configSpec), diff --git a/pkg/controllerutil/sharding_utils.go b/pkg/controller/sharding/legacy.go similarity index 66% rename from pkg/controllerutil/sharding_utils.go rename to pkg/controller/sharding/legacy.go index 66b562515a3..1038debdd21 100644 --- a/pkg/controllerutil/sharding_utils.go +++ b/pkg/controller/sharding/legacy.go @@ -17,7 +17,7 @@ You should have received a copy of the GNU Affero General Public License along with this program. If not, see . 
*/ -package controllerutil +package sharding import ( "context" @@ -25,24 +25,19 @@ import ( "slices" "strings" + "k8s.io/apimachinery/pkg/util/rand" "k8s.io/apimachinery/pkg/util/sets" "sigs.k8s.io/controller-runtime/pkg/client" appsv1 "github.com/apecloud/kubeblocks/apis/apps/v1" - "github.com/apecloud/kubeblocks/pkg/common" - "github.com/apecloud/kubeblocks/pkg/constant" ) -const ( - GenerateNameMaxRetryTimes = 1000000 -) - -func GenShardingCompSpecList(ctx context.Context, cli client.Reader, +func GenShardingCompSpecList4Test(ctx context.Context, cli client.Reader, cluster *appsv1.Cluster, sharding *appsv1.ClusterSharding) ([]*appsv1.ClusterComponentSpec, error) { offline := make([]string, 0) if sharding != nil && len(sharding.Offline) > 0 { for _, name := range sharding.Offline { - shortName, err := parseCompShortName(cluster.Name, name) + shortName, err := parseCompShortName4Test(cluster.Name, name) if err != nil { return nil, err } @@ -51,11 +46,11 @@ func GenShardingCompSpecList(ctx context.Context, cli client.Reader, } // list undeleted sharding component specs, the deleting ones are not included - undeletedShardingCompSpecs, err := listUndeletedShardingCompSpecs(ctx, cli, cluster, sharding) + undeletedShardingCompSpecs, err := listUndeletedShardingCompSpecs4Test(ctx, cli, cluster, sharding) if err != nil { return nil, err } - shards := removeOfflineShards(undeletedShardingCompSpecs, offline) + shards := removeOfflineShards4Test(undeletedShardingCompSpecs, offline) shardNames := sets.Set[string]{} for _, existShardingCompSpec := range undeletedShardingCompSpecs { @@ -69,7 +64,7 @@ func GenShardingCompSpecList(ctx context.Context, cli client.Reader, return shards, nil case len(shards) < int(sharding.Shards): for i := len(shards); i < int(sharding.Shards); i++ { - name, err := genRandomShardName(sharding.Name, shardNames) + name, err := genRandomShardName4Test(sharding.Name, shardNames) if err != nil { return nil, err } @@ -87,7 +82,20 @@ func 
GenShardingCompSpecList(ctx context.Context, cli client.Reader, return shards, nil } -func removeOfflineShards(shards []*appsv1.ClusterComponentSpec, offline []string) []*appsv1.ClusterComponentSpec { +func parseCompShortName4Test(clusterName, compName string) (string, error) { + name, found := strings.CutPrefix(compName, fmt.Sprintf("%s-", clusterName)) + if !found { + return "", fmt.Errorf("the component name has no cluster name as prefix: %s", compName) + } + return name, nil +} + +func listUndeletedShardingCompSpecs4Test(ctx context.Context, cli client.Reader, + cluster *appsv1.Cluster, sharding *appsv1.ClusterSharding) ([]*appsv1.ClusterComponentSpec, error) { + return listShardingCompSpecs4Test(ctx, cli, cluster, sharding, false) +} + +func removeOfflineShards4Test(shards []*appsv1.ClusterComponentSpec, offline []string) []*appsv1.ClusterComponentSpec { if len(offline) > 0 { s := sets.New(offline...) return slices.DeleteFunc(shards, func(shard *appsv1.ClusterComponentSpec) bool { @@ -97,65 +105,29 @@ func removeOfflineShards(shards []*appsv1.ClusterComponentSpec, offline []string return shards } -// listNCheckShardingComponents lists sharding components and checks if the sharding components are correct. It returns undeleted and deleting sharding components. 
-func listNCheckShardingComponents(ctx context.Context, cli client.Reader, - cluster *appsv1.Cluster, sharding *appsv1.ClusterSharding) ([]appsv1.Component, []appsv1.Component, error) { - shardingComps, err := ListShardingComponents(ctx, cli, cluster, sharding.Name) - if err != nil { - return nil, nil, err - } - - deletingShardingComps := make([]appsv1.Component, 0) - undeletedShardingComps := make([]appsv1.Component, 0) - for _, comp := range shardingComps { - if comp.GetDeletionTimestamp().IsZero() { - undeletedShardingComps = append(undeletedShardingComps, comp) - } else { - deletingShardingComps = append(deletingShardingComps, comp) +func genRandomShardName4Test(shardingName string, shardNames sets.Set[string]) (string, error) { + for i := 0; i < generateShardIDMaxRetryTimes; i++ { + id := rand.String(ShardIDLength) + name := fmt.Sprintf("%s-%s", shardingName, id) + if !shardNames.Has(name) { + return name, nil } } - - // TODO: ??? - // if cluster.Generation == cluster.Status.ObservedGeneration && len(undeletedShardingComps) != int(sharding.Shards) { - // return nil, nil, errors.New("sharding components are not correct when cluster is not updating") - // } - - return undeletedShardingComps, deletingShardingComps, nil + return "", fmt.Errorf("failed to generate a unique random name for sharding component: %s after %d retries", shardingName, generateShardIDMaxRetryTimes) } -func ListShardingComponents(ctx context.Context, cli client.Reader, - cluster *appsv1.Cluster, shardingName string) ([]appsv1.Component, error) { - compList := &appsv1.ComponentList{} - ml := client.MatchingLabels{ - constant.AppInstanceLabelKey: cluster.Name, - constant.KBAppShardingNameLabelKey: shardingName, - } - if err := cli.List(ctx, compList, client.InNamespace(cluster.Namespace), ml); err != nil { - return nil, err - } - return compList.Items, nil -} - -// listUndeletedShardingCompSpecs lists undeleted sharding component specs. 
-func listUndeletedShardingCompSpecs(ctx context.Context, cli client.Reader, +func ListShardingCompSpecs(ctx context.Context, cli client.Reader, cluster *appsv1.Cluster, sharding *appsv1.ClusterSharding) ([]*appsv1.ClusterComponentSpec, error) { - return listShardingCompSpecs(ctx, cli, cluster, sharding, false) + return listShardingCompSpecs4Test(ctx, cli, cluster, sharding, true) } -// listAllShardingCompSpecs lists all sharding component specs, including undeleted and deleting ones. -func listAllShardingCompSpecs(ctx context.Context, cli client.Reader, - cluster *appsv1.Cluster, sharding *appsv1.ClusterSharding) ([]*appsv1.ClusterComponentSpec, error) { - return listShardingCompSpecs(ctx, cli, cluster, sharding, true) -} - -// listShardingCompSpecs lists sharding component specs, with an option to include those marked for deletion. -func listShardingCompSpecs(ctx context.Context, cli client.Reader, +func listShardingCompSpecs4Test(ctx context.Context, cli client.Reader, cluster *appsv1.Cluster, sharding *appsv1.ClusterSharding, includeDeleting bool) ([]*appsv1.ClusterComponentSpec, error) { if sharding == nil { return nil, nil } - undeletedShardingComps, deletingShardingComps, err := listNCheckShardingComponents(ctx, cli, cluster, sharding) + undeletedShardingComps, deletingShardingComps, err := listNCheckShardingComponents4Test(ctx, cli, cluster, sharding) if err != nil { return nil, err } @@ -165,7 +137,7 @@ func listShardingCompSpecs(ctx context.Context, cli client.Reader, processComps := func(comps []appsv1.Component) error { for _, comp := range comps { - compShortName, err := parseCompShortName(cluster.Name, comp.Name) + compShortName, err := parseCompShortName4Test(cluster.Name, comp.Name) if err != nil { return err } @@ -191,21 +163,27 @@ func listShardingCompSpecs(ctx context.Context, cli client.Reader, return compSpecList, nil } -func genRandomShardName(shardingName string, shardNames sets.Set[string]) (string, error) { - shardingNamePrefix := 
constant.GenerateShardingNamePrefix(shardingName) - for i := 0; i < GenerateNameMaxRetryTimes; i++ { - name := common.SimpleNameGenerator.GenerateName(shardingNamePrefix) - if !shardNames.Has(name) { - return name, nil - } +func listNCheckShardingComponents4Test(ctx context.Context, cli client.Reader, + cluster *appsv1.Cluster, sharding *appsv1.ClusterSharding) ([]appsv1.Component, []appsv1.Component, error) { + shardingComps, err := ListShardingComponents(ctx, cli, cluster, sharding.Name) + if err != nil { + return nil, nil, err } - return "", fmt.Errorf("failed to generate a unique random name for sharding component: %s after %d retries", shardingName, GenerateNameMaxRetryTimes) -} -func parseCompShortName(clusterName, compName string) (string, error) { - name, found := strings.CutPrefix(compName, fmt.Sprintf("%s-", clusterName)) - if !found { - return "", fmt.Errorf("the component name has no cluster name as prefix: %s", compName) + deletingShardingComps := make([]appsv1.Component, 0) + undeletedShardingComps := make([]appsv1.Component, 0) + for _, comp := range shardingComps { + if comp.GetDeletionTimestamp().IsZero() { + undeletedShardingComps = append(undeletedShardingComps, comp) + } else { + deletingShardingComps = append(deletingShardingComps, comp) + } } - return name, nil + + // TODO: ??? 
+ // if cluster.Generation == cluster.Status.ObservedGeneration && len(undeletedShardingComps) != int(sharding.Shards) { + // return nil, nil, errors.New("sharding components are not correct when cluster is not updating") + // } + + return undeletedShardingComps, deletingShardingComps, nil } diff --git a/pkg/controller/sharding/suite_test.go b/pkg/controller/sharding/suite_test.go new file mode 100644 index 00000000000..d3832a58485 --- /dev/null +++ b/pkg/controller/sharding/suite_test.go @@ -0,0 +1,123 @@ +/* +Copyright (C) 2022-2025 ApeCloud Co., Ltd + +This file is part of KubeBlocks project + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . +*/ + +package sharding + +import ( + "context" + "go/build" + "path/filepath" + "testing" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + + "github.com/go-logr/logr" + "github.com/go-logr/logr/testr" + "go.uber.org/zap/zapcore" + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/record" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/envtest" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + + appsv1 "github.com/apecloud/kubeblocks/apis/apps/v1" + "github.com/apecloud/kubeblocks/pkg/testutil" + viper "github.com/apecloud/kubeblocks/pkg/viperx" +) + +// These tests use Ginkgo (BDD-style Go testing framework). Refer to +// http://onsi.github.io/ginkgo/ to learn more about Ginkgo. + +var cfg *rest.Config +var k8sClient client.Client +var testEnv *envtest.Environment +var ctx context.Context +var cancel context.CancelFunc +var testCtx testutil.TestContext +var reqCtx requestCtx + +func init() { + viper.AutomaticEnv() +} + +type requestCtx struct { + Ctx context.Context + Req ctrl.Request + Log logr.Logger + Recorder record.EventRecorder +} + +func TestAPIs(t *testing.T) { + reqCtx.Log = testr.New(t) + reqCtx.Req = ctrl.Request{} + + RegisterFailHandler(Fail) + RunSpecs(t, "Sharding Suite") +} + +var _ = BeforeSuite(func() { + if viper.GetBool("ENABLE_DEBUG_LOG") { + logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true), func(o *zap.Options) { + o.TimeEncoder = zapcore.ISO8601TimeEncoder + })) + } + + ctx, cancel = context.WithCancel(context.TODO()) + reqCtx.Ctx = ctx + + By("bootstrapping test environment") + testEnv = &envtest.Environment{ + CRDDirectoryPaths: []string{ + filepath.Join("..", "..", "..", "config", "crd", "bases"), + // use VolumeSnapshot v1beta1 API CRDs. 
+ filepath.Join(build.Default.GOPATH, "pkg", "mod", "github.com", "kubernetes-csi/external-snapshotter/", + "client/v3@v3.0.0", "config", "crd"), + }, + ErrorIfCRDPathMissing: true, + } + + var err error + // cfg is defined in this file globally. + cfg, err = testEnv.Start() + Expect(err).NotTo(HaveOccurred()) + Expect(cfg).NotTo(BeNil()) + + // +kubebuilder:scaffold:scheme + scheme := scheme.Scheme + + err = appsv1.AddToScheme(scheme) + Expect(err).NotTo(HaveOccurred()) + + k8sClient, err = client.New(cfg, client.Options{Scheme: scheme}) + Expect(err).NotTo(HaveOccurred()) + Expect(k8sClient).NotTo(BeNil()) + + testCtx = testutil.NewDefaultTestContext(ctx, k8sClient, testEnv) +}) + +var _ = AfterSuite(func() { + cancel() + By("tearing down the test environment") + err := testEnv.Stop() + Expect(err).NotTo(HaveOccurred()) +}) diff --git a/pkg/controller/sharding/types.go b/pkg/controller/sharding/types.go new file mode 100644 index 00000000000..6a75c7c63a3 --- /dev/null +++ b/pkg/controller/sharding/types.go @@ -0,0 +1,106 @@ +/* +Copyright (C) 2022-2025 ApeCloud Co., Ltd + +This file is part of KubeBlocks project + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . 
+*/ + +package sharding + +import ( + "fmt" + "slices" + "strings" + + "k8s.io/apimachinery/pkg/util/rand" + "k8s.io/apimachinery/pkg/util/sets" + + appsv1 "github.com/apecloud/kubeblocks/apis/apps/v1" +) + +const ( + ShardIDLength = 3 +) + +const ( + generateShardIDMaxRetryTimes = 1000000 + defaultShardTemplateName = "" +) + +type shardIDGenerator struct { + clusterName string + shardingName string + running []string + offline []string + takeOverByTemplate []string + initialized bool + ids sets.Set[string] +} + +func (g *shardIDGenerator) allocate() (string, error) { + if !g.initialized { + g.ids = sets.New(g.running...).Insert(g.offline...).Insert(g.takeOverByTemplate...) + g.initialized = true + } + for i := 0; i < generateShardIDMaxRetryTimes; i++ { + id := rand.String(ShardIDLength) + name := fmt.Sprintf("%s-%s-%s", g.clusterName, g.shardingName, id) + if !g.ids.Has(name) { + g.ids.Insert(name) + return id, nil + } + } + return "", fmt.Errorf("failed to allocate a unique shard id") +} + +type shardTemplate struct { + name string + count int32 + template *appsv1.ClusterComponentSpec + shards []*appsv1.ClusterComponentSpec +} + +func (t *shardTemplate) align(generator *shardIDGenerator) error { + diff := len(t.shards) - int(t.count) + switch { + case diff == 0: + return nil + case diff < 0: + return t.create(generator, diff*-1) + default: + return t.delete(diff) + } +} + +func (t *shardTemplate) create(generator *shardIDGenerator, cnt int) error { + for i := 0; i < cnt; i++ { + id, err := generator.allocate() + if err != nil { + return err + } + spec := t.template.DeepCopy() + spec.Name = fmt.Sprintf("%s-%s", generator.shardingName, id) + t.shards = append(t.shards, spec) + } + return nil +} + +func (t *shardTemplate) delete(cnt int) error { + slices.SortFunc(t.shards, func(a, b *appsv1.ClusterComponentSpec) int { + return strings.Compare(a.Name, b.Name) + }) + t.shards = t.shards[:len(t.shards)-cnt] + return nil +} diff --git a/pkg/controller/sharding/utils.go 
b/pkg/controller/sharding/utils.go new file mode 100644 index 00000000000..d2db153dcad --- /dev/null +++ b/pkg/controller/sharding/utils.go @@ -0,0 +1,225 @@ +/* +Copyright (C) 2022-2025 ApeCloud Co., Ltd + +This file is part of KubeBlocks project + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . +*/ + +package sharding + +import ( + "context" + "fmt" + "slices" + "strings" + + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/utils/ptr" + "sigs.k8s.io/controller-runtime/pkg/client" + + appsv1 "github.com/apecloud/kubeblocks/apis/apps/v1" + "github.com/apecloud/kubeblocks/pkg/constant" + "github.com/apecloud/kubeblocks/pkg/controller/model" +) + +func BuildShardingCompSpecs(ctx context.Context, cli client.Reader, + namespace, clusterName string, sharding *appsv1.ClusterSharding) (map[string][]*appsv1.ClusterComponentSpec, error) { + shardingComps, err := listShardingComponents(ctx, cli, namespace, clusterName, sharding.Name) + if err != nil { + return nil, err + } + return buildShardingCompSpecs(clusterName, sharding, shardingComps) +} + +func ListShardingComponents(ctx context.Context, cli client.Reader, cluster *appsv1.Cluster, shardingName string) ([]appsv1.Component, error) { + return listShardingComponents(ctx, cli, cluster.Namespace, cluster.Name, shardingName) +} + +func listShardingComponents(ctx context.Context, cli client.Reader, namespace, clusterName, shardingName string) ([]appsv1.Component, error) { + 
compList := &appsv1.ComponentList{} + labels := constant.GetClusterLabels(clusterName, map[string]string{constant.KBAppShardingNameLabelKey: shardingName}) + if err := cli.List(ctx, compList, client.InNamespace(namespace), client.MatchingLabels(labels)); err != nil { + return nil, err + } + return compList.Items, nil +} + +func buildShardingCompSpecs(clusterName string, sharding *appsv1.ClusterSharding, shardingComps []appsv1.Component) (map[string][]*appsv1.ClusterComponentSpec, error) { + if err := precheck(sharding); err != nil { + return nil, err + } + + compNames := make([]string, 0) + for _, comp := range shardingComps { + compNames = append(compNames, comp.Name) + } + generator := &shardIDGenerator{ + clusterName: clusterName, + shardingName: sharding.Name, + running: compNames, + offline: sharding.Offline, + takeOverByTemplate: shardNamesTakeOverByTemplate(clusterName, sharding), + } + + templates := buildShardTemplates(clusterName, sharding, shardingComps) + for i := range templates { + if err := templates[i].align(generator); err != nil { + return nil, err + } + } + + shards := map[string][]*appsv1.ClusterComponentSpec{} + for i, tpl := range templates { + shards[tpl.name] = templates[i].shards + } + return shards, nil +} + +func precheck(sharding *appsv1.ClusterSharding) error { + shards := int32(0) + names, shardIDs := sets.New[string](), sets.New[string]() + for _, tpl := range sharding.ShardTemplates { + if names.Has(tpl.Name) { + return fmt.Errorf("shard template name %s is duplicated", tpl.Name) + } + names.Insert(tpl.Name) + shards += ptr.Deref(tpl.Shards, 0) + for _, id := range tpl.ShardIDs { + if shardIDs.Has(id) { + return fmt.Errorf("shard id %s is duplicated", id) + } + } + shardIDs.Insert(tpl.ShardIDs...) 
+ } + if shards > sharding.Shards { + return fmt.Errorf("the sum of shards in shard templates is greater than the total shards: %d vs %d", sharding.Shards, shards) + } + return nil +} + +func buildShardTemplates(clusterName string, sharding *appsv1.ClusterSharding, shardingComps []appsv1.Component) []*shardTemplate { + mergeWithTemplate := func(tpl *appsv1.ShardTemplate) *appsv1.ClusterComponentSpec { + spec := sharding.Template.DeepCopy() + if tpl.ServiceVersion != nil || tpl.CompDef != nil { + spec.ServiceVersion = ptr.Deref(tpl.ServiceVersion, "") + spec.ComponentDef = ptr.Deref(tpl.CompDef, "") + } + if tpl.Replicas != nil { + spec.Replicas = *tpl.Replicas + } + if tpl.Labels != nil { + spec.Labels = tpl.Labels + } + if tpl.Annotations != nil { + spec.Annotations = tpl.Annotations + } + if tpl.Env != nil { + spec.Env = tpl.Env + } + if tpl.SchedulingPolicy != nil { + spec.SchedulingPolicy = tpl.SchedulingPolicy + } + if tpl.Resources != nil { + spec.Resources = *tpl.Resources + } + if tpl.VolumeClaimTemplates != nil { + spec.VolumeClaimTemplates = tpl.VolumeClaimTemplates + } + if tpl.Instances != nil { + spec.Instances = tpl.Instances + } + if tpl.FlatInstanceOrdinal != nil { + spec.FlatInstanceOrdinal = *tpl.FlatInstanceOrdinal + } + return spec + } + + templates := make([]*shardTemplate, 0) + nameToIndex := map[string]int{} + cnt := int32(0) + for i, tpl := range sharding.ShardTemplates { + if ptr.Deref(tpl.Shards, 0) <= 0 { + continue + } + template := &shardTemplate{ + name: tpl.Name, + count: ptr.Deref(tpl.Shards, 0), + template: mergeWithTemplate(&sharding.ShardTemplates[i]), + shards: make([]*appsv1.ClusterComponentSpec, 0), + } + templates = append(templates, template) + cnt += template.count + nameToIndex[tpl.Name] = len(templates) - 1 + } + if cnt < sharding.Shards { + templates = append(templates, &shardTemplate{ + name: defaultShardTemplateName, + count: sharding.Shards - cnt, + template: &sharding.Template, + shards: 
make([]*appsv1.ClusterComponentSpec, 0), + }) + nameToIndex[defaultShardTemplateName] = len(templates) - 1 + } + + offline := sets.New(sharding.Offline...) + takeOverByTemplate := shardNamesTakeOverByTemplateMap(clusterName, sharding) + for _, comp := range shardingComps { + if model.IsObjectDeleting(&comp) || offline.Has(comp.Name) { + continue + } + tplName := defaultShardTemplateName + if comp.Labels != nil { + if name, ok := comp.Labels[constant.KBAppShardTemplateLabelKey]; ok { + tplName = name + } + } + if tplName == defaultShardTemplateName { + if name, ok := takeOverByTemplate[comp.Name]; ok { + tplName = name + } + } + idx, ok := nameToIndex[tplName] + if !ok { + continue // ignore the component + } + spec := templates[idx].template.DeepCopy() + spec.Name, _ = strings.CutPrefix(comp.Name, fmt.Sprintf("%s-", clusterName)) + templates[idx].shards = append(templates[idx].shards, spec) + } + + slices.SortFunc(templates, func(a, b *shardTemplate) int { + return strings.Compare(a.name, b.name) + }) + + return templates +} + +func shardNamesTakeOverByTemplate(clusterName string, sharding *appsv1.ClusterSharding) []string { + result := make([]string, 0) + for name := range shardNamesTakeOverByTemplateMap(clusterName, sharding) { + result = append(result, name) + } + return result +} + +func shardNamesTakeOverByTemplateMap(clusterName string, sharding *appsv1.ClusterSharding) map[string]string { + result := make(map[string]string) + for _, tpl := range sharding.ShardTemplates { + for _, id := range tpl.ShardIDs { + result[fmt.Sprintf("%s-%s-%s", clusterName, sharding.Name, id)] = tpl.Name + } + } + return result +} diff --git a/pkg/controller/sharding/utils_test.go b/pkg/controller/sharding/utils_test.go new file mode 100644 index 00000000000..9e0685ebbf2 --- /dev/null +++ b/pkg/controller/sharding/utils_test.go @@ -0,0 +1,523 @@ +/* +Copyright (C) 2022-2025 ApeCloud Co., Ltd + +This file is part of KubeBlocks project + +This program is free software: you can 
redistribute it and/or modify +it under the terms of the GNU Affero General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . +*/ + +package sharding + +import ( + "fmt" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/rand" + "k8s.io/utils/ptr" + + appsv1 "github.com/apecloud/kubeblocks/apis/apps/v1" + "github.com/apecloud/kubeblocks/pkg/constant" +) + +var _ = Describe("sharding", func() { + const ( + seed = 1670750000 + ) + + var ( + // first 10 ids + ids = []string{"bvj", "g7c", "gpz", "w8b", "dng", "rhk", "rzn", "ql8", "929", "99n"} + ) + + Context("build sharding comp specs", func() { + const ( + clusterName = "test-cluster" + shardingName = "sharding" + shardTemplateName = "shard-template" + ) + + BeforeEach(func() { + rand.Seed(seed) + }) + + It("precheck - shards", func() { + sharding := &appsv1.ClusterSharding{ + Name: shardingName, + Shards: 2, + Template: appsv1.ClusterComponentSpec{ + Replicas: 3, + }, + ShardTemplates: []appsv1.ShardTemplate{ + { + Name: fmt.Sprintf("%s-0", shardTemplateName), + Shards: ptr.To[int32](2), + Replicas: ptr.To[int32](5), + }, + { + Name: fmt.Sprintf("%s-1", shardTemplateName), + Shards: ptr.To[int32](2), + Replicas: ptr.To[int32](5), + }, + }, + } + + _, err := buildShardingCompSpecs(clusterName, sharding, nil) + Expect(err).ShouldNot(BeNil()) + Expect(err.Error()).Should(ContainSubstring("the sum of shards in shard templates is greater than the total shards")) + }) + + It("precheck - shard 
ids", func() { + runningComp1 := appsv1.Component{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-%s-%s", clusterName, shardingName, ids[9]), + Labels: map[string]string{ + constant.KBAppShardTemplateLabelKey: defaultShardTemplateName, + }, + }, + } + sharding := &appsv1.ClusterSharding{ + Name: shardingName, + Shards: 2, + Template: appsv1.ClusterComponentSpec{ + Replicas: 3, + }, + ShardTemplates: []appsv1.ShardTemplate{ + { + Name: fmt.Sprintf("%s-0", shardTemplateName), + Shards: ptr.To[int32](1), + ShardIDs: []string{ids[9]}, + Replicas: ptr.To[int32](5), + }, + { + Name: fmt.Sprintf("%s-1", shardTemplateName), + Shards: ptr.To[int32](1), + ShardIDs: []string{ids[9]}, + Replicas: ptr.To[int32](5), + }, + }, + } + + _, err := buildShardingCompSpecs(clusterName, sharding, []appsv1.Component{runningComp1}) + Expect(err).ShouldNot(BeNil()) + Expect(err.Error()).Should(ContainSubstring(fmt.Sprintf("shard id %s is duplicated", ids[9]))) + }) + + It("provision", func() { + sharding := &appsv1.ClusterSharding{ + Name: shardingName, + Shards: 2, + Template: appsv1.ClusterComponentSpec{ + Replicas: 3, + }, + } + + specs, err := buildShardingCompSpecs(clusterName, sharding, nil) + Expect(err).Should(Succeed()) + + Expect(len(specs)).Should(BeEquivalentTo(1)) + Expect(specs).Should(HaveKey(defaultShardTemplateName)) + Expect(specs[defaultShardTemplateName]).Should(HaveLen(2)) + Expect(specs[defaultShardTemplateName][0].Name).Should(HaveSuffix(ids[0])) + Expect(specs[defaultShardTemplateName][0].Replicas).Should(Equal(int32(3))) + Expect(specs[defaultShardTemplateName][1].Name).Should(HaveSuffix(ids[1])) + Expect(specs[defaultShardTemplateName][1].Replicas).Should(Equal(int32(3))) + }) + + It("provision with template", func() { + sharding := &appsv1.ClusterSharding{ + Name: shardingName, + Shards: 2, + Template: appsv1.ClusterComponentSpec{ + Replicas: 3, + }, + ShardTemplates: []appsv1.ShardTemplate{ + { + Name: shardTemplateName, + Shards: ptr.To[int32](1), + 
Replicas: ptr.To[int32](5), + }, + }, + } + + specs, err := buildShardingCompSpecs(clusterName, sharding, nil) + Expect(err).Should(Succeed()) + + Expect(len(specs)).Should(BeEquivalentTo(2)) + Expect(specs).Should(And(HaveKey(defaultShardTemplateName), HaveKey(shardTemplateName))) + Expect(specs[defaultShardTemplateName]).Should(HaveLen(1)) + Expect(specs[defaultShardTemplateName][0].Name).Should(HaveSuffix(ids[0])) + Expect(specs[defaultShardTemplateName][0].Replicas).Should(Equal(int32(3))) + Expect(specs[shardTemplateName]).Should(HaveLen(1)) + Expect(specs[shardTemplateName][0].Name).Should(HaveSuffix(ids[1])) + Expect(specs[shardTemplateName][0].Replicas).Should(Equal(int32(5))) + }) + + It("provision with offline", func() { + sharding := &appsv1.ClusterSharding{ + Name: shardingName, + Shards: 2, + Template: appsv1.ClusterComponentSpec{ + Replicas: 3, + }, + ShardTemplates: []appsv1.ShardTemplate{ + { + Name: shardTemplateName, + Shards: ptr.To[int32](1), + Replicas: ptr.To[int32](5), + }, + }, + Offline: []string{fmt.Sprintf("%s-%s-%s", clusterName, shardingName, ids[0])}, + } + + specs, err := buildShardingCompSpecs(clusterName, sharding, nil) + Expect(err).Should(Succeed()) + + Expect(len(specs)).Should(BeEquivalentTo(2)) + Expect(specs).Should(And(HaveKey(defaultShardTemplateName), HaveKey(shardTemplateName))) + Expect(specs[defaultShardTemplateName]).Should(HaveLen(1)) + Expect(specs[defaultShardTemplateName][0].Name).Should(HaveSuffix(ids[1])) // skip offline shard of ids[0] + Expect(specs[defaultShardTemplateName][0].Replicas).Should(Equal(int32(3))) + Expect(specs[shardTemplateName]).Should(HaveLen(1)) + Expect(specs[shardTemplateName][0].Name).Should(HaveSuffix(ids[2])) + Expect(specs[shardTemplateName][0].Replicas).Should(Equal(int32(5))) + }) + + PIt("merge with shard template", func() { + sharding := &appsv1.ClusterSharding{ + Name: shardingName, + Shards: 2, + Template: appsv1.ClusterComponentSpec{ + Replicas: 3, + }, + ShardTemplates: 
[]appsv1.ShardTemplate{ + { + Name: shardTemplateName, + Shards: ptr.To[int32](1), + Replicas: ptr.To[int32](5), + }, + }, + } + + specs, err := buildShardingCompSpecs(clusterName, sharding, nil) + Expect(err).Should(Succeed()) + + Expect(len(specs)).Should(BeEquivalentTo(2)) + Expect(specs).Should(HaveKey(defaultShardTemplateName)) + Expect(specs[defaultShardTemplateName]).Should(HaveLen(1)) + Expect(specs[defaultShardTemplateName][0].Replicas).Should(Equal(int32(3))) + Expect(specs).Should(HaveKey(shardTemplateName)) + Expect(specs[shardTemplateName]).Should(HaveLen(1)) + Expect(specs[shardTemplateName][0].Replicas).Should(Equal(int32(5))) + }) + + It("scale out", func() { + runningComp := appsv1.Component{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-%s-%s", clusterName, shardingName, ids[9]), + Labels: map[string]string{ + constant.KBAppShardTemplateLabelKey: defaultShardTemplateName, + }, + }, + } + sharding := &appsv1.ClusterSharding{ + Name: shardingName, + Shards: 2, + Template: appsv1.ClusterComponentSpec{ + Replicas: 3, + }, + } + + specs, err := buildShardingCompSpecs(clusterName, sharding, []appsv1.Component{runningComp}) + Expect(err).Should(Succeed()) + + Expect(len(specs)).Should(BeEquivalentTo(1)) + Expect(specs).Should(HaveKey(defaultShardTemplateName)) + Expect(specs[defaultShardTemplateName]).Should(HaveLen(2)) + Expect(specs[defaultShardTemplateName][0].Name).Should(HaveSuffix(ids[9])) + Expect(specs[defaultShardTemplateName][1].Name).Should(HaveSuffix(ids[0])) + }) + + It("scale out - shard template", func() { + runningComp := appsv1.Component{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-%s-%s", clusterName, shardingName, ids[9]), + Labels: map[string]string{ + constant.KBAppShardTemplateLabelKey: defaultShardTemplateName, + }, + }, + } + sharding := &appsv1.ClusterSharding{ + Name: shardingName, + Shards: 2, + Template: appsv1.ClusterComponentSpec{ + Replicas: 3, + }, + ShardTemplates: []appsv1.ShardTemplate{ + { + 
Name: shardTemplateName, + Shards: ptr.To[int32](1), + Replicas: ptr.To[int32](5), + }, + }, + } + + specs, err := buildShardingCompSpecs(clusterName, sharding, []appsv1.Component{runningComp}) + Expect(err).Should(Succeed()) + + Expect(len(specs)).Should(BeEquivalentTo(2)) + Expect(specs).Should(And(HaveKey(defaultShardTemplateName), HaveKey(shardTemplateName))) + Expect(specs[defaultShardTemplateName]).Should(HaveLen(1)) + Expect(specs[defaultShardTemplateName][0].Name).Should(HaveSuffix(ids[9])) + Expect(specs[defaultShardTemplateName][0].Replicas).Should(Equal(int32(3))) + Expect(specs[shardTemplateName]).Should(HaveLen(1)) + Expect(specs[shardTemplateName][0].Name).Should(HaveSuffix(ids[0])) + Expect(specs[shardTemplateName][0].Replicas).Should(Equal(int32(5))) + }) + + It("scale in", func() { + runningComp1 := appsv1.Component{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-%s-%s", clusterName, shardingName, ids[8]), + Labels: map[string]string{ + constant.KBAppShardTemplateLabelKey: defaultShardTemplateName, + }, + }, + } + runningComp2 := appsv1.Component{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-%s-%s", clusterName, shardingName, ids[9]), + Labels: map[string]string{ + constant.KBAppShardTemplateLabelKey: defaultShardTemplateName, + }, + }, + } + sharding := &appsv1.ClusterSharding{ + Name: shardingName, + Shards: 1, + Template: appsv1.ClusterComponentSpec{ + Replicas: 3, + }, + } + + specs, err := buildShardingCompSpecs(clusterName, sharding, []appsv1.Component{runningComp1, runningComp2}) + Expect(err).Should(Succeed()) + + Expect(len(specs)).Should(BeEquivalentTo(1)) + Expect(specs).Should(HaveKey(defaultShardTemplateName)) + Expect(specs[defaultShardTemplateName]).Should(HaveLen(1)) + Expect(specs[defaultShardTemplateName][0].Name).Should(HaveSuffix(ids[8])) // runningComp1.Name < runningComp2.Name + }) + + It("scale in - shard template", func() { + runningComp1 := appsv1.Component{ + ObjectMeta: metav1.ObjectMeta{ + Name: 
fmt.Sprintf("%s-%s-%s", clusterName, shardingName, ids[8]), + Labels: map[string]string{ + constant.KBAppShardTemplateLabelKey: shardTemplateName, // shard template + }, + }, + } + runningComp2 := appsv1.Component{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-%s-%s", clusterName, shardingName, ids[9]), + Labels: map[string]string{ + constant.KBAppShardTemplateLabelKey: defaultShardTemplateName, + }, + }, + } + sharding := &appsv1.ClusterSharding{ + Name: shardingName, + Shards: 1, + Template: appsv1.ClusterComponentSpec{ + Replicas: 3, + }, + // the shard template is removed + } + + specs, err := buildShardingCompSpecs(clusterName, sharding, []appsv1.Component{runningComp1, runningComp2}) + Expect(err).Should(Succeed()) + + Expect(len(specs)).Should(BeEquivalentTo(1)) + Expect(specs).Should(HaveKey(defaultShardTemplateName)) + Expect(specs[defaultShardTemplateName]).Should(HaveLen(1)) + Expect(specs[defaultShardTemplateName][0].Name).Should(HaveSuffix(ids[9])) // runningComp1 belongs to the shard template + }) + + It("scale in - offline", func() { + runningComp1 := appsv1.Component{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-%s-%s", clusterName, shardingName, ids[8]), + Labels: map[string]string{ + constant.KBAppShardTemplateLabelKey: defaultShardTemplateName, + }, + }, + } + runningComp2 := appsv1.Component{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-%s-%s", clusterName, shardingName, ids[9]), + Labels: map[string]string{ + constant.KBAppShardTemplateLabelKey: defaultShardTemplateName, + }, + }, + } + sharding := &appsv1.ClusterSharding{ + Name: shardingName, + Shards: 1, + Template: appsv1.ClusterComponentSpec{ + Replicas: 3, + }, + Offline: []string{runningComp1.Name}, + } + + specs, err := buildShardingCompSpecs(clusterName, sharding, []appsv1.Component{runningComp1, runningComp2}) + Expect(err).Should(Succeed()) + + Expect(len(specs)).Should(BeEquivalentTo(1)) + Expect(specs).Should(HaveKey(defaultShardTemplateName)) + 
Expect(specs[defaultShardTemplateName]).Should(HaveLen(1)) + Expect(specs[defaultShardTemplateName][0].Name).Should(HaveSuffix(ids[9])) // runningComp1 has been offline + }) + + It("scale in & out", func() { + runningComp1 := appsv1.Component{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-%s-%s", clusterName, shardingName, ids[8]), + Labels: map[string]string{ + constant.KBAppShardTemplateLabelKey: defaultShardTemplateName, + }, + }, + } + runningComp2 := appsv1.Component{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-%s-%s", clusterName, shardingName, ids[9]), + Labels: map[string]string{ + constant.KBAppShardTemplateLabelKey: defaultShardTemplateName, + }, + }, + } + sharding := &appsv1.ClusterSharding{ + Name: shardingName, + Shards: 2, // still 2 shards + Template: appsv1.ClusterComponentSpec{ + Replicas: 3, + }, + Offline: []string{runningComp1.Name}, // but shard 1 is offline + } + + specs, err := buildShardingCompSpecs(clusterName, sharding, []appsv1.Component{runningComp1, runningComp2}) + Expect(err).Should(Succeed()) + + Expect(len(specs)).Should(BeEquivalentTo(1)) + Expect(specs).Should(HaveKey(defaultShardTemplateName)) + Expect(specs[defaultShardTemplateName]).Should(HaveLen(2)) + Expect(specs[defaultShardTemplateName][0].Name).Should(Or(HaveSuffix(ids[9]), HaveSuffix(ids[0]))) + Expect(specs[defaultShardTemplateName][1].Name).Should(Or(HaveSuffix(ids[9]), HaveSuffix(ids[0]))) + }) + + It("scale in & out - shard template", func() { + runningComp1 := appsv1.Component{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-%s-%s", clusterName, shardingName, ids[8]), + Labels: map[string]string{ + constant.KBAppShardTemplateLabelKey: defaultShardTemplateName, + }, + }, + } + runningComp2 := appsv1.Component{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-%s-%s", clusterName, shardingName, ids[9]), + Labels: map[string]string{ + constant.KBAppShardTemplateLabelKey: shardTemplateName, + }, + }, + } + sharding := 
&appsv1.ClusterSharding{ + Name: shardingName, + Shards: 2, // still 2 shards + Template: appsv1.ClusterComponentSpec{ + Replicas: 3, + }, + ShardTemplates: []appsv1.ShardTemplate{ + { + Name: shardTemplateName, + Shards: ptr.To[int32](1), + Replicas: ptr.To[int32](5), + }, + }, + Offline: []string{runningComp1.Name, runningComp2.Name}, // both shard 1 and shard 2 are offline + } + + specs, err := buildShardingCompSpecs(clusterName, sharding, []appsv1.Component{runningComp1, runningComp2}) + Expect(err).Should(Succeed()) + + Expect(len(specs)).Should(BeEquivalentTo(2)) + Expect(specs).Should(And(HaveKey(defaultShardTemplateName), HaveKey(shardTemplateName))) + Expect(specs[defaultShardTemplateName]).Should(HaveLen(1)) + Expect(specs[defaultShardTemplateName][0].Name).Should(HaveSuffix(ids[0])) + Expect(specs[defaultShardTemplateName][0].Replicas).Should(Equal(int32(3))) + Expect(specs[shardTemplateName]).Should(HaveLen(1)) + Expect(specs[shardTemplateName][0].Name).Should(HaveSuffix(ids[1])) + Expect(specs[shardTemplateName][0].Replicas).Should(Equal(int32(5))) + }) + + It("take over", func() { + runningComp1 := appsv1.Component{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-%s-%s", clusterName, shardingName, ids[8]), + Labels: map[string]string{ + constant.KBAppShardTemplateLabelKey: defaultShardTemplateName, + }, + }, + } + runningComp2 := appsv1.Component{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-%s-%s", clusterName, shardingName, ids[9]), + Labels: map[string]string{ + constant.KBAppShardTemplateLabelKey: defaultShardTemplateName, + }, + }, + } + sharding := &appsv1.ClusterSharding{ + Name: shardingName, + Shards: 2, + Template: appsv1.ClusterComponentSpec{ + Replicas: 3, + }, + ShardTemplates: []appsv1.ShardTemplate{ + { + Name: shardTemplateName, + Shards: ptr.To[int32](1), + ShardIDs: []string{ids[9]}, // take over + Replicas: ptr.To[int32](5), + }, + }, + } + + specs, err := buildShardingCompSpecs(clusterName, sharding, 
[]appsv1.Component{runningComp1, runningComp2}) + Expect(err).Should(Succeed()) + + Expect(len(specs)).Should(BeEquivalentTo(2)) + Expect(specs).Should(And(HaveKey(defaultShardTemplateName), HaveKey(shardTemplateName))) + Expect(specs[defaultShardTemplateName]).Should(HaveLen(1)) + Expect(specs[defaultShardTemplateName][0].Name).Should(HaveSuffix(ids[8])) + Expect(specs[defaultShardTemplateName][0].Replicas).Should(Equal(int32(3))) + Expect(specs[shardTemplateName]).Should(HaveLen(1)) + Expect(specs[shardTemplateName][0].Name).Should(HaveSuffix(ids[9])) + Expect(specs[shardTemplateName][0].Replicas).Should(Equal(int32(5))) + }) + }) +}) diff --git a/pkg/controllerutil/cluster_utils.go b/pkg/controllerutil/cluster_utils.go deleted file mode 100644 index bd899ab9db1..00000000000 --- a/pkg/controllerutil/cluster_utils.go +++ /dev/null @@ -1,49 +0,0 @@ -/* -Copyright (C) 2022-2025 ApeCloud Co., Ltd - -This file is part of KubeBlocks project - -This program is free software: you can redistribute it and/or modify -it under the terms of the GNU Affero General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. - -This program is distributed in the hope that it will be useful -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU Affero General Public License for more details. - -You should have received a copy of the GNU Affero General Public License -along with this program. If not, see . 
-*/ - -package controllerutil - -import ( - "context" - - "sigs.k8s.io/controller-runtime/pkg/client" - - appsv1 "github.com/apecloud/kubeblocks/apis/apps/v1" -) - -func GetComponentSpecByName(ctx context.Context, cli client.Reader, - cluster *appsv1.Cluster, componentName string) (*appsv1.ClusterComponentSpec, error) { - compSpec := cluster.Spec.GetComponentByName(componentName) - if compSpec != nil { - return compSpec, nil - } - for _, sharding := range cluster.Spec.Shardings { - shardingCompList, err := listAllShardingCompSpecs(ctx, cli, cluster, &sharding) - if err != nil { - return nil, err - } - for i, shardingComp := range shardingCompList { - if shardingComp.Name == componentName { - compSpec = shardingCompList[i] - return compSpec, nil - } - } - } - return nil, nil -} diff --git a/pkg/controllerutil/cluster_utils_test.go b/pkg/controllerutil/cluster_utils_test.go deleted file mode 100644 index d15c8e8f283..00000000000 --- a/pkg/controllerutil/cluster_utils_test.go +++ /dev/null @@ -1,108 +0,0 @@ -/* -Copyright (C) 2022-2025 ApeCloud Co., Ltd - -This file is part of KubeBlocks project - -This program is free software: you can redistribute it and/or modify -it under the terms of the GNU Affero General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. - -This program is distributed in the hope that it will be useful -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU Affero General Public License for more details. - -You should have received a copy of the GNU Affero General Public License -along with this program. If not, see . -*/ - -package controllerutil - -import ( - . "github.com/onsi/ginkgo/v2" - . 
"github.com/onsi/gomega" - - "sigs.k8s.io/controller-runtime/pkg/client" - - appsv1 "github.com/apecloud/kubeblocks/apis/apps/v1" - "github.com/apecloud/kubeblocks/pkg/constant" - "github.com/apecloud/kubeblocks/pkg/generics" - testapps "github.com/apecloud/kubeblocks/pkg/testutil/apps" -) - -var _ = Describe("cluster utils test", func() { - - // Cleanups - cleanEnv := func() { - // must wait till resources deleted and no longer existed before the testcases start, - // otherwise if later it needs to create some new resource objects with the same name, - // in race conditions, it will find the existence of old objects, resulting failure to - // create the new objects. - By("clean resources") - - // delete cluster(and all dependent sub-resources), cluster definition - testapps.ClearClusterResourcesWithRemoveFinalizerOption(&testCtx) - - // delete rest mocked objects - inNS := client.InNamespace(testCtx.DefaultNamespace) - ml := client.HasLabels{testCtx.TestObjLabelKey} - // namespaced - testapps.ClearResourcesWithRemoveFinalizerOption(&testCtx, generics.ComponentSignature, true, inNS, ml) - testapps.ClearResourcesWithRemoveFinalizerOption(&testCtx, generics.PodSignature, true, inNS, ml) - testapps.ClearResourcesWithRemoveFinalizerOption(&testCtx, generics.ServiceSignature, true, inNS, ml) - } - - Context("cluster utils test", func() { - const ( - compDefName = "test-compdef" - clusterName = "test-cls" - mysqlCompName = "mysql" - mysqlShardingName = "mysql-sharding" - mysqlShardingCompName = "mysql-sharding-comp" - ) - - var ( - cluster *appsv1.Cluster - ) - - BeforeEach(func() { - cleanEnv() - - testapps.NewComponentDefinitionFactory(compDefName).SetDefaultSpec().GetObject() - cluster = testapps.NewClusterFactory(testCtx.DefaultNamespace, clusterName, ""). - SetUID(clusterName). - AddComponent(mysqlCompName, compDefName). - AddSharding(mysqlShardingName, "", compDefName). - SetShards(0). 
- Create(&testCtx).GetObject() - }) - - It("get original or generated cluster component spec test", func() { - compSpec, err := GetComponentSpecByName(testCtx.Ctx, k8sClient, cluster, mysqlCompName) - Expect(err).ShouldNot(HaveOccurred()) - Expect(compSpec).ShouldNot(BeNil()) - Expect(compSpec.Name).Should(Equal(mysqlCompName)) - - compSpec, err = GetComponentSpecByName(testCtx.Ctx, k8sClient, cluster, "fakeCompName") - Expect(err).ShouldNot(HaveOccurred()) - Expect(compSpec).Should(BeNil()) - - By("create mock sharding component object") - mockCompObj := testapps.NewComponentFactory(testCtx.DefaultNamespace, cluster.Name+"-"+mysqlShardingCompName, ""). - AddAnnotations(constant.KBAppClusterUIDKey, string(cluster.UID)). - AddLabels(constant.AppInstanceLabelKey, cluster.Name). - AddLabels(constant.KBAppShardingNameLabelKey, mysqlShardingName). - SetReplicas(1). - Create(&testCtx). - GetObject() - compKey := client.ObjectKeyFromObject(mockCompObj) - Eventually(testapps.CheckObjExists(&testCtx, compKey, &appsv1.Component{}, true)).Should(Succeed()) - - compSpec, err = GetComponentSpecByName(testCtx.Ctx, k8sClient, cluster, mysqlShardingCompName) - Expect(err).ShouldNot(HaveOccurred()) - Expect(compSpec).ShouldNot(BeNil()) - Expect(compSpec.Name).Should(Equal(mysqlShardingCompName)) - }) - }) -}) diff --git a/pkg/controllerutil/sharding_utils_test.go b/pkg/controllerutil/sharding_utils_test.go deleted file mode 100644 index f8b67e19be3..00000000000 --- a/pkg/controllerutil/sharding_utils_test.go +++ /dev/null @@ -1,106 +0,0 @@ -/* -Copyright (C) 2022-2025 ApeCloud Co., Ltd - -This file is part of KubeBlocks project - -This program is free software: you can redistribute it and/or modify -it under the terms of the GNU Affero General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. 
- -This program is distributed in the hope that it will be useful -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU Affero General Public License for more details. - -You should have received a copy of the GNU Affero General Public License -along with this program. If not, see . -*/ - -package controllerutil - -import ( - . "github.com/onsi/ginkgo/v2" - . "github.com/onsi/gomega" - - "sigs.k8s.io/controller-runtime/pkg/client" - - appsv1 "github.com/apecloud/kubeblocks/apis/apps/v1" - "github.com/apecloud/kubeblocks/pkg/constant" - "github.com/apecloud/kubeblocks/pkg/generics" - testapps "github.com/apecloud/kubeblocks/pkg/testutil/apps" -) - -var _ = Describe("cluster shard component", func() { - - // Cleanups - cleanEnv := func() { - // must wait till resources deleted and no longer existed before the testcases start, - // otherwise if later it needs to create some new resource objects with the same name, - // in race conditions, it will find the existence of old objects, resulting failure to - // create the new objects. 
- By("clean resources") - - // delete cluster(and all dependent sub-resources), cluster definition - testapps.ClearClusterResourcesWithRemoveFinalizerOption(&testCtx) - - // delete rest mocked objects - inNS := client.InNamespace(testCtx.DefaultNamespace) - ml := client.HasLabels{testCtx.TestObjLabelKey} - // namespaced - testapps.ClearResourcesWithRemoveFinalizerOption(&testCtx, generics.ComponentSignature, true, inNS, ml) - testapps.ClearResourcesWithRemoveFinalizerOption(&testCtx, generics.PodSignature, true, inNS, ml) - testapps.ClearResourcesWithRemoveFinalizerOption(&testCtx, generics.ServiceSignature, true, inNS, ml) - } - - Context("cluster shard component", func() { - const ( - compDefName = "test-compdef" - clusterName = "test-cluster" - mysqlCompName = "mysql" - mysqlShardingName = "mysql-sharding" - mysqlShardingCompName = "mysql-sharding-comp" - ) - - var ( - cluster *appsv1.Cluster - ) - - BeforeEach(func() { - cleanEnv() - - testapps.NewComponentDefinitionFactory(compDefName).SetDefaultSpec().GetObject() - cluster = testapps.NewClusterFactory(testCtx.DefaultNamespace, clusterName, ""). - SetUID(clusterName). - AddComponent(mysqlCompName, compDefName). - AddSharding(mysqlShardingName, "", compDefName). - SetShards(1). - Create(&testCtx).GetObject() - }) - - It("generate sharding component spec test", func() { - By("create mock sharding component object") - mockCompObj := testapps.NewComponentFactory(testCtx.DefaultNamespace, cluster.Name+"-"+mysqlShardingCompName, ""). - AddAnnotations(constant.KBAppClusterUIDKey, string(cluster.UID)). - AddLabels(constant.AppInstanceLabelKey, cluster.Name). - AddLabels(constant.KBAppShardingNameLabelKey, mysqlShardingName). - SetReplicas(1). - Create(&testCtx). 
- GetObject() - compKey := client.ObjectKeyFromObject(mockCompObj) - Eventually(testapps.CheckObjExists(&testCtx, compKey, &appsv1.Component{}, true)).Should(Succeed()) - - sharding := &appsv1.ClusterSharding{ - Template: appsv1.ClusterComponentSpec{ - Replicas: 2, - }, - Name: mysqlShardingName, - Shards: 2, - } - shardingCompSpecList, err := GenShardingCompSpecList(testCtx.Ctx, k8sClient, cluster, sharding) - Expect(err).ShouldNot(HaveOccurred()) - Expect(shardingCompSpecList).ShouldNot(BeNil()) - Expect(len(shardingCompSpecList)).Should(BeEquivalentTo(2)) - }) - }) -}) diff --git a/pkg/operations/custom.go b/pkg/operations/custom.go index afe64eb2caa..06052b84ce6 100644 --- a/pkg/operations/custom.go +++ b/pkg/operations/custom.go @@ -36,6 +36,7 @@ import ( "github.com/apecloud/kubeblocks/pkg/common" "github.com/apecloud/kubeblocks/pkg/constant" "github.com/apecloud/kubeblocks/pkg/controller/component" + "github.com/apecloud/kubeblocks/pkg/controller/sharding" intctrlutil "github.com/apecloud/kubeblocks/pkg/controllerutil" ) @@ -137,7 +138,7 @@ func (c CustomOpsHandler) listComponents(reqCtx intctrlutil.RequestCtx, } return []appsv1.Component{*comp}, nil } - return intctrlutil.ListShardingComponents(reqCtx.Ctx, cli, cluster, componentName) + return sharding.ListShardingComponents(reqCtx.Ctx, cli, cluster, componentName) } func (c CustomOpsHandler) checkExpression(reqCtx intctrlutil.RequestCtx, diff --git a/pkg/operations/custom_test.go b/pkg/operations/custom_test.go index c4a05db6b75..1e60302e779 100644 --- a/pkg/operations/custom_test.go +++ b/pkg/operations/custom_test.go @@ -24,17 +24,18 @@ import ( . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/rand" "sigs.k8s.io/controller-runtime/pkg/client" logf "sigs.k8s.io/controller-runtime/pkg/log" appsv1 "github.com/apecloud/kubeblocks/apis/apps/v1" opsv1alpha1 "github.com/apecloud/kubeblocks/apis/operations/v1alpha1" - "github.com/apecloud/kubeblocks/pkg/common" "github.com/apecloud/kubeblocks/pkg/constant" + "github.com/apecloud/kubeblocks/pkg/controller/sharding" intctrlutil "github.com/apecloud/kubeblocks/pkg/controllerutil" "github.com/apecloud/kubeblocks/pkg/generics" testapps "github.com/apecloud/kubeblocks/pkg/testutil/apps" @@ -356,7 +357,7 @@ var _ = Describe("CustomOps", func() { // create a sharding component shardingNamePrefix := constant.GenerateClusterComponentName(cluster.Name, defaultCompName) - shardingCompName := common.SimpleNameGenerator.GenerateName(shardingNamePrefix) + shardingCompName := fmt.Sprintf("%s-%s", shardingNamePrefix, rand.String(sharding.ShardIDLength)) compObj = testapps.NewComponentFactory(testCtx.DefaultNamespace, shardingCompName, compDefName). AddLabels(constant.AppInstanceLabelKey, cluster.Name). AddLabels(constant.KBAppClusterUIDKey, string(cluster.UID)). diff --git a/pkg/operations/horizontal_scaling_test.go b/pkg/operations/horizontal_scaling_test.go index 70015c77701..a7b5194e6fc 100644 --- a/pkg/operations/horizontal_scaling_test.go +++ b/pkg/operations/horizontal_scaling_test.go @@ -673,6 +673,7 @@ var _ = Describe("HorizontalScaling OpsRequest", func() { By("mock the new components") createComponent := func(compName string) *appsv1.Component { comp := testapps.NewComponentFactory(testCtx.DefaultNamespace, opsRes.Cluster.Name+"-"+compName, compDefName). + AddLabels(constant.AppManagedByLabelKey, constant.AppName). AddLabels(constant.AppInstanceLabelKey, opsRes.Cluster.Name). 
AddLabels(constant.KBAppClusterUIDKey, string(opsRes.Cluster.UID)). AddLabels(constant.KBAppShardingNameLabelKey, secondaryCompName). diff --git a/pkg/operations/ops_comp_helper.go b/pkg/operations/ops_comp_helper.go index e3904cceace..22ae18502ca 100644 --- a/pkg/operations/ops_comp_helper.go +++ b/pkg/operations/ops_comp_helper.go @@ -31,6 +31,7 @@ import ( appsv1 "github.com/apecloud/kubeblocks/apis/apps/v1" opsv1alpha1 "github.com/apecloud/kubeblocks/apis/operations/v1alpha1" "github.com/apecloud/kubeblocks/pkg/constant" + "github.com/apecloud/kubeblocks/pkg/controller/sharding" intctrlutil "github.com/apecloud/kubeblocks/pkg/controllerutil" ) @@ -198,25 +199,25 @@ func (c componentOpsHelper) buildProgressResources(reqCtx intctrlutil.RequestCtx // 2. handle the sharding status. for i := range opsRes.Cluster.Spec.Shardings { - sharding := opsRes.Cluster.Spec.Shardings[i] - compOps, ok := c.getComponentOps(sharding.Name) + spec := opsRes.Cluster.Spec.Shardings[i] + compOps, ok := c.getComponentOps(spec.Name) if !ok { continue } if c.isHScaleShards(opsRes.OpsRequest, compOps) { - if err := setProgressResource(&sharding.Template, compOps, "", &sharding.Shards); err != nil { + if err := setProgressResource(&spec.Template, compOps, "", &spec.Shards); err != nil { return nil, err } continue } // handle the progress of the components of the sharding. 
- shardingComps, err := intctrlutil.ListShardingComponents(reqCtx.Ctx, cli, opsRes.Cluster, sharding.Name) + shardingComps, err := sharding.ListShardingComponents(reqCtx.Ctx, cli, opsRes.Cluster, spec.Name) if err != nil { return nil, err } for j := range shardingComps { - if err = setProgressResource(&sharding.Template, compOps, - shardingComps[j].Labels[constant.KBAppComponentLabelKey], &sharding.Shards); err != nil { + if err = setProgressResource(&spec.Template, compOps, + shardingComps[j].Labels[constant.KBAppComponentLabelKey], &spec.Shards); err != nil { return nil, err } } diff --git a/pkg/operations/ops_progress_util.go b/pkg/operations/ops_progress_util.go index 4a20ec26cfc..5b463dce76b 100644 --- a/pkg/operations/ops_progress_util.go +++ b/pkg/operations/ops_progress_util.go @@ -39,6 +39,7 @@ import ( "github.com/apecloud/kubeblocks/pkg/constant" intctrlcomp "github.com/apecloud/kubeblocks/pkg/controller/component" "github.com/apecloud/kubeblocks/pkg/controller/instanceset" + "github.com/apecloud/kubeblocks/pkg/controller/sharding" intctrlutil "github.com/apecloud/kubeblocks/pkg/controllerutil" ) @@ -560,7 +561,7 @@ func handleScaleOutForShards(reqCtx intctrlutil.RequestCtx, opsRes *OpsResource, pgRes *progressResource, compStatus *opsv1alpha1.OpsRequestComponentStatus) (int32, error) { - compList, err := intctrlutil.ListShardingComponents(reqCtx.Ctx, cli, opsRes.Cluster, pgRes.compOps.GetComponentName()) + compList, err := sharding.ListShardingComponents(reqCtx.Ctx, cli, opsRes.Cluster, pgRes.compOps.GetComponentName()) if err != nil { return 0, err } @@ -591,7 +592,7 @@ func handleScaleInForShards(reqCtx intctrlutil.RequestCtx, pgRes *progressResource, compStatus *opsv1alpha1.OpsRequestComponentStatus, updateShards int32) (int32, error) { - compList, err := intctrlutil.ListShardingComponents(reqCtx.Ctx, cli, opsRes.Cluster, pgRes.compOps.GetComponentName()) + compList, err := sharding.ListShardingComponents(reqCtx.Ctx, cli, opsRes.Cluster, 
pgRes.compOps.GetComponentName()) if err != nil { return 0, err } diff --git a/pkg/operations/volume_expansion.go b/pkg/operations/volume_expansion.go index d96a0e9da6e..b2cb172afcf 100644 --- a/pkg/operations/volume_expansion.go +++ b/pkg/operations/volume_expansion.go @@ -36,6 +36,7 @@ import ( opsv1alpha1 "github.com/apecloud/kubeblocks/apis/operations/v1alpha1" "github.com/apecloud/kubeblocks/pkg/constant" "github.com/apecloud/kubeblocks/pkg/controller/instanceset" + "github.com/apecloud/kubeblocks/pkg/controller/sharding" intctrlutil "github.com/apecloud/kubeblocks/pkg/controllerutil" ) @@ -146,17 +147,17 @@ func (ve volumeExpansionOpsHandler) ReconcileAction(reqCtx intctrlutil.RequestCt } setVeHelpers(compSpec, compOps, compSpec.Name) } - for _, sharding := range opsRes.Cluster.Spec.Shardings { - compOps, ok := compOpsHelper.componentOpsSet[sharding.Name] + for _, spec := range opsRes.Cluster.Spec.Shardings { + compOps, ok := compOpsHelper.componentOpsSet[spec.Name] if !ok { continue } - shardingComps, err := intctrlutil.ListShardingComponents(reqCtx.Ctx, cli, opsRes.Cluster, sharding.Name) + shardingComps, err := sharding.ListShardingComponents(reqCtx.Ctx, cli, opsRes.Cluster, spec.Name) if err != nil { return opsRequestPhase, 0, err } for _, v := range shardingComps { - setVeHelpers(sharding.Template, compOps, v.Labels[constant.KBAppComponentLabelKey]) + setVeHelpers(spec.Template, compOps, v.Labels[constant.KBAppComponentLabelKey]) } } // reconcile the status.components. when the volume expansion is successful,