diff --git a/apis/apps/v1/types.go b/apis/apps/v1/types.go
index d751a930181..c8e899f5577 100644
--- a/apis/apps/v1/types.go
+++ b/apis/apps/v1/types.go
@@ -747,7 +747,7 @@ type TLSSecretRef struct {
// InstanceTemplate allows customization of individual replica configurations in a Component.
type InstanceTemplate struct {
// Name specifies the unique name of the instance Pod created using this InstanceTemplate.
- // This name is constructed by concatenating the Component's name, the template's name, and the instance's ordinal
+ // This name can't be empty. It is constructed by concatenating the Component's name, the template's name, and the instance's ordinal
// using the pattern: $(cluster.name)-$(component.name)-$(template.name)-$(ordinal). Ordinals start from 0.
// The specified name overrides any default naming conventions or patterns.
//
@@ -768,6 +768,7 @@ type InstanceTemplate struct {
// Specifies the desired Ordinals of this InstanceTemplate.
// The Ordinals used to specify the ordinal of the instance (pod) names to be generated under this InstanceTemplate.
+ // If Ordinals are defined, the number of ordinals must be equal to the number of replicas of this template.
//
// For example, if Ordinals is {ranges: [{start: 0, end: 1}], discrete: [7]},
// then the instance names generated under this InstanceTemplate would be
diff --git a/apis/workloads/v1/instanceset_types.go b/apis/workloads/v1/instanceset_types.go
index fcea1e5fcb4..7f21a1e9a0a 100644
--- a/apis/workloads/v1/instanceset_types.go
+++ b/apis/workloads/v1/instanceset_types.go
@@ -82,6 +82,7 @@ type InstanceSetSpec struct {
// Specifies the desired Ordinals of the default template.
// The Ordinals used to specify the ordinal of the instance (pod) names to be generated under the default template.
+ // If Ordinals are defined, the number of ordinals must be equal to the number of replicas of the default template.
//
// For example, if Ordinals is {ranges: [{start: 0, end: 1}], discrete: [7]},
// then the instance names generated under the default template would be
@@ -266,6 +267,9 @@ type InstanceSetStatus struct {
// [replicas-updatedReplicas,replicas)
UpdateRevision string `json:"updateRevision,omitempty"`
+ // FIXME: unify with InstanceStatus?
+ CurrentInstances CurrentInstances `json:"currentInstances,omitempty"`
+
// Represents the latest available observations of an instanceset's current state.
// Known .status.conditions.type are: "InstanceFailure", "InstanceReady"
//
@@ -433,6 +437,12 @@ type InstanceConfigStatus struct {
Generation int64 `json:"generation"`
}
+// CurrentInstances maps template names to the pods currently owned by each template.
+// The key is the template name (the default template uses an empty name); the value is a list of pod ordinals.
+// Only the ordinals are stored, rather than full pod names, to save space.
+// Each list is always sorted by ordinal in ascending order.
+type CurrentInstances map[string][]int32
+
// InstanceTemplateStatus aggregates the status of replicas for each InstanceTemplate
type InstanceTemplateStatus struct {
// Name, the name of the InstanceTemplate.
diff --git a/pkg/controller/instanceset/in_place_update_util.go b/pkg/controller/instanceset/in_place_update_util.go
index 5b824079fab..1ffac997f42 100644
--- a/pkg/controller/instanceset/in_place_update_util.go
+++ b/pkg/controller/instanceset/in_place_update_util.go
@@ -30,6 +30,7 @@ import (
workloads "github.com/apecloud/kubeblocks/apis/workloads/v1"
"github.com/apecloud/kubeblocks/pkg/constant"
+ "github.com/apecloud/kubeblocks/pkg/controller/instanceset/instancetemplate"
intctrlutil "github.com/apecloud/kubeblocks/pkg/controllerutil"
viper "github.com/apecloud/kubeblocks/pkg/viperx"
)
@@ -294,23 +295,21 @@ func getPodUpdatePolicy(its *workloads.InstanceSet, pod *corev1.Pod) (PodUpdateP
return RecreatePolicy, nil
}
- itsExt, err := buildInstanceSetExt(its, nil)
+ // FIXME: compressed templates
+ itsExt, err := instancetemplate.BuildInstanceSetExt(its, nil)
if err != nil {
return NoOpsPolicy, err
}
- templateList := buildInstanceTemplateExts(itsExt)
- parentName, _ := ParseParentNameAndOrdinal(pod.Name)
- templateName, _ := strings.CutPrefix(parentName, its.Name)
- if len(templateName) > 0 {
- templateName, _ = strings.CutPrefix(templateName, "-")
+ instance2TemplateMap, err := instancetemplate.BuildInstanceName2TemplateMap(itsExt)
+ if err != nil {
+ return NoOpsPolicy, err
}
- index := slices.IndexFunc(templateList, func(templateExt *instanceTemplateExt) bool {
- return templateName == templateExt.Name
- })
- if index < 0 {
+ template, ok := instance2TemplateMap[pod.Name]
+ if !ok {
return NoOpsPolicy, fmt.Errorf("no corresponding template found for instance %s", pod.Name)
}
- inst, err := buildInstanceByTemplate(pod.Name, templateList[index], its, getPodRevision(pod))
+
+ inst, err := buildInstanceByTemplate(pod.Name, template, its, getPodRevision(pod))
if err != nil {
return NoOpsPolicy, err
}
diff --git a/pkg/controller/instanceset/instance_template_util.go b/pkg/controller/instanceset/instance_template_util.go
index b173da3d7f3..f3cd94eb22f 100644
--- a/pkg/controller/instanceset/instance_template_util.go
+++ b/pkg/controller/instanceset/instance_template_util.go
@@ -20,80 +20,28 @@ along with this program. If not, see .
package instanceset
import (
- corev1 "k8s.io/api/core/v1"
-
- workloads "github.com/apecloud/kubeblocks/apis/workloads/v1"
+ "github.com/apecloud/kubeblocks/pkg/controller/instanceset/instancetemplate"
)
// TODO: remove these after extract the Schema of the API types from Kubeblocks into a separate Go package.
// InstanceSetExt serves as a Public Struct,
// used as the type for the input parameters of BuildInstanceTemplateExts.
-type InstanceSetExt struct {
- Its *workloads.InstanceSet
- InstanceTemplates []*workloads.InstanceTemplate
-}
+type InstanceSetExt = instancetemplate.InstanceSetExt
// InstanceTemplateExt serves as a Public Struct,
// used as the type for the construction results returned by BuildInstanceTemplateExts.
-type InstanceTemplateExt struct {
- Name string
- Replicas int32
- corev1.PodTemplateSpec
- VolumeClaimTemplates []corev1.PersistentVolumeClaim
-}
+type InstanceTemplateExt = instancetemplate.InstanceTemplateExt
// BuildInstanceName2TemplateMap serves as a Public API, through which users can obtain InstanceName2TemplateMap objects
// processed by the buildInstanceName2TemplateMap function.
-func BuildInstanceName2TemplateMap(itsExt *InstanceSetExt) (map[string]*instanceTemplateExt, error) {
- instanceTemplateList := buildInstanceTemplateExts(&instanceSetExt{
- its: itsExt.Its,
- instanceTemplates: itsExt.InstanceTemplates,
- })
- allNameTemplateMap := make(map[string]*instanceTemplateExt)
- var instanceNameList []string
- for _, template := range instanceTemplateList {
- ordinalList, err := GetOrdinalListByTemplateName(itsExt.Its, template.Name)
- if err != nil {
- return nil, err
- }
- instanceNames, err := GenerateInstanceNamesFromTemplate(itsExt.Its.Name, template.Name, template.Replicas, itsExt.Its.Spec.OfflineInstances, ordinalList)
- if err != nil {
- return nil, err
- }
- instanceNameList = append(instanceNameList, instanceNames...)
- for _, name := range instanceNames {
- allNameTemplateMap[name] = template
- }
- }
- // validate duplicate pod names
- getNameFunc := func(n string) string {
- return n
- }
- if err := ValidateDupInstanceNames(instanceNameList, getNameFunc); err != nil {
- return nil, err
- }
-
- return allNameTemplateMap, nil
-}
+var BuildInstanceName2TemplateMap = instancetemplate.BuildInstanceName2TemplateMap
// BuildInstanceTemplateExts serves as a Public API, through which users can obtain InstanceTemplateExt objects
// processed by the buildInstanceTemplateExts function.
// Its main purpose is to acquire the PodTemplate rendered by InstanceTemplate.
-func BuildInstanceTemplateExts(itsExt *InstanceSetExt) []*InstanceTemplateExt {
- itsExts := buildInstanceTemplateExts(&instanceSetExt{
- its: itsExt.Its,
- instanceTemplates: itsExt.InstanceTemplates,
- })
- var instanceTemplateExts []*InstanceTemplateExt
- for _, itsExt := range itsExts {
- instanceTemplateExts = append(instanceTemplateExts, (*InstanceTemplateExt)(itsExt))
- }
- return instanceTemplateExts
-}
+var BuildInstanceTemplateExts = instancetemplate.BuildInstanceTemplateExts
// BuildInstanceTemplates serves as a Public API, allowing users to construct InstanceTemplates.
// The constructed InstanceTemplates can be used as part of the input for BuildInstanceTemplateExts.
-func BuildInstanceTemplates(totalReplicas int32, instances []workloads.InstanceTemplate, instancesCompressed *corev1.ConfigMap) []*workloads.InstanceTemplate {
- return buildInstanceTemplates(totalReplicas, instances, instancesCompressed)
-}
+var BuildInstanceTemplates = instancetemplate.BuildInstanceTemplates
diff --git a/pkg/controller/instanceset/instance_util.go b/pkg/controller/instanceset/instance_util.go
index 1c0fe0e98f1..4c52175d00d 100644
--- a/pkg/controller/instanceset/instance_util.go
+++ b/pkg/controller/instanceset/instance_util.go
@@ -43,6 +43,7 @@ import (
workloads "github.com/apecloud/kubeblocks/apis/workloads/v1"
"github.com/apecloud/kubeblocks/pkg/constant"
"github.com/apecloud/kubeblocks/pkg/controller/builder"
+ "github.com/apecloud/kubeblocks/pkg/controller/instanceset/instancetemplate"
"github.com/apecloud/kubeblocks/pkg/controller/kubebuilderx"
"github.com/apecloud/kubeblocks/pkg/controller/model"
intctrlutil "github.com/apecloud/kubeblocks/pkg/controllerutil"
@@ -61,11 +62,6 @@ type instanceTemplateExt struct {
VolumeClaimTemplates []corev1.PersistentVolumeClaim
}
-type instanceSetExt struct {
- its *workloads.InstanceSet
- instanceTemplates []*workloads.InstanceTemplate
-}
-
var (
reader *zstd.Decoder
writer *zstd.Encoder
@@ -311,35 +307,6 @@ func ValidateDupInstanceNames[T any](instances []T, getNameFunc func(item T) str
return nil
}
-func buildInstanceName2TemplateMap(itsExt *instanceSetExt) (map[string]*instanceTemplateExt, error) {
- instanceTemplateList := buildInstanceTemplateExts(itsExt)
- allNameTemplateMap := make(map[string]*instanceTemplateExt)
- var instanceNameList []string
- for _, template := range instanceTemplateList {
- ordinalList, err := GetOrdinalListByTemplateName(itsExt.its, template.Name)
- if err != nil {
- return nil, err
- }
- instanceNames, err := GenerateInstanceNamesFromTemplate(itsExt.its.Name, template.Name, template.Replicas, itsExt.its.Spec.OfflineInstances, ordinalList)
- if err != nil {
- return nil, err
- }
- instanceNameList = append(instanceNameList, instanceNames...)
- for _, name := range instanceNames {
- allNameTemplateMap[name] = template
- }
- }
- // validate duplicate pod names
- getNameFunc := func(n string) string {
- return n
- }
- if err := ValidateDupInstanceNames(instanceNameList, getNameFunc); err != nil {
- return nil, err
- }
-
- return allNameTemplateMap, nil
-}
-
func GenerateAllInstanceNames(parentName string, replicas int32, templates []InstanceTemplate, offlineInstances []string, defaultTemplateOrdinals kbappsv1.Ordinals) ([]string, error) {
totalReplicas := int32(0)
instanceNameList := make([]string, 0)
@@ -526,7 +493,7 @@ func MergeNodeSelectorOnceAnnotation(its *workloads.InstanceSet, podToNodeMappin
return nil
}
-func buildInstanceByTemplate(name string, template *instanceTemplateExt, parent *workloads.InstanceSet, revision string) (*instance, error) {
+func buildInstanceByTemplate(name string, template *instancetemplate.InstanceTemplateExt, parent *workloads.InstanceSet, revision string) (*instance, error) {
// 1. build a pod from template
var err error
if len(revision) == 0 {
@@ -540,6 +507,7 @@ func buildInstanceByTemplate(name string, template *instanceTemplateExt, parent
AddAnnotationsInMap(template.Annotations).
AddLabelsInMap(template.Labels).
AddLabelsInMap(labels).
+ AddLabels(instancetemplate.TemplateNameLabelKey, template.Name).
AddLabels(constant.KBAppPodNameLabelKey, name). // used as a pod-service selector
AddControllerRevisionHashLabel(revision).
SetPodSpec(*template.Spec.DeepCopy()).
@@ -610,7 +578,7 @@ func buildInstanceByTemplate(name string, template *instanceTemplateExt, parent
return inst, nil
}
-func buildInstancePVCByTemplate(name string, template *instanceTemplateExt, parent *workloads.InstanceSet) []*corev1.PersistentVolumeClaim {
+func buildInstancePVCByTemplate(name string, template *instancetemplate.InstanceTemplateExt, parent *workloads.InstanceSet) []*corev1.PersistentVolumeClaim {
// 2. build pvcs from template
var pvcs []*corev1.PersistentVolumeClaim
labels := getMatchLabels(parent.Name)
@@ -724,53 +692,6 @@ func copyAndMerge(oldObj, newObj client.Object) client.Object {
}
}
-func validateSpec(its *workloads.InstanceSet, tree *kubebuilderx.ObjectTree) error {
- replicasInTemplates := int32(0)
- itsExt, err := buildInstanceSetExt(its, tree)
- if err != nil {
- return err
- }
- templateNames := sets.New[string]()
- for _, template := range itsExt.instanceTemplates {
- replicas := int32(1)
- if template.Replicas != nil {
- replicas = *template.Replicas
- }
- replicasInTemplates += replicas
- if templateNames.Has(template.Name) {
- err = fmt.Errorf("duplicate instance template name: %s", template.Name)
- if tree != nil {
- tree.EventRecorder.Event(its, corev1.EventTypeWarning, EventReasonInvalidSpec, err.Error())
- }
- return err
- }
- templateNames.Insert(template.Name)
- }
- // sum of spec.templates[*].replicas should not greater than spec.replicas
- if replicasInTemplates > *its.Spec.Replicas {
- err = fmt.Errorf("total replicas in instances(%d) should not greater than replicas in spec(%d)", replicasInTemplates, *its.Spec.Replicas)
- if tree != nil {
- tree.EventRecorder.Event(its, corev1.EventTypeWarning, EventReasonInvalidSpec, err.Error())
- }
- return err
- }
-
- // try to generate all pod names
- var instances []InstanceTemplate
- for i := range its.Spec.Instances {
- instances = append(instances, &its.Spec.Instances[i])
- }
- _, err = GenerateAllInstanceNames(its.Name, *its.Spec.Replicas, instances, its.Spec.OfflineInstances, its.Spec.DefaultTemplateOrdinals)
- if err != nil {
- if tree != nil {
- tree.EventRecorder.Event(its, corev1.EventTypeWarning, EventReasonInvalidSpec, err.Error())
- }
- return err
- }
-
- return nil
-}
-
func BuildInstanceTemplateRevision(template *corev1.PodTemplateSpec, parent *workloads.InstanceSet) (string, error) {
podTemplate := filterInPlaceFields(template)
its := builder.NewInstanceSetBuilder(parent.Namespace, parent.Name).
@@ -787,51 +708,6 @@ func BuildInstanceTemplateRevision(template *corev1.PodTemplateSpec, parent *wor
return cr.Labels[ControllerRevisionHashLabel], nil
}
-func buildInstanceTemplateExts(itsExt *instanceSetExt) []*instanceTemplateExt {
- defaultTemplate := itsExt.its.Spec.Template.DeepCopy()
- makeInstanceTemplateExt := func(templateName string) *instanceTemplateExt {
- var claims []corev1.PersistentVolumeClaim
- for _, template := range itsExt.its.Spec.VolumeClaimTemplates {
- claims = append(claims, *template.DeepCopy())
- }
- return &instanceTemplateExt{
- Name: templateName,
- PodTemplateSpec: *defaultTemplate.DeepCopy(),
- VolumeClaimTemplates: claims,
- }
- }
-
- var instanceTemplateExtList []*instanceTemplateExt
- for _, template := range itsExt.instanceTemplates {
- templateExt := makeInstanceTemplateExt(template.Name)
- buildInstanceTemplateExt(*template, templateExt)
- instanceTemplateExtList = append(instanceTemplateExtList, templateExt)
- }
- return instanceTemplateExtList
-}
-
-func buildInstanceTemplates(totalReplicas int32, instances []workloads.InstanceTemplate, instancesCompressed *corev1.ConfigMap) []*workloads.InstanceTemplate {
- var instanceTemplateList []*workloads.InstanceTemplate
- var replicasInTemplates int32
- instanceTemplates := getInstanceTemplates(instances, instancesCompressed)
- for i := range instanceTemplates {
- instance := &instanceTemplates[i]
- replicas := int32(1)
- if instance.Replicas != nil {
- replicas = *instance.Replicas
- }
- instanceTemplateList = append(instanceTemplateList, instance)
- replicasInTemplates += replicas
- }
- if replicasInTemplates < totalReplicas {
- replicas := totalReplicas - replicasInTemplates
- instance := &workloads.InstanceTemplate{Replicas: &replicas}
- instanceTemplateList = append(instanceTemplateList, instance)
- }
-
- return instanceTemplateList
-}
-
func getInstanceTemplateMap(annotations map[string]string) (map[string]string, error) {
if annotations == nil {
return nil, nil
@@ -894,180 +770,3 @@ func findTemplateObject(its *workloads.InstanceSet, tree *kubebuilderx.ObjectTre
}
return nil, nil
}
-
-func buildInstanceTemplateExt(template workloads.InstanceTemplate, templateExt *instanceTemplateExt) {
- templateExt.Name = template.Name
- replicas := int32(1)
- if template.Replicas != nil {
- replicas = *template.Replicas
- }
- templateExt.Replicas = replicas
- if template.SchedulingPolicy != nil && template.SchedulingPolicy.NodeName != "" {
- templateExt.Spec.NodeName = template.SchedulingPolicy.NodeName
- }
- mergeMap(&template.Annotations, &templateExt.Annotations)
- mergeMap(&template.Labels, &templateExt.Labels)
- if template.SchedulingPolicy != nil {
- mergeMap(&template.SchedulingPolicy.NodeSelector, &templateExt.Spec.NodeSelector)
- }
- if len(templateExt.Spec.Containers) > 0 {
- if template.Resources != nil {
- src := template.Resources
- dst := &templateExt.Spec.Containers[0].Resources
- mergeCPUNMemory(&src.Limits, &dst.Limits)
- mergeCPUNMemory(&src.Requests, &dst.Requests)
- }
- if template.Env != nil {
- intctrlutil.MergeList(&template.Env, &templateExt.Spec.Containers[0].Env,
- func(item corev1.EnvVar) func(corev1.EnvVar) bool {
- return func(env corev1.EnvVar) bool {
- return env.Name == item.Name
- }
- })
- }
- }
-
- if template.SchedulingPolicy != nil {
- intctrlutil.MergeList(&template.SchedulingPolicy.Tolerations, &templateExt.Spec.Tolerations,
- func(item corev1.Toleration) func(corev1.Toleration) bool {
- return func(t corev1.Toleration) bool {
- return reflect.DeepEqual(item, t)
- }
- })
- intctrlutil.MergeList(&template.SchedulingPolicy.TopologySpreadConstraints, &templateExt.Spec.TopologySpreadConstraints,
- func(item corev1.TopologySpreadConstraint) func(corev1.TopologySpreadConstraint) bool {
- return func(t corev1.TopologySpreadConstraint) bool {
- return reflect.DeepEqual(item, t)
- }
- })
- mergeAffinity(&template.SchedulingPolicy.Affinity, &templateExt.Spec.Affinity)
- }
-}
-
-func mergeCPUNMemory(s, d *corev1.ResourceList) {
- if s == nil || *s == nil || d == nil {
- return
- }
- for _, k := range []corev1.ResourceName{corev1.ResourceCPU, corev1.ResourceMemory} {
- if v, ok := (*s)[k]; ok {
- if *d == nil {
- *d = make(corev1.ResourceList)
- }
- (*d)[k] = v
- }
- }
-}
-
-// TODO: merge with existing mergeAffinity function which locates at pkg/controller/scheduling/scheduling_utils.go
-func mergeAffinity(affinity1Ptr, affinity2Ptr **corev1.Affinity) {
- if affinity1Ptr == nil || *affinity1Ptr == nil {
- return
- }
- if *affinity2Ptr == nil {
- *affinity2Ptr = &corev1.Affinity{}
- }
- affinity1 := *affinity1Ptr
- affinity2 := *affinity2Ptr
-
- // Merge PodAffinity
- mergePodAffinity(&affinity1.PodAffinity, &affinity2.PodAffinity)
-
- // Merge PodAntiAffinity
- mergePodAntiAffinity(&affinity1.PodAntiAffinity, &affinity2.PodAntiAffinity)
-
- // Merge NodeAffinity
- mergeNodeAffinity(&affinity1.NodeAffinity, &affinity2.NodeAffinity)
-}
-
-func mergePodAffinity(podAffinity1Ptr, podAffinity2Ptr **corev1.PodAffinity) {
- if podAffinity1Ptr == nil || *podAffinity1Ptr == nil {
- return
- }
- if *podAffinity2Ptr == nil {
- *podAffinity2Ptr = &corev1.PodAffinity{}
- }
- podAffinity1 := *podAffinity1Ptr
- podAffinity2 := *podAffinity2Ptr
-
- intctrlutil.MergeList(&podAffinity1.RequiredDuringSchedulingIgnoredDuringExecution, &podAffinity2.RequiredDuringSchedulingIgnoredDuringExecution,
- func(item corev1.PodAffinityTerm) func(corev1.PodAffinityTerm) bool {
- return func(t corev1.PodAffinityTerm) bool {
- return reflect.DeepEqual(item, t)
- }
- })
- intctrlutil.MergeList(&podAffinity1.PreferredDuringSchedulingIgnoredDuringExecution, &podAffinity2.PreferredDuringSchedulingIgnoredDuringExecution,
- func(item corev1.WeightedPodAffinityTerm) func(corev1.WeightedPodAffinityTerm) bool {
- return func(t corev1.WeightedPodAffinityTerm) bool {
- return reflect.DeepEqual(item, t)
- }
- })
-}
-
-func mergePodAntiAffinity(podAntiAffinity1Ptr, podAntiAffinity2Ptr **corev1.PodAntiAffinity) {
- if podAntiAffinity1Ptr == nil || *podAntiAffinity1Ptr == nil {
- return
- }
- if *podAntiAffinity2Ptr == nil {
- *podAntiAffinity2Ptr = &corev1.PodAntiAffinity{}
- }
- podAntiAffinity1 := *podAntiAffinity1Ptr
- podAntiAffinity2 := *podAntiAffinity2Ptr
-
- intctrlutil.MergeList(&podAntiAffinity1.RequiredDuringSchedulingIgnoredDuringExecution, &podAntiAffinity2.RequiredDuringSchedulingIgnoredDuringExecution,
- func(item corev1.PodAffinityTerm) func(corev1.PodAffinityTerm) bool {
- return func(t corev1.PodAffinityTerm) bool {
- return reflect.DeepEqual(item, t)
- }
- })
- intctrlutil.MergeList(&podAntiAffinity1.PreferredDuringSchedulingIgnoredDuringExecution, &podAntiAffinity2.PreferredDuringSchedulingIgnoredDuringExecution,
- func(item corev1.WeightedPodAffinityTerm) func(corev1.WeightedPodAffinityTerm) bool {
- return func(t corev1.WeightedPodAffinityTerm) bool {
- return reflect.DeepEqual(item, t)
- }
- })
-}
-
-func mergeNodeAffinity(nodeAffinity1Ptr, nodeAffinity2Ptr **corev1.NodeAffinity) {
- if nodeAffinity1Ptr == nil || *nodeAffinity1Ptr == nil {
- return
- }
- if *nodeAffinity2Ptr == nil {
- *nodeAffinity2Ptr = &corev1.NodeAffinity{}
- }
- nodeAffinity1 := *nodeAffinity1Ptr
- nodeAffinity2 := *nodeAffinity2Ptr
-
- if nodeAffinity1.RequiredDuringSchedulingIgnoredDuringExecution != nil {
- if nodeAffinity2.RequiredDuringSchedulingIgnoredDuringExecution == nil {
- nodeAffinity2.RequiredDuringSchedulingIgnoredDuringExecution = &corev1.NodeSelector{}
- }
- intctrlutil.MergeList(&nodeAffinity1.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms,
- &nodeAffinity2.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms,
- func(item corev1.NodeSelectorTerm) func(corev1.NodeSelectorTerm) bool {
- return func(t corev1.NodeSelectorTerm) bool {
- return reflect.DeepEqual(item, t)
- }
- })
- }
- intctrlutil.MergeList(&nodeAffinity1.PreferredDuringSchedulingIgnoredDuringExecution,
- &nodeAffinity2.PreferredDuringSchedulingIgnoredDuringExecution,
- func(item corev1.PreferredSchedulingTerm) func(corev1.PreferredSchedulingTerm) bool {
- return func(t corev1.PreferredSchedulingTerm) bool {
- return reflect.DeepEqual(item, t)
- }
- })
-}
-
-func buildInstanceSetExt(its *workloads.InstanceSet, tree *kubebuilderx.ObjectTree) (*instanceSetExt, error) {
- instancesCompressed, err := findTemplateObject(its, tree)
- if err != nil {
- return nil, err
- }
-
- instanceTemplateList := buildInstanceTemplates(*its.Spec.Replicas, its.Spec.Instances, instancesCompressed)
-
- return &instanceSetExt{
- its: its,
- instanceTemplates: instanceTemplateList,
- }, nil
-}
diff --git a/pkg/controller/instanceset/instance_util_test.go b/pkg/controller/instanceset/instance_util_test.go
index ff7c522dc76..d48dac7166c 100644
--- a/pkg/controller/instanceset/instance_util_test.go
+++ b/pkg/controller/instanceset/instance_util_test.go
@@ -30,7 +30,6 @@ import (
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/utils/pointer"
"sigs.k8s.io/controller-runtime/pkg/client"
@@ -39,6 +38,7 @@ import (
workloads "github.com/apecloud/kubeblocks/apis/workloads/v1"
"github.com/apecloud/kubeblocks/pkg/constant"
"github.com/apecloud/kubeblocks/pkg/controller/builder"
+ "github.com/apecloud/kubeblocks/pkg/controller/instanceset/instancetemplate"
"github.com/apecloud/kubeblocks/pkg/controller/kubebuilderx"
)
@@ -119,69 +119,11 @@ var _ = Describe("instance util test", func() {
})
})
- Context("buildInstanceName2TemplateMap", func() {
- It("build an its with default template only", func() {
- itsExt, err := buildInstanceSetExt(its, nil)
- Expect(err).Should(BeNil())
- nameTemplate, err := buildInstanceName2TemplateMap(itsExt)
- Expect(err).Should(BeNil())
- Expect(nameTemplate).Should(HaveLen(3))
- name0 := its.Name + "-0"
- Expect(nameTemplate).Should(HaveKey(name0))
- Expect(nameTemplate).Should(HaveKey(its.Name + "-1"))
- Expect(nameTemplate).Should(HaveKey(its.Name + "-2"))
- nameTemplate[name0].PodTemplateSpec.Spec.Volumes = nil
- defaultTemplate := its.Spec.Template.DeepCopy()
- Expect(nameTemplate[name0].PodTemplateSpec.Spec).Should(Equal(defaultTemplate.Spec))
- })
-
- It("build an its with one instance template override", func() {
- nameOverride := "name-override"
- nameOverride0 := its.Name + "-" + nameOverride + "-0"
- annotationOverride := map[string]string{
- "foo": "bar",
- }
- labelOverride := map[string]string{
- "foo": "bar",
- }
- resources := corev1.ResourceRequirements{
- Limits: map[corev1.ResourceName]resource.Quantity{
- corev1.ResourceCPU: resource.MustParse("600m"),
- },
- }
- instance := workloads.InstanceTemplate{
- Name: nameOverride,
- Annotations: annotationOverride,
- Labels: labelOverride,
- Resources: &resources,
- }
- its.Spec.Instances = append(its.Spec.Instances, instance)
- itsExt, err := buildInstanceSetExt(its, nil)
- Expect(err).Should(BeNil())
- nameTemplate, err := buildInstanceName2TemplateMap(itsExt)
- Expect(err).Should(BeNil())
- Expect(nameTemplate).Should(HaveLen(3))
- name0 := its.Name + "-0"
- name1 := its.Name + "-1"
- Expect(nameTemplate).Should(HaveKey(name0))
- Expect(nameTemplate).Should(HaveKey(name1))
- Expect(nameTemplate).Should(HaveKey(nameOverride0))
- expectedTemplate := its.Spec.Template.DeepCopy()
- Expect(nameTemplate[name0].PodTemplateSpec.Spec).Should(Equal(expectedTemplate.Spec))
- Expect(nameTemplate[name1].PodTemplateSpec.Spec).Should(Equal(expectedTemplate.Spec))
- Expect(nameTemplate[nameOverride0].PodTemplateSpec.Spec).ShouldNot(Equal(expectedTemplate.Spec))
- Expect(nameTemplate[nameOverride0].PodTemplateSpec.Annotations).Should(Equal(annotationOverride))
- Expect(nameTemplate[nameOverride0].PodTemplateSpec.Labels).Should(Equal(labelOverride))
- Expect(nameTemplate[nameOverride0].PodTemplateSpec.Spec.Containers[0].Resources.Limits[corev1.ResourceCPU]).Should(Equal(resources.Limits[corev1.ResourceCPU]))
- Expect(nameTemplate[nameOverride0].PodTemplateSpec.Spec.Containers[0].Resources.Requests[corev1.ResourceCPU]).Should(Equal(its.Spec.Template.Spec.Containers[0].Resources.Requests[corev1.ResourceCPU]))
- })
- })
-
Context("buildInstanceByTemplate", func() {
It("should work well", func() {
- itsExt, err := buildInstanceSetExt(its, nil)
+ itsExt, err := instancetemplate.BuildInstanceSetExt(its, nil)
Expect(err).Should(BeNil())
- nameTemplate, err := buildInstanceName2TemplateMap(itsExt)
+ nameTemplate, err := instancetemplate.BuildInstanceName2TemplateMap(itsExt)
Expect(err).Should(BeNil())
Expect(nameTemplate).Should(HaveLen(3))
name := name + "-0"
@@ -209,9 +151,9 @@ var _ = Describe("instance util test", func() {
})
It("adds nodeSelector according to annotation", func() {
- itsExt, err := buildInstanceSetExt(its, nil)
+ itsExt, err := instancetemplate.BuildInstanceSetExt(its, nil)
Expect(err).Should(BeNil())
- nameTemplate, err := buildInstanceName2TemplateMap(itsExt)
+ nameTemplate, err := instancetemplate.BuildInstanceName2TemplateMap(itsExt)
Expect(err).Should(BeNil())
name := name + "-0"
Expect(nameTemplate).Should(HaveKey(name))
@@ -239,9 +181,9 @@ var _ = Describe("instance util test", func() {
Context("buildInstancePVCByTemplate", func() {
It("should work well", func() {
- itsExt, err := buildInstanceSetExt(its, nil)
+ itsExt, err := instancetemplate.BuildInstanceSetExt(its, nil)
Expect(err).Should(BeNil())
- nameTemplate, err := buildInstanceName2TemplateMap(itsExt)
+ nameTemplate, err := instancetemplate.BuildInstanceName2TemplateMap(itsExt)
Expect(err).Should(BeNil())
Expect(nameTemplate).Should(HaveLen(3))
name := name + "-0"
@@ -255,26 +197,6 @@ var _ = Describe("instance util test", func() {
})
})
- Context("validateSpec", func() {
- It("should work well", func() {
- By("a valid spec")
- Expect(validateSpec(its, nil)).Should(Succeed())
-
- By("sum of replicas in instance exceeds spec.replicas")
- its2 := its.DeepCopy()
- replicas := int32(4)
- name := "barrrrr"
- instance := workloads.InstanceTemplate{
- Name: name,
- Replicas: &replicas,
- }
- its2.Spec.Instances = append(its2.Spec.Instances, instance)
- err := validateSpec(its2, nil)
- Expect(err).Should(HaveOccurred())
- Expect(err.Error()).Should(ContainSubstring("should not greater than replicas in spec"))
- })
- })
-
Context("copyAndMerge", func() {
It("should work well", func() {
By("merge svc")
@@ -821,550 +743,6 @@ var _ = Describe("instance util test", func() {
})
})
- Context("mergeAffinity", func() {
- It("merge all configs", func() {
- affinity1 := &corev1.Affinity{
- NodeAffinity: &corev1.NodeAffinity{
- RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{
- NodeSelectorTerms: []corev1.NodeSelectorTerm{
- {
- MatchExpressions: []corev1.NodeSelectorRequirement{
- {
- Key: "node-role.kubernetes.io/worker",
- Operator: corev1.NodeSelectorOpExists,
- },
- },
- MatchFields: nil,
- },
- {
- MatchExpressions: []corev1.NodeSelectorRequirement{
- {
- Key: "topology.kubernetes.io/zone",
- Operator: corev1.NodeSelectorOpIn,
- Values: []string{
- "east1",
- },
- },
- },
- },
- },
- },
- PreferredDuringSchedulingIgnoredDuringExecution: nil,
- },
- PodAffinity: &corev1.PodAffinity{
- RequiredDuringSchedulingIgnoredDuringExecution: []corev1.PodAffinityTerm{
- {
- LabelSelector: &metav1.LabelSelector{
- MatchLabels: nil,
- MatchExpressions: []metav1.LabelSelectorRequirement{
- {
- Key: "app",
- Operator: metav1.LabelSelectorOpIn,
- Values: []string{"myapp"},
- },
- },
- },
- Namespaces: nil,
- TopologyKey: "",
- NamespaceSelector: &metav1.LabelSelector{
- MatchLabels: nil,
- MatchExpressions: nil,
- },
- },
- },
- PreferredDuringSchedulingIgnoredDuringExecution: nil,
- },
- PodAntiAffinity: &corev1.PodAntiAffinity{
- RequiredDuringSchedulingIgnoredDuringExecution: nil,
- PreferredDuringSchedulingIgnoredDuringExecution: []corev1.WeightedPodAffinityTerm{
- {
- Weight: 100,
- PodAffinityTerm: corev1.PodAffinityTerm{
- LabelSelector: &metav1.LabelSelector{
- MatchLabels: nil,
- MatchExpressions: []metav1.LabelSelectorRequirement{
- {
- Key: "app",
- Operator: metav1.LabelSelectorOpIn,
- Values: []string{"myapp"},
- },
- },
- },
- TopologyKey: "kubernetes.io/hostname",
- NamespaceSelector: &metav1.LabelSelector{
- MatchLabels: nil,
- MatchExpressions: nil,
- },
- },
- },
- },
- },
- }
- affinity2 := &corev1.Affinity{
- NodeAffinity: &corev1.NodeAffinity{
- RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{
- NodeSelectorTerms: []corev1.NodeSelectorTerm{
- {
- MatchExpressions: []corev1.NodeSelectorRequirement{
- {
- Key: "disktype",
- Operator: corev1.NodeSelectorOpIn,
- Values: []string{
- "hdd",
- },
- },
- },
- MatchFields: nil,
- },
- {
- MatchExpressions: []corev1.NodeSelectorRequirement{
- {
- Key: "topology.kubernetes.io/zone",
- Operator: corev1.NodeSelectorOpIn,
- Values: []string{
- "west1",
- },
- },
- },
- },
- },
- },
- PreferredDuringSchedulingIgnoredDuringExecution: nil,
- },
- PodAffinity: &corev1.PodAffinity{
- RequiredDuringSchedulingIgnoredDuringExecution: []corev1.PodAffinityTerm{
- {
- LabelSelector: &metav1.LabelSelector{
- MatchLabels: nil,
- MatchExpressions: []metav1.LabelSelectorRequirement{
- {
- Key: "app",
- Operator: metav1.LabelSelectorOpIn,
- Values: []string{"myapp"},
- },
- },
- },
- Namespaces: nil,
- TopologyKey: "",
- NamespaceSelector: &metav1.LabelSelector{
- MatchLabels: nil,
- MatchExpressions: nil,
- },
- },
- },
- PreferredDuringSchedulingIgnoredDuringExecution: nil,
- },
- PodAntiAffinity: &corev1.PodAntiAffinity{
- RequiredDuringSchedulingIgnoredDuringExecution: nil,
- PreferredDuringSchedulingIgnoredDuringExecution: []corev1.WeightedPodAffinityTerm{
- {
- Weight: 100,
- PodAffinityTerm: corev1.PodAffinityTerm{
- LabelSelector: &metav1.LabelSelector{
- MatchLabels: nil,
- MatchExpressions: []metav1.LabelSelectorRequirement{
- {
- Key: "app",
- Operator: metav1.LabelSelectorOpIn,
- Values: []string{"myapp"},
- },
- },
- },
- TopologyKey: "kubernetes.io/hostname",
- NamespaceSelector: &metav1.LabelSelector{
- MatchLabels: nil,
- MatchExpressions: nil,
- },
- },
- },
- },
- },
- }
-
- mergeAffinity(&affinity1, &affinity2)
-
- expectMergedAffinity := &corev1.Affinity{
- NodeAffinity: &corev1.NodeAffinity{
- RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{
- NodeSelectorTerms: []corev1.NodeSelectorTerm{
- {
- MatchExpressions: []corev1.NodeSelectorRequirement{
- {
- Key: "disktype",
- Operator: corev1.NodeSelectorOpIn,
- Values: []string{
- "hdd",
- },
- },
- },
- MatchFields: nil,
- },
- {
- MatchExpressions: []corev1.NodeSelectorRequirement{
- {
- Key: "topology.kubernetes.io/zone",
- Operator: corev1.NodeSelectorOpIn,
- Values: []string{
- "west1",
- },
- },
- },
- },
- {
- MatchExpressions: []corev1.NodeSelectorRequirement{
- {
- Key: "node-role.kubernetes.io/worker",
- Operator: corev1.NodeSelectorOpExists,
- },
- },
- MatchFields: nil,
- },
- {
- MatchExpressions: []corev1.NodeSelectorRequirement{
- {
- Key: "topology.kubernetes.io/zone",
- Operator: corev1.NodeSelectorOpIn,
- Values: []string{
- "east1",
- },
- },
- },
- },
- },
- },
- PreferredDuringSchedulingIgnoredDuringExecution: nil,
- },
- PodAffinity: &corev1.PodAffinity{
- RequiredDuringSchedulingIgnoredDuringExecution: []corev1.PodAffinityTerm{
- {
- LabelSelector: &metav1.LabelSelector{
- MatchLabels: nil,
- MatchExpressions: []metav1.LabelSelectorRequirement{
- {
- Key: "app",
- Operator: metav1.LabelSelectorOpIn,
- Values: []string{"myapp"},
- },
- },
- },
- Namespaces: nil,
- TopologyKey: "",
- NamespaceSelector: &metav1.LabelSelector{
- MatchLabels: nil,
- MatchExpressions: nil,
- },
- },
- },
- PreferredDuringSchedulingIgnoredDuringExecution: nil,
- },
- PodAntiAffinity: &corev1.PodAntiAffinity{
- RequiredDuringSchedulingIgnoredDuringExecution: nil,
- PreferredDuringSchedulingIgnoredDuringExecution: []corev1.WeightedPodAffinityTerm{
- {
- Weight: 100,
- PodAffinityTerm: corev1.PodAffinityTerm{
- LabelSelector: &metav1.LabelSelector{
- MatchLabels: nil,
- MatchExpressions: []metav1.LabelSelectorRequirement{
- {
- Key: "app",
- Operator: metav1.LabelSelectorOpIn,
- Values: []string{"myapp"},
- },
- },
- },
- TopologyKey: "kubernetes.io/hostname",
- NamespaceSelector: &metav1.LabelSelector{
- MatchLabels: nil,
- MatchExpressions: nil,
- },
- },
- },
- },
- },
- }
- Expect(affinity2).Should(Equal(expectMergedAffinity))
- })
- It("merge with nil src", func() {
- var affinity1 *corev1.Affinity
- affinity2 := &corev1.Affinity{
- NodeAffinity: &corev1.NodeAffinity{
- RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{
- NodeSelectorTerms: []corev1.NodeSelectorTerm{
- {
- MatchExpressions: []corev1.NodeSelectorRequirement{
- {
- Key: "node-role.kubernetes.io/worker",
- Operator: corev1.NodeSelectorOpExists,
- },
- },
- MatchFields: nil,
- },
- },
- },
- PreferredDuringSchedulingIgnoredDuringExecution: nil,
- },
- PodAffinity: &corev1.PodAffinity{
- RequiredDuringSchedulingIgnoredDuringExecution: []corev1.PodAffinityTerm{
- {
- LabelSelector: &metav1.LabelSelector{
- MatchLabels: nil,
- MatchExpressions: []metav1.LabelSelectorRequirement{
- {
- Key: "app",
- Operator: metav1.LabelSelectorOpIn,
- Values: []string{"myapp"},
- },
- },
- },
- Namespaces: nil,
- TopologyKey: "",
- NamespaceSelector: &metav1.LabelSelector{
- MatchLabels: nil,
- MatchExpressions: nil,
- },
- },
- },
- PreferredDuringSchedulingIgnoredDuringExecution: nil,
- },
- PodAntiAffinity: &corev1.PodAntiAffinity{
- RequiredDuringSchedulingIgnoredDuringExecution: nil,
- PreferredDuringSchedulingIgnoredDuringExecution: []corev1.WeightedPodAffinityTerm{
- {
- Weight: 100,
- PodAffinityTerm: corev1.PodAffinityTerm{
- LabelSelector: &metav1.LabelSelector{
- MatchLabels: nil,
- MatchExpressions: []metav1.LabelSelectorRequirement{
- {
- Key: "app",
- Operator: metav1.LabelSelectorOpIn,
- Values: []string{"myapp"},
- },
- },
- },
- TopologyKey: "kubernetes.io/hostname",
- NamespaceSelector: &metav1.LabelSelector{
- MatchLabels: nil,
- MatchExpressions: nil,
- },
- },
- },
- },
- },
- }
-
- mergeAffinity(&affinity1, &affinity2)
-
- expectMergedAffinity := &corev1.Affinity{
- NodeAffinity: &corev1.NodeAffinity{
- RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{
- NodeSelectorTerms: []corev1.NodeSelectorTerm{
- {
- MatchExpressions: []corev1.NodeSelectorRequirement{
- {
- Key: "node-role.kubernetes.io/worker",
- Operator: corev1.NodeSelectorOpExists,
- },
- },
- MatchFields: nil,
- },
- },
- },
- PreferredDuringSchedulingIgnoredDuringExecution: nil,
- },
- PodAffinity: &corev1.PodAffinity{
- RequiredDuringSchedulingIgnoredDuringExecution: []corev1.PodAffinityTerm{
- {
- LabelSelector: &metav1.LabelSelector{
- MatchLabels: nil,
- MatchExpressions: []metav1.LabelSelectorRequirement{
- {
- Key: "app",
- Operator: metav1.LabelSelectorOpIn,
- Values: []string{"myapp"},
- },
- },
- },
- Namespaces: nil,
- TopologyKey: "",
- NamespaceSelector: &metav1.LabelSelector{
- MatchLabels: nil,
- MatchExpressions: nil,
- },
- },
- },
- PreferredDuringSchedulingIgnoredDuringExecution: nil,
- },
- PodAntiAffinity: &corev1.PodAntiAffinity{
- RequiredDuringSchedulingIgnoredDuringExecution: nil,
- PreferredDuringSchedulingIgnoredDuringExecution: []corev1.WeightedPodAffinityTerm{
- {
- Weight: 100,
- PodAffinityTerm: corev1.PodAffinityTerm{
- LabelSelector: &metav1.LabelSelector{
- MatchLabels: nil,
- MatchExpressions: []metav1.LabelSelectorRequirement{
- {
- Key: "app",
- Operator: metav1.LabelSelectorOpIn,
- Values: []string{"myapp"},
- },
- },
- },
- TopologyKey: "kubernetes.io/hostname",
- NamespaceSelector: &metav1.LabelSelector{
- MatchLabels: nil,
- MatchExpressions: nil,
- },
- },
- },
- },
- },
- }
- Expect(affinity2).Should(Equal(expectMergedAffinity))
- })
- It("merge with nil dst", func() {
- affinity1 := &corev1.Affinity{
- NodeAffinity: &corev1.NodeAffinity{
- RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{
- NodeSelectorTerms: []corev1.NodeSelectorTerm{
- {
- MatchExpressions: []corev1.NodeSelectorRequirement{
- {
- Key: "node-role.kubernetes.io/worker",
- Operator: corev1.NodeSelectorOpExists,
- },
- },
- MatchFields: nil,
- },
- },
- },
- PreferredDuringSchedulingIgnoredDuringExecution: nil,
- },
- PodAffinity: &corev1.PodAffinity{
- RequiredDuringSchedulingIgnoredDuringExecution: []corev1.PodAffinityTerm{
- {
- LabelSelector: &metav1.LabelSelector{
- MatchLabels: nil,
- MatchExpressions: []metav1.LabelSelectorRequirement{
- {
- Key: "app",
- Operator: metav1.LabelSelectorOpIn,
- Values: []string{"myapp"},
- },
- },
- },
- Namespaces: nil,
- TopologyKey: "",
- NamespaceSelector: &metav1.LabelSelector{
- MatchLabels: nil,
- MatchExpressions: nil,
- },
- },
- },
- PreferredDuringSchedulingIgnoredDuringExecution: nil,
- },
- PodAntiAffinity: &corev1.PodAntiAffinity{
- RequiredDuringSchedulingIgnoredDuringExecution: nil,
- PreferredDuringSchedulingIgnoredDuringExecution: []corev1.WeightedPodAffinityTerm{
- {
- Weight: 100,
- PodAffinityTerm: corev1.PodAffinityTerm{
- LabelSelector: &metav1.LabelSelector{
- MatchLabels: nil,
- MatchExpressions: []metav1.LabelSelectorRequirement{
- {
- Key: "app",
- Operator: metav1.LabelSelectorOpIn,
- Values: []string{"myapp"},
- },
- },
- },
- TopologyKey: "kubernetes.io/hostname",
- NamespaceSelector: &metav1.LabelSelector{
- MatchLabels: nil,
- MatchExpressions: nil,
- },
- },
- },
- },
- },
- }
- var affinity2 *corev1.Affinity = nil
-
- mergeAffinity(&affinity1, &affinity2)
-
- expectMergedAffinity := &corev1.Affinity{
- NodeAffinity: &corev1.NodeAffinity{
- RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{
- NodeSelectorTerms: []corev1.NodeSelectorTerm{
- {
- MatchExpressions: []corev1.NodeSelectorRequirement{
- {
- Key: "node-role.kubernetes.io/worker",
- Operator: corev1.NodeSelectorOpExists,
- },
- },
- MatchFields: nil,
- },
- },
- },
- PreferredDuringSchedulingIgnoredDuringExecution: nil,
- },
- PodAffinity: &corev1.PodAffinity{
- RequiredDuringSchedulingIgnoredDuringExecution: []corev1.PodAffinityTerm{
- {
- LabelSelector: &metav1.LabelSelector{
- MatchLabels: nil,
- MatchExpressions: []metav1.LabelSelectorRequirement{
- {
- Key: "app",
- Operator: metav1.LabelSelectorOpIn,
- Values: []string{"myapp"},
- },
- },
- },
- Namespaces: nil,
- TopologyKey: "",
- NamespaceSelector: &metav1.LabelSelector{
- MatchLabels: nil,
- MatchExpressions: nil,
- },
- },
- },
- PreferredDuringSchedulingIgnoredDuringExecution: nil,
- },
- PodAntiAffinity: &corev1.PodAntiAffinity{
- RequiredDuringSchedulingIgnoredDuringExecution: nil,
- PreferredDuringSchedulingIgnoredDuringExecution: []corev1.WeightedPodAffinityTerm{
- {
- Weight: 100,
- PodAffinityTerm: corev1.PodAffinityTerm{
- LabelSelector: &metav1.LabelSelector{
- MatchLabels: nil,
- MatchExpressions: []metav1.LabelSelectorRequirement{
- {
- Key: "app",
- Operator: metav1.LabelSelectorOpIn,
- Values: []string{"myapp"},
- },
- },
- },
- TopologyKey: "kubernetes.io/hostname",
- NamespaceSelector: &metav1.LabelSelector{
- MatchLabels: nil,
- MatchExpressions: nil,
- },
- },
- },
- },
- },
- }
- Expect(affinity2).Should(Equal(expectMergedAffinity))
- })
- })
-
Context("ParseParentNameAndOrdinal", func() {
It("Benchmark", Serial, Label("measurement"), func() {
experiment := gmeasure.NewExperiment("ParseParentNameAndOrdinal Benchmark")
diff --git a/pkg/controller/instanceset/instancetemplate/compression.go b/pkg/controller/instanceset/instancetemplate/compression.go
new file mode 100644
index 00000000000..9a0b9e8edda
--- /dev/null
+++ b/pkg/controller/instanceset/instancetemplate/compression.go
@@ -0,0 +1,94 @@
+/*
+Copyright (C) 2022-2025 ApeCloud Co., Ltd
+
+This file is part of KubeBlocks project
+
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU Affero General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU Affero General Public License for more details.
+
+You should have received a copy of the GNU Affero General Public License
+along with this program. If not, see .
+*/
+
+package instancetemplate
+
+import (
+ "encoding/json"
+
+ corev1 "k8s.io/api/core/v1"
+
+ workloads "github.com/apecloud/kubeblocks/apis/workloads/v1"
+ "github.com/apecloud/kubeblocks/pkg/controller/builder"
+ "github.com/apecloud/kubeblocks/pkg/controller/kubebuilderx"
+)
+
+// getInstanceTemplates extracts the compressed instance templates from the ConfigMap and appends them to instances.
+func getInstanceTemplates(instances []workloads.InstanceTemplate, template *corev1.ConfigMap) []workloads.InstanceTemplate {
+ if template == nil {
+ return instances
+ }
+
+	// if the template is found but has an incorrect format, treat the whole template set as corrupted.
+ if template.BinaryData == nil {
+ return nil
+ }
+ templateData, ok := template.BinaryData[TemplateRefDataKey]
+ if !ok {
+ return nil
+ }
+ templateByte, err := reader.DecodeAll(templateData, nil)
+ if err != nil {
+ return nil
+ }
+ extraTemplates := make([]workloads.InstanceTemplate, 0)
+ err = json.Unmarshal(templateByte, &extraTemplates)
+ if err != nil {
+ return nil
+ }
+
+ return append(instances, extraTemplates...)
+}
+
+func findTemplateObject(its *workloads.InstanceSet, tree *kubebuilderx.ObjectTree) (*corev1.ConfigMap, error) {
+ templateMap, err := getInstanceTemplateMap(its.Annotations)
+	// the error has already been checked in the prepare stage, so no error should occur here
+ if err != nil {
+ return nil, nil
+ }
+ for name, templateName := range templateMap {
+ if name != its.Name {
+ continue
+ }
+ // find the compressed instance templates, parse them
+ template := builder.NewConfigMapBuilder(its.Namespace, templateName).GetObject()
+ templateObj, err := tree.Get(template)
+ if err != nil {
+ return nil, err
+ }
+ template, _ = templateObj.(*corev1.ConfigMap)
+ return template, nil
+ }
+ return nil, nil
+}
+
+func getInstanceTemplateMap(annotations map[string]string) (map[string]string, error) {
+ if annotations == nil {
+ return nil, nil
+ }
+ templateRef, ok := annotations[TemplateRefAnnotationKey]
+ if !ok {
+ return nil, nil
+ }
+ templateMap := make(map[string]string)
+ if err := json.Unmarshal([]byte(templateRef), &templateMap); err != nil {
+ return nil, err
+ }
+ return templateMap, nil
+}
diff --git a/pkg/controller/instanceset/instancetemplate/merge.go b/pkg/controller/instanceset/instancetemplate/merge.go
new file mode 100644
index 00000000000..01428c14e04
--- /dev/null
+++ b/pkg/controller/instanceset/instancetemplate/merge.go
@@ -0,0 +1,117 @@
+/*
+Copyright (C) 2022-2025 ApeCloud Co., Ltd
+
+This file is part of KubeBlocks project
+
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU Affero General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU Affero General Public License for more details.
+
+You should have received a copy of the GNU Affero General Public License
+along with this program. If not, see .
+*/
+
+package instancetemplate
+
+import (
+ "reflect"
+
+ corev1 "k8s.io/api/core/v1"
+
+ workloads "github.com/apecloud/kubeblocks/apis/workloads/v1"
+ "github.com/apecloud/kubeblocks/pkg/controller/scheduling"
+ intctrlutil "github.com/apecloud/kubeblocks/pkg/controllerutil"
+)
+
+func buildInstanceTemplateExt(template *workloads.InstanceTemplate, its *workloads.InstanceSet) *InstanceTemplateExt {
+ var claims []corev1.PersistentVolumeClaim
+ for _, t := range its.Spec.VolumeClaimTemplates {
+ claims = append(claims, *t.DeepCopy())
+ }
+ templateExt := &InstanceTemplateExt{
+ Name: template.Name,
+ PodTemplateSpec: *its.Spec.Template.DeepCopy(),
+ VolumeClaimTemplates: claims,
+ }
+
+ replicas := int32(1)
+ if template.Replicas != nil {
+ replicas = *template.Replicas
+ }
+ templateExt.Replicas = replicas
+
+ if template.SchedulingPolicy != nil && template.SchedulingPolicy.NodeName != "" {
+ templateExt.Spec.NodeName = template.SchedulingPolicy.NodeName
+ }
+ mergeMap(&template.Annotations, &templateExt.Annotations)
+ mergeMap(&template.Labels, &templateExt.Labels)
+ if template.SchedulingPolicy != nil {
+ mergeMap(&template.SchedulingPolicy.NodeSelector, &templateExt.Spec.NodeSelector)
+ }
+ if len(templateExt.Spec.Containers) > 0 {
+ if template.Resources != nil {
+ src := template.Resources
+ dst := &templateExt.Spec.Containers[0].Resources
+ mergeCPUNMemory(&src.Limits, &dst.Limits)
+ mergeCPUNMemory(&src.Requests, &dst.Requests)
+ }
+ if template.Env != nil {
+ intctrlutil.MergeList(&template.Env, &templateExt.Spec.Containers[0].Env,
+ func(item corev1.EnvVar) func(corev1.EnvVar) bool {
+ return func(env corev1.EnvVar) bool {
+ return env.Name == item.Name
+ }
+ })
+ }
+ }
+
+ if template.SchedulingPolicy != nil {
+ intctrlutil.MergeList(&template.SchedulingPolicy.Tolerations, &templateExt.Spec.Tolerations,
+ func(item corev1.Toleration) func(corev1.Toleration) bool {
+ return func(t corev1.Toleration) bool {
+ return reflect.DeepEqual(item, t)
+ }
+ })
+ intctrlutil.MergeList(&template.SchedulingPolicy.TopologySpreadConstraints, &templateExt.Spec.TopologySpreadConstraints,
+ func(item corev1.TopologySpreadConstraint) func(corev1.TopologySpreadConstraint) bool {
+ return func(t corev1.TopologySpreadConstraint) bool {
+ return reflect.DeepEqual(item, t)
+ }
+ })
+ templateExt.Spec.Affinity = scheduling.MergeAffinity(template.SchedulingPolicy.Affinity, templateExt.Spec.Affinity)
+ }
+
+ return templateExt
+}
+
+func mergeCPUNMemory(s, d *corev1.ResourceList) {
+ if s == nil || *s == nil || d == nil {
+ return
+ }
+ for _, k := range []corev1.ResourceName{corev1.ResourceCPU, corev1.ResourceMemory} {
+ if v, ok := (*s)[k]; ok {
+ if *d == nil {
+ *d = make(corev1.ResourceList)
+ }
+ (*d)[k] = v
+ }
+ }
+}
+
+func mergeMap[K comparable, V any](src, dst *map[K]V) {
+ if len(*src) == 0 {
+ return
+ }
+ if *dst == nil {
+ *dst = make(map[K]V)
+ }
+ for k, v := range *src {
+ (*dst)[k] = v
+ }
+}
diff --git a/pkg/controller/instanceset/instancetemplate/ordinal.go b/pkg/controller/instanceset/instancetemplate/ordinal.go
new file mode 100644
index 00000000000..5aa28cf96d4
--- /dev/null
+++ b/pkg/controller/instanceset/instancetemplate/ordinal.go
@@ -0,0 +1,74 @@
+/*
+Copyright (C) 2022-2025 ApeCloud Co., Ltd
+
+This file is part of KubeBlocks project
+
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU Affero General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU Affero General Public License for more details.
+
+You should have received a copy of the GNU Affero General Public License
+along with this program. If not, see .
+*/
+
+package instancetemplate
+
+import (
+ "fmt"
+ "slices"
+ "strconv"
+ "strings"
+
+ "k8s.io/apimachinery/pkg/util/sets"
+
+ kbappsv1 "github.com/apecloud/kubeblocks/apis/apps/v1"
+ workloads "github.com/apecloud/kubeblocks/apis/workloads/v1"
+)
+
+// ConvertOrdinalsToSet assumes ordinals are valid
+func ConvertOrdinalsToSet(ordinals kbappsv1.Ordinals) sets.Set[int32] {
+ ordinalSet := sets.New(ordinals.Discrete...)
+ for _, item := range ordinals.Ranges {
+ for ordinal := item.Start; ordinal <= item.End; ordinal++ {
+ ordinalSet.Insert(ordinal)
+ }
+ }
+ return ordinalSet
+}
+
+// ConvertOrdinalsToSortedList assumes ordinals are valid
+func ConvertOrdinalsToSortedList(ordinals kbappsv1.Ordinals) []int32 {
+ ordinalSet := ConvertOrdinalsToSet(ordinals)
+ sortedOrdinalList := ordinalSet.UnsortedList()
+ slices.Sort(sortedOrdinalList)
+ return sortedOrdinalList
+}
+
+func convertOrdinalSetToSortedList(ordinalSet sets.Set[int32]) []int32 {
+ sortedOrdinalList := ordinalSet.UnsortedList()
+ slices.Sort(sortedOrdinalList)
+ return sortedOrdinalList
+}
+
+func GetOrdinal(podName string) (int32, error) {
+ index := strings.LastIndex(podName, "-")
+ if index < 0 {
+ return -1, fmt.Errorf("failed to get ordinal from pod %v", podName)
+ }
+ ordinalStr := podName[index+1:]
+ ordinal, err := strconv.ParseInt(ordinalStr, 10, 32)
+ if err != nil {
+ return -1, err
+ }
+ return int32(ordinal), nil
+}
+
+func GetInstanceName(its *workloads.InstanceSet, ordinal int32) string {
+ return fmt.Sprintf("%v-%v", its.Name, ordinal)
+}
diff --git a/pkg/controller/instanceset/instancetemplate/suite_test.go b/pkg/controller/instanceset/instancetemplate/suite_test.go
new file mode 100644
index 00000000000..ef1a0a011c3
--- /dev/null
+++ b/pkg/controller/instanceset/instancetemplate/suite_test.go
@@ -0,0 +1,65 @@
+package instancetemplate
+
+import (
+ "testing"
+
+ "github.com/apecloud/kubeblocks/pkg/controller/builder"
+ . "github.com/onsi/ginkgo/v2"
+ . "github.com/onsi/gomega"
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/api/resource"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+const (
+ namespace = "foo"
+ name = "bar"
+)
+
+var (
+ pod = builder.NewPodBuilder("", "").
+ AddContainer(corev1.Container{
+ Name: "foo",
+ Image: "bar",
+ Ports: []corev1.ContainerPort{
+ {
+ Name: "my-svc",
+ Protocol: corev1.ProtocolTCP,
+ ContainerPort: 12345,
+ },
+ },
+ Resources: corev1.ResourceRequirements{
+ Limits: map[corev1.ResourceName]resource.Quantity{
+ corev1.ResourceCPU: resource.MustParse("500m"),
+ corev1.ResourceMemory: resource.MustParse("1Gi"),
+ },
+ Requests: map[corev1.ResourceName]resource.Quantity{
+ corev1.ResourceCPU: resource.MustParse("300m"),
+ corev1.ResourceMemory: resource.MustParse("512Mi"),
+ },
+ },
+ }).GetObject()
+ template = corev1.PodTemplateSpec{
+ ObjectMeta: pod.ObjectMeta,
+ Spec: pod.Spec,
+ }
+ volumeClaimTemplates = []corev1.PersistentVolumeClaim{
+ {
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "data",
+ },
+ Spec: corev1.PersistentVolumeClaimSpec{
+ Resources: corev1.VolumeResourceRequirements{
+ Requests: map[corev1.ResourceName]resource.Quantity{
+ corev1.ResourceStorage: resource.MustParse("2G"),
+ },
+ },
+ },
+ },
+ }
+)
+
+func TestInstanceTemplate(t *testing.T) {
+ RegisterFailHandler(Fail)
+ RunSpecs(t, "InstanceTemplate Suite")
+}
diff --git a/pkg/controller/instanceset/instancetemplate/template.go b/pkg/controller/instanceset/instancetemplate/template.go
new file mode 100644
index 00000000000..e1464682ae8
--- /dev/null
+++ b/pkg/controller/instanceset/instancetemplate/template.go
@@ -0,0 +1,266 @@
+/*
+Copyright (C) 2022-2025 ApeCloud Co., Ltd
+
+This file is part of KubeBlocks project
+
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU Affero General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU Affero General Public License for more details.
+
+You should have received a copy of the GNU Affero General Public License
+along with this program. If not, see .
+*/
+
+package instancetemplate
+
+import (
+ "fmt"
+ "slices"
+ "strings"
+
+ "github.com/klauspost/compress/zstd"
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/util/runtime"
+ "k8s.io/apimachinery/pkg/util/sets"
+ "k8s.io/klog/v2"
+ "k8s.io/utils/ptr"
+
+ workloads "github.com/apecloud/kubeblocks/apis/workloads/v1"
+ "github.com/apecloud/kubeblocks/pkg/controller/kubebuilderx"
+)
+
+var (
+ reader *zstd.Decoder
+ writer *zstd.Encoder
+)
+
+func init() {
+ var err error
+ reader, err = zstd.NewReader(nil)
+ runtime.Must(err)
+ writer, err = zstd.NewWriter(nil)
+ runtime.Must(err)
+}
+
+func BuildInstanceName2TemplateMap(itsExt *InstanceSetExt) (map[string]*InstanceTemplateExt, error) {
+ template2OrdinalSetMap, err := GenerateTemplateName2OrdinalMap(itsExt)
+ if err != nil {
+ return nil, err
+ }
+
+ allNameTemplateMap := make(map[string]*InstanceTemplateExt)
+ instanceTemplatesMap := itsExt.InstanceTemplates
+ for templateName, ordinalSet := range template2OrdinalSetMap {
+ tpl := instanceTemplatesMap[templateName]
+ tplExt := buildInstanceTemplateExt(tpl, itsExt.InstanceSet)
+ for ordinal := range ordinalSet {
+ instanceName := fmt.Sprintf("%v-%v", itsExt.InstanceSet.Name, ordinal)
+ allNameTemplateMap[instanceName] = tplExt
+ }
+ }
+
+ return allNameTemplateMap, nil
+}
+
+func BuildInstanceTemplateExts(itsExt *InstanceSetExt) ([]*InstanceTemplateExt, error) {
+ instanceTemplatesMap := itsExt.InstanceTemplates
+ templates := make([]*InstanceTemplateExt, 0, len(instanceTemplatesMap))
+ for templateName := range instanceTemplatesMap {
+ tpl := instanceTemplatesMap[templateName]
+ tplExt := buildInstanceTemplateExt(tpl, itsExt.InstanceSet)
+ templates = append(templates, tplExt)
+ }
+
+ return templates, nil
+}
+
+// GenerateTemplateName2OrdinalMap returns a map from template name to the set of ordinals assigned to that template.
+// It relies on the InstanceSet's status to generate the desired pod names.
+// The status may be stale, but the result should converge eventually.
+//
+// Template ordinals are assumed to be valid at this time.
+func GenerateTemplateName2OrdinalMap(itsExt *InstanceSetExt) (map[string]sets.Set[int32], error) {
+ allOrdinalSet := sets.New[int32]()
+ template2OrdinalSetMap := map[string]sets.Set[int32]{}
+ ordinalToTemplateMap := map[int32]string{}
+ instanceTemplatesList := make([]*workloads.InstanceTemplate, 0, len(itsExt.InstanceTemplates))
+ for _, instanceTemplate := range itsExt.InstanceTemplates {
+ instanceTemplatesList = append(instanceTemplatesList, instanceTemplate)
+ template2OrdinalSetMap[instanceTemplate.Name] = sets.New[int32]()
+ }
+ slices.SortFunc(instanceTemplatesList, func(a, b *workloads.InstanceTemplate) int {
+ return strings.Compare(a.Name, b.Name)
+ })
+
+ for templateName, ordinals := range itsExt.InstanceSet.Status.CurrentInstances {
+ template2OrdinalSetMap[templateName].Insert(ordinals...)
+ for _, ordinal := range ordinals {
+ allOrdinalSet.Insert(ordinal)
+ ordinalToTemplateMap[ordinal] = templateName
+ }
+ }
+
+ // 1. handle those who have ordinals specified
+ for _, instanceTemplate := range instanceTemplatesList {
+ currentOrdinalSet := template2OrdinalSetMap[instanceTemplate.Name]
+ desiredOrdinalSet := ConvertOrdinalsToSet(instanceTemplate.Ordinals)
+ if len(desiredOrdinalSet) == 0 {
+ continue
+ }
+ toDelete := currentOrdinalSet.Difference(desiredOrdinalSet)
+ toCreate := desiredOrdinalSet.Difference(currentOrdinalSet)
+ for _, ordinal := range toDelete.UnsortedList() {
+ allOrdinalSet.Delete(ordinal)
+ template2OrdinalSetMap[ordinalToTemplateMap[ordinal]].Delete(ordinal)
+ }
+ for _, ordinal := range toCreate.UnsortedList() {
+ if templateName, ok := ordinalToTemplateMap[ordinal]; ok {
+ // if the ordinal is already in the current instance, replace it with the new one
+ template2OrdinalSetMap[templateName].Delete(ordinal)
+ }
+ template2OrdinalSetMap[instanceTemplate.Name].Insert(ordinal)
+ allOrdinalSet.Insert(ordinal)
+ }
+ }
+
+ offlineOrdinals := sets.New[int32]()
+ for _, instance := range itsExt.InstanceSet.Spec.OfflineInstances {
+ ordinal, err := GetOrdinal(instance)
+ if err != nil {
+ return nil, err
+ }
+ offlineOrdinals.Insert(ordinal)
+ }
+ // 2. handle those who have decreased replicas
+ for _, instanceTemplate := range instanceTemplatesList {
+ currentOrdinals := template2OrdinalSetMap[instanceTemplate.Name]
+ if toOffline := currentOrdinals.Intersection(offlineOrdinals); toOffline.Len() > 0 {
+ for ordinal := range toOffline {
+ allOrdinalSet.Delete(ordinal)
+ template2OrdinalSetMap[instanceTemplate.Name].Delete(ordinal)
+ }
+ }
+ // replicas must be non-nil
+ if int(*instanceTemplate.Replicas) < len(currentOrdinals) {
+ // delete in the name set from high to low
+ l := convertOrdinalSetToSortedList(currentOrdinals)
+ for i := len(currentOrdinals) - 1; i >= int(*instanceTemplate.Replicas); i-- {
+ allOrdinalSet.Delete(l[i])
+ template2OrdinalSetMap[instanceTemplate.Name].Delete(l[i])
+ }
+ }
+ }
+
+ // 3. handle those who have increased replicas
+ var cur int32 = 0
+ for _, instanceTemplate := range instanceTemplatesList {
+ currentOrdinals := template2OrdinalSetMap[instanceTemplate.Name]
+ if int(*instanceTemplate.Replicas) > len(currentOrdinals) {
+ for i := len(currentOrdinals); i < int(*instanceTemplate.Replicas); i++ {
+ // find the next available ordinal
+ for {
+ if !allOrdinalSet.Has(cur) && !offlineOrdinals.Has(cur) {
+ allOrdinalSet.Insert(cur)
+ template2OrdinalSetMap[instanceTemplate.Name].Insert(cur)
+ break
+ }
+ cur++
+ }
+ }
+ }
+ }
+
+ return template2OrdinalSetMap, nil
+}
+
+func GenerateAllInstanceNames(itsExt *InstanceSetExt) ([]string, error) {
+ template2OrdinalSetMap, err := GenerateTemplateName2OrdinalMap(itsExt)
+ if err != nil {
+ return nil, err
+ }
+ allOrdinalSet := sets.New[int32]()
+ for _, ordinalSet := range template2OrdinalSetMap {
+ allOrdinalSet = allOrdinalSet.Union(ordinalSet)
+ }
+ instanceNames := make([]string, 0, len(allOrdinalSet))
+ allOrdinalList := convertOrdinalSetToSortedList(allOrdinalSet)
+ for _, ordinal := range allOrdinalList {
+ instanceNames = append(instanceNames, fmt.Sprintf("%v-%v", itsExt.InstanceSet.Name, ordinal))
+ }
+ return instanceNames, nil
+}
+
+func buildInstanceTemplatesMap(its *workloads.InstanceSet, instancesCompressed *corev1.ConfigMap) map[string]*workloads.InstanceTemplate {
+ rtn := make(map[string]*workloads.InstanceTemplate)
+ l := BuildInstanceTemplates(its, instancesCompressed)
+ for _, t := range l {
+ rtn[t.Name] = t
+ }
+ return rtn
+}
+
+// BuildInstanceTemplates builds a complete instance template list,
+// i.e. append a pseudo template (which equals to `.spec.template`)
+// to the end of the list, to fill up the replica count.
+// And also if there is any compressed template, add them too.
+//
+// It is not guaranteed that the returned list is sorted.
+// It is assumed that its spec is valid, e.g. replicasInTemplates < totalReplica.
+func BuildInstanceTemplates(its *workloads.InstanceSet, instancesCompressed *corev1.ConfigMap) []*workloads.InstanceTemplate {
+ var instanceTemplateList []*workloads.InstanceTemplate
+ var replicasInTemplates int32
+ instanceTemplates := getInstanceTemplates(its.Spec.Instances, instancesCompressed)
+ for i := range instanceTemplates {
+ instance := &instanceTemplates[i]
+ if instance.Replicas == nil {
+ instance.Replicas = ptr.To[int32](1)
+ }
+ instanceTemplateList = append(instanceTemplateList, instance)
+ replicasInTemplates += *instance.Replicas
+ }
+ totalReplicas := *its.Spec.Replicas
+ if replicasInTemplates < totalReplicas {
+ replicas := totalReplicas - replicasInTemplates
+ instance := &workloads.InstanceTemplate{Replicas: &replicas, Ordinals: its.Spec.DefaultTemplateOrdinals}
+ instanceTemplateList = append(instanceTemplateList, instance)
+ }
+
+ return instanceTemplateList
+}
+
+func BuildInstanceSetExt(its *workloads.InstanceSet, tree *kubebuilderx.ObjectTree) (*InstanceSetExt, error) {
+ instancesCompressed, err := findTemplateObject(its, tree)
+ if err != nil {
+ return nil, err
+ }
+
+ instanceTemplateMap := buildInstanceTemplatesMap(its, instancesCompressed)
+
+ return &InstanceSetExt{
+ InstanceSet: its,
+ InstanceTemplates: instanceTemplateMap,
+ }, nil
+}
+
+// PodsToCurrentInstances converts pods to the InstanceSet's .status.currentInstances
+func PodsToCurrentInstances(pods []*corev1.Pod, its *workloads.InstanceSet) (workloads.CurrentInstances, error) {
+ currentInstances := make(workloads.CurrentInstances)
+ for _, pod := range pods {
+ templateName, ok := pod.Labels[TemplateNameLabelKey]
+ if !ok {
+ return nil, fmt.Errorf("unknown pod %v", klog.KObj(pod))
+ }
+ ordinal, err := GetOrdinal(pod.Name)
+ if err != nil {
+ return nil, err
+ }
+ currentInstances[templateName] = append(currentInstances[templateName], ordinal)
+ }
+ return currentInstances, nil
+}
diff --git a/pkg/controller/instanceset/instancetemplate/template_test.go b/pkg/controller/instanceset/instancetemplate/template_test.go
new file mode 100644
index 00000000000..996ce286358
--- /dev/null
+++ b/pkg/controller/instanceset/instancetemplate/template_test.go
@@ -0,0 +1,281 @@
+package instancetemplate
+
+import (
+ . "github.com/onsi/ginkgo/v2"
+ . "github.com/onsi/gomega"
+
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/api/resource"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/util/sets"
+ "k8s.io/utils/ptr"
+
+ kbappsv1 "github.com/apecloud/kubeblocks/apis/apps/v1"
+ workloads "github.com/apecloud/kubeblocks/apis/workloads/v1"
+ "github.com/apecloud/kubeblocks/pkg/controller/builder"
+)
+
+var _ = Describe("Template tests", func() {
+ DescribeTable("generates instance ordinals",
+ func(its *workloads.InstanceSet, expected map[string]sets.Set[int32], expectError bool) {
+ Expect(validateOrdinals(its)).To(Succeed())
+ itsExt, err := BuildInstanceSetExt(its, nil)
+ Expect(err).NotTo(HaveOccurred())
+ ordinals, err := GenerateTemplateName2OrdinalMap(itsExt)
+ if expectError {
+ Expect(err).To(HaveOccurred())
+ } else {
+ Expect(err).NotTo(HaveOccurred())
+ Expect(ordinals).To(Equal(expected))
+ }
+ },
+
+ Entry("a new instanceset", &workloads.InstanceSet{
+ Spec: workloads.InstanceSetSpec{
+ Replicas: ptr.To[int32](5),
+ Instances: []workloads.InstanceTemplate{
+ {
+ Name: "t1",
+ Replicas: ptr.To[int32](2),
+ },
+ {
+ Name: "t2",
+ Replicas: ptr.To[int32](1),
+ },
+ },
+ },
+ }, map[string]sets.Set[int32]{
+ "": sets.New[int32](0, 1),
+ "t1": sets.New[int32](2, 3),
+ "t2": sets.New[int32](4),
+ }, false),
+
+ Entry("with running instances", &workloads.InstanceSet{
+ Spec: workloads.InstanceSetSpec{
+ Replicas: ptr.To[int32](5),
+ Instances: []workloads.InstanceTemplate{
+ {
+ Name: "t1",
+ Replicas: ptr.To[int32](2),
+ },
+ {
+ Name: "t2",
+ Replicas: ptr.To[int32](1),
+ },
+ },
+ },
+ Status: workloads.InstanceSetStatus{
+ CurrentInstances: workloads.CurrentInstances{
+ "": []int32{0, 1},
+ "t1": []int32{2, 4},
+ },
+ },
+ }, map[string]sets.Set[int32]{
+ "": sets.New[int32](0, 1),
+ "t1": sets.New[int32](2, 4),
+ "t2": sets.New[int32](3),
+ }, false),
+
+ Entry("deal with scale in", &workloads.InstanceSet{
+ Spec: workloads.InstanceSetSpec{
+ Replicas: ptr.To[int32](5),
+ Instances: []workloads.InstanceTemplate{
+ {
+ Name: "t1",
+ Replicas: ptr.To[int32](1),
+ },
+ {
+ Name: "t2",
+ Replicas: ptr.To[int32](1),
+ },
+ },
+ },
+ Status: workloads.InstanceSetStatus{
+ CurrentInstances: workloads.CurrentInstances{
+ "": []int32{0, 1},
+ "t1": []int32{2, 3, 4},
+ },
+ },
+ }, map[string]sets.Set[int32]{
+ "": sets.New[int32](0, 1, 3),
+ "t1": sets.New[int32](2),
+ "t2": sets.New[int32](4),
+ }, false),
+
+ Entry("with ordinal spec", &workloads.InstanceSet{
+ Spec: workloads.InstanceSetSpec{
+ Replicas: ptr.To[int32](5),
+ Instances: []workloads.InstanceTemplate{
+ {
+ Name: "t1",
+ Replicas: ptr.To[int32](2),
+ Ordinals: kbappsv1.Ordinals{
+ Discrete: []int32{10, 11},
+ },
+ },
+ {
+ Name: "t2",
+ Replicas: ptr.To[int32](3),
+ Ordinals: kbappsv1.Ordinals{
+ Ranges: []kbappsv1.Range{
+ {
+ Start: 2,
+ End: 3,
+ },
+ },
+ Discrete: []int32{0},
+ },
+ },
+ },
+ },
+ }, map[string]sets.Set[int32]{
+ "t1": sets.New[int32](10, 11),
+ "t2": sets.New[int32](0, 2, 3),
+ }, false),
+
+ Entry("with ordinal spec replacing a normal one", &workloads.InstanceSet{
+ Spec: workloads.InstanceSetSpec{
+ Replicas: ptr.To[int32](4),
+ Instances: []workloads.InstanceTemplate{
+ {
+ Name: "t1",
+ Replicas: ptr.To[int32](2),
+ Ordinals: kbappsv1.Ordinals{
+ Ranges: []kbappsv1.Range{
+ {
+ Start: 2,
+ End: 3,
+ },
+ },
+ },
+ },
+ },
+ },
+ Status: workloads.InstanceSetStatus{
+ CurrentInstances: workloads.CurrentInstances{
+ "": []int32{0, 1, 2},
+ },
+ },
+ }, map[string]sets.Set[int32]{
+ "t1": sets.New[int32](2, 3),
+ "": sets.New[int32](0, 1),
+ }, false),
+
+ Entry("with offline instances", &workloads.InstanceSet{
+ Spec: workloads.InstanceSetSpec{
+ Replicas: ptr.To[int32](4),
+ Instances: []workloads.InstanceTemplate{
+ {
+ Name: "t1",
+ Replicas: ptr.To[int32](2),
+ },
+ },
+ OfflineInstances: []string{"foo-2"},
+ },
+ Status: workloads.InstanceSetStatus{
+ CurrentInstances: workloads.CurrentInstances{
+ "": []int32{0, 1, 2},
+ "t1": []int32{3},
+ },
+ },
+ }, map[string]sets.Set[int32]{
+ "": sets.New[int32](0, 1),
+ "t1": sets.New[int32](3, 4),
+ }, false),
+ )
+
+ It("generates instance names", func() {
+ its := &workloads.InstanceSet{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "foo",
+ Namespace: "default",
+ },
+ Spec: workloads.InstanceSetSpec{
+ Replicas: ptr.To[int32](5),
+ Instances: []workloads.InstanceTemplate{
+ {
+ Name: "t1",
+ Replicas: ptr.To[int32](2),
+ },
+ {
+ Name: "t2",
+ Replicas: ptr.To[int32](1),
+ },
+ },
+ },
+ }
+
+ itsExt, err := BuildInstanceSetExt(its, nil)
+ Expect(err).NotTo(HaveOccurred())
+ names, err := GenerateAllInstanceNames(itsExt)
+ Expect(err).NotTo(HaveOccurred())
+ Expect(names).To(Equal([]string{"foo-0", "foo-1", "foo-2", "foo-3", "foo-4"}))
+ })
+
+ Context("buildInstanceName2TemplateMap", func() {
+ var its *workloads.InstanceSet
+ BeforeEach(func() {
+ its = builder.NewInstanceSetBuilder(namespace, name).
+ SetReplicas(3).
+ SetTemplate(template).
+ SetVolumeClaimTemplates(volumeClaimTemplates...).
+ GetObject()
+ })
+
+ It("build an its with default template only", func() {
+ itsExt, err := BuildInstanceSetExt(its, nil)
+ Expect(err).Should(BeNil())
+ nameTemplate, err := BuildInstanceName2TemplateMap(itsExt)
+ Expect(err).Should(BeNil())
+ Expect(nameTemplate).Should(HaveLen(3))
+ name0 := its.Name + "-0"
+ Expect(nameTemplate).Should(HaveKey(name0))
+ Expect(nameTemplate).Should(HaveKey(its.Name + "-1"))
+ Expect(nameTemplate).Should(HaveKey(its.Name + "-2"))
+ nameTemplate[name0].PodTemplateSpec.Spec.Volumes = nil
+ defaultTemplate := its.Spec.Template.DeepCopy()
+ Expect(nameTemplate[name0].PodTemplateSpec.Spec).Should(Equal(defaultTemplate.Spec))
+ })
+
+ It("build an its with one instance template override", func() {
+ nameOverride := "name-override"
+ annotationOverride := map[string]string{
+ "foo": "bar",
+ }
+ labelOverride := map[string]string{
+ "foo": "bar",
+ }
+ resources := corev1.ResourceRequirements{
+ Limits: map[corev1.ResourceName]resource.Quantity{
+ corev1.ResourceCPU: resource.MustParse("600m"),
+ },
+ }
+ instance := workloads.InstanceTemplate{
+ Name: nameOverride,
+ Annotations: annotationOverride,
+ Labels: labelOverride,
+ Resources: &resources,
+ }
+ its.Spec.Instances = append(its.Spec.Instances, instance)
+ itsExt, err := BuildInstanceSetExt(its, nil)
+ Expect(err).Should(BeNil())
+ nameTemplate, err := BuildInstanceName2TemplateMap(itsExt)
+ Expect(err).Should(BeNil())
+ Expect(nameTemplate).Should(HaveLen(3))
+ name0 := its.Name + "-0"
+ name1 := its.Name + "-1"
+ nameOverridePodName := its.Name + "-2"
+ Expect(nameTemplate).Should(HaveKey(name0))
+ Expect(nameTemplate).Should(HaveKey(name1))
+ Expect(nameTemplate).Should(HaveKey(nameOverridePodName))
+ expectedTemplate := its.Spec.Template.DeepCopy()
+ Expect(nameTemplate[name0].PodTemplateSpec.Spec).Should(Equal(expectedTemplate.Spec))
+ Expect(nameTemplate[name1].PodTemplateSpec.Spec).Should(Equal(expectedTemplate.Spec))
+ Expect(nameTemplate[nameOverridePodName].PodTemplateSpec.Spec).ShouldNot(Equal(expectedTemplate.Spec))
+ Expect(nameTemplate[nameOverridePodName].PodTemplateSpec.Annotations).Should(Equal(annotationOverride))
+ Expect(nameTemplate[nameOverridePodName].PodTemplateSpec.Labels).Should(Equal(labelOverride))
+ Expect(nameTemplate[nameOverridePodName].PodTemplateSpec.Spec.Containers[0].Resources.Limits[corev1.ResourceCPU]).Should(Equal(resources.Limits[corev1.ResourceCPU]))
+ Expect(nameTemplate[nameOverridePodName].PodTemplateSpec.Spec.Containers[0].Resources.Requests[corev1.ResourceCPU]).Should(Equal(its.Spec.Template.Spec.Containers[0].Resources.Requests[corev1.ResourceCPU]))
+ })
+ })
+})
diff --git a/pkg/controller/instanceset/instancetemplate/types.go b/pkg/controller/instanceset/instancetemplate/types.go
new file mode 100644
index 00000000000..430491ec6e8
--- /dev/null
+++ b/pkg/controller/instanceset/instancetemplate/types.go
@@ -0,0 +1,47 @@
+/*
+Copyright (C) 2022-2025 ApeCloud Co., Ltd
+
+This file is part of KubeBlocks project
+
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU Affero General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU Affero General Public License for more details.
+
+You should have received a copy of the GNU Affero General Public License
+along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+package instancetemplate
+
+import (
+ corev1 "k8s.io/api/core/v1"
+
+ workloads "github.com/apecloud/kubeblocks/apis/workloads/v1"
+)
+
+const (
+ // used to specify the configmap which stores the compressed instance template
+ TemplateRefAnnotationKey = "kubeblocks.io/template-ref"
+ TemplateRefDataKey = "instances"
+
+ TemplateNameLabelKey = "workloads.kubeblocks.io/template-name"
+)
+
+type InstanceSetExt struct {
+ InstanceSet *workloads.InstanceSet
+ InstanceTemplates map[string]*workloads.InstanceTemplate // key is template name
+}
+
+// InstanceTemplateExt merges the default podSpec with overrides in the template
+type InstanceTemplateExt struct {
+ Name string
+ Replicas int32
+ corev1.PodTemplateSpec
+ VolumeClaimTemplates []corev1.PersistentVolumeClaim
+}
diff --git a/pkg/controller/instanceset/instancetemplate/validation.go b/pkg/controller/instanceset/instancetemplate/validation.go
new file mode 100644
index 00000000000..e34edcffce8
--- /dev/null
+++ b/pkg/controller/instanceset/instancetemplate/validation.go
@@ -0,0 +1,132 @@
+/*
+Copyright (C) 2022-2025 ApeCloud Co., Ltd
+
+This file is part of KubeBlocks project
+
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU Affero General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU Affero General Public License for more details.
+
+You should have received a copy of the GNU Affero General Public License
+along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+package instancetemplate
+
+import (
+ "fmt"
+
+ "k8s.io/apimachinery/pkg/util/sets"
+
+ workloads "github.com/apecloud/kubeblocks/apis/workloads/v1"
+ "github.com/apecloud/kubeblocks/pkg/controller/kubebuilderx"
+)
+
+// validateOrdinals validates the ordinals declared by all instance templates.
+// TODO: take compressed templates into consideration
+func validateOrdinals(its *workloads.InstanceSet) error {
+	ordinalSet := sets.New[int32]()
+	offlineOrdinals := sets.New[int32]()
+	for _, name := range its.Spec.OfflineInstances {
+		ordinal, err := GetOrdinal(name)
+		if err != nil {
+			return err
+		}
+		if offlineOrdinals.Has(ordinal) {
+			return fmt.Errorf("duplicate offlineInstance: %v", name)
+		}
+		offlineOrdinals.Insert(ordinal)
+	}
+	tpls := make([]workloads.InstanceTemplate, 0, len(its.Spec.Instances)+1)
+	tpls = append(tpls, workloads.InstanceTemplate{Ordinals: its.Spec.DefaultTemplateOrdinals})
+	tpls = append(tpls, its.Spec.Instances...)
+	for _, tmpl := range tpls {
+		ordinals := tmpl.Ordinals
+		for _, item := range ordinals.Discrete {
+			if item < 0 {
+				return fmt.Errorf("ordinal(%v) must >= 0", item)
+			}
+			if ordinalSet.Has(item) {
+				return fmt.Errorf("duplicate ordinal(%v)", item)
+			}
+			if offlineOrdinals.Has(item) {
+				return fmt.Errorf("ordinal(%v) exists in offlineInstances", item)
+			}
+			ordinalSet.Insert(item)
+		}
+
+		for _, item := range ordinals.Ranges {
+			start := item.Start
+			end := item.End
+
+			if start < 0 {
+				return fmt.Errorf("ordinal's start(%v) must >= 0", start)
+			}
+
+			if start > end {
+				return fmt.Errorf("range's end(%v) must >= start(%v)", end, start)
+			}
+
+			for ordinal := start; ordinal <= end; ordinal++ {
+				if ordinalSet.Has(ordinal) {
+					return fmt.Errorf("duplicate ordinal(%v)", ordinal)
+				}
+				if offlineOrdinals.Has(ordinal) {
+					return fmt.Errorf("ordinal(%v) exists in offlineInstances", ordinal)
+				}
+				ordinalSet.Insert(ordinal)
+			}
+		}
+	}
+	return nil
+}
+
+// ValidateInstanceTemplates validates the instance-template related fields of
+// an InstanceSet: ordinal declarations, template name uniqueness, replica
+// counts, and that the full set of instance names can be generated.
+func ValidateInstanceTemplates(its *workloads.InstanceSet, tree *kubebuilderx.ObjectTree) error {
+	if err := validateOrdinals(its); err != nil {
+		return fmt.Errorf("failed to validate ordinals: %w", err)
+	}
+
+	instancesCompressed, err := findTemplateObject(its, tree)
+	if err != nil {
+		return fmt.Errorf("failed to find compressed template: %w", err)
+	}
+
+	instanceTemplates := getInstanceTemplates(its.Spec.Instances, instancesCompressed)
+	templateNames := sets.New[string]()
+	replicasInTemplates := int32(0)
+	for _, template := range instanceTemplates {
+		replicas := int32(1)
+		if template.Replicas != nil {
+			replicas = *template.Replicas
+		}
+		replicasInTemplates += replicas
+		if templateNames.Has(template.Name) {
+			return fmt.Errorf("duplicate instance template name: %s", template.Name)
+		}
+		templateNames.Insert(template.Name)
+	}
+	// the sum of spec.instances[*].replicas must not exceed spec.replicas
+	if replicasInTemplates > *its.Spec.Replicas {
+		return fmt.Errorf("total replicas in instances(%d) should not greater than replicas in spec(%d)", replicasInTemplates, *its.Spec.Replicas)
+	}
+
+	itsExt, err := BuildInstanceSetExt(its, tree)
+	if err != nil {
+		return fmt.Errorf("failed to build instance set ext: %w", err)
+	}
+
+	// try to generate all pod names to surface conflicts or shortages early
+	if _, err := GenerateAllInstanceNames(itsExt); err != nil {
+		return err
+	}
+	return nil
+}
diff --git a/pkg/controller/instanceset/instancetemplate/validation_test.go b/pkg/controller/instanceset/instancetemplate/validation_test.go
new file mode 100644
index 00000000000..9f68935b12e
--- /dev/null
+++ b/pkg/controller/instanceset/instancetemplate/validation_test.go
@@ -0,0 +1,185 @@
+/*
+Copyright (C) 2022-2025 ApeCloud Co., Ltd
+
+This file is part of KubeBlocks project
+
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU Affero General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU Affero General Public License for more details.
+
+You should have received a copy of the GNU Affero General Public License
+along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+package instancetemplate
+
+import (
+ . "github.com/onsi/ginkgo/v2"
+ . "github.com/onsi/gomega"
+ "k8s.io/utils/ptr"
+
+ kbappsv1 "github.com/apecloud/kubeblocks/apis/apps/v1"
+ workloads "github.com/apecloud/kubeblocks/apis/workloads/v1"
+ "github.com/apecloud/kubeblocks/pkg/controller/kubebuilderx"
+)
+
+var _ = Describe("Validation", func() {
+ Describe("validateOrdinals", func() {
+ It("should validate ordinals successfully", func() {
+ its := &workloads.InstanceSet{
+ Spec: workloads.InstanceSetSpec{
+ Instances: []workloads.InstanceTemplate{
+ {
+ Name: "template1",
+ Ordinals: kbappsv1.Ordinals{
+ Discrete: []int32{0, 1, 2},
+ },
+ },
+ },
+ },
+ }
+ err := validateOrdinals(its)
+ Expect(err).NotTo(HaveOccurred())
+ })
+
+ It("should fail validation for negative ordinals", func() {
+ its := &workloads.InstanceSet{
+ Spec: workloads.InstanceSetSpec{
+ Instances: []workloads.InstanceTemplate{
+ {
+ Name: "template1",
+ Ordinals: kbappsv1.Ordinals{
+ Discrete: []int32{-1, 0, 1},
+ },
+ },
+ },
+ },
+ }
+ err := validateOrdinals(its)
+ Expect(err).To(HaveOccurred())
+ Expect(err.Error()).To(ContainSubstring("ordinal(-1) must >= 0"))
+ })
+
+ It("should fail validation for duplicate ordinals", func() {
+ its := &workloads.InstanceSet{
+ Spec: workloads.InstanceSetSpec{
+ DefaultTemplateOrdinals: kbappsv1.Ordinals{
+ Discrete: []int32{1},
+ },
+ Instances: []workloads.InstanceTemplate{
+ {
+ Name: "template1",
+ Ordinals: kbappsv1.Ordinals{
+ Discrete: []int32{0, 1},
+ },
+ },
+ },
+ },
+ }
+ err := validateOrdinals(its)
+ Expect(err).To(HaveOccurred())
+ Expect(err.Error()).To(ContainSubstring("duplicate ordinal(1)"))
+ })
+
+ It("should take offlineInstances into consideration", func() {
+ its := &workloads.InstanceSet{
+ Spec: workloads.InstanceSetSpec{
+ DefaultTemplateOrdinals: kbappsv1.Ordinals{
+ Discrete: []int32{2},
+ },
+ OfflineInstances: []string{"instance-1"},
+ Instances: []workloads.InstanceTemplate{
+ {
+ Name: "template1",
+ Ordinals: kbappsv1.Ordinals{
+ Discrete: []int32{0, 1},
+ },
+ },
+ },
+ },
+ }
+ err := validateOrdinals(its)
+ Expect(err).To(HaveOccurred())
+ Expect(err.Error()).To(ContainSubstring("ordinal(1) exists in offlineInstances"))
+ })
+ })
+
+ Describe("ValidateInstanceTemplates", func() {
+ It("should validate instance templates successfully", func() {
+ its := &workloads.InstanceSet{
+ Spec: workloads.InstanceSetSpec{
+ Replicas: ptr.To[int32](3),
+ Instances: []workloads.InstanceTemplate{
+ {
+ Name: "template1",
+ Replicas: ptr.To[int32](2),
+ Ordinals: kbappsv1.Ordinals{
+ Discrete: []int32{0, 1},
+ },
+ },
+ {
+ Name: "template2",
+ Replicas: ptr.To[int32](1),
+ Ordinals: kbappsv1.Ordinals{
+ Discrete: []int32{2},
+ },
+ },
+ },
+ },
+ }
+ tree := &kubebuilderx.ObjectTree{}
+ err := ValidateInstanceTemplates(its, tree)
+ Expect(err).NotTo(HaveOccurred())
+ })
+
+ It("should fail validation for duplicate template names", func() {
+ its := &workloads.InstanceSet{
+ Spec: workloads.InstanceSetSpec{
+ Replicas: ptr.To[int32](3),
+ Instances: []workloads.InstanceTemplate{
+ {
+ Name: "template1",
+ Replicas: ptr.To[int32](2),
+ },
+ {
+ Name: "template1",
+ Replicas: ptr.To[int32](1),
+ },
+ },
+ },
+ }
+ tree := &kubebuilderx.ObjectTree{}
+ err := ValidateInstanceTemplates(its, tree)
+ Expect(err).To(HaveOccurred())
+ Expect(err.Error()).To(ContainSubstring("duplicate instance template name: template1"))
+ })
+
+ It("should fail validation when total replicas exceed spec.replicas", func() {
+ its := &workloads.InstanceSet{
+ Spec: workloads.InstanceSetSpec{
+ Replicas: ptr.To[int32](3),
+ Instances: []workloads.InstanceTemplate{
+ {
+ Name: "template1",
+ Replicas: ptr.To[int32](2),
+ },
+ {
+ Name: "template2",
+ Replicas: ptr.To[int32](2),
+ },
+ },
+ },
+ }
+ tree := &kubebuilderx.ObjectTree{}
+ err := ValidateInstanceTemplates(its, tree)
+ Expect(err).To(HaveOccurred())
+ Expect(err.Error()).To(ContainSubstring("total replicas in instances(4) should not greater than replicas in spec(3)"))
+ })
+ })
+})
diff --git a/pkg/controller/instanceset/reconciler_instance_alignment.go b/pkg/controller/instanceset/reconciler_instance_alignment.go
index 81955186a18..5f25fe0c46e 100644
--- a/pkg/controller/instanceset/reconciler_instance_alignment.go
+++ b/pkg/controller/instanceset/reconciler_instance_alignment.go
@@ -27,6 +27,7 @@ import (
kbappsv1 "github.com/apecloud/kubeblocks/apis/apps/v1"
workloads "github.com/apecloud/kubeblocks/apis/workloads/v1"
"github.com/apecloud/kubeblocks/pkg/constant"
+ "github.com/apecloud/kubeblocks/pkg/controller/instanceset/instancetemplate"
"github.com/apecloud/kubeblocks/pkg/controller/kubebuilderx"
"github.com/apecloud/kubeblocks/pkg/controller/model"
)
@@ -50,7 +51,8 @@ func (r *instanceAlignmentReconciler) PreCondition(tree *kubebuilderx.ObjectTree
return kubebuilderx.ConditionUnsatisfied
}
its, _ := tree.GetRoot().(*workloads.InstanceSet)
- if err := validateSpec(its, tree); err != nil {
+ if err := instancetemplate.ValidateInstanceTemplates(its, tree); err != nil {
+ tree.EventRecorder.Event(its, corev1.EventTypeWarning, EventReasonInvalidSpec, err.Error())
return kubebuilderx.ConditionUnsatisfiedWithError(err)
}
return kubebuilderx.ConditionSatisfied
@@ -58,13 +60,13 @@ func (r *instanceAlignmentReconciler) PreCondition(tree *kubebuilderx.ObjectTree
func (r *instanceAlignmentReconciler) Reconcile(tree *kubebuilderx.ObjectTree) (kubebuilderx.Result, error) {
its, _ := tree.GetRoot().(*workloads.InstanceSet)
- itsExt, err := buildInstanceSetExt(its, tree)
+ itsExt, err := instancetemplate.BuildInstanceSetExt(its, tree)
if err != nil {
return kubebuilderx.Continue, err
}
// 1. build desired name to template map
- nameToTemplateMap, err := buildInstanceName2TemplateMap(itsExt)
+ nameToTemplateMap, err := instancetemplate.BuildInstanceName2TemplateMap(itsExt)
if err != nil {
return kubebuilderx.Continue, err
}
diff --git a/pkg/controller/instanceset/reconciler_revision_update.go b/pkg/controller/instanceset/reconciler_revision_update.go
index 74970df75ed..bb495b93c9b 100644
--- a/pkg/controller/instanceset/reconciler_revision_update.go
+++ b/pkg/controller/instanceset/reconciler_revision_update.go
@@ -20,10 +20,13 @@ along with this program. If not, see .
package instanceset
import (
+ "fmt"
+
corev1 "k8s.io/api/core/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
workloads "github.com/apecloud/kubeblocks/apis/workloads/v1"
+ "github.com/apecloud/kubeblocks/pkg/controller/instanceset/instancetemplate"
"github.com/apecloud/kubeblocks/pkg/controller/kubebuilderx"
"github.com/apecloud/kubeblocks/pkg/controller/model"
)
@@ -45,7 +48,8 @@ func (r *revisionUpdateReconciler) PreCondition(tree *kubebuilderx.ObjectTree) *
return kubebuilderx.ConditionUnsatisfied
}
its, _ := tree.GetRoot().(*workloads.InstanceSet)
- if err := validateSpec(its, tree); err != nil {
+ if err := instancetemplate.ValidateInstanceTemplates(its, tree); err != nil {
+ tree.EventRecorder.Event(its, corev1.EventTypeWarning, EventReasonInvalidSpec, err.Error())
return kubebuilderx.ConditionUnsatisfiedWithError(err)
}
return kubebuilderx.ConditionSatisfied
@@ -53,40 +57,35 @@ func (r *revisionUpdateReconciler) PreCondition(tree *kubebuilderx.ObjectTree) *
func (r *revisionUpdateReconciler) Reconcile(tree *kubebuilderx.ObjectTree) (kubebuilderx.Result, error) {
its, _ := tree.GetRoot().(*workloads.InstanceSet)
- itsExt, err := buildInstanceSetExt(its, tree)
+ itsExt, err := instancetemplate.BuildInstanceSetExt(its, tree)
if err != nil {
return kubebuilderx.Continue, err
}
// 1. build all templates by applying instance template overrides to default pod template
- instanceTemplateList := buildInstanceTemplateExts(itsExt)
+ instanceTemplateList, err := instancetemplate.BuildInstanceTemplateExts(itsExt)
+ if err != nil {
+ return kubebuilderx.Continue, err
+ }
- // build instance revision list from instance templates
+ template2OrdinalSetMap, err := instancetemplate.GenerateTemplateName2OrdinalMap(itsExt)
+ if err != nil {
+ return kubebuilderx.Continue, err
+ }
+
+ // 2. build instance revision list from instance templates
var instanceRevisionList []instanceRevision
for _, template := range instanceTemplateList {
- ordinalList, err := GetOrdinalListByTemplateName(itsExt.its, template.Name)
- if err != nil {
- return kubebuilderx.Continue, err
- }
- instanceNames, err := GenerateInstanceNamesFromTemplate(its.Name, template.Name, template.Replicas, itsExt.its.Spec.OfflineInstances, ordinalList)
- if err != nil {
- return kubebuilderx.Continue, err
- }
+ ordinalSet := template2OrdinalSetMap[template.Name]
revision, err := BuildInstanceTemplateRevision(&template.PodTemplateSpec, its)
if err != nil {
return kubebuilderx.Continue, err
}
- for _, name := range instanceNames {
- instanceRevisionList = append(instanceRevisionList, instanceRevision{name: name, revision: revision})
+ for ordinal := range ordinalSet {
+ instanceName := fmt.Sprintf("%v-%v", its.Name, ordinal)
+ instanceRevisionList = append(instanceRevisionList, instanceRevision{name: instanceName, revision: revision})
}
}
- // validate duplicate pod names
- getNameFunc := func(r instanceRevision) string {
- return r.name
- }
- if err := ValidateDupInstanceNames(instanceRevisionList, getNameFunc); err != nil {
- return kubebuilderx.Continue, err
- }
updatedRevisions := make(map[string]string, len(instanceRevisionList))
for _, r := range instanceRevisionList {
diff --git a/pkg/controller/instanceset/reconciler_status.go b/pkg/controller/instanceset/reconciler_status.go
index 6c4810efa0f..ccc8261e709 100644
--- a/pkg/controller/instanceset/reconciler_status.go
+++ b/pkg/controller/instanceset/reconciler_status.go
@@ -31,6 +31,7 @@ import (
"k8s.io/apimachinery/pkg/util/sets"
workloads "github.com/apecloud/kubeblocks/apis/workloads/v1"
+ "github.com/apecloud/kubeblocks/pkg/controller/instanceset/instancetemplate"
"github.com/apecloud/kubeblocks/pkg/controller/kubebuilderx"
"github.com/apecloud/kubeblocks/pkg/controller/model"
intctrlutil "github.com/apecloud/kubeblocks/pkg/controllerutil"
@@ -192,6 +193,13 @@ func (r *statusReconciler) Reconcile(tree *kubebuilderx.ObjectTree) (kubebuilder
// 5. set instance status
setInstanceStatus(its, podList)
+ currentInstances, err := instancetemplate.PodsToCurrentInstances(podList, its)
+ if err != nil {
+ // FIXME: log error and continue?
+ return kubebuilderx.Continue, err
+ }
+ its.Status.CurrentInstances = currentInstances
+
if its.Spec.MinReadySeconds > 0 && availableReplicas != readyReplicas {
return kubebuilderx.RetryAfter(time.Second), nil
}
diff --git a/pkg/controller/instanceset/reconciler_status_test.go b/pkg/controller/instanceset/reconciler_status_test.go
index 4799f5bf638..c4282222db5 100644
--- a/pkg/controller/instanceset/reconciler_status_test.go
+++ b/pkg/controller/instanceset/reconciler_status_test.go
@@ -252,7 +252,7 @@ var _ = Describe("status reconciler test", func() {
}
Expect(its.Status.CurrentRevisions).Should(Equal(its.Status.UpdateRevisions))
Expect(its.Status.Conditions).Should(HaveLen(3))
- failureNames := []string{"bar-0", "bar-1", "bar-2", "bar-3", "bar-foo-0", "bar-foo-1", "bar-hello-0"}
+ failureNames := []string{"bar-0", "bar-1", "bar-2", "bar-3", "bar-4", "bar-5", "bar-6"}
message, err := json.Marshal(failureNames)
Expect(err).Should(BeNil())
Expect(its.Status.Conditions[0].Type).Should(BeEquivalentTo(workloads.InstanceReady))
diff --git a/pkg/controller/instanceset/reconciler_update.go b/pkg/controller/instanceset/reconciler_update.go
index 48ed676e67d..97252bdf69d 100644
--- a/pkg/controller/instanceset/reconciler_update.go
+++ b/pkg/controller/instanceset/reconciler_update.go
@@ -34,6 +34,7 @@ import (
kbappsv1 "github.com/apecloud/kubeblocks/apis/apps/v1"
workloads "github.com/apecloud/kubeblocks/apis/workloads/v1"
"github.com/apecloud/kubeblocks/pkg/constant"
+ "github.com/apecloud/kubeblocks/pkg/controller/instanceset/instancetemplate"
"github.com/apecloud/kubeblocks/pkg/controller/kubebuilderx"
"github.com/apecloud/kubeblocks/pkg/controller/lifecycle"
"github.com/apecloud/kubeblocks/pkg/controller/model"
@@ -58,7 +59,8 @@ func (r *updateReconciler) PreCondition(tree *kubebuilderx.ObjectTree) *kubebuil
return kubebuilderx.ConditionUnsatisfied
}
its, _ := tree.GetRoot().(*workloads.InstanceSet)
- if err := validateSpec(its, tree); err != nil {
+ if err := instancetemplate.ValidateInstanceTemplates(its, tree); err != nil {
+ tree.EventRecorder.Event(its, corev1.EventTypeWarning, EventReasonInvalidSpec, err.Error())
return kubebuilderx.ConditionUnsatisfiedWithError(err)
}
return kubebuilderx.ConditionSatisfied
@@ -66,13 +68,13 @@ func (r *updateReconciler) PreCondition(tree *kubebuilderx.ObjectTree) *kubebuil
func (r *updateReconciler) Reconcile(tree *kubebuilderx.ObjectTree) (kubebuilderx.Result, error) {
its, _ := tree.GetRoot().(*workloads.InstanceSet)
- itsExt, err := buildInstanceSetExt(its, tree)
+ itsExt, err := instancetemplate.BuildInstanceSetExt(its, tree)
if err != nil {
return kubebuilderx.Continue, err
}
// 1. build desired name to template map
- nameToTemplateMap, err := buildInstanceName2TemplateMap(itsExt)
+ nameToTemplateMap, err := instancetemplate.BuildInstanceName2TemplateMap(itsExt)
if err != nil {
return kubebuilderx.Continue, err
}
diff --git a/pkg/controller/scheduling/scheduling_utils.go b/pkg/controller/scheduling/scheduling_utils.go
index e11a4f1077e..54753e00d7f 100644
--- a/pkg/controller/scheduling/scheduling_utils.go
+++ b/pkg/controller/scheduling/scheduling_utils.go
@@ -23,9 +23,11 @@ import (
"encoding/json"
corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/api/equality"
appsv1 "github.com/apecloud/kubeblocks/apis/apps/v1"
"github.com/apecloud/kubeblocks/pkg/constant"
+ intctrlutil "github.com/apecloud/kubeblocks/pkg/controllerutil"
viper "github.com/apecloud/kubeblocks/pkg/viperx"
)
@@ -47,7 +49,7 @@ func buildSchedulingPolicy(cluster *appsv1.Cluster, compSpec *appsv1.ClusterComp
if err != nil {
return err
}
- schedulingPolicy.Affinity = mergeAffinity(schedulingPolicy.Affinity, affinity)
+ schedulingPolicy.Affinity = MergeAffinity(schedulingPolicy.Affinity, affinity)
return nil
}
@@ -98,68 +100,57 @@ func buildClusterWideTolerations() ([]corev1.Toleration, error) {
return tolerations, nil
}
-// mergeAffinity merges affinity from src to dest
-func mergeAffinity(dest, src *corev1.Affinity) *corev1.Affinity {
+func makeCmp[E any]() func(E) func(E) bool {
+ return func(a E) func(E) bool {
+ return func(b E) bool {
+ return equality.Semantic.DeepEqual(a, b)
+ }
+ }
+}
+
+// MergeAffinity merges src to dst, return value is deepcopied
+func MergeAffinity(src, dst *corev1.Affinity) *corev1.Affinity {
if src == nil {
- return dest
+ return dst.DeepCopy()
}
- if dest == nil {
+ if dst == nil {
return src.DeepCopy()
}
- rst := dest.DeepCopy()
- skipPodAffinity := src.PodAffinity == nil
- skipPodAntiAffinity := src.PodAntiAffinity == nil
- skipNodeAffinity := src.NodeAffinity == nil
+ rtn := dst.DeepCopy()
- if rst.PodAffinity == nil && !skipPodAffinity {
- rst.PodAffinity = src.PodAffinity
- skipPodAffinity = true
- }
- if rst.PodAntiAffinity == nil && !skipPodAntiAffinity {
- rst.PodAntiAffinity = src.PodAntiAffinity
- skipPodAntiAffinity = true
- }
- if rst.NodeAffinity == nil && !skipNodeAffinity {
- rst.NodeAffinity = src.NodeAffinity
- skipNodeAffinity = true
+ // Merge PodAffinity
+ if src.PodAffinity != nil {
+ if rtn.PodAffinity == nil {
+ rtn.PodAffinity = &corev1.PodAffinity{}
+ }
+ intctrlutil.MergeList(&src.PodAffinity.RequiredDuringSchedulingIgnoredDuringExecution, &rtn.PodAffinity.RequiredDuringSchedulingIgnoredDuringExecution, makeCmp[corev1.PodAffinityTerm]())
+ intctrlutil.MergeList(&src.PodAffinity.PreferredDuringSchedulingIgnoredDuringExecution, &rtn.PodAffinity.PreferredDuringSchedulingIgnoredDuringExecution, makeCmp[corev1.WeightedPodAffinityTerm]())
}
- // if not skip, both are not nil
- if !skipPodAffinity {
- rst.PodAffinity.PreferredDuringSchedulingIgnoredDuringExecution = append(
- rst.PodAffinity.PreferredDuringSchedulingIgnoredDuringExecution,
- src.PodAffinity.PreferredDuringSchedulingIgnoredDuringExecution...)
-
- rst.PodAffinity.RequiredDuringSchedulingIgnoredDuringExecution = append(
- rst.PodAffinity.RequiredDuringSchedulingIgnoredDuringExecution,
- src.PodAffinity.RequiredDuringSchedulingIgnoredDuringExecution...)
- }
- if !skipPodAntiAffinity {
- rst.PodAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution = append(
- rst.PodAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution,
- src.PodAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution...)
-
- rst.PodAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution = append(
- rst.PodAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution,
- src.PodAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution...)
+ // Merge PodAntiAffinity
+ if src.PodAntiAffinity != nil {
+ if rtn.PodAntiAffinity == nil {
+ rtn.PodAntiAffinity = &corev1.PodAntiAffinity{}
+ }
+ intctrlutil.MergeList(&src.PodAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution, &rtn.PodAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution, makeCmp[corev1.PodAffinityTerm]())
+ intctrlutil.MergeList(&src.PodAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution, &rtn.PodAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution, makeCmp[corev1.WeightedPodAffinityTerm]())
}
- if !skipNodeAffinity {
- rst.NodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution = append(
- rst.NodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution,
- src.NodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution...)
-
- skip := src.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution == nil
- if rst.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution == nil && !skip {
- rst.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution = src.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution
- skip = true
+
+ // Merge NodeAffinity
+ if src.NodeAffinity != nil {
+ if rtn.NodeAffinity == nil {
+ rtn.NodeAffinity = &corev1.NodeAffinity{}
}
- if !skip {
- rst.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms = append(
- rst.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms,
- src.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms...)
+ if src.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution != nil {
+ if rtn.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution == nil {
+ rtn.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution = &corev1.NodeSelector{}
+ }
+ intctrlutil.MergeList(&src.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms, &rtn.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms, makeCmp[corev1.NodeSelectorTerm]())
}
+ intctrlutil.MergeList(&src.NodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution, &rtn.NodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution, makeCmp[corev1.PreferredSchedulingTerm]())
}
- return rst
+
+ return rtn
}
diff --git a/pkg/controller/scheduling/scheduling_utils_test.go b/pkg/controller/scheduling/scheduling_utils_test.go
new file mode 100644
index 00000000000..9bf566ac9d6
--- /dev/null
+++ b/pkg/controller/scheduling/scheduling_utils_test.go
@@ -0,0 +1,573 @@
+/*
+Copyright (C) 2022-2025 ApeCloud Co., Ltd
+
+This file is part of KubeBlocks project
+
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU Affero General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU Affero General Public License for more details.
+
+You should have received a copy of the GNU Affero General Public License
+along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+package scheduling
+
+import (
+ . "github.com/onsi/ginkgo/v2"
+ . "github.com/onsi/gomega"
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+var _ = Describe("Scheduling util test", func() {
+ Context("mergeAffinity", func() {
+ It("merge all configs", func() {
+ affinity1 := &corev1.Affinity{
+ NodeAffinity: &corev1.NodeAffinity{
+ RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{
+ NodeSelectorTerms: []corev1.NodeSelectorTerm{
+ {
+ MatchExpressions: []corev1.NodeSelectorRequirement{
+ {
+ Key: "node-role.kubernetes.io/worker",
+ Operator: corev1.NodeSelectorOpExists,
+ },
+ },
+ MatchFields: nil,
+ },
+ {
+ MatchExpressions: []corev1.NodeSelectorRequirement{
+ {
+ Key: "topology.kubernetes.io/zone",
+ Operator: corev1.NodeSelectorOpIn,
+ Values: []string{
+ "east1",
+ },
+ },
+ },
+ },
+ },
+ },
+ PreferredDuringSchedulingIgnoredDuringExecution: nil,
+ },
+ PodAffinity: &corev1.PodAffinity{
+ RequiredDuringSchedulingIgnoredDuringExecution: []corev1.PodAffinityTerm{
+ {
+ LabelSelector: &metav1.LabelSelector{
+ MatchLabels: nil,
+ MatchExpressions: []metav1.LabelSelectorRequirement{
+ {
+ Key: "app",
+ Operator: metav1.LabelSelectorOpIn,
+ Values: []string{"myapp"},
+ },
+ },
+ },
+ Namespaces: nil,
+ TopologyKey: "",
+ NamespaceSelector: &metav1.LabelSelector{
+ MatchLabels: nil,
+ MatchExpressions: nil,
+ },
+ },
+ },
+ PreferredDuringSchedulingIgnoredDuringExecution: nil,
+ },
+ PodAntiAffinity: &corev1.PodAntiAffinity{
+ RequiredDuringSchedulingIgnoredDuringExecution: nil,
+ PreferredDuringSchedulingIgnoredDuringExecution: []corev1.WeightedPodAffinityTerm{
+ {
+ Weight: 100,
+ PodAffinityTerm: corev1.PodAffinityTerm{
+ LabelSelector: &metav1.LabelSelector{
+ MatchLabels: nil,
+ MatchExpressions: []metav1.LabelSelectorRequirement{
+ {
+ Key: "app",
+ Operator: metav1.LabelSelectorOpIn,
+ Values: []string{"myapp"},
+ },
+ },
+ },
+ TopologyKey: "kubernetes.io/hostname",
+ NamespaceSelector: &metav1.LabelSelector{
+ MatchLabels: nil,
+ MatchExpressions: nil,
+ },
+ },
+ },
+ },
+ },
+ }
+ affinity2 := &corev1.Affinity{
+ NodeAffinity: &corev1.NodeAffinity{
+ RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{
+ NodeSelectorTerms: []corev1.NodeSelectorTerm{
+ {
+ MatchExpressions: []corev1.NodeSelectorRequirement{
+ {
+ Key: "disktype",
+ Operator: corev1.NodeSelectorOpIn,
+ Values: []string{
+ "hdd",
+ },
+ },
+ },
+ MatchFields: nil,
+ },
+ {
+ MatchExpressions: []corev1.NodeSelectorRequirement{
+ {
+ Key: "topology.kubernetes.io/zone",
+ Operator: corev1.NodeSelectorOpIn,
+ Values: []string{
+ "west1",
+ },
+ },
+ },
+ },
+ },
+ },
+ PreferredDuringSchedulingIgnoredDuringExecution: nil,
+ },
+ PodAffinity: &corev1.PodAffinity{
+ RequiredDuringSchedulingIgnoredDuringExecution: []corev1.PodAffinityTerm{
+ {
+ LabelSelector: &metav1.LabelSelector{
+ MatchLabels: nil,
+ MatchExpressions: []metav1.LabelSelectorRequirement{
+ {
+ Key: "app",
+ Operator: metav1.LabelSelectorOpIn,
+ Values: []string{"myapp"},
+ },
+ },
+ },
+ Namespaces: nil,
+ TopologyKey: "",
+ NamespaceSelector: &metav1.LabelSelector{
+ MatchLabels: nil,
+ MatchExpressions: nil,
+ },
+ },
+ },
+ PreferredDuringSchedulingIgnoredDuringExecution: nil,
+ },
+ PodAntiAffinity: &corev1.PodAntiAffinity{
+ RequiredDuringSchedulingIgnoredDuringExecution: nil,
+ PreferredDuringSchedulingIgnoredDuringExecution: []corev1.WeightedPodAffinityTerm{
+ {
+ Weight: 100,
+ PodAffinityTerm: corev1.PodAffinityTerm{
+ LabelSelector: &metav1.LabelSelector{
+ MatchLabels: nil,
+ MatchExpressions: []metav1.LabelSelectorRequirement{
+ {
+ Key: "app",
+ Operator: metav1.LabelSelectorOpIn,
+ Values: []string{"myapp"},
+ },
+ },
+ },
+ TopologyKey: "kubernetes.io/hostname",
+ NamespaceSelector: &metav1.LabelSelector{
+ MatchLabels: nil,
+ MatchExpressions: nil,
+ },
+ },
+ },
+ },
+ },
+ }
+
+ rtn := MergeAffinity(affinity1, affinity2)
+
+ expectMergedAffinity := &corev1.Affinity{
+ NodeAffinity: &corev1.NodeAffinity{
+ RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{
+ NodeSelectorTerms: []corev1.NodeSelectorTerm{
+ {
+ MatchExpressions: []corev1.NodeSelectorRequirement{
+ {
+ Key: "disktype",
+ Operator: corev1.NodeSelectorOpIn,
+ Values: []string{
+ "hdd",
+ },
+ },
+ },
+ MatchFields: nil,
+ },
+ {
+ MatchExpressions: []corev1.NodeSelectorRequirement{
+ {
+ Key: "topology.kubernetes.io/zone",
+ Operator: corev1.NodeSelectorOpIn,
+ Values: []string{
+ "west1",
+ },
+ },
+ },
+ },
+ {
+ MatchExpressions: []corev1.NodeSelectorRequirement{
+ {
+ Key: "node-role.kubernetes.io/worker",
+ Operator: corev1.NodeSelectorOpExists,
+ },
+ },
+ MatchFields: nil,
+ },
+ {
+ MatchExpressions: []corev1.NodeSelectorRequirement{
+ {
+ Key: "topology.kubernetes.io/zone",
+ Operator: corev1.NodeSelectorOpIn,
+ Values: []string{
+ "east1",
+ },
+ },
+ },
+ },
+ },
+ },
+ PreferredDuringSchedulingIgnoredDuringExecution: nil,
+ },
+ PodAffinity: &corev1.PodAffinity{
+ RequiredDuringSchedulingIgnoredDuringExecution: []corev1.PodAffinityTerm{
+ {
+ LabelSelector: &metav1.LabelSelector{
+ MatchLabels: nil,
+ MatchExpressions: []metav1.LabelSelectorRequirement{
+ {
+ Key: "app",
+ Operator: metav1.LabelSelectorOpIn,
+ Values: []string{"myapp"},
+ },
+ },
+ },
+ Namespaces: nil,
+ TopologyKey: "",
+ NamespaceSelector: &metav1.LabelSelector{
+ MatchLabels: nil,
+ MatchExpressions: nil,
+ },
+ },
+ },
+ PreferredDuringSchedulingIgnoredDuringExecution: nil,
+ },
+ PodAntiAffinity: &corev1.PodAntiAffinity{
+ RequiredDuringSchedulingIgnoredDuringExecution: nil,
+ PreferredDuringSchedulingIgnoredDuringExecution: []corev1.WeightedPodAffinityTerm{
+ {
+ Weight: 100,
+ PodAffinityTerm: corev1.PodAffinityTerm{
+ LabelSelector: &metav1.LabelSelector{
+ MatchLabels: nil,
+ MatchExpressions: []metav1.LabelSelectorRequirement{
+ {
+ Key: "app",
+ Operator: metav1.LabelSelectorOpIn,
+ Values: []string{"myapp"},
+ },
+ },
+ },
+ TopologyKey: "kubernetes.io/hostname",
+ NamespaceSelector: &metav1.LabelSelector{
+ MatchLabels: nil,
+ MatchExpressions: nil,
+ },
+ },
+ },
+ },
+ },
+ }
+ Expect(rtn).Should(Equal(expectMergedAffinity))
+ })
+ It("merge with nil src", func() {
+ var affinity1 *corev1.Affinity
+ affinity2 := &corev1.Affinity{
+ NodeAffinity: &corev1.NodeAffinity{
+ RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{
+ NodeSelectorTerms: []corev1.NodeSelectorTerm{
+ {
+ MatchExpressions: []corev1.NodeSelectorRequirement{
+ {
+ Key: "node-role.kubernetes.io/worker",
+ Operator: corev1.NodeSelectorOpExists,
+ },
+ },
+ MatchFields: nil,
+ },
+ },
+ },
+ PreferredDuringSchedulingIgnoredDuringExecution: nil,
+ },
+ PodAffinity: &corev1.PodAffinity{
+ RequiredDuringSchedulingIgnoredDuringExecution: []corev1.PodAffinityTerm{
+ {
+ LabelSelector: &metav1.LabelSelector{
+ MatchLabels: nil,
+ MatchExpressions: []metav1.LabelSelectorRequirement{
+ {
+ Key: "app",
+ Operator: metav1.LabelSelectorOpIn,
+ Values: []string{"myapp"},
+ },
+ },
+ },
+ Namespaces: nil,
+ TopologyKey: "",
+ NamespaceSelector: &metav1.LabelSelector{
+ MatchLabels: nil,
+ MatchExpressions: nil,
+ },
+ },
+ },
+ PreferredDuringSchedulingIgnoredDuringExecution: nil,
+ },
+ PodAntiAffinity: &corev1.PodAntiAffinity{
+ RequiredDuringSchedulingIgnoredDuringExecution: nil,
+ PreferredDuringSchedulingIgnoredDuringExecution: []corev1.WeightedPodAffinityTerm{
+ {
+ Weight: 100,
+ PodAffinityTerm: corev1.PodAffinityTerm{
+ LabelSelector: &metav1.LabelSelector{
+ MatchLabels: nil,
+ MatchExpressions: []metav1.LabelSelectorRequirement{
+ {
+ Key: "app",
+ Operator: metav1.LabelSelectorOpIn,
+ Values: []string{"myapp"},
+ },
+ },
+ },
+ TopologyKey: "kubernetes.io/hostname",
+ NamespaceSelector: &metav1.LabelSelector{
+ MatchLabels: nil,
+ MatchExpressions: nil,
+ },
+ },
+ },
+ },
+ },
+ }
+
+ rtn := MergeAffinity(affinity1, affinity2)
+
+ expectMergedAffinity := &corev1.Affinity{
+ NodeAffinity: &corev1.NodeAffinity{
+ RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{
+ NodeSelectorTerms: []corev1.NodeSelectorTerm{
+ {
+ MatchExpressions: []corev1.NodeSelectorRequirement{
+ {
+ Key: "node-role.kubernetes.io/worker",
+ Operator: corev1.NodeSelectorOpExists,
+ },
+ },
+ MatchFields: nil,
+ },
+ },
+ },
+ PreferredDuringSchedulingIgnoredDuringExecution: nil,
+ },
+ PodAffinity: &corev1.PodAffinity{
+ RequiredDuringSchedulingIgnoredDuringExecution: []corev1.PodAffinityTerm{
+ {
+ LabelSelector: &metav1.LabelSelector{
+ MatchLabels: nil,
+ MatchExpressions: []metav1.LabelSelectorRequirement{
+ {
+ Key: "app",
+ Operator: metav1.LabelSelectorOpIn,
+ Values: []string{"myapp"},
+ },
+ },
+ },
+ Namespaces: nil,
+ TopologyKey: "",
+ NamespaceSelector: &metav1.LabelSelector{
+ MatchLabels: nil,
+ MatchExpressions: nil,
+ },
+ },
+ },
+ PreferredDuringSchedulingIgnoredDuringExecution: nil,
+ },
+ PodAntiAffinity: &corev1.PodAntiAffinity{
+ RequiredDuringSchedulingIgnoredDuringExecution: nil,
+ PreferredDuringSchedulingIgnoredDuringExecution: []corev1.WeightedPodAffinityTerm{
+ {
+ Weight: 100,
+ PodAffinityTerm: corev1.PodAffinityTerm{
+ LabelSelector: &metav1.LabelSelector{
+ MatchLabels: nil,
+ MatchExpressions: []metav1.LabelSelectorRequirement{
+ {
+ Key: "app",
+ Operator: metav1.LabelSelectorOpIn,
+ Values: []string{"myapp"},
+ },
+ },
+ },
+ TopologyKey: "kubernetes.io/hostname",
+ NamespaceSelector: &metav1.LabelSelector{
+ MatchLabels: nil,
+ MatchExpressions: nil,
+ },
+ },
+ },
+ },
+ },
+ }
+ Expect(rtn).Should(Equal(expectMergedAffinity))
+ })
+ It("merge with nil dst", func() {
+ affinity1 := &corev1.Affinity{
+ NodeAffinity: &corev1.NodeAffinity{
+ RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{
+ NodeSelectorTerms: []corev1.NodeSelectorTerm{
+ {
+ MatchExpressions: []corev1.NodeSelectorRequirement{
+ {
+ Key: "node-role.kubernetes.io/worker",
+ Operator: corev1.NodeSelectorOpExists,
+ },
+ },
+ MatchFields: nil,
+ },
+ },
+ },
+ PreferredDuringSchedulingIgnoredDuringExecution: nil,
+ },
+ PodAffinity: &corev1.PodAffinity{
+ RequiredDuringSchedulingIgnoredDuringExecution: []corev1.PodAffinityTerm{
+ {
+ LabelSelector: &metav1.LabelSelector{
+ MatchLabels: nil,
+ MatchExpressions: []metav1.LabelSelectorRequirement{
+ {
+ Key: "app",
+ Operator: metav1.LabelSelectorOpIn,
+ Values: []string{"myapp"},
+ },
+ },
+ },
+ Namespaces: nil,
+ TopologyKey: "",
+ NamespaceSelector: &metav1.LabelSelector{
+ MatchLabels: nil,
+ MatchExpressions: nil,
+ },
+ },
+ },
+ PreferredDuringSchedulingIgnoredDuringExecution: nil,
+ },
+ PodAntiAffinity: &corev1.PodAntiAffinity{
+ RequiredDuringSchedulingIgnoredDuringExecution: nil,
+ PreferredDuringSchedulingIgnoredDuringExecution: []corev1.WeightedPodAffinityTerm{
+ {
+ Weight: 100,
+ PodAffinityTerm: corev1.PodAffinityTerm{
+ LabelSelector: &metav1.LabelSelector{
+ MatchLabels: nil,
+ MatchExpressions: []metav1.LabelSelectorRequirement{
+ {
+ Key: "app",
+ Operator: metav1.LabelSelectorOpIn,
+ Values: []string{"myapp"},
+ },
+ },
+ },
+ TopologyKey: "kubernetes.io/hostname",
+ NamespaceSelector: &metav1.LabelSelector{
+ MatchLabels: nil,
+ MatchExpressions: nil,
+ },
+ },
+ },
+ },
+ },
+ }
+ var affinity2 *corev1.Affinity = nil
+
+ rtn := MergeAffinity(affinity1, affinity2)
+
+ expectMergedAffinity := &corev1.Affinity{
+ NodeAffinity: &corev1.NodeAffinity{
+ RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{
+ NodeSelectorTerms: []corev1.NodeSelectorTerm{
+ {
+ MatchExpressions: []corev1.NodeSelectorRequirement{
+ {
+ Key: "node-role.kubernetes.io/worker",
+ Operator: corev1.NodeSelectorOpExists,
+ },
+ },
+ MatchFields: nil,
+ },
+ },
+ },
+ PreferredDuringSchedulingIgnoredDuringExecution: nil,
+ },
+ PodAffinity: &corev1.PodAffinity{
+ RequiredDuringSchedulingIgnoredDuringExecution: []corev1.PodAffinityTerm{
+ {
+ LabelSelector: &metav1.LabelSelector{
+ MatchLabels: nil,
+ MatchExpressions: []metav1.LabelSelectorRequirement{
+ {
+ Key: "app",
+ Operator: metav1.LabelSelectorOpIn,
+ Values: []string{"myapp"},
+ },
+ },
+ },
+ Namespaces: nil,
+ TopologyKey: "",
+ NamespaceSelector: &metav1.LabelSelector{
+ MatchLabels: nil,
+ MatchExpressions: nil,
+ },
+ },
+ },
+ PreferredDuringSchedulingIgnoredDuringExecution: nil,
+ },
+ PodAntiAffinity: &corev1.PodAntiAffinity{
+ RequiredDuringSchedulingIgnoredDuringExecution: nil,
+ PreferredDuringSchedulingIgnoredDuringExecution: []corev1.WeightedPodAffinityTerm{
+ {
+ Weight: 100,
+ PodAffinityTerm: corev1.PodAffinityTerm{
+ LabelSelector: &metav1.LabelSelector{
+ MatchLabels: nil,
+ MatchExpressions: []metav1.LabelSelectorRequirement{
+ {
+ Key: "app",
+ Operator: metav1.LabelSelectorOpIn,
+ Values: []string{"myapp"},
+ },
+ },
+ },
+ TopologyKey: "kubernetes.io/hostname",
+ NamespaceSelector: &metav1.LabelSelector{
+ MatchLabels: nil,
+ MatchExpressions: nil,
+ },
+ },
+ },
+ },
+ },
+ }
+ Expect(rtn).Should(Equal(expectMergedAffinity))
+ })
+ })
+})
diff --git a/pkg/controller/scheduling/suite_test.go b/pkg/controller/scheduling/suite_test.go
new file mode 100644
index 00000000000..f0755b5ed15
--- /dev/null
+++ b/pkg/controller/scheduling/suite_test.go
@@ -0,0 +1,33 @@
+/*
+Copyright (C) 2022-2025 ApeCloud Co., Ltd
+
+This file is part of KubeBlocks project
+
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU Affero General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU Affero General Public License for more details.
+
+You should have received a copy of the GNU Affero General Public License
+along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+package scheduling
+
+import (
+ "testing"
+
+ . "github.com/onsi/ginkgo/v2"
+ . "github.com/onsi/gomega"
+)
+
+func TestScheduling(t *testing.T) {
+ RegisterFailHandler(Fail)
+
+ RunSpecs(t, "Scheduling package Test Suite")
+}