diff --git a/api/swagger-spec/v1beta1.json b/api/swagger-spec/v1beta1.json index 1da9fff45e0ac..ef74de1fb506d 100644 --- a/api/swagger-spec/v1beta1.json +++ b/api/swagger-spec/v1beta1.json @@ -2076,7 +2076,7 @@ "models": { "v1beta1.HorizontalPodAutoscalerList": { "id": "v1beta1.HorizontalPodAutoscalerList", - "description": "HorizontalPodAutoscalerList is a list of HorizontalPodAutoscalers.", + "description": "list of horizontal pod autoscaler objects.", "required": [ "items" ], @@ -2098,7 +2098,7 @@ "items": { "$ref": "v1beta1.HorizontalPodAutoscaler" }, - "description": "Items is the list of HorizontalPodAutoscalers." + "description": "list of horizontal pod autoscaler objects." } } }, @@ -2118,7 +2118,7 @@ }, "v1beta1.HorizontalPodAutoscaler": { "id": "v1beta1.HorizontalPodAutoscaler", - "description": "HorizontalPodAutoscaler represents the configuration of a horizontal pod autoscaler.", + "description": "configuration of a horizontal pod autoscaler.", "properties": { "kind": { "type": "string", @@ -2134,11 +2134,11 @@ }, "spec": { "$ref": "v1beta1.HorizontalPodAutoscalerSpec", - "description": "Spec defines the behaviour of autoscaler. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status." + "description": "behaviour of autoscaler. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status." }, "status": { "$ref": "v1beta1.HorizontalPodAutoscalerStatus", - "description": "Status represents the current information about the autoscaler." + "description": "current information about the autoscaler." } } }, @@ -2200,31 +2200,29 @@ }, "v1beta1.HorizontalPodAutoscalerSpec": { "id": "v1beta1.HorizontalPodAutoscalerSpec", - "description": "HorizontalPodAutoscalerSpec is the specification of a horizontal pod autoscaler.", + "description": "specification of a horizontal pod autoscaler.", "required": [ "scaleRef", - "minReplicas", - "maxReplicas", - "target" + "maxReplicas" ], "properties": { "scaleRef": { "$ref": "v1beta1.SubresourceReference", - "description": "ScaleRef is a reference to Scale subresource. HorizontalPodAutoscaler will learn the current resource consumption from its status, and will set the desired number of pods by modyfying its spec." + "description": "reference to Scale subresource; horizontal pod autoscaler will learn the current resource consumption from its status, and will set the desired number of pods by modifying its spec." }, "minReplicas": { "type": "integer", "format": "int32", - "description": "MinReplicas is the lower limit for the number of pods that can be set by the autoscaler." + "description": "lower limit for the number of pods that can be set by the autoscaler, default 1." }, "maxReplicas": { "type": "integer", "format": "int32", - "description": "MaxReplicas is the upper limit for the number of pods that can be set by the autoscaler. It cannot be smaller than MinReplicas." + "description": "upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas." }, - "target": { - "$ref": "v1beta1.ResourceConsumption", - "description": "Target is the target average consumption of the given resource that the autoscaler will try to maintain by adjusting the desired number of pods. Currently two types of resources are supported: \"cpu\" and \"memory\"." 
+ "cpuUtilization": { + "$ref": "v1beta1.CPUTargetUtilization", + "description": "target average CPU utilization (represented as a percentage of requested CPU) over all the pods; if not specified it defaults to the target CPU utilization at 80% of the requested resources." } } }, @@ -2254,46 +2252,50 @@ } } }, - "v1beta1.ResourceConsumption": { - "id": "v1beta1.ResourceConsumption", - "description": "ResourceConsumption is an object for specifying average resource consumption of a particular resource.", + "v1beta1.CPUTargetUtilization": { + "id": "v1beta1.CPUTargetUtilization", + "required": [ + "targetPercentage" + ], "properties": { - "resource": { - "type": "string", - "description": "Resource specifies either the name of the target resource when present in the spec, or the name of the observed resource when present in the status." - }, - "quantity": { - "type": "string", - "description": "Quantity specifies either the target average consumption of the resource when present in the spec, or the observed average consumption when present in the status." + "targetPercentage": { + "type": "integer", + "format": "int32", + "description": "fraction of the requested CPU that should be utilized/used, e.g. 70 means that 70% of the requested CPU should be in use." } } }, "v1beta1.HorizontalPodAutoscalerStatus": { "id": "v1beta1.HorizontalPodAutoscalerStatus", - "description": "HorizontalPodAutoscalerStatus contains the current status of a horizontal pod autoscaler", + "description": "current status of a horizontal pod autoscaler", "required": [ "currentReplicas", - "desiredReplicas", - "currentConsumption" + "desiredReplicas" ], "properties": { + "observedGeneration": { + "type": "integer", + "format": "int64", + "description": "most recent generation observed by this autoscaler." + }, + "lastScaleTime": { + "type": "string", + "description": "last time the HorizontalPodAutoscaler scaled the number of pods; used by the autoscaler to control how often the number of pods is changed." + }, "currentReplicas": { "type": "integer", "format": "int32", - "description": "CurrentReplicas is the number of replicas of pods managed by this autoscaler." + "description": "current number of replicas of pods managed by this autoscaler." }, "desiredReplicas": { "type": "integer", "format": "int32", - "description": "DesiredReplicas is the desired number of replicas of pods managed by this autoscaler." + "description": "desired number of replicas of pods managed by this autoscaler." }, - "currentConsumption": { - "$ref": "v1beta1.ResourceConsumption", - "description": "CurrentConsumption is the current average consumption of the given resource that the autoscaler will try to maintain by adjusting the desired number of pods. Two types of resources are supported: \"cpu\" and \"memory\"." - }, - "lastScaleTimestamp": { - "type": "string", - "description": "LastScaleTimestamp is the last time the HorizontalPodAutoscaler scaled the number of pods. This is used by the autoscaler to controll how often the number of pods is changed." + "currentCPUUtilizationPercentage": { + "type": "integer", + "format": "int32", + "description": "current average CPU utilization over all pods, represented as a percentage of requested CPU, e.g. 70 means that an average pod is using now 70% of its requested CPU." 
} } }, diff --git a/pkg/api/testing/fuzzer.go b/pkg/api/testing/fuzzer.go index 7b9385fe9d388..2f99c07b43d44 100644 --- a/pkg/api/testing/fuzzer.go +++ b/pkg/api/testing/fuzzer.go @@ -368,6 +368,12 @@ func FuzzerFor(t *testing.T, version string, src rand.Source) *fuzz.Fuzzer { // string, which will cause tests failure. s.APIGroup = "something" }, + func(s *extensions.HorizontalPodAutoscalerSpec, c fuzz.Continue) { + c.FuzzNoCustom(s) // fuzz self without calling this function again + minReplicas := c.Rand.Int() + s.MinReplicas = &minReplicas + s.CPUUtilization = &extensions.CPUTargetUtilization{TargetPercentage: int(c.RandUint64())} + }, ) return f } diff --git a/pkg/apis/extensions/deep_copy_generated.go b/pkg/apis/extensions/deep_copy_generated.go index ceba7e3832075..7cf48baebf2c4 100644 --- a/pkg/apis/extensions/deep_copy_generated.go +++ b/pkg/apis/extensions/deep_copy_generated.go @@ -848,6 +848,11 @@ func deepCopy_extensions_APIVersion(in APIVersion, out *APIVersion, c *conversio return nil } +func deepCopy_extensions_CPUTargetUtilization(in CPUTargetUtilization, out *CPUTargetUtilization, c *conversion.Cloner) error { + out.TargetPercentage = in.TargetPercentage + return nil +} + func deepCopy_extensions_ClusterAutoscaler(in ClusterAutoscaler, out *ClusterAutoscaler, c *conversion.Cloner) error { if err := deepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err @@ -1099,40 +1104,49 @@ func deepCopy_extensions_HorizontalPodAutoscalerList(in HorizontalPodAutoscalerL } func deepCopy_extensions_HorizontalPodAutoscalerSpec(in HorizontalPodAutoscalerSpec, out *HorizontalPodAutoscalerSpec, c *conversion.Cloner) error { - if in.ScaleRef != nil { - out.ScaleRef = new(SubresourceReference) - if err := deepCopy_extensions_SubresourceReference(*in.ScaleRef, out.ScaleRef, c); err != nil { - return err - } + if err := deepCopy_extensions_SubresourceReference(in.ScaleRef, &out.ScaleRef, c); err != nil { + return err + } + if in.MinReplicas != nil { + out.MinReplicas = new(int) + *out.MinReplicas = *in.MinReplicas } else { - out.ScaleRef = nil + out.MinReplicas = nil } - out.MinReplicas = in.MinReplicas out.MaxReplicas = in.MaxReplicas - if err := deepCopy_extensions_ResourceConsumption(in.Target, &out.Target, c); err != nil { - return err + if in.CPUUtilization != nil { + out.CPUUtilization = new(CPUTargetUtilization) + if err := deepCopy_extensions_CPUTargetUtilization(*in.CPUUtilization, out.CPUUtilization, c); err != nil { + return err + } + } else { + out.CPUUtilization = nil } return nil } func deepCopy_extensions_HorizontalPodAutoscalerStatus(in HorizontalPodAutoscalerStatus, out *HorizontalPodAutoscalerStatus, c *conversion.Cloner) error { - out.CurrentReplicas = in.CurrentReplicas - out.DesiredReplicas = in.DesiredReplicas - if in.CurrentConsumption != nil { - out.CurrentConsumption = new(ResourceConsumption) - if err := deepCopy_extensions_ResourceConsumption(*in.CurrentConsumption, out.CurrentConsumption, c); err != nil { - return err - } + if in.ObservedGeneration != nil { + out.ObservedGeneration = new(int64) + *out.ObservedGeneration = *in.ObservedGeneration } else { - out.CurrentConsumption = nil + out.ObservedGeneration = nil } - if in.LastScaleTimestamp != nil { - out.LastScaleTimestamp = new(unversioned.Time) - if err := deepCopy_unversioned_Time(*in.LastScaleTimestamp, out.LastScaleTimestamp, c); err != nil { + if in.LastScaleTime != nil { + out.LastScaleTime = new(unversioned.Time) + if err := deepCopy_unversioned_Time(*in.LastScaleTime, 
out.LastScaleTime, c); err != nil { return err } } else { - out.LastScaleTimestamp = nil + out.LastScaleTime = nil + } + out.CurrentReplicas = in.CurrentReplicas + out.DesiredReplicas = in.DesiredReplicas + if in.CurrentCPUUtilizationPercentage != nil { + out.CurrentCPUUtilizationPercentage = new(int) + *out.CurrentCPUUtilizationPercentage = *in.CurrentCPUUtilizationPercentage + } else { + out.CurrentCPUUtilizationPercentage = nil } return nil } @@ -1389,14 +1403,6 @@ func deepCopy_extensions_ReplicationControllerDummy(in ReplicationControllerDumm return nil } -func deepCopy_extensions_ResourceConsumption(in ResourceConsumption, out *ResourceConsumption, c *conversion.Cloner) error { - out.Resource = in.Resource - if err := deepCopy_resource_Quantity(in.Quantity, &out.Quantity, c); err != nil { - return err - } - return nil -} - func deepCopy_extensions_RollingUpdateDeployment(in RollingUpdateDeployment, out *RollingUpdateDeployment, c *conversion.Cloner) error { if err := deepCopy_util_IntOrString(in.MaxUnavailable, &out.MaxUnavailable, c); err != nil { return err @@ -1586,6 +1592,7 @@ func init() { deepCopy_unversioned_Time, deepCopy_unversioned_TypeMeta, deepCopy_extensions_APIVersion, + deepCopy_extensions_CPUTargetUtilization, deepCopy_extensions_ClusterAutoscaler, deepCopy_extensions_ClusterAutoscalerList, deepCopy_extensions_ClusterAutoscalerSpec, @@ -1620,7 +1627,6 @@ func init() { deepCopy_extensions_PodSelector, deepCopy_extensions_PodSelectorRequirement, deepCopy_extensions_ReplicationControllerDummy, - deepCopy_extensions_ResourceConsumption, deepCopy_extensions_RollingUpdateDeployment, deepCopy_extensions_Scale, deepCopy_extensions_ScaleSpec, diff --git a/pkg/apis/extensions/types.go b/pkg/apis/extensions/types.go index 9b73b60762246..dfcf4499882b6 100644 --- a/pkg/apis/extensions/types.go +++ b/pkg/apis/extensions/types.go @@ -30,36 +30,35 @@ package extensions import ( "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/resource" "k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/util" ) -// ScaleSpec describes the attributes a Scale subresource +// describes the attributes of a scale subresource type ScaleSpec struct { - // Replicas is the number of desired replicas. More info: http://releases.k8s.io/HEAD/docs/user-guide/replication-controller.md#what-is-a-replication-controller" + // desired number of instances for the scaled object. Replicas int `json:"replicas,omitempty"` } -// ScaleStatus represents the current status of a Scale subresource. +// represents the current status of a scale subresource. type ScaleStatus struct { - // Replicas is the number of actual replicas. More info: http://releases.k8s.io/HEAD/docs/user-guide/replication-controller.md#what-is-a-replication-controller + // actual number of observed instances of the scaled object. Replicas int `json:"replicas"` - // Selector is a label query over pods that should match the replicas count. If it is empty, it is defaulted to labels on Pod template; More info: http://releases.k8s.io/HEAD/docs/user-guide/labels.md#label-selectors + // label query over pods that should match the replicas count. More info: http://releases.k8s.io/HEAD/docs/user-guide/labels.md#label-selectors Selector map[string]string `json:"selector,omitempty"` } -// Scale subresource, applicable to ReplicationControllers and (in future) Deployment. +// represents a scaling request for a resource. 
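As the scale subresource comments above describe, the autoscaler observes current state through ScaleStatus and acts by writing a new replica count into ScaleSpec. A minimal sketch of that read/write pattern, using local stand-ins for the types in this file (the `resize` helper is hypothetical):

```go
package main

import "fmt"

// Local stand-ins for the scale subresource types defined in this file.
type ScaleSpec struct{ Replicas int }

type ScaleStatus struct {
	Replicas int
	Selector map[string]string
}

type Scale struct {
	Spec   ScaleSpec
	Status ScaleStatus
}

// resize records the autoscaler's decision: the observed count stays in
// Status, the desired count goes into Spec.
func resize(s *Scale, desired int) {
	s.Spec.Replicas = desired
}

func main() {
	s := &Scale{Status: ScaleStatus{Replicas: 3, Selector: map[string]string{"app": "frontend"}}}
	resize(s, 5)
	fmt.Println(s.Spec.Replicas, s.Status.Replicas) // 5 3
}
```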
type Scale struct { unversioned.TypeMeta `json:",inline"` // Standard object metadata; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata. api.ObjectMeta `json:"metadata,omitempty"` - // Spec defines the behavior of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. + // defines the behavior of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. Spec ScaleSpec `json:"spec,omitempty"` - // Status represents the current status of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. Read-only. + // current status of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. Read-only. Status ScaleStatus `json:"status,omitempty"` } @@ -82,63 +81,64 @@ type SubresourceReference struct { Subresource string `json:"subresource,omitempty"` } -// ResourceConsumption is an object for specifying average resource consumption of a particular resource. -type ResourceConsumption struct { - Resource api.ResourceName `json:"resource,omitempty"` - Quantity resource.Quantity `json:"quantity,omitempty"` +type CPUTargetUtilization struct { + // fraction of the requested CPU that should be utilized/used, + // e.g. 70 means that 70% of the requested CPU should be in use. + TargetPercentage int `json:"targetPercentage"` } -// HorizontalPodAutoscalerSpec is the specification of a horizontal pod autoscaler. +// specification of a horizontal pod autoscaler. type HorizontalPodAutoscalerSpec struct { - // ScaleRef is a reference to Scale subresource. HorizontalPodAutoscaler will learn the current resource consumption from its status, - // and will set the desired number of pods by modyfying its spec. - ScaleRef *SubresourceReference `json:"scaleRef"` - // MinReplicas is the lower limit for the number of pods that can be set by the autoscaler. - MinReplicas int `json:"minReplicas"` - // MaxReplicas is the upper limit for the number of pods that can be set by the autoscaler. It cannot be smaller than MinReplicas. + // reference to Scale subresource; horizontal pod autoscaler will learn the current resource consumption from its status, + // and will set the desired number of pods by modifying its spec. + ScaleRef SubresourceReference `json:"scaleRef"` + // lower limit for the number of pods that can be set by the autoscaler, default 1. + MinReplicas *int `json:"minReplicas,omitempty"` + // upper limit for the number of pods that can be set by the autoscaler. It cannot be smaller than MinReplicas. MaxReplicas int `json:"maxReplicas"` - // Target is the target average consumption of the given resource that the autoscaler will try to maintain by adjusting the desired number of pods. - // Currently two types of resources are supported: "cpu" and "memory". - Target ResourceConsumption `json:"target"` + // target average CPU utilization (represented as a percentage of requested CPU) over all the pods; + // if not specified it defaults to the target CPU utilization at 80% of the requested resources. + CPUUtilization *CPUTargetUtilization `json:"cpuUtilization,omitempty"` } -// HorizontalPodAutoscalerStatus contains the current status of a horizontal pod autoscaler +// current status of a horizontal pod autoscaler type HorizontalPodAutoscalerStatus struct { - // TODO: Consider if it is needed. - // CurrentReplicas is the number of replicas of pods managed by this autoscaler. + // most recent generation observed by this autoscaler. 
+ ObservedGeneration *int64 `json:"observedGeneration,omitempty"` + + // last time the HorizontalPodAutoscaler scaled the number of pods; + // used by the autoscaler to control how often the number of pods is changed. + LastScaleTime *unversioned.Time `json:"lastScaleTime,omitempty"` + + // current number of replicas of pods managed by this autoscaler. CurrentReplicas int `json:"currentReplicas"` - // DesiredReplicas is the desired number of replicas of pods managed by this autoscaler. + // desired number of replicas of pods managed by this autoscaler. DesiredReplicas int `json:"desiredReplicas"` - // CurrentConsumption is the current average consumption of the given resource that the autoscaler will - // try to maintain by adjusting the desired number of pods. - // Two types of resources are supported: "cpu" and "memory". - CurrentConsumption *ResourceConsumption `json:"currentConsumption"` - - // LastScaleTimestamp is the last time the HorizontalPodAutoscaler scaled the number of pods. - // This is used by the autoscaler to controll how often the number of pods is changed. - LastScaleTimestamp *unversioned.Time `json:"lastScaleTimestamp,omitempty"` + // current average CPU utilization over all pods, represented as a percentage of requested CPU, + // e.g. 70 means that an average pod is using now 70% of its requested CPU. + CurrentCPUUtilizationPercentage *int `json:"currentCPUUtilizationPercentage,omitempty"` } -// HorizontalPodAutoscaler represents the configuration of a horizontal pod autoscaler. +// configuration of a horizontal pod autoscaler. type HorizontalPodAutoscaler struct { unversioned.TypeMeta `json:",inline"` api.ObjectMeta `json:"metadata,omitempty"` - // Spec defines the behaviour of autoscaler. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. + // behaviour of autoscaler. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. Spec HorizontalPodAutoscalerSpec `json:"spec,omitempty"` - // Status represents the current information about the autoscaler. + // current information about the autoscaler. Status HorizontalPodAutoscalerStatus `json:"status,omitempty"` } -// HorizontalPodAutoscaler is a collection of pod autoscalers. +// list of horizontal pod autoscaler objects. type HorizontalPodAutoscalerList struct { unversioned.TypeMeta `json:",inline"` unversioned.ListMeta `json:"metadata,omitempty"` - // Items is the list of horizontal pod autoscalers. + // list of horizontal pod autoscaler objects. 
Items []HorizontalPodAutoscaler `json:"items"` } diff --git a/pkg/apis/extensions/v1beta1/conversion_generated.go b/pkg/apis/extensions/v1beta1/conversion_generated.go index e233725ae901b..1661f9f94be01 100644 --- a/pkg/apis/extensions/v1beta1/conversion_generated.go +++ b/pkg/apis/extensions/v1beta1/conversion_generated.go @@ -2145,6 +2145,18 @@ func convert_extensions_APIVersion_To_v1beta1_APIVersion(in *extensions.APIVersi return autoconvert_extensions_APIVersion_To_v1beta1_APIVersion(in, out, s) } +func autoconvert_extensions_CPUTargetUtilization_To_v1beta1_CPUTargetUtilization(in *extensions.CPUTargetUtilization, out *CPUTargetUtilization, s conversion.Scope) error { + if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { + defaulting.(func(*extensions.CPUTargetUtilization))(in) + } + out.TargetPercentage = in.TargetPercentage + return nil +} + +func convert_extensions_CPUTargetUtilization_To_v1beta1_CPUTargetUtilization(in *extensions.CPUTargetUtilization, out *CPUTargetUtilization, s conversion.Scope) error { + return autoconvert_extensions_CPUTargetUtilization_To_v1beta1_CPUTargetUtilization(in, out, s) +} + func autoconvert_extensions_ClusterAutoscaler_To_v1beta1_ClusterAutoscaler(in *extensions.ClusterAutoscaler, out *ClusterAutoscaler, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*extensions.ClusterAutoscaler))(in) @@ -2507,18 +2519,23 @@ func autoconvert_extensions_HorizontalPodAutoscalerSpec_To_v1beta1_HorizontalPod if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*extensions.HorizontalPodAutoscalerSpec))(in) } - if in.ScaleRef != nil { - out.ScaleRef = new(SubresourceReference) - if err := convert_extensions_SubresourceReference_To_v1beta1_SubresourceReference(in.ScaleRef, out.ScaleRef, s); err != nil { - return err - } + if err := convert_extensions_SubresourceReference_To_v1beta1_SubresourceReference(&in.ScaleRef, &out.ScaleRef, s); err != nil { + return err + } + if in.MinReplicas != nil { + out.MinReplicas = new(int) + *out.MinReplicas = *in.MinReplicas } else { - out.ScaleRef = nil + out.MinReplicas = nil } - out.MinReplicas = in.MinReplicas out.MaxReplicas = in.MaxReplicas - if err := convert_extensions_ResourceConsumption_To_v1beta1_ResourceConsumption(&in.Target, &out.Target, s); err != nil { - return err + if in.CPUUtilization != nil { + out.CPUUtilization = new(CPUTargetUtilization) + if err := convert_extensions_CPUTargetUtilization_To_v1beta1_CPUTargetUtilization(in.CPUUtilization, out.CPUUtilization, s); err != nil { + return err + } + } else { + out.CPUUtilization = nil } return nil } @@ -2531,22 +2548,26 @@ func autoconvert_extensions_HorizontalPodAutoscalerStatus_To_v1beta1_HorizontalP if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*extensions.HorizontalPodAutoscalerStatus))(in) } - out.CurrentReplicas = in.CurrentReplicas - out.DesiredReplicas = in.DesiredReplicas - if in.CurrentConsumption != nil { - out.CurrentConsumption = new(ResourceConsumption) - if err := convert_extensions_ResourceConsumption_To_v1beta1_ResourceConsumption(in.CurrentConsumption, out.CurrentConsumption, s); err != nil { - return err - } + if in.ObservedGeneration != nil { + out.ObservedGeneration = new(int64) + *out.ObservedGeneration = *in.ObservedGeneration } else { - out.CurrentConsumption = nil + out.ObservedGeneration = nil } - if in.LastScaleTimestamp != nil { - if err := 
s.Convert(&in.LastScaleTimestamp, &out.LastScaleTimestamp, 0); err != nil { + if in.LastScaleTime != nil { + if err := s.Convert(&in.LastScaleTime, &out.LastScaleTime, 0); err != nil { return err } } else { - out.LastScaleTimestamp = nil + out.LastScaleTime = nil + } + out.CurrentReplicas = in.CurrentReplicas + out.DesiredReplicas = in.DesiredReplicas + if in.CurrentCPUUtilizationPercentage != nil { + out.CurrentCPUUtilizationPercentage = new(int) + *out.CurrentCPUUtilizationPercentage = *in.CurrentCPUUtilizationPercentage + } else { + out.CurrentCPUUtilizationPercentage = nil } return nil } @@ -2917,21 +2938,6 @@ func convert_extensions_ReplicationControllerDummy_To_v1beta1_ReplicationControl return autoconvert_extensions_ReplicationControllerDummy_To_v1beta1_ReplicationControllerDummy(in, out, s) } -func autoconvert_extensions_ResourceConsumption_To_v1beta1_ResourceConsumption(in *extensions.ResourceConsumption, out *ResourceConsumption, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*extensions.ResourceConsumption))(in) - } - out.Resource = v1.ResourceName(in.Resource) - if err := s.Convert(&in.Quantity, &out.Quantity, 0); err != nil { - return err - } - return nil -} - -func convert_extensions_ResourceConsumption_To_v1beta1_ResourceConsumption(in *extensions.ResourceConsumption, out *ResourceConsumption, s conversion.Scope) error { - return autoconvert_extensions_ResourceConsumption_To_v1beta1_ResourceConsumption(in, out, s) -} - func autoconvert_extensions_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment(in *extensions.RollingUpdateDeployment, out *RollingUpdateDeployment, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*extensions.RollingUpdateDeployment))(in) @@ -3132,6 +3138,18 @@ func convert_v1beta1_APIVersion_To_extensions_APIVersion(in *APIVersion, out *ex return autoconvert_v1beta1_APIVersion_To_extensions_APIVersion(in, out, s) } +func autoconvert_v1beta1_CPUTargetUtilization_To_extensions_CPUTargetUtilization(in *CPUTargetUtilization, out *extensions.CPUTargetUtilization, s conversion.Scope) error { + if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { + defaulting.(func(*CPUTargetUtilization))(in) + } + out.TargetPercentage = in.TargetPercentage + return nil +} + +func convert_v1beta1_CPUTargetUtilization_To_extensions_CPUTargetUtilization(in *CPUTargetUtilization, out *extensions.CPUTargetUtilization, s conversion.Scope) error { + return autoconvert_v1beta1_CPUTargetUtilization_To_extensions_CPUTargetUtilization(in, out, s) +} + func autoconvert_v1beta1_ClusterAutoscaler_To_extensions_ClusterAutoscaler(in *ClusterAutoscaler, out *extensions.ClusterAutoscaler, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*ClusterAutoscaler))(in) @@ -3474,18 +3492,23 @@ func autoconvert_v1beta1_HorizontalPodAutoscalerSpec_To_extensions_HorizontalPod if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*HorizontalPodAutoscalerSpec))(in) } - if in.ScaleRef != nil { - out.ScaleRef = new(extensions.SubresourceReference) - if err := convert_v1beta1_SubresourceReference_To_extensions_SubresourceReference(in.ScaleRef, out.ScaleRef, s); err != nil { - return err - } + if err := convert_v1beta1_SubresourceReference_To_extensions_SubresourceReference(&in.ScaleRef, &out.ScaleRef, s); err != nil { + return 
err + } + if in.MinReplicas != nil { + out.MinReplicas = new(int) + *out.MinReplicas = *in.MinReplicas } else { - out.ScaleRef = nil + out.MinReplicas = nil } - out.MinReplicas = in.MinReplicas out.MaxReplicas = in.MaxReplicas - if err := convert_v1beta1_ResourceConsumption_To_extensions_ResourceConsumption(&in.Target, &out.Target, s); err != nil { - return err + if in.CPUUtilization != nil { + out.CPUUtilization = new(extensions.CPUTargetUtilization) + if err := convert_v1beta1_CPUTargetUtilization_To_extensions_CPUTargetUtilization(in.CPUUtilization, out.CPUUtilization, s); err != nil { + return err + } + } else { + out.CPUUtilization = nil } return nil } @@ -3498,22 +3521,26 @@ func autoconvert_v1beta1_HorizontalPodAutoscalerStatus_To_extensions_HorizontalP if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*HorizontalPodAutoscalerStatus))(in) } - out.CurrentReplicas = in.CurrentReplicas - out.DesiredReplicas = in.DesiredReplicas - if in.CurrentConsumption != nil { - out.CurrentConsumption = new(extensions.ResourceConsumption) - if err := convert_v1beta1_ResourceConsumption_To_extensions_ResourceConsumption(in.CurrentConsumption, out.CurrentConsumption, s); err != nil { - return err - } + if in.ObservedGeneration != nil { + out.ObservedGeneration = new(int64) + *out.ObservedGeneration = *in.ObservedGeneration } else { - out.CurrentConsumption = nil + out.ObservedGeneration = nil } - if in.LastScaleTimestamp != nil { - if err := s.Convert(&in.LastScaleTimestamp, &out.LastScaleTimestamp, 0); err != nil { + if in.LastScaleTime != nil { + if err := s.Convert(&in.LastScaleTime, &out.LastScaleTime, 0); err != nil { return err } } else { - out.LastScaleTimestamp = nil + out.LastScaleTime = nil + } + out.CurrentReplicas = in.CurrentReplicas + out.DesiredReplicas = in.DesiredReplicas + if in.CurrentCPUUtilizationPercentage != nil { + out.CurrentCPUUtilizationPercentage = new(int) + *out.CurrentCPUUtilizationPercentage = *in.CurrentCPUUtilizationPercentage + } else { + out.CurrentCPUUtilizationPercentage = nil } return nil } @@ -3884,21 +3911,6 @@ func convert_v1beta1_ReplicationControllerDummy_To_extensions_ReplicationControl return autoconvert_v1beta1_ReplicationControllerDummy_To_extensions_ReplicationControllerDummy(in, out, s) } -func autoconvert_v1beta1_ResourceConsumption_To_extensions_ResourceConsumption(in *ResourceConsumption, out *extensions.ResourceConsumption, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*ResourceConsumption))(in) - } - out.Resource = api.ResourceName(in.Resource) - if err := s.Convert(&in.Quantity, &out.Quantity, 0); err != nil { - return err - } - return nil -} - -func convert_v1beta1_ResourceConsumption_To_extensions_ResourceConsumption(in *ResourceConsumption, out *extensions.ResourceConsumption, s conversion.Scope) error { - return autoconvert_v1beta1_ResourceConsumption_To_extensions_ResourceConsumption(in, out, s) -} - func autoconvert_v1beta1_RollingUpdateDeployment_To_extensions_RollingUpdateDeployment(in *RollingUpdateDeployment, out *extensions.RollingUpdateDeployment, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*RollingUpdateDeployment))(in) @@ -4126,6 +4138,7 @@ func init() { autoconvert_api_VolumeSource_To_v1_VolumeSource, autoconvert_api_Volume_To_v1_Volume, autoconvert_extensions_APIVersion_To_v1beta1_APIVersion, + 
autoconvert_extensions_CPUTargetUtilization_To_v1beta1_CPUTargetUtilization, autoconvert_extensions_ClusterAutoscalerList_To_v1beta1_ClusterAutoscalerList, autoconvert_extensions_ClusterAutoscalerSpec_To_v1beta1_ClusterAutoscalerSpec, autoconvert_extensions_ClusterAutoscaler_To_v1beta1_ClusterAutoscaler, @@ -4160,7 +4173,6 @@ func init() { autoconvert_extensions_PodSelectorRequirement_To_v1beta1_PodSelectorRequirement, autoconvert_extensions_PodSelector_To_v1beta1_PodSelector, autoconvert_extensions_ReplicationControllerDummy_To_v1beta1_ReplicationControllerDummy, - autoconvert_extensions_ResourceConsumption_To_v1beta1_ResourceConsumption, autoconvert_extensions_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment, autoconvert_extensions_ScaleSpec_To_v1beta1_ScaleSpec, autoconvert_extensions_ScaleStatus_To_v1beta1_ScaleStatus, @@ -4212,6 +4224,7 @@ func init() { autoconvert_v1_VolumeSource_To_api_VolumeSource, autoconvert_v1_Volume_To_api_Volume, autoconvert_v1beta1_APIVersion_To_extensions_APIVersion, + autoconvert_v1beta1_CPUTargetUtilization_To_extensions_CPUTargetUtilization, autoconvert_v1beta1_ClusterAutoscalerList_To_extensions_ClusterAutoscalerList, autoconvert_v1beta1_ClusterAutoscalerSpec_To_extensions_ClusterAutoscalerSpec, autoconvert_v1beta1_ClusterAutoscaler_To_extensions_ClusterAutoscaler, @@ -4245,7 +4258,6 @@ func init() { autoconvert_v1beta1_PodSelectorRequirement_To_extensions_PodSelectorRequirement, autoconvert_v1beta1_PodSelector_To_extensions_PodSelector, autoconvert_v1beta1_ReplicationControllerDummy_To_extensions_ReplicationControllerDummy, - autoconvert_v1beta1_ResourceConsumption_To_extensions_ResourceConsumption, autoconvert_v1beta1_RollingUpdateDeployment_To_extensions_RollingUpdateDeployment, autoconvert_v1beta1_ScaleSpec_To_extensions_ScaleSpec, autoconvert_v1beta1_ScaleStatus_To_extensions_ScaleStatus, diff --git a/pkg/apis/extensions/v1beta1/deep_copy_generated.go b/pkg/apis/extensions/v1beta1/deep_copy_generated.go index c5fdaa95c6a57..bc2b769244a6b 100644 --- a/pkg/apis/extensions/v1beta1/deep_copy_generated.go +++ b/pkg/apis/extensions/v1beta1/deep_copy_generated.go @@ -850,6 +850,11 @@ func deepCopy_v1beta1_APIVersion(in APIVersion, out *APIVersion, c *conversion.C return nil } +func deepCopy_v1beta1_CPUTargetUtilization(in CPUTargetUtilization, out *CPUTargetUtilization, c *conversion.Cloner) error { + out.TargetPercentage = in.TargetPercentage + return nil +} + func deepCopy_v1beta1_ClusterAutoscaler(in ClusterAutoscaler, out *ClusterAutoscaler, c *conversion.Cloner) error { if err := deepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err @@ -1111,40 +1116,49 @@ func deepCopy_v1beta1_HorizontalPodAutoscalerList(in HorizontalPodAutoscalerList } func deepCopy_v1beta1_HorizontalPodAutoscalerSpec(in HorizontalPodAutoscalerSpec, out *HorizontalPodAutoscalerSpec, c *conversion.Cloner) error { - if in.ScaleRef != nil { - out.ScaleRef = new(SubresourceReference) - if err := deepCopy_v1beta1_SubresourceReference(*in.ScaleRef, out.ScaleRef, c); err != nil { - return err - } + if err := deepCopy_v1beta1_SubresourceReference(in.ScaleRef, &out.ScaleRef, c); err != nil { + return err + } + if in.MinReplicas != nil { + out.MinReplicas = new(int) + *out.MinReplicas = *in.MinReplicas } else { - out.ScaleRef = nil + out.MinReplicas = nil } - out.MinReplicas = in.MinReplicas out.MaxReplicas = in.MaxReplicas - if err := deepCopy_v1beta1_ResourceConsumption(in.Target, &out.Target, c); err != nil { - return err + if in.CPUUtilization != 
nil { + out.CPUUtilization = new(CPUTargetUtilization) + if err := deepCopy_v1beta1_CPUTargetUtilization(*in.CPUUtilization, out.CPUUtilization, c); err != nil { + return err + } + } else { + out.CPUUtilization = nil } return nil } func deepCopy_v1beta1_HorizontalPodAutoscalerStatus(in HorizontalPodAutoscalerStatus, out *HorizontalPodAutoscalerStatus, c *conversion.Cloner) error { - out.CurrentReplicas = in.CurrentReplicas - out.DesiredReplicas = in.DesiredReplicas - if in.CurrentConsumption != nil { - out.CurrentConsumption = new(ResourceConsumption) - if err := deepCopy_v1beta1_ResourceConsumption(*in.CurrentConsumption, out.CurrentConsumption, c); err != nil { - return err - } + if in.ObservedGeneration != nil { + out.ObservedGeneration = new(int64) + *out.ObservedGeneration = *in.ObservedGeneration } else { - out.CurrentConsumption = nil + out.ObservedGeneration = nil } - if in.LastScaleTimestamp != nil { - out.LastScaleTimestamp = new(unversioned.Time) - if err := deepCopy_unversioned_Time(*in.LastScaleTimestamp, out.LastScaleTimestamp, c); err != nil { + if in.LastScaleTime != nil { + out.LastScaleTime = new(unversioned.Time) + if err := deepCopy_unversioned_Time(*in.LastScaleTime, out.LastScaleTime, c); err != nil { return err } } else { - out.LastScaleTimestamp = nil + out.LastScaleTime = nil + } + out.CurrentReplicas = in.CurrentReplicas + out.DesiredReplicas = in.DesiredReplicas + if in.CurrentCPUUtilizationPercentage != nil { + out.CurrentCPUUtilizationPercentage = new(int) + *out.CurrentCPUUtilizationPercentage = *in.CurrentCPUUtilizationPercentage + } else { + out.CurrentCPUUtilizationPercentage = nil } return nil } @@ -1401,14 +1415,6 @@ func deepCopy_v1beta1_ReplicationControllerDummy(in ReplicationControllerDummy, return nil } -func deepCopy_v1beta1_ResourceConsumption(in ResourceConsumption, out *ResourceConsumption, c *conversion.Cloner) error { - out.Resource = in.Resource - if err := deepCopy_resource_Quantity(in.Quantity, &out.Quantity, c); err != nil { - return err - } - return nil -} - func deepCopy_v1beta1_RollingUpdateDeployment(in RollingUpdateDeployment, out *RollingUpdateDeployment, c *conversion.Cloner) error { if in.MaxUnavailable != nil { out.MaxUnavailable = new(util.IntOrString) @@ -1608,6 +1614,7 @@ func init() { deepCopy_v1_VolumeMount, deepCopy_v1_VolumeSource, deepCopy_v1beta1_APIVersion, + deepCopy_v1beta1_CPUTargetUtilization, deepCopy_v1beta1_ClusterAutoscaler, deepCopy_v1beta1_ClusterAutoscalerList, deepCopy_v1beta1_ClusterAutoscalerSpec, @@ -1642,7 +1649,6 @@ func init() { deepCopy_v1beta1_PodSelector, deepCopy_v1beta1_PodSelectorRequirement, deepCopy_v1beta1_ReplicationControllerDummy, - deepCopy_v1beta1_ResourceConsumption, deepCopy_v1beta1_RollingUpdateDeployment, deepCopy_v1beta1_Scale, deepCopy_v1beta1_ScaleSpec, diff --git a/pkg/apis/extensions/v1beta1/defaults.go b/pkg/apis/extensions/v1beta1/defaults.go index e1018ff416b8c..48beebb69e604 100644 --- a/pkg/apis/extensions/v1beta1/defaults.go +++ b/pkg/apis/extensions/v1beta1/defaults.go @@ -109,5 +109,14 @@ func addDefaultingFuncs() { obj.Spec.Parallelism = obj.Spec.Completions } }, + func(obj *HorizontalPodAutoscaler) { + if obj.Spec.MinReplicas == nil { + minReplicas := 1 + obj.Spec.MinReplicas = &minReplicas + } + if obj.Spec.CPUUtilization == nil { + obj.Spec.CPUUtilization = &CPUTargetUtilization{TargetPercentage: 80} + } + }, ) } diff --git a/pkg/apis/extensions/v1beta1/types.go b/pkg/apis/extensions/v1beta1/types.go index 250c939ba0d71..7ab25c256210a 100644 --- 
a/pkg/apis/extensions/v1beta1/types.go +++ b/pkg/apis/extensions/v1beta1/types.go @@ -17,37 +17,36 @@ limitations under the License. package v1beta1 import ( - "k8s.io/kubernetes/pkg/api/resource" "k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/util" ) -// ScaleSpec describes the attributes a Scale subresource +// describes the attributes of a scale subresource type ScaleSpec struct { - // Replicas is the number of desired replicas. More info: http://releases.k8s.io/HEAD/docs/user-guide/replication-controller.md#what-is-a-replication-controller" + // desired number of instances for the scaled object. Replicas int `json:"replicas,omitempty"` } -// ScaleStatus represents the current status of a Scale subresource. +// represents the current status of a scale subresource. type ScaleStatus struct { - // Replicas is the number of actual replicas. More info: http://releases.k8s.io/HEAD/docs/user-guide/replication-controller.md#what-is-a-replication-controller + // actual number of observed instances of the scaled object. Replicas int `json:"replicas"` - // Selector is a label query over pods that should match the replicas count. If it is empty, it is defaulted to labels on Pod template; More info: http://releases.k8s.io/HEAD/docs/user-guide/labels.md#label-selectors + // label query over pods that should match the replicas count. More info: http://releases.k8s.io/HEAD/docs/user-guide/labels.md#label-selectors Selector map[string]string `json:"selector,omitempty"` } -// Scale subresource, applicable to ReplicationControllers and (in future) Deployment. +// represents a scaling request for a resource. type Scale struct { unversioned.TypeMeta `json:",inline"` // Standard object metadata; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata. v1.ObjectMeta `json:"metadata,omitempty"` - // Spec defines the behavior of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. + // defines the behavior of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. Spec ScaleSpec `json:"spec,omitempty"` - // Status represents the current status of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. Read-only. + // current status of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. Read-only. Status ScaleStatus `json:"status,omitempty"` } @@ -70,66 +69,66 @@ type SubresourceReference struct { Subresource string `json:"subresource,omitempty"` } -// ResourceConsumption is an object for specifying average resource consumption of a particular resource. -type ResourceConsumption struct { - // Resource specifies either the name of the target resource when present in the spec, or the name of the observed resource when present in the status. - Resource v1.ResourceName `json:"resource,omitempty"` - // Quantity specifies either the target average consumption of the resource when present in the spec, or the observed average consumption when present in the status. - Quantity resource.Quantity `json:"quantity,omitempty"` +type CPUTargetUtilization struct { + // fraction of the requested CPU that should be utilized/used, + // e.g. 70 means that 70% of the requested CPU should be in use. + TargetPercentage int `json:"targetPercentage"` } -// HorizontalPodAutoscalerSpec is the specification of a horizontal pod autoscaler. +// specification of a horizontal pod autoscaler. 
type HorizontalPodAutoscalerSpec struct { - // ScaleRef is a reference to Scale subresource. HorizontalPodAutoscaler will learn the current resource consumption from its status, - // and will set the desired number of pods by modyfying its spec. - ScaleRef *SubresourceReference `json:"scaleRef"` - // MinReplicas is the lower limit for the number of pods that can be set by the autoscaler. - MinReplicas int `json:"minReplicas"` - // MaxReplicas is the upper limit for the number of pods that can be set by the autoscaler. It cannot be smaller than MinReplicas. + // reference to Scale subresource; horizontal pod autoscaler will learn the current resource consumption from its status, + // and will set the desired number of pods by modifying its spec. + ScaleRef SubresourceReference `json:"scaleRef"` + // lower limit for the number of pods that can be set by the autoscaler, default 1. + MinReplicas *int `json:"minReplicas,omitempty"` + // upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas. MaxReplicas int `json:"maxReplicas"` - // Target is the target average consumption of the given resource that the autoscaler will try to maintain by adjusting the desired number of pods. - // Currently two types of resources are supported: "cpu" and "memory". - Target ResourceConsumption `json:"target"` + // target average CPU utilization (represented as a percentage of requested CPU) over all the pods; + // if not specified it defaults to the target CPU utilization at 80% of the requested resources. + CPUUtilization *CPUTargetUtilization `json:"cpuUtilization,omitempty"` } -// HorizontalPodAutoscalerStatus contains the current status of a horizontal pod autoscaler +// current status of a horizontal pod autoscaler type HorizontalPodAutoscalerStatus struct { - // CurrentReplicas is the number of replicas of pods managed by this autoscaler. + // most recent generation observed by this autoscaler. + ObservedGeneration *int64 `json:"observedGeneration,omitempty"` + + // last time the HorizontalPodAutoscaler scaled the number of pods; + // used by the autoscaler to control how often the number of pods is changed. + LastScaleTime *unversioned.Time `json:"lastScaleTime,omitempty"` + + // current number of replicas of pods managed by this autoscaler. CurrentReplicas int `json:"currentReplicas"` - // DesiredReplicas is the desired number of replicas of pods managed by this autoscaler. + // desired number of replicas of pods managed by this autoscaler. DesiredReplicas int `json:"desiredReplicas"` - // CurrentConsumption is the current average consumption of the given resource that the autoscaler will - // try to maintain by adjusting the desired number of pods. - // Two types of resources are supported: "cpu" and "memory". - CurrentConsumption *ResourceConsumption `json:"currentConsumption"` - - // LastScaleTimestamp is the last time the HorizontalPodAutoscaler scaled the number of pods. - // This is used by the autoscaler to controll how often the number of pods is changed. - LastScaleTimestamp *unversioned.Time `json:"lastScaleTimestamp,omitempty"` + // current average CPU utilization over all pods, represented as a percentage of requested CPU, + // e.g. 70 means that an average pod is using now 70% of its requested CPU. + CurrentCPUUtilizationPercentage *int `json:"currentCPUUtilizationPercentage,omitempty"` } -// HorizontalPodAutoscaler represents the configuration of a horizontal pod autoscaler. +// configuration of a horizontal pod autoscaler. 
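The "default 1" and "80% of the requested resources" remarks in the field comments above are implemented by the defaulting function added to pkg/apis/extensions/v1beta1/defaults.go earlier in this diff. A self-contained sketch of that behaviour, with stand-in types:

```go
package main

import "fmt"

// Stand-ins for the v1beta1 fields involved in defaulting.
type CPUTargetUtilization struct{ TargetPercentage int }

type HorizontalPodAutoscalerSpec struct {
	MinReplicas    *int
	MaxReplicas    int
	CPUUtilization *CPUTargetUtilization
}

// defaultHPASpec mirrors the new defaulting: an unset MinReplicas becomes 1
// and an unset CPUUtilization becomes an 80% target.
func defaultHPASpec(spec *HorizontalPodAutoscalerSpec) {
	if spec.MinReplicas == nil {
		minReplicas := 1
		spec.MinReplicas = &minReplicas
	}
	if spec.CPUUtilization == nil {
		spec.CPUUtilization = &CPUTargetUtilization{TargetPercentage: 80}
	}
}

func main() {
	spec := HorizontalPodAutoscalerSpec{MaxReplicas: 10}
	defaultHPASpec(&spec)
	fmt.Println(*spec.MinReplicas, spec.CPUUtilization.TargetPercentage) // 1 80
}
```

Switching MinReplicas from int to *int is what makes this work: an omitted field can be distinguished from an explicitly set value, so the default is applied only when the user did not set minReplicas.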
type HorizontalPodAutoscaler struct { unversioned.TypeMeta `json:",inline"` // Standard object metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata v1.ObjectMeta `json:"metadata,omitempty"` - // Spec defines the behaviour of autoscaler. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. + // behaviour of autoscaler. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. Spec HorizontalPodAutoscalerSpec `json:"spec,omitempty"` - // Status represents the current information about the autoscaler. + // current information about the autoscaler. Status HorizontalPodAutoscalerStatus `json:"status,omitempty"` } -// HorizontalPodAutoscalerList is a list of HorizontalPodAutoscalers. +// list of horizontal pod autoscaler objects. type HorizontalPodAutoscalerList struct { unversioned.TypeMeta `json:",inline"` // Standard list metadata. unversioned.ListMeta `json:"metadata,omitempty"` - // Items is the list of HorizontalPodAutoscalers. + // list of horizontal pod autoscaler objects. Items []HorizontalPodAutoscaler `json:"items"` } diff --git a/pkg/apis/extensions/v1beta1/types_swagger_doc_generated.go b/pkg/apis/extensions/v1beta1/types_swagger_doc_generated.go index 5994aafd3ecb1..7b26605f0d4a6 100644 --- a/pkg/apis/extensions/v1beta1/types_swagger_doc_generated.go +++ b/pkg/apis/extensions/v1beta1/types_swagger_doc_generated.go @@ -37,6 +37,14 @@ func (APIVersion) SwaggerDoc() map[string]string { return map_APIVersion } +var map_CPUTargetUtilization = map[string]string{ + "targetPercentage": "fraction of the requested CPU that should be utilized/used, e.g. 70 means that 70% of the requested CPU should be in use.", +} + +func (CPUTargetUtilization) SwaggerDoc() map[string]string { + return map_CPUTargetUtilization +} + var map_ClusterAutoscaler = map[string]string{ "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata For now (experimental api) it is required that the name is set to \"ClusterAutoscaler\" and namespace is \"default\".", "spec": "Spec defines the desired behavior of this daemon set. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", @@ -182,10 +190,10 @@ func (HTTPIngressRuleValue) SwaggerDoc() map[string]string { } var map_HorizontalPodAutoscaler = map[string]string{ - "": "HorizontalPodAutoscaler represents the configuration of a horizontal pod autoscaler.", + "": "configuration of a horizontal pod autoscaler.", "metadata": "Standard object metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", - "spec": "Spec defines the behaviour of autoscaler. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status.", - "status": "Status represents the current information about the autoscaler.", + "spec": "behaviour of autoscaler. 
More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status.", + "status": "current information about the autoscaler.", } func (HorizontalPodAutoscaler) SwaggerDoc() map[string]string { @@ -193,9 +201,9 @@ func (HorizontalPodAutoscaler) SwaggerDoc() map[string]string { } var map_HorizontalPodAutoscalerList = map[string]string{ - "": "HorizontalPodAutoscalerList is a list of HorizontalPodAutoscalers.", + "": "list of horizontal pod autoscaler objects.", "metadata": "Standard list metadata.", - "items": "Items is the list of HorizontalPodAutoscalers.", + "items": "list of horizontal pod autoscaler objects.", } func (HorizontalPodAutoscalerList) SwaggerDoc() map[string]string { @@ -203,11 +211,11 @@ func (HorizontalPodAutoscalerList) SwaggerDoc() map[string]string { } var map_HorizontalPodAutoscalerSpec = map[string]string{ - "": "HorizontalPodAutoscalerSpec is the specification of a horizontal pod autoscaler.", - "scaleRef": "ScaleRef is a reference to Scale subresource. HorizontalPodAutoscaler will learn the current resource consumption from its status, and will set the desired number of pods by modyfying its spec.", - "minReplicas": "MinReplicas is the lower limit for the number of pods that can be set by the autoscaler.", - "maxReplicas": "MaxReplicas is the upper limit for the number of pods that can be set by the autoscaler. It cannot be smaller than MinReplicas.", - "target": "Target is the target average consumption of the given resource that the autoscaler will try to maintain by adjusting the desired number of pods. Currently two types of resources are supported: \"cpu\" and \"memory\".", + "": "specification of a horizontal pod autoscaler.", + "scaleRef": "reference to Scale subresource; horizontal pod autoscaler will learn the current resource consumption from its status, and will set the desired number of pods by modifying its spec.", + "minReplicas": "lower limit for the number of pods that can be set by the autoscaler, default 1.", + "maxReplicas": "upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas.", + "cpuUtilization": "target average CPU utilization (represented as a percentage of requested CPU) over all the pods; if not specified it defaults to the target CPU utilization at 80% of the requested resources.", } func (HorizontalPodAutoscalerSpec) SwaggerDoc() map[string]string { @@ -215,11 +223,12 @@ func (HorizontalPodAutoscalerSpec) SwaggerDoc() map[string]string { } var map_HorizontalPodAutoscalerStatus = map[string]string{ - "": "HorizontalPodAutoscalerStatus contains the current status of a horizontal pod autoscaler", - "currentReplicas": "CurrentReplicas is the number of replicas of pods managed by this autoscaler.", - "desiredReplicas": "DesiredReplicas is the desired number of replicas of pods managed by this autoscaler.", - "currentConsumption": "CurrentConsumption is the current average consumption of the given resource that the autoscaler will try to maintain by adjusting the desired number of pods. Two types of resources are supported: \"cpu\" and \"memory\".", - "lastScaleTimestamp": "LastScaleTimestamp is the last time the HorizontalPodAutoscaler scaled the number of pods. 
This is used by the autoscaler to controll how often the number of pods is changed.", + "": "current status of a horizontal pod autoscaler", + "observedGeneration": "most recent generation observed by this autoscaler.", + "lastScaleTime": "last time the HorizontalPodAutoscaler scaled the number of pods; used by the autoscaler to control how often the number of pods is changed.", + "currentReplicas": "current number of replicas of pods managed by this autoscaler.", + "desiredReplicas": "desired number of replicas of pods managed by this autoscaler.", + "currentCPUUtilizationPercentage": "current average CPU utilization over all pods, represented as a percentage of requested CPU, e.g. 70 means that an average pod is using now 70% of its requested CPU.", } func (HorizontalPodAutoscalerStatus) SwaggerDoc() map[string]string { @@ -392,16 +401,6 @@ func (ReplicationControllerDummy) SwaggerDoc() map[string]string { return map_ReplicationControllerDummy } -var map_ResourceConsumption = map[string]string{ - "": "ResourceConsumption is an object for specifying average resource consumption of a particular resource.", - "resource": "Resource specifies either the name of the target resource when present in the spec, or the name of the observed resource when present in the status.", - "quantity": "Quantity specifies either the target average consumption of the resource when present in the spec, or the observed average consumption when present in the status.", -} - -func (ResourceConsumption) SwaggerDoc() map[string]string { - return map_ResourceConsumption -} - var map_RollingUpdateDeployment = map[string]string{ "": "Spec to control the desired behavior of rolling update.", "maxUnavailable": "The maximum number of pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). Absolute number is calculated from percentage by rounding up. This can not be 0 if MaxSurge is 0. By default, a fixed value of 1 is used. Example: when this is set to 30%, the old RC can be scaled down to 70% of desired pods immediately when the rolling update starts. Once new pods are ready, old RC can be scaled down further, followed by scaling up the new RC, ensuring that the total number of pods available at all times during the update is at least 70% of desired pods.", @@ -414,10 +413,10 @@ func (RollingUpdateDeployment) SwaggerDoc() map[string]string { } var map_Scale = map[string]string{ - "": "Scale subresource, applicable to ReplicationControllers and (in future) Deployment.", + "": "represents a scaling request for a resource.", "metadata": "Standard object metadata; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata.", - "spec": "Spec defines the behavior of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status.", - "status": "Status represents the current status of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. Read-only.", + "spec": "defines the behavior of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status.", + "status": "current status of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. 
Read-only.", } func (Scale) SwaggerDoc() map[string]string { @@ -425,8 +424,8 @@ func (Scale) SwaggerDoc() map[string]string { } var map_ScaleSpec = map[string]string{ - "": "ScaleSpec describes the attributes a Scale subresource", - "replicas": "Replicas is the number of desired replicas. More info: http://releases.k8s.io/HEAD/docs/user-guide/replication-controller.md#what-is-a-replication-controller\"", + "": "describes the attributes of a scale subresource", + "replicas": "desired number of instances for the scaled object.", } func (ScaleSpec) SwaggerDoc() map[string]string { @@ -434,9 +433,9 @@ func (ScaleSpec) SwaggerDoc() map[string]string { } var map_ScaleStatus = map[string]string{ - "": "ScaleStatus represents the current status of a Scale subresource.", - "replicas": "Replicas is the number of actual replicas. More info: http://releases.k8s.io/HEAD/docs/user-guide/replication-controller.md#what-is-a-replication-controller", - "selector": "Selector is a label query over pods that should match the replicas count. If it is empty, it is defaulted to labels on Pod template; More info: http://releases.k8s.io/HEAD/docs/user-guide/labels.md#label-selectors", + "": "represents the current status of a scale subresource.", + "replicas": "actual number of observed instances of the scaled object.", + "selector": "label query over pods that should match the replicas count. More info: http://releases.k8s.io/HEAD/docs/user-guide/labels.md#label-selectors", } func (ScaleStatus) SwaggerDoc() map[string]string { diff --git a/pkg/apis/extensions/validation/validation.go b/pkg/apis/extensions/validation/validation.go index a9ba7d98e01a8..c8980090096cb 100644 --- a/pkg/apis/extensions/validation/validation.go +++ b/pkg/apis/extensions/validation/validation.go @@ -51,37 +51,19 @@ func ValidateHorizontalPodAutoscalerName(name string, prefix bool) (bool, string return apivalidation.ValidateReplicationControllerName(name, prefix) } -func validateResourceConsumption(consumption *extensions.ResourceConsumption, fieldName string) errs.ValidationErrorList { - allErrs := errs.ValidationErrorList{} - resource := consumption.Resource.String() - if resource != string(api.ResourceMemory) && resource != string(api.ResourceCPU) { - allErrs = append(allErrs, errs.NewFieldInvalid(fieldName+".resource", resource, "resource not supported by autoscaler")) - } - quantity := consumption.Quantity.Value() - if quantity < 0 { - allErrs = append(allErrs, errs.NewFieldInvalid(fieldName+".quantity", quantity, "must be non-negative")) - } - return allErrs -} - func validateHorizontalPodAutoscalerSpec(autoscaler extensions.HorizontalPodAutoscalerSpec) errs.ValidationErrorList { allErrs := errs.ValidationErrorList{} - if autoscaler.MinReplicas < 0 { - allErrs = append(allErrs, errs.NewFieldInvalid("minReplicas", autoscaler.MinReplicas, isNegativeErrorMsg)) - } - if autoscaler.MaxReplicas < autoscaler.MinReplicas { - allErrs = append(allErrs, errs.NewFieldInvalid("maxReplicas", autoscaler.MaxReplicas, `must be bigger or equal to minReplicas`)) + if autoscaler.MinReplicas != nil && *autoscaler.MinReplicas < 1 { + allErrs = append(allErrs, errs.NewFieldInvalid("minReplicas", autoscaler.MinReplicas, `must be bigger or equal to 1`)) } - if autoscaler.ScaleRef == nil { - allErrs = append(allErrs, errs.NewFieldRequired("scaleRef")) + if autoscaler.MaxReplicas < 1 { + allErrs = append(allErrs, errs.NewFieldInvalid("maxReplicas", autoscaler.MaxReplicas, `must be bigger or equal to 1`)) } - resource := autoscaler.Target.Resource.String() - 
if resource != string(api.ResourceMemory) && resource != string(api.ResourceCPU) { - allErrs = append(allErrs, errs.NewFieldInvalid("target.resource", resource, "resource not supported by autoscaler")) + if autoscaler.MinReplicas != nil && autoscaler.MaxReplicas < *autoscaler.MinReplicas { + allErrs = append(allErrs, errs.NewFieldInvalid("maxReplicas", autoscaler.MaxReplicas, `must be bigger or equal to minReplicas`)) } - quantity := autoscaler.Target.Quantity.Value() - if quantity < 0 { - allErrs = append(allErrs, errs.NewFieldInvalid("target.quantity", quantity, isNegativeErrorMsg)) + if autoscaler.CPUUtilization != nil && autoscaler.CPUUtilization.TargetPercentage < 1 { + allErrs = append(allErrs, errs.NewFieldInvalid("cpuUtilization.targetPercentage", autoscaler.CPUUtilization.TargetPercentage, isNegativeErrorMsg)) } return allErrs } @@ -107,9 +89,6 @@ func ValidateHorizontalPodAutoscalerStatusUpdate(controller, oldController *exte status := controller.Status allErrs = append(allErrs, apivalidation.ValidatePositiveField(int64(status.CurrentReplicas), "currentReplicas")...) allErrs = append(allErrs, apivalidation.ValidatePositiveField(int64(status.DesiredReplicas), "desiredReplicas")...) - if status.CurrentConsumption != nil { - allErrs = append(allErrs, validateResourceConsumption(status.CurrentConsumption, "currentConsumption")...) - } return allErrs } diff --git a/pkg/apis/extensions/validation/validation_test.go b/pkg/apis/extensions/validation/validation_test.go index f1654309432e6..47b97390fdfd4 100644 --- a/pkg/apis/extensions/validation/validation_test.go +++ b/pkg/apis/extensions/validation/validation_test.go @@ -22,7 +22,6 @@ import ( "testing" "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/resource" "k8s.io/kubernetes/pkg/apis/extensions" "k8s.io/kubernetes/pkg/util" errors "k8s.io/kubernetes/pkg/util/fielderrors" @@ -36,87 +35,73 @@ func TestValidateHorizontalPodAutoscaler(t *testing.T) { Namespace: api.NamespaceDefault, }, Spec: extensions.HorizontalPodAutoscalerSpec{ - ScaleRef: &extensions.SubresourceReference{ + ScaleRef: extensions.SubresourceReference{ Subresource: "scale", }, - MinReplicas: 1, - MaxReplicas: 5, - Target: extensions.ResourceConsumption{Resource: api.ResourceCPU, Quantity: resource.MustParse("0.8")}, + MinReplicas: newInt(1), + MaxReplicas: 5, + CPUUtilization: &extensions.CPUTargetUtilization{TargetPercentage: 70}, }, }, - } - for _, successCase := range successCases { - if errs := ValidateHorizontalPodAutoscaler(&successCase); len(errs) != 0 { - t.Errorf("expected success: %v", errs) - } - } - - errorCases := map[string]extensions.HorizontalPodAutoscaler{ - "must be non-negative": { + { ObjectMeta: api.ObjectMeta{ Name: "myautoscaler", Namespace: api.NamespaceDefault, }, Spec: extensions.HorizontalPodAutoscalerSpec{ - ScaleRef: &extensions.SubresourceReference{ + ScaleRef: extensions.SubresourceReference{ Subresource: "scale", }, - MinReplicas: -1, + MinReplicas: newInt(1), MaxReplicas: 5, - Target: extensions.ResourceConsumption{Resource: api.ResourceCPU, Quantity: resource.MustParse("0.8")}, }, }, - "must be bigger or equal to minReplicas": { + } + for _, successCase := range successCases { + if errs := ValidateHorizontalPodAutoscaler(&successCase); len(errs) != 0 { + t.Errorf("expected success: %v", errs) + } + } + + errorCases := map[string]extensions.HorizontalPodAutoscaler{ + "must be bigger or equal to 1": { ObjectMeta: api.ObjectMeta{ Name: "myautoscaler", Namespace: api.NamespaceDefault, }, Spec: 
extensions.HorizontalPodAutoscalerSpec{ - ScaleRef: &extensions.SubresourceReference{ + ScaleRef: extensions.SubresourceReference{ Subresource: "scale", }, - MinReplicas: 7, + MinReplicas: newInt(-1), MaxReplicas: 5, - Target: extensions.ResourceConsumption{Resource: api.ResourceCPU, Quantity: resource.MustParse("0.8")}, }, }, - "invalid value": { + "must be bigger or equal to minReplicas": { ObjectMeta: api.ObjectMeta{ Name: "myautoscaler", Namespace: api.NamespaceDefault, }, Spec: extensions.HorizontalPodAutoscalerSpec{ - ScaleRef: &extensions.SubresourceReference{ + ScaleRef: extensions.SubresourceReference{ Subresource: "scale", }, - MinReplicas: 1, + MinReplicas: newInt(7), MaxReplicas: 5, - Target: extensions.ResourceConsumption{Resource: api.ResourceCPU, Quantity: resource.MustParse("-0.8")}, }, }, - "resource not supported": { + "must be non-negative": { ObjectMeta: api.ObjectMeta{ Name: "myautoscaler", Namespace: api.NamespaceDefault, }, Spec: extensions.HorizontalPodAutoscalerSpec{ - ScaleRef: &extensions.SubresourceReference{ + ScaleRef: extensions.SubresourceReference{ Subresource: "scale", }, - MinReplicas: 1, - MaxReplicas: 5, - Target: extensions.ResourceConsumption{Resource: api.ResourceName("NotSupportedResource"), Quantity: resource.MustParse("0.8")}, - }, - }, - "required value": { - ObjectMeta: api.ObjectMeta{ - Name: "myautoscaler", - Namespace: api.NamespaceDefault, - }, - Spec: extensions.HorizontalPodAutoscalerSpec{ - MinReplicas: 1, - MaxReplicas: 5, - Target: extensions.ResourceConsumption{Resource: api.ResourceCPU, Quantity: resource.MustParse("0.8")}, + MinReplicas: newInt(1), + MaxReplicas: 5, + CPUUtilization: &extensions.CPUTargetUtilization{TargetPercentage: -70}, }, }, } @@ -1147,3 +1132,9 @@ func TestValidateClusterAutoscaler(t *testing.T) { } } } + +func newInt(val int) *int { + p := new(int) + *p = val + return p +} diff --git a/pkg/controller/podautoscaler/horizontal.go b/pkg/controller/podautoscaler/horizontal.go index 3071feddc1d09..cdef71b6fc5f6 100644 --- a/pkg/controller/podautoscaler/horizontal.go +++ b/pkg/controller/podautoscaler/horizontal.go @@ -68,6 +68,31 @@ func (a *HorizontalController) Run(syncPeriod time.Duration) { }, syncPeriod, util.NeverStop) } +func (a *HorizontalController) computeReplicasForCPUUtilization(hpa extensions.HorizontalPodAutoscaler, + scale *extensions.Scale) (int, *int, error) { + if hpa.Spec.CPUUtilization == nil { + // If CPUTarget is not specified then we should return some default values. + // Since we always take the maximum number of replicas from all policies it is safe + // to just return 0. + return 0, nil, nil + } + currentReplicas := scale.Status.Replicas + currentUtilization, err := a.metricsClient.GetCPUUtilization(hpa.Spec.ScaleRef.Namespace, scale.Status.Selector) + + // TODO: what to do on partial errors (like metrics obtained for 75% of pods).
+ if err != nil { + a.eventRecorder.Event(&hpa, "FailedGetMetrics", err.Error()) + return 0, nil, fmt.Errorf("failed to get cpu utilization: %v", err) + } + + usageRatio := float64(*currentUtilization) / float64(hpa.Spec.CPUUtilization.TargetPercentage) + if math.Abs(1.0-usageRatio) > tolerance { + return int(math.Ceil(usageRatio * float64(currentReplicas))), currentUtilization, nil + } else { + return currentReplicas, currentUtilization, nil + } +} + func (a *HorizontalController) reconcileAutoscaler(hpa extensions.HorizontalPodAutoscaler) error { reference := fmt.Sprintf("%s/%s/%s", hpa.Spec.ScaleRef.Kind, hpa.Spec.ScaleRef.Namespace, hpa.Spec.ScaleRef.Name) @@ -77,24 +102,18 @@ func (a *HorizontalController) reconcileAutoscaler(hpa extensions.HorizontalPodA return fmt.Errorf("failed to query scale subresource for %s: %v", reference, err) } currentReplicas := scale.Status.Replicas - currentConsumption, err := a.metricsClient. - ResourceConsumption(hpa.Spec.ScaleRef.Namespace). - Get(hpa.Spec.Target.Resource, scale.Status.Selector) - // TODO: what to do on partial errors (like metrics obtained for 75% of pods). + desiredReplicas, currentUtilization, err := a.computeReplicasForCPUUtilization(hpa, scale) if err != nil { - a.eventRecorder.Event(&hpa, "FailedGetMetrics", err.Error()) - return fmt.Errorf("failed to get metrics for %s: %v", reference, err) + a.eventRecorder.Event(&hpa, "FailedComputeReplicas", err.Error()) + return fmt.Errorf("failed to compute desired number of replicas based on CPU utilization for %s: %v", reference, err) } - usageRatio := float64(currentConsumption.Quantity.MilliValue()) / float64(hpa.Spec.Target.Quantity.MilliValue()) - desiredReplicas := int(math.Ceil(usageRatio * float64(currentReplicas))) - - if desiredReplicas < hpa.Spec.MinReplicas { - desiredReplicas = hpa.Spec.MinReplicas + if hpa.Spec.MinReplicas != nil && desiredReplicas < *hpa.Spec.MinReplicas { + desiredReplicas = *hpa.Spec.MinReplicas } - // TODO: remove when pod ideling is done. + // TODO: remove when pod idling is done. if desiredReplicas == 0 { desiredReplicas = 1 } @@ -108,17 +127,17 @@ func (a *HorizontalController) reconcileAutoscaler(hpa extensions.HorizontalPodA if desiredReplicas != currentReplicas { // Going down only if the usageRatio dropped significantly below the target // and there was no rescaling in the last downscaleForbiddenWindow. - if desiredReplicas < currentReplicas && usageRatio < (1-tolerance) && - (hpa.Status.LastScaleTimestamp == nil || - hpa.Status.LastScaleTimestamp.Add(downscaleForbiddenWindow).Before(now)) { + if desiredReplicas < currentReplicas && + (hpa.Status.LastScaleTime == nil || + hpa.Status.LastScaleTime.Add(downscaleForbiddenWindow).Before(now)) { rescale = true } // Going up only if the usage ratio increased significantly above the target // and there was no rescaling in the last upscaleForbiddenWindow. 
- if desiredReplicas > currentReplicas && usageRatio > (1+tolerance) && - (hpa.Status.LastScaleTimestamp == nil || - hpa.Status.LastScaleTimestamp.Add(upscaleForbiddenWindow).Before(now)) { + if desiredReplicas > currentReplicas && + (hpa.Status.LastScaleTime == nil || + hpa.Status.LastScaleTime.Add(upscaleForbiddenWindow).Before(now)) { rescale = true } } @@ -131,20 +150,20 @@ func (a *HorizontalController) reconcileAutoscaler(hpa extensions.HorizontalPodA return fmt.Errorf("failed to rescale %s: %v", reference, err) } a.eventRecorder.Eventf(&hpa, "SuccessfulRescale", "New size: %d", desiredReplicas) - glog.Infof("Successfull rescale of %s, old size: %d, new size: %d, usage ratio: %f", - hpa.Name, currentReplicas, desiredReplicas, usageRatio) + glog.Infof("Successful rescale of %s, old size: %d, new size: %d", + hpa.Name, currentReplicas, desiredReplicas) } else { desiredReplicas = currentReplicas } hpa.Status = extensions.HorizontalPodAutoscalerStatus{ - CurrentReplicas: currentReplicas, - DesiredReplicas: desiredReplicas, - CurrentConsumption: currentConsumption, + CurrentReplicas: currentReplicas, + DesiredReplicas: desiredReplicas, + CurrentCPUUtilizationPercentage: currentUtilization, } if rescale { now := unversioned.NewTime(now) - hpa.Status.LastScaleTimestamp = &now + hpa.Status.LastScaleTime = &now } _, err = a.client.Extensions().HorizontalPodAutoscalers(hpa.Namespace).UpdateStatus(&hpa) diff --git a/pkg/controller/podautoscaler/horizontal_test.go b/pkg/controller/podautoscaler/horizontal_test.go index 4b96a67587835..5f96de060f595 100644 --- a/pkg/controller/podautoscaler/horizontal_test.go +++ b/pkg/controller/podautoscaler/horizontal_test.go @@ -58,12 +58,13 @@ type testCase struct { maxReplicas int initialReplicas int desiredReplicas int - targetResource api.ResourceName - targetLevel resource.Quantity - reportedLevels []uint64 - scaleUpdated bool - eventCreated bool - verifyEvents bool + // CPU target utilization as a percentage of the requested resources.
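Aside: the scaling decision in the controller change above reduces to a single ratio. A minimal, self-contained Go sketch of that arithmetic follows; the tolerance value and the helper name are assumptions for illustration only, the real constant lives in horizontal.go, and the min/max clamping happens afterwards in reconcileAutoscaler. It also reproduces the expectation of TestScaleUp in the test fixtures below: three pods using 300, 500 and 700 milli-CPU against 1-CPU requests average to 50% utilization, so a 30% target yields ceil(50/30 * 3) = 5 replicas. The testCase struct definition continues immediately after this sketch.

	package main

	import (
		"fmt"
		"math"
	)

	// desiredReplicas mirrors computeReplicasForCPUUtilization: scale the current
	// replica count by observed/target utilization, but only when that ratio falls
	// outside a small tolerance band around 1.0.
	func desiredReplicas(currentReplicas, currentUtilization, targetPercentage int, tolerance float64) int {
		usageRatio := float64(currentUtilization) / float64(targetPercentage)
		if math.Abs(1.0-usageRatio) <= tolerance {
			return currentReplicas // close enough to the target: do not rescale
		}
		return int(math.Ceil(usageRatio * float64(currentReplicas)))
	}

	func main() {
		// Numbers from TestScaleUp: average utilization 50%, target 30%, 3 replicas.
		fmt.Println(desiredReplicas(3, 50, 30, 0.1)) // 5
	}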
+ CPUTarget int + reportedLevels []uint64 + reportedCPURequests []resource.Quantity + scaleUpdated bool + eventCreated bool + verifyEvents bool } func (tc *testCase) prepareTestClient(t *testing.T) *testclient.Fake { @@ -86,19 +87,21 @@ func (tc *testCase) prepareTestClient(t *testing.T) *testclient.Fake { SelfLink: "experimental/v1/namespaces/" + namespace + "/horizontalpodautoscalers/" + hpaName, }, Spec: extensions.HorizontalPodAutoscalerSpec{ - ScaleRef: &extensions.SubresourceReference{ + ScaleRef: extensions.SubresourceReference{ Kind: "replicationController", Name: rcName, Namespace: namespace, Subresource: "scale", }, - MinReplicas: tc.minReplicas, + MinReplicas: &tc.minReplicas, MaxReplicas: tc.maxReplicas, - Target: extensions.ResourceConsumption{Resource: tc.targetResource, Quantity: tc.targetLevel}, }, }, }, } + if tc.CPUTarget > 0.0 { + obj.Items[0].Spec.CPUUtilization = &extensions.CPUTargetUtilization{TargetPercentage: tc.CPUTarget} + } return true, obj, nil }) @@ -121,7 +124,7 @@ func (tc *testCase) prepareTestClient(t *testing.T) *testclient.Fake { fakeClient.AddReactor("list", "pods", func(action testclient.Action) (handled bool, ret runtime.Object, err error) { obj := &api.PodList{} - for i := 0; i < tc.initialReplicas; i++ { + for i := 0; i < len(tc.reportedCPURequests); i++ { podName := fmt.Sprintf("%s-%d", podNamePrefix, i) pod := api.Pod{ Status: api.PodStatus{ @@ -134,6 +137,17 @@ func (tc *testCase) prepareTestClient(t *testing.T) *testclient.Fake { "name": podNamePrefix, }, }, + Spec: api.PodSpec{ + Containers: []api.Container{ + { + Resources: api.ResourceRequirements{ + Requests: api.ResourceList{ + api.ResourceCPU: tc.reportedCPURequests[i], + }, + }, + }, + }, + }, } obj.Items = append(obj.Items, pod) } @@ -202,160 +216,148 @@ func (tc *testCase) runTest(t *testing.T) { tc.verifyResults(t) } -func TestCPU(t *testing.T) { - tc := testCase{ - minReplicas: 1, - maxReplicas: 5, - initialReplicas: 1, - desiredReplicas: 2, - targetResource: api.ResourceCPU, - targetLevel: resource.MustParse("0.1"), - reportedLevels: []uint64{200}, - } - tc.runTest(t) -} - -func TestMemory(t *testing.T) { - tc := testCase{ - minReplicas: 1, - maxReplicas: 5, - initialReplicas: 1, - desiredReplicas: 2, - targetResource: api.ResourceMemory, - targetLevel: resource.MustParse("1k"), - reportedLevels: []uint64{2000}, - } - tc.runTest(t) -} - func TestScaleUp(t *testing.T) { tc := testCase{ - minReplicas: 2, - maxReplicas: 6, - initialReplicas: 3, - desiredReplicas: 5, - targetResource: api.ResourceMemory, - targetLevel: resource.MustParse("3k"), - reportedLevels: []uint64{3000, 5000, 7000}, + minReplicas: 2, + maxReplicas: 6, + initialReplicas: 3, + desiredReplicas: 5, + CPUTarget: 30, + reportedLevels: []uint64{300, 500, 700}, + reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")}, } tc.runTest(t) } func TestScaleDown(t *testing.T) { tc := testCase{ - minReplicas: 2, - maxReplicas: 6, - initialReplicas: 5, - desiredReplicas: 3, - targetResource: api.ResourceCPU, - targetLevel: resource.MustParse("0.5"), - reportedLevels: []uint64{100, 300, 500, 250, 250}, + minReplicas: 2, + maxReplicas: 6, + initialReplicas: 5, + desiredReplicas: 3, + CPUTarget: 50, + reportedLevels: []uint64{100, 300, 500, 250, 250}, + reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")}, } tc.runTest(t) } func TestTolerance(t 
*testing.T) { tc := testCase{ - minReplicas: 1, - maxReplicas: 5, - initialReplicas: 3, - desiredReplicas: 3, - targetResource: api.ResourceMemory, - targetLevel: resource.MustParse("1k"), - reportedLevels: []uint64{1010, 1030, 1020}, + minReplicas: 1, + maxReplicas: 5, + initialReplicas: 3, + desiredReplicas: 3, + CPUTarget: 100, + reportedLevels: []uint64{1010, 1030, 1020}, + reportedCPURequests: []resource.Quantity{resource.MustParse("0.9"), resource.MustParse("1.0"), resource.MustParse("1.1")}, } tc.runTest(t) } func TestMinReplicas(t *testing.T) { tc := testCase{ - minReplicas: 2, - maxReplicas: 5, - initialReplicas: 3, - desiredReplicas: 2, - targetResource: api.ResourceMemory, - targetLevel: resource.MustParse("1k"), - reportedLevels: []uint64{10, 95, 10}, + minReplicas: 2, + maxReplicas: 5, + initialReplicas: 3, + desiredReplicas: 2, + CPUTarget: 90, + reportedLevels: []uint64{10, 95, 10}, + reportedCPURequests: []resource.Quantity{resource.MustParse("0.9"), resource.MustParse("1.0"), resource.MustParse("1.1")}, } tc.runTest(t) } func TestMaxReplicas(t *testing.T) { tc := testCase{ - minReplicas: 2, - maxReplicas: 5, - initialReplicas: 3, - desiredReplicas: 5, - targetResource: api.ResourceMemory, - targetLevel: resource.MustParse("1k"), - reportedLevels: []uint64{8000, 9500, 1000}, + minReplicas: 2, + maxReplicas: 5, + initialReplicas: 3, + desiredReplicas: 5, + CPUTarget: 90, + reportedLevels: []uint64{8000, 9500, 1000}, + reportedCPURequests: []resource.Quantity{resource.MustParse("0.9"), resource.MustParse("1.0"), resource.MustParse("1.1")}, } tc.runTest(t) } func TestSuperfluousMetrics(t *testing.T) { tc := testCase{ - minReplicas: 2, - maxReplicas: 6, - initialReplicas: 4, - desiredReplicas: 4, - targetResource: api.ResourceMemory, - targetLevel: resource.MustParse("1k"), - reportedLevels: []uint64{4000, 9500, 3000, 7000, 3200, 2000}, + minReplicas: 2, + maxReplicas: 6, + initialReplicas: 4, + desiredReplicas: 4, + CPUTarget: 100, + reportedLevels: []uint64{4000, 9500, 3000, 7000, 3200, 2000}, + reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")}, } tc.runTest(t) } func TestMissingMetrics(t *testing.T) { tc := testCase{ - minReplicas: 2, - maxReplicas: 6, - initialReplicas: 4, - desiredReplicas: 4, - targetResource: api.ResourceMemory, - targetLevel: resource.MustParse("1k"), - reportedLevels: []uint64{400, 95}, + minReplicas: 2, + maxReplicas: 6, + initialReplicas: 4, + desiredReplicas: 4, + CPUTarget: 100, + reportedLevels: []uint64{400, 95}, + reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")}, } tc.runTest(t) } func TestEmptyMetrics(t *testing.T) { tc := testCase{ - minReplicas: 2, - maxReplicas: 6, - initialReplicas: 4, - desiredReplicas: 4, - targetResource: api.ResourceMemory, - targetLevel: resource.MustParse("1k"), - reportedLevels: []uint64{}, + minReplicas: 2, + maxReplicas: 6, + initialReplicas: 4, + desiredReplicas: 4, + CPUTarget: 100, + reportedLevels: []uint64{}, + reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")}, } tc.runTest(t) } -func TestEventCreated(t *testing.T) { +func TestEmptyCPURequest(t *testing.T) { tc := testCase{ minReplicas: 1, maxReplicas: 5, initialReplicas: 1, - desiredReplicas: 2, - targetResource: api.ResourceCPU, - targetLevel: 
resource.MustParse("0.1"), + desiredReplicas: 1, + CPUTarget: 100, reportedLevels: []uint64{200}, - verifyEvents: true, + } + tc.runTest(t) +} + +func TestEventCreated(t *testing.T) { + tc := testCase{ + minReplicas: 1, + maxReplicas: 5, + initialReplicas: 1, + desiredReplicas: 2, + CPUTarget: 50, + reportedLevels: []uint64{200}, + reportedCPURequests: []resource.Quantity{resource.MustParse("0.2")}, + verifyEvents: true, } tc.runTest(t) } func TestEventNotCreated(t *testing.T) { tc := testCase{ - minReplicas: 1, - maxReplicas: 5, - initialReplicas: 2, - desiredReplicas: 2, - targetResource: api.ResourceCPU, - targetLevel: resource.MustParse("0.2"), - reportedLevels: []uint64{200, 200}, - verifyEvents: true, + minReplicas: 1, + maxReplicas: 5, + initialReplicas: 2, + desiredReplicas: 2, + CPUTarget: 50, + reportedLevels: []uint64{200, 200}, + reportedCPURequests: []resource.Quantity{resource.MustParse("0.4"), resource.MustParse("0.4")}, + verifyEvents: true, } tc.runTest(t) } + +// TODO: add more tests diff --git a/pkg/controller/podautoscaler/metrics/metrics_client.go b/pkg/controller/podautoscaler/metrics/metrics_client.go index 68497f2b7681e..13bb79c142c80 100644 --- a/pkg/controller/podautoscaler/metrics/metrics_client.go +++ b/pkg/controller/podautoscaler/metrics/metrics_client.go @@ -25,7 +25,6 @@ import ( "github.com/golang/glog" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/resource" - "k8s.io/kubernetes/pkg/apis/extensions" client "k8s.io/kubernetes/pkg/client/unversioned" "k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/labels" @@ -40,85 +39,110 @@ const ( var heapsterQueryStart = -5 * time.Minute -// An interface for getting metrics for pods. +// MetricsClient is an interface for getting metrics for pods. type MetricsClient interface { - ResourceConsumption(namespace string) ResourceConsumptionClient + // GetCPUUtilization returns average utilization over all pods + // represented as a percent of requested CPU, e.g. 70 means that + // an average pod uses 70% of the requested CPU. + GetCPUUtilization(namespace string, selector map[string]string) (*int, error) } -type ResourceConsumptionClient interface { - // Gets average resource consumption for pods under the given selector. - Get(resourceName api.ResourceName, selector map[string]string) (*extensions.ResourceConsumption, error) +// ResourceConsumption specifies consumption of a particular resource. +type ResourceConsumption struct { + Resource api.ResourceName + Quantity resource.Quantity } // Aggregates results into ResourceConsumption. Also returns number of // pods included in the aggregation. 
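Aside: the new GetCPUUtilization path added below amounts to "average observed consumption divided by average requested CPU, times 100". A small sketch of that arithmetic, using plain milli-CPU integers instead of resource.Quantity and treating a zero request as "missing" for brevity; it is an illustration, not the patch's implementation.

	package main

	import (
		"errors"
		"fmt"
	)

	// cpuUtilizationPercent approximates GetResourceConsumptionAndRequest plus
	// GetCPUUtilization: average the per-pod CPU requests, then express the
	// observed average consumption as an integer percentage of that request.
	func cpuUtilizationPercent(avgConsumedMilli int64, requestedMilliPerPod []int64) (int, error) {
		var sum int64
		for _, r := range requestedMilliPerPod {
			if r == 0 {
				// The real implementation refuses to compute utilization when any
				// pod is missing a CPU request.
				return 0, errors.New("some pods do not have request for cpu")
			}
			sum += r
		}
		if sum == 0 {
			return 0, errors.New("some pods do not have request for cpu")
		}
		avgRequest := sum / int64(len(requestedMilliPerPod))
		return int(float64(avgConsumedMilli) / float64(avgRequest) * 100), nil
	}

	func main() {
		// Three pods requesting 1 CPU each and consuming 500m on average -> 50%.
		u, _ := cpuUtilizationPercent(500, []int64{1000, 1000, 1000})
		fmt.Println(u) // 50
	}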
-type metricAggregator func(heapster.MetricResultList) (extensions.ResourceConsumption, int) +type metricAggregator func(heapster.MetricResultList) (ResourceConsumption, int) type metricDefinition struct { name string aggregator metricAggregator } -// Heapster-based implementation of MetricsClient +// HeapsterMetricsClient is Heapster-based implementation of MetricsClient type HeapsterMetricsClient struct { - client client.Interface -} - -type HeapsterResourceConsumptionClient struct { - namespace string client client.Interface resourceDefinitions map[api.ResourceName]metricDefinition } -func NewHeapsterMetricsClient(client client.Interface) *HeapsterMetricsClient { - return &HeapsterMetricsClient{client: client} -} - var heapsterMetricDefinitions = map[api.ResourceName]metricDefinition{ api.ResourceCPU: {"cpu-usage", - func(metrics heapster.MetricResultList) (extensions.ResourceConsumption, int) { + func(metrics heapster.MetricResultList) (ResourceConsumption, int) { sum, count := calculateSumFromLatestSample(metrics) value := "0" if count > 0 { // assumes that cpu usage is in millis value = fmt.Sprintf("%dm", sum/uint64(count)) } - return extensions.ResourceConsumption{Resource: api.ResourceCPU, Quantity: resource.MustParse(value)}, count + return ResourceConsumption{Resource: api.ResourceCPU, Quantity: resource.MustParse(value)}, count }}, api.ResourceMemory: {"memory-usage", - func(metrics heapster.MetricResultList) (extensions.ResourceConsumption, int) { + func(metrics heapster.MetricResultList) (ResourceConsumption, int) { sum, count := calculateSumFromLatestSample(metrics) value := int64(0) if count > 0 { value = int64(sum) / int64(count) } - return extensions.ResourceConsumption{Resource: api.ResourceMemory, Quantity: *resource.NewQuantity(value, resource.DecimalSI)}, count + return ResourceConsumption{Resource: api.ResourceMemory, Quantity: *resource.NewQuantity(value, resource.DecimalSI)}, count }}, } -func (h *HeapsterMetricsClient) ResourceConsumption(namespace string) ResourceConsumptionClient { - return &HeapsterResourceConsumptionClient{ - namespace: namespace, - client: h.client, +// NewHeapsterMetricsClient returns a new instance of Heapster-based implementation of MetricsClient interface. +func NewHeapsterMetricsClient(client client.Interface) *HeapsterMetricsClient { + return &HeapsterMetricsClient{ + client: client, resourceDefinitions: heapsterMetricDefinitions, } } -func (h *HeapsterResourceConsumptionClient) Get(resourceName api.ResourceName, selector map[string]string) (*extensions.ResourceConsumption, error) { - podList, err := h.client.Pods(h.namespace). +func (h *HeapsterMetricsClient) GetCPUUtilization(namespace string, selector map[string]string) (*int, error) { + consumption, request, err := h.GetResourceConsumptionAndRequest(api.ResourceCPU, namespace, selector) + if err != nil { + return nil, fmt.Errorf("failed to get CPU consumption and request: %v", err) + } + utilization := new(int) + *utilization = int(float64(consumption.Quantity.MilliValue()) / float64(request.MilliValue()) * 100) + return utilization, nil +} + +func (h *HeapsterMetricsClient) GetResourceConsumptionAndRequest(resourceName api.ResourceName, namespace string, selector map[string]string) (consumption *ResourceConsumption, request *resource.Quantity, err error) { + podList, err := h.client.Pods(namespace). 
List(labels.SelectorFromSet(labels.Set(selector)), fields.Everything()) if err != nil { - return nil, fmt.Errorf("failed to get pod list: %v", err) + return nil, nil, fmt.Errorf("failed to get pod list: %v", err) } podNames := []string{} + sum := resource.MustParse("0") + missing := false for _, pod := range podList.Items { podNames = append(podNames, pod.Name) + for _, container := range pod.Spec.Containers { + containerRequest := container.Resources.Requests[resourceName] + if containerRequest.Amount != nil { + sum.Add(containerRequest) + } else { + missing = true + } + } + } + if missing || sum.Cmp(resource.MustParse("0")) == 0 { + return nil, nil, fmt.Errorf("some pods do not have request for %s", resourceName) + } + glog.Infof("Sum of %s requested: %v", resourceName, sum) + avg := resource.MustParse(fmt.Sprintf("%dm", sum.MilliValue()/int64(len(podList.Items)))) + request = &avg + consumption, err = h.getForPods(resourceName, namespace, podNames) + if err != nil { + return nil, nil, err } - return h.getForPods(resourceName, podNames) + return consumption, request, nil } -func (h *HeapsterResourceConsumptionClient) getForPods(resourceName api.ResourceName, podNames []string) (*extensions.ResourceConsumption, error) { +func (h *HeapsterMetricsClient) getForPods(resourceName api.ResourceName, namespace string, podNames []string) (*ResourceConsumption, error) { metricSpec, metricDefined := h.resourceDefinitions[resourceName] if !metricDefined { return nil, fmt.Errorf("heapster metric not defined for %v", resourceName) @@ -127,7 +151,7 @@ func (h *HeapsterResourceConsumptionClient) getForPods(resourceName api.Resource startTime := now.Add(heapsterQueryStart) metricPath := fmt.Sprintf("/api/v1/model/namespaces/%s/pod-list/%s/metrics/%s", - h.namespace, + namespace, strings.Join(podNames, ","), metricSpec.name) diff --git a/pkg/controller/podautoscaler/metrics/metrics_client_test.go b/pkg/controller/podautoscaler/metrics/metrics_client_test.go index 86dd1d020466e..c1b4f994efbcf 100644 --- a/pkg/controller/podautoscaler/metrics/metrics_client_test.go +++ b/pkg/controller/podautoscaler/metrics/metrics_client_test.go @@ -25,7 +25,7 @@ import ( "k8s.io/kubernetes/pkg/api" _ "k8s.io/kubernetes/pkg/api/latest" - "k8s.io/kubernetes/pkg/apis/extensions" + "k8s.io/kubernetes/pkg/api/resource" client "k8s.io/kubernetes/pkg/client/unversioned" "k8s.io/kubernetes/pkg/client/unversioned/testclient" "k8s.io/kubernetes/pkg/runtime" @@ -81,14 +81,25 @@ func (tc *testCase) prepareTestClient(t *testing.T) *testclient.Fake { for i := 0; i < tc.replicas; i++ { podName := fmt.Sprintf("%s-%d", podNamePrefix, i) pod := api.Pod{ - Status: api.PodStatus{ - Phase: api.PodRunning, - }, ObjectMeta: api.ObjectMeta{ Name: podName, Namespace: namespace, Labels: selector, }, + Spec: api.PodSpec{ + Containers: []api.Container{ + { + Resources: api.ResourceRequirements{ + Requests: api.ResourceList{ + tc.targetResource: resource.MustParse("10"), + }, + }, + }, + }, + }, + Status: api.PodStatus{ + Phase: api.PodRunning, + }, } obj.Items = append(obj.Items, pod) } @@ -122,7 +133,7 @@ func (tc *testCase) prepareTestClient(t *testing.T) *testclient.Fake { return fakeClient } -func (tc *testCase) verifyResults(t *testing.T, val *extensions.ResourceConsumption, err error) { +func (tc *testCase) verifyResults(t *testing.T, val *ResourceConsumption, err error) { assert.Equal(t, tc.desiredError, err) if tc.desiredError != nil { return @@ -138,7 +149,7 @@ func (tc *testCase) verifyResults(t *testing.T, val *extensions.ResourceConsumpt 
func (tc *testCase) runTest(t *testing.T) { testClient := tc.prepareTestClient(t) metricsClient := NewHeapsterMetricsClient(testClient) - val, err := metricsClient.ResourceConsumption(tc.namespace).Get(tc.targetResource, tc.selector) + val, _, err := metricsClient.GetResourceConsumptionAndRequest(tc.targetResource, tc.namespace, tc.selector) tc.verifyResults(t, val, err) } @@ -331,7 +342,7 @@ func TestCPUZeroReplicas(t *testing.T) { tc := testCase{ replicas: 0, targetResource: api.ResourceCPU, - desiredValue: 0, + desiredError: fmt.Errorf("some pods do not have request for cpu"), reportedMetricsPoints: [][]metricPoint{}, } tc.runTest(t) @@ -341,7 +352,7 @@ func TestMemoryZeroReplicas(t *testing.T) { tc := testCase{ replicas: 0, targetResource: api.ResourceMemory, - desiredValue: 0, + desiredError: fmt.Errorf("some pods do not have request for memory"), reportedMetricsPoints: [][]metricPoint{}, } tc.runTest(t) @@ -366,3 +377,5 @@ func TestMemoryEmptyMetricsForOnePod(t *testing.T) { } tc.runTest(t) } + +// TODO: add proper tests for request diff --git a/pkg/kubectl/describe.go b/pkg/kubectl/describe.go index 462d62c9711f7..a22edf865cd11 100644 --- a/pkg/kubectl/describe.go +++ b/pkg/kubectl/describe.go @@ -1250,17 +1250,14 @@ func (d *HorizontalPodAutoscalerDescriber) Describe(namespace, name string) (str hpa.Spec.ScaleRef.Namespace, hpa.Spec.ScaleRef.Name, hpa.Spec.ScaleRef.Subresource) - fmt.Fprintf(out, "Target resource consumption:\t%s %s\n", - hpa.Spec.Target.Quantity.String(), - hpa.Spec.Target.Resource) - fmt.Fprintf(out, "Current resource consumption:\t") - - if hpa.Status.CurrentConsumption != nil { - fmt.Fprintf(out, "%s %s\n", - hpa.Status.CurrentConsumption.Quantity.String(), - hpa.Status.CurrentConsumption.Resource) - } else { - fmt.Fprintf(out, "\n") + if hpa.Spec.CPUUtilization != nil { + fmt.Fprintf(out, "Target CPU utilization:\t%d%%\n", hpa.Spec.CPUUtilization.TargetPercentage) + fmt.Fprintf(out, "Current CPU utilization:\t") + if hpa.Status.CurrentCPUUtilizationPercentage != nil { + fmt.Fprintf(out, "%d%%\n", *hpa.Status.CurrentCPUUtilizationPercentage) + } else { + fmt.Fprintf(out, "\n") + } } fmt.Fprintf(out, "Min pods:\t%d\n", hpa.Spec.MinReplicas) fmt.Fprintf(out, "Max pods:\t%d\n", hpa.Spec.MaxReplicas) diff --git a/pkg/kubectl/resource_printer.go b/pkg/kubectl/resource_printer.go index e7f83f45e0be9..6c3a4436118fc 100644 --- a/pkg/kubectl/resource_printer.go +++ b/pkg/kubectl/resource_printer.go @@ -1388,11 +1388,13 @@ func printHorizontalPodAutoscaler(hpa *extensions.HorizontalPodAutoscaler, w io. 
hpa.Spec.ScaleRef.Namespace, hpa.Spec.ScaleRef.Name, hpa.Spec.ScaleRef.Subresource) - target := fmt.Sprintf("%s %v", hpa.Spec.Target.Quantity.String(), hpa.Spec.Target.Resource) - + target := "" + if hpa.Spec.CPUUtilization != nil { + target = fmt.Sprintf("%d%%", hpa.Spec.CPUUtilization.TargetPercentage) + } current := "" - if hpa.Status.CurrentConsumption != nil { - current = fmt.Sprintf("%s %v", hpa.Status.CurrentConsumption.Quantity.String(), hpa.Status.CurrentConsumption.Resource) + if hpa.Status.CurrentCPUUtilizationPercentage != nil { + current = fmt.Sprintf("%d%%", *hpa.Status.CurrentCPUUtilizationPercentage) } minPods := hpa.Spec.MinReplicas maxPods := hpa.Spec.MaxReplicas diff --git a/pkg/registry/horizontalpodautoscaler/etcd/etcd_test.go b/pkg/registry/horizontalpodautoscaler/etcd/etcd_test.go index 027e8b500f0f9..497e27480f2cc 100644 --- a/pkg/registry/horizontalpodautoscaler/etcd/etcd_test.go +++ b/pkg/registry/horizontalpodautoscaler/etcd/etcd_test.go @@ -20,7 +20,6 @@ import ( "testing" "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/resource" "k8s.io/kubernetes/pkg/apis/extensions" // Ensure that extensions/v1beta1 package is initialized. _ "k8s.io/kubernetes/pkg/apis/extensions/v1beta1" @@ -44,12 +43,11 @@ func validNewHorizontalPodAutoscaler(name string) *extensions.HorizontalPodAutos Namespace: api.NamespaceDefault, }, Spec: extensions.HorizontalPodAutoscalerSpec{ - ScaleRef: &extensions.SubresourceReference{ + ScaleRef: extensions.SubresourceReference{ Subresource: "scale", }, - MinReplicas: 1, - MaxReplicas: 5, - Target: extensions.ResourceConsumption{Resource: api.ResourceCPU, Quantity: resource.MustParse("0.8")}, + MaxReplicas: 5, + CPUUtilization: &extensions.CPUTargetUtilization{TargetPercentage: 70}, }, } } diff --git a/test/e2e/horizontal_pod_autoscaling.go b/test/e2e/horizontal_pod_autoscaling.go index 1eb1e8c47f7a0..d70bbf7ba894d 100644 --- a/test/e2e/horizontal_pod_autoscaling.go +++ b/test/e2e/horizontal_pod_autoscaling.go @@ -17,10 +17,7 @@ limitations under the License. package e2e import ( - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/resource" - "k8s.io/kubernetes/pkg/apis/extensions" - + "github.com/golang/glog" . 
"github.com/onsi/ginkgo" ) @@ -34,7 +31,7 @@ var _ = Describe("Horizontal pod autoscaling", func() { f := NewFramework("horizontal-pod-autoscaling") // CPU tests - It("[Skipped][Autoscaling Suite] should scale from 1 pod to 3 pods and from 3 to 5 (scale resource: CPU)", func() { + It("[Skipped] should scale from 1 pod to 3 pods and from 3 to 5 (scale resource: CPU)", func() { rc = NewDynamicResourceConsumer("rc", 1, 250, 0, 400, 100, f) defer rc.CleanUp() createCPUHorizontalPodAutoscaler(rc, "0.1") @@ -43,7 +40,7 @@ var _ = Describe("Horizontal pod autoscaling", func() { rc.WaitForReplicas(5) }) - It("[Skipped][Autoscaling Suite] should scale from 5 pods to 3 pods and from 3 to 1 (scale resource: CPU)", func() { + It("[Skipped] should scale from 5 pods to 3 pods and from 3 to 1 (scale resource: CPU)", func() { rc = NewDynamicResourceConsumer("rc", 5, 700, 0, 200, 100, f) defer rc.CleanUp() createCPUHorizontalPodAutoscaler(rc, "0.3") @@ -53,7 +50,7 @@ var _ = Describe("Horizontal pod autoscaling", func() { }) // Memory tests - It("[Skipped][Autoscaling Suite] should scale from 1 pod to 3 pods and from 3 to 5 (scale resource: Memory)", func() { + It("[Skipped] should scale from 1 pod to 3 pods and from 3 to 5 (scale resource: Memory)", func() { rc = NewDynamicResourceConsumer("rc", 1, 0, 2200, 100, 2500, f) defer rc.CleanUp() createMemoryHorizontalPodAutoscaler(rc, "1000") @@ -62,7 +59,7 @@ var _ = Describe("Horizontal pod autoscaling", func() { rc.WaitForReplicas(5) }) - It("[Skipped][Autoscaling Suite] should scale from 5 pods to 3 pods and from 3 to 1 (scale resource: Memory)", func() { + It("[Skipped] should scale from 5 pods to 3 pods and from 3 to 1 (scale resource: Memory)", func() { rc = NewDynamicResourceConsumer("rc", 5, 0, 2200, 100, 2500, f) defer rc.CleanUp() createMemoryHorizontalPodAutoscaler(rc, "1000") @@ -152,46 +149,12 @@ var _ = Describe("Horizontal pod autoscaling", func() { }) func createCPUHorizontalPodAutoscaler(rc *ResourceConsumer, cpu string) { - hpa := &extensions.HorizontalPodAutoscaler{ - ObjectMeta: api.ObjectMeta{ - Name: rc.name, - Namespace: rc.framework.Namespace.Name, - }, - Spec: extensions.HorizontalPodAutoscalerSpec{ - ScaleRef: &extensions.SubresourceReference{ - Kind: kind, - Name: rc.name, - Namespace: rc.framework.Namespace.Name, - Subresource: subresource, - }, - MinReplicas: 1, - MaxReplicas: 5, - Target: extensions.ResourceConsumption{Resource: api.ResourceCPU, Quantity: resource.MustParse(cpu)}, - }, - } - _, errHPA := rc.framework.Client.Extensions().HorizontalPodAutoscalers(rc.framework.Namespace.Name).Create(hpa) - expectNoError(errHPA) + glog.Fatal("createCPUHorizontalPodAutoscaler not implemented!") + // TODO: reimplemente e2e tests for the new API. 
} // argument memory is in megabytes func createMemoryHorizontalPodAutoscaler(rc *ResourceConsumer, memory string) { - hpa := &extensions.HorizontalPodAutoscaler{ - ObjectMeta: api.ObjectMeta{ - Name: rc.name, - Namespace: rc.framework.Namespace.Name, - }, - Spec: extensions.HorizontalPodAutoscalerSpec{ - ScaleRef: &extensions.SubresourceReference{ - Kind: kind, - Name: rc.name, - Namespace: rc.framework.Namespace.Name, - Subresource: subresource, - }, - MinReplicas: 1, - MaxReplicas: 5, - Target: extensions.ResourceConsumption{Resource: api.ResourceMemory, Quantity: resource.MustParse(memory + "M")}, - }, - } - _, errHPA := rc.framework.Client.Extensions().HorizontalPodAutoscalers(rc.framework.Namespace.Name).Create(hpa) - expectNoError(errHPA) + glog.Fatal("createMemoryHorizontalPodAutoscaler not implemented!") + // TODO: reimplement e2e tests for the new API. }
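The two e2e helpers above are left as glog.Fatal stubs with a TODO. Below is a hedged sketch of how createCPUHorizontalPodAutoscaler might be reimplemented against the new API: it mirrors the removed code, swaps the quantity-based target for CPUTargetUtilization, and takes a percentage instead of a CPU-quantity string. The rc fields, kind, subresource and expectNoError helpers are the ones the removed code used, and the api/extensions imports dropped by this patch would need to return. The memory variant has no direct equivalent, since the new spec only targets CPU utilization.

	// Illustrative sketch only, not part of this patch.
	func createCPUHorizontalPodAutoscaler(rc *ResourceConsumer, targetCPUUtilizationPercent int) {
		minReplicas := 1
		hpa := &extensions.HorizontalPodAutoscaler{
			ObjectMeta: api.ObjectMeta{
				Name:      rc.name,
				Namespace: rc.framework.Namespace.Name,
			},
			Spec: extensions.HorizontalPodAutoscalerSpec{
				ScaleRef: extensions.SubresourceReference{
					Kind:        kind,
					Name:        rc.name,
					Namespace:   rc.framework.Namespace.Name,
					Subresource: subresource,
				},
				MinReplicas:    &minReplicas,
				MaxReplicas:    5,
				CPUUtilization: &extensions.CPUTargetUtilization{TargetPercentage: targetCPUUtilizationPercent},
			},
		}
		_, err := rc.framework.Client.Extensions().HorizontalPodAutoscalers(rc.framework.Namespace.Name).Create(hpa)
		expectNoError(err)
	}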