@mgazza
Last active June 2, 2021 09:51

Revisions

  1. mgazza revised this gist Jan 15, 2019. 1 changed file with 415 additions and 0 deletions.
    415 changes: 415 additions & 0 deletions replica_calculator.go
    @@ -0,0 +1,415 @@
    /*
    Copyright 2016 The Kubernetes Authors.
    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at
    http://www.apache.org/licenses/LICENSE-2.0
    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
    */

    package podautoscaler

    import (
    "fmt"
    "math"
    "time"

    autoscaling "k8s.io/api/autoscaling/v2beta2"
    "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/labels"
    "k8s.io/apimachinery/pkg/util/sets"
    corelisters "k8s.io/client-go/listers/core/v1"
    podutil "k8s.io/kubernetes/pkg/api/v1/pod"
    metricsclient "k8s.io/kubernetes/pkg/controller/podautoscaler/metrics"
    )

    const (
    // defaultTestingTolerance is the default tolerance used when deciding whether to
    // scale up/scale down.
    defaultTestingTolerance = 0.1
    defaultTestingCpuInitializationPeriod = 2 * time.Minute
    defaultTestingDelayOfInitialReadinessStatus = 10 * time.Second
    )

    type ReplicaCalculator struct {
    metricsClient metricsclient.MetricsClient
    podLister corelisters.PodLister
    tolerance float64
    cpuInitializationPeriod time.Duration
    delayOfInitialReadinessStatus time.Duration
    }

    func NewReplicaCalculator(metricsClient metricsclient.MetricsClient, podLister corelisters.PodLister, tolerance float64, cpuInitializationPeriod, delayOfInitialReadinessStatus time.Duration) *ReplicaCalculator {
    return &ReplicaCalculator{
    metricsClient: metricsClient,
    podLister: podLister,
    tolerance: tolerance,
    cpuInitializationPeriod: cpuInitializationPeriod,
    delayOfInitialReadinessStatus: delayOfInitialReadinessStatus,
    }
    }

    // GetResourceReplicas calculates the desired replica count based on a target resource utilization percentage
    // of the given resource for pods matching the given selector in the given namespace, and the current replica count
    func (c *ReplicaCalculator) GetResourceReplicas(currentReplicas int32, targetUtilization int32, resource v1.ResourceName, namespace string, selector labels.Selector) (replicaCount int32, utilization int32, rawUtilization int64, timestamp time.Time, err error) {
    metrics, timestamp, err := c.metricsClient.GetResourceMetric(resource, namespace, selector)
    if err != nil {
    return 0, 0, 0, time.Time{}, fmt.Errorf("unable to get metrics for resource %s: %v", resource, err)
    }
    podList, err := c.podLister.Pods(namespace).List(selector)
    if err != nil {
    return 0, 0, 0, time.Time{}, fmt.Errorf("unable to get pods while calculating replica count: %v", err)
    }

    itemsLen := len(podList)
    if itemsLen == 0 {
    return 0, 0, 0, time.Time{}, fmt.Errorf("no pods returned by selector while calculating replica count")
    }

    readyPodCount, ignoredPods, missingPods := groupPods(podList, metrics, resource, c.cpuInitializationPeriod, c.delayOfInitialReadinessStatus)
    removeMetricsForPods(metrics, ignoredPods)
    requests, err := calculatePodRequests(podList, resource)
    if err != nil {
    return 0, 0, 0, time.Time{}, err
    }

    if len(metrics) == 0 {
    return 0, 0, 0, time.Time{}, fmt.Errorf("did not receive metrics for any ready pods")
    }

    if resource == v1.ResourceMemory {
    for _, pod := range podList {
    // treat any pod that is running but not yet ready as using 100% of its memory request
    _, condition := podutil.GetPodCondition(&pod.Status, v1.PodReady)
    if condition == nil || pod.Status.StartTime == nil {
    continue
    }
    if condition.Status == v1.ConditionFalse && pod.Status.Phase == v1.PodRunning {
    metrics[pod.Name] = metricsclient.PodMetric{Value: requests[pod.Name]}
    }
    }
    }

    usageRatio, utilization, rawUtilization, err := metricsclient.GetResourceUtilizationRatio(metrics, requests, targetUtilization)
    if err != nil {
    return 0, 0, 0, time.Time{}, err
    }

    rebalanceIgnored := len(ignoredPods) > 0 && usageRatio > 1.0
    if !rebalanceIgnored && len(missingPods) == 0 {
    if math.Abs(1.0-usageRatio) <= c.tolerance {
    // return the current replicas if the change would be too small
    return currentReplicas, utilization, rawUtilization, timestamp, nil
    }

    // if we don't have any unready or missing pods, we can calculate the new replica count now
    return int32(math.Ceil(usageRatio * float64(readyPodCount))), utilization, rawUtilization, timestamp, nil
    }

    if len(missingPods) > 0 {
    if usageRatio < 1.0 {
    // on a scale-down, treat missing pods as using 100% of the resource request
    for podName := range missingPods {
    metrics[podName] = metricsclient.PodMetric{Value: requests[podName]}
    }
    } else if usageRatio > 1.0 {
    // on a scale-up, treat missing pods as using 0% of the resource request
    for podName := range missingPods {
    metrics[podName] = metricsclient.PodMetric{Value: 0}
    }
    }
    }

    if rebalanceIgnored {
    // on a scale-up, treat unready pods as using 0% of the resource request
    for podName := range ignoredPods {
    metrics[podName] = metricsclient.PodMetric{Value: 0}
    }
    }

    // re-run the utilization calculation with our new numbers
    newUsageRatio, _, _, err := metricsclient.GetResourceUtilizationRatio(metrics, requests, targetUtilization)
    if err != nil {
    return 0, utilization, rawUtilization, time.Time{}, err
    }

    if math.Abs(1.0-newUsageRatio) <= c.tolerance || (usageRatio < 1.0 && newUsageRatio > 1.0) || (usageRatio > 1.0 && newUsageRatio < 1.0) {
    // return the current replicas if the change would be too small,
    // or if the new usage ratio would cause a change in scale direction
    return currentReplicas, utilization, rawUtilization, timestamp, nil
    }

    // return the result, where the number of replicas considered is
    // however many replicas factored into our calculation
    return int32(math.Ceil(newUsageRatio * float64(len(metrics)))), utilization, rawUtilization, timestamp, nil
    }

    // GetRawResourceReplicas calculates the desired replica count based on a target resource utilization (as a raw milli-value)
    // for pods matching the given selector in the given namespace, and the current replica count
    func (c *ReplicaCalculator) GetRawResourceReplicas(currentReplicas int32, targetUtilization int64, resource v1.ResourceName, namespace string, selector labels.Selector) (replicaCount int32, utilization int64, timestamp time.Time, err error) {
    metrics, timestamp, err := c.metricsClient.GetResourceMetric(resource, namespace, selector)
    if err != nil {
    return 0, 0, time.Time{}, fmt.Errorf("unable to get metrics for resource %s: %v", resource, err)
    }

    replicaCount, utilization, err = c.calcPlainMetricReplicas(metrics, currentReplicas, targetUtilization, namespace, selector, resource)
    return replicaCount, utilization, timestamp, err
    }

    // GetMetricReplicas calculates the desired replica count based on a target metric utilization
    // (as a milli-value) for pods matching the given selector in the given namespace, and the
    // current replica count
    func (c *ReplicaCalculator) GetMetricReplicas(currentReplicas int32, targetUtilization int64, metricName string, namespace string, selector labels.Selector, metricSelector labels.Selector) (replicaCount int32, utilization int64, timestamp time.Time, err error) {
    metrics, timestamp, err := c.metricsClient.GetRawMetric(metricName, namespace, selector, metricSelector)
    if err != nil {
    return 0, 0, time.Time{}, fmt.Errorf("unable to get metric %s: %v", metricName, err)
    }

    replicaCount, utilization, err = c.calcPlainMetricReplicas(metrics, currentReplicas, targetUtilization, namespace, selector, v1.ResourceName(""))
    return replicaCount, utilization, timestamp, err
    }

    // calcPlainMetricReplicas calculates the desired replicas for plain (i.e. non-utilization percentage) metrics.
    func (c *ReplicaCalculator) calcPlainMetricReplicas(metrics metricsclient.PodMetricsInfo, currentReplicas int32, targetUtilization int64, namespace string, selector labels.Selector, resource v1.ResourceName) (replicaCount int32, utilization int64, err error) {

    podList, err := c.podLister.Pods(namespace).List(selector)
    if err != nil {
    return 0, 0, fmt.Errorf("unable to get pods while calculating replica count: %v", err)
    }

    if len(podList) == 0 {
    return 0, 0, fmt.Errorf("no pods returned by selector while calculating replica count")
    }

    readyPodCount, ignoredPods, missingPods := groupPods(podList, metrics, resource, c.cpuInitializationPeriod, c.delayOfInitialReadinessStatus)
    removeMetricsForPods(metrics, ignoredPods)

    if len(metrics) == 0 {
    return 0, 0, fmt.Errorf("did not receive metrics for any ready pods")
    }

    usageRatio, utilization := metricsclient.GetMetricUtilizationRatio(metrics, targetUtilization)

    rebalanceIgnored := len(ignoredPods) > 0 && usageRatio > 1.0

    if !rebalanceIgnored && len(missingPods) == 0 {
    if math.Abs(1.0-usageRatio) <= c.tolerance {
    // return the current replicas if the change would be too small
    return currentReplicas, utilization, nil
    }

    // if we don't have any unready or missing pods, we can calculate the new replica count now
    return int32(math.Ceil(usageRatio * float64(readyPodCount))), utilization, nil
    }

    if len(missingPods) > 0 {
    if usageRatio < 1.0 {
    // on a scale-down, treat missing pods as using 100% of the resource request
    for podName := range missingPods {
    metrics[podName] = metricsclient.PodMetric{Value: targetUtilization}
    }
    } else {
    // on a scale-up, treat missing pods as using 0% of the resource request
    for podName := range missingPods {
    metrics[podName] = metricsclient.PodMetric{Value: 0}
    }
    }
    }

    if rebalanceIgnored {
    // on a scale-up, treat unready pods as using 0% of the resource request
    for podName := range ignoredPods {
    metrics[podName] = metricsclient.PodMetric{Value: 0}
    }
    }

    // re-run the utilization calculation with our new numbers
    newUsageRatio, _ := metricsclient.GetMetricUtilizationRatio(metrics, targetUtilization)

    if math.Abs(1.0-newUsageRatio) <= c.tolerance || (usageRatio < 1.0 && newUsageRatio > 1.0) || (usageRatio > 1.0 && newUsageRatio < 1.0) {
    // return the current replicas if the change would be too small,
    // or if the new usage ratio would cause a change in scale direction
    return currentReplicas, utilization, nil
    }

    // return the result, where the number of replicas considered is
    // however many replicas factored into our calculation
    return int32(math.Ceil(newUsageRatio * float64(len(metrics)))), utilization, nil
    }

    // GetObjectMetricReplicas calculates the desired replica count based on a target metric utilization (as a milli-value)
    // for the given object in the given namespace, and the current replica count.
    func (c *ReplicaCalculator) GetObjectMetricReplicas(currentReplicas int32, targetUtilization int64, metricName string, namespace string, objectRef *autoscaling.CrossVersionObjectReference, selector labels.Selector, metricSelector labels.Selector) (replicaCount int32, utilization int64, timestamp time.Time, err error) {
    utilization, timestamp, err = c.metricsClient.GetObjectMetric(metricName, namespace, objectRef, metricSelector)
    if err != nil {
    return 0, 0, time.Time{}, fmt.Errorf("unable to get metric %s: %v on %s %s/%s", metricName, objectRef.Kind, namespace, objectRef.Name, err)
    }

    usageRatio := float64(utilization) / float64(targetUtilization)
    if math.Abs(1.0-usageRatio) <= c.tolerance {
    // return the current replicas if the change would be too small
    return currentReplicas, utilization, timestamp, nil
    }

    readyPodCount, err := c.getReadyPodsCount(namespace, selector)

    if err != nil {
    return 0, 0, time.Time{}, fmt.Errorf("unable to calculate ready pods: %s", err)
    }

    replicaCount = int32(math.Ceil(usageRatio * float64(readyPodCount)))

    return replicaCount, utilization, timestamp, nil
    }

    // @TODO(mattjmcnaughton) Many different functions in this module use variations
    // of this function. Make this function generic, so we don't repeat the same
    // logic in multiple places.
    func (c *ReplicaCalculator) getReadyPodsCount(namespace string, selector labels.Selector) (int64, error) {
    podList, err := c.podLister.Pods(namespace).List(selector)
    if err != nil {
    return 0, fmt.Errorf("unable to get pods while calculating replica count: %v", err)
    }

    if len(podList) == 0 {
    return 0, fmt.Errorf("no pods returned by selector while calculating replica count")
    }

    readyPodCount := 0

    for _, pod := range podList {
    if pod.Status.Phase == v1.PodRunning && podutil.IsPodReady(pod) {
    readyPodCount++
    }
    }

    return int64(readyPodCount), nil
    }

    // GetExternalMetricReplicas calculates the desired replica count based on a
    // target metric value (as a milli-value) for the external metric in the given
    // namespace, and the current replica count.
    func (c *ReplicaCalculator) GetExternalMetricReplicas(currentReplicas int32, targetUtilization int64, metricName, namespace string, metricSelector *metav1.LabelSelector, podSelector labels.Selector) (replicaCount int32, utilization int64, timestamp time.Time, err error) {
    metricLabelSelector, err := metav1.LabelSelectorAsSelector(metricSelector)
    if err != nil {
    return 0, 0, time.Time{}, err
    }
    metrics, timestamp, err := c.metricsClient.GetExternalMetric(metricName, namespace, metricLabelSelector)
    if err != nil {
    return 0, 0, time.Time{}, fmt.Errorf("unable to get external metric %s/%s/%+v: %s", namespace, metricName, metricSelector, err)
    }
    utilization = 0
    for _, val := range metrics {
    utilization = utilization + val
    }

    readyPodCount, err := c.getReadyPodsCount(namespace, podSelector)

    if err != nil {
    return 0, 0, time.Time{}, fmt.Errorf("unable to calculate ready pods: %s", err)
    }

    usageRatio := float64(utilization) / float64(targetUtilization)
    if math.Abs(1.0-usageRatio) <= c.tolerance {
    // return the current replicas if the change would be too small
    return currentReplicas, utilization, timestamp, nil
    }

    return int32(math.Ceil(usageRatio * float64(readyPodCount))), utilization, timestamp, nil
    }

    // GetExternalPerPodMetricReplicas calculates the desired replica count based on a
    // target metric value per pod (as a milli-value) for the external metric in the
    // given namespace, and the current replica count.
    func (c *ReplicaCalculator) GetExternalPerPodMetricReplicas(currentReplicas int32, targetUtilizationPerPod int64, metricName, namespace string, metricSelector *metav1.LabelSelector) (replicaCount int32, utilization int64, timestamp time.Time, err error) {
    metricLabelSelector, err := metav1.LabelSelectorAsSelector(metricSelector)
    if err != nil {
    return 0, 0, time.Time{}, err
    }
    metrics, timestamp, err := c.metricsClient.GetExternalMetric(metricName, namespace, metricLabelSelector)
    if err != nil {
    return 0, 0, time.Time{}, fmt.Errorf("unable to get external metric %s/%s/%+v: %s", namespace, metricName, metricSelector, err)
    }
    utilization = 0
    for _, val := range metrics {
    utilization = utilization + val
    }

    replicaCount = currentReplicas
    usageRatio := float64(utilization) / (float64(targetUtilizationPerPod) * float64(replicaCount))
    if math.Abs(1.0-usageRatio) > c.tolerance {
    // update number of replicas if the change is large enough
    replicaCount = int32(math.Ceil(float64(utilization) / float64(targetUtilizationPerPod)))
    }
    utilization = int64(math.Ceil(float64(utilization) / float64(currentReplicas)))
    return replicaCount, utilization, timestamp, nil
    }

    func groupPods(pods []*v1.Pod, metrics metricsclient.PodMetricsInfo, resource v1.ResourceName, cpuInitializationPeriod, delayOfInitialReadinessStatus time.Duration) (readyPodCount int, ignoredPods sets.String, missingPods sets.String) {
    missingPods = sets.NewString()
    ignoredPods = sets.NewString()
    for _, pod := range pods {
    if pod.DeletionTimestamp != nil || pod.Status.Phase == v1.PodFailed {
    continue
    }
    metric, found := metrics[pod.Name]
    if !found {
    missingPods.Insert(pod.Name)
    continue
    }
    if resource == v1.ResourceCPU {
    var ignorePod bool
    _, condition := podutil.GetPodCondition(&pod.Status, v1.PodReady)
    if condition == nil || pod.Status.StartTime == nil {
    ignorePod = true
    } else {
    // Pod still within possible initialisation period.
    if pod.Status.StartTime.Add(cpuInitializationPeriod).After(time.Now()) {
    // Ignore sample if pod is unready or one window of metric wasn't collected since last state transition.
    ignorePod = condition.Status == v1.ConditionFalse || metric.Timestamp.Before(condition.LastTransitionTime.Time.Add(metric.Window))
    } else {
    // Ignore metric if pod is unready and it has never been ready.
    ignorePod = condition.Status == v1.ConditionFalse && pod.Status.StartTime.Add(delayOfInitialReadinessStatus).After(condition.LastTransitionTime.Time)
    }
    }
    if ignorePod {
    ignoredPods.Insert(pod.Name)
    continue
    }
    }
    readyPodCount++
    }
    return
    }

    func calculatePodRequests(pods []*v1.Pod, resource v1.ResourceName) (map[string]int64, error) {
    requests := make(map[string]int64, len(pods))
    for _, pod := range pods {
    podSum := int64(0)
    for _, container := range pod.Spec.Containers {
    if containerRequest, ok := container.Resources.Requests[resource]; ok {
    podSum += containerRequest.MilliValue()
    } else {
    return nil, fmt.Errorf("missing request for %s", resource)
    }
    }
    requests[pod.Name] = podSum
    }
    return requests, nil
    }

    func removeMetricsForPods(metrics metricsclient.PodMetricsInfo, pods sets.String) {
    for _, pod := range pods.UnsortedList() {
    delete(metrics, pod)
    }
    }
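
    For orientation, here is a minimal sketch of how the calculator above might be wired up for a memory-utilization target. It is an illustration under assumptions, not code from the gist: the helper name desiredMemoryReplicas is invented, it is assumed to live in the same podautoscaler package (so the imports and defaultTesting* constants are in scope), and in-tree the equivalent wiring is done by the HPA controller, which supplies the metrics client and pod lister.

    // Hypothetical helper (not part of the gist) showing how ReplicaCalculator is used.
    func desiredMemoryReplicas(mc metricsclient.MetricsClient, pl corelisters.PodLister,
        current, targetPercent int32, ns string, sel labels.Selector) (int32, error) {
        calc := NewReplicaCalculator(mc, pl, defaultTestingTolerance,
            defaultTestingCpuInitializationPeriod, defaultTestingDelayOfInitialReadinessStatus)
        // GetResourceReplicas returns (replicaCount, utilization, rawUtilization, timestamp, err);
        // only the replica count and error are needed here.
        replicas, _, _, _, err := calc.GetResourceReplicas(current, targetPercent, v1.ResourceMemory, ns, sel)
        return replicas, err
    }

    With the memory special case above, pods that are running but not yet ready count as fully utilized, so a fleet of unready pods can still trigger a scale-up instead of being discounted.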
  2. mgazza revised this gist Jan 15, 2019. 1 changed file with 30 additions and 30 deletions.
    60 changes: 30 additions & 30 deletions horizontal_test.go
    @@ -96,20 +96,20 @@ type testCase struct {
    initialReplicas int32

    // Memory target utilization as a percentage of the requested resources.
    MemoryTarget int32
    reportedMemoryLevels []uint64
    reportedMemoryRequests []resource.Quantity
    reportedPodReadiness []v1.ConditionStatus
    reportedPodStartTime []metav1.Time
    reportedPodPhase []v1.PodPhase
    reportedPodDeletionTimestamp []bool
    reportedPodMemoryRequests []resource.Quantity
    scaleUpdated bool
    statusUpdated bool
    eventCreated bool
    verifyEvents bool
    useMetricsAPI bool
    metricsTarget []autoscalingv2.MetricSpec
    MemoryTargetUtilisationPercent int32
    reportedMemoryPercentageLevels []uint64
    reportedMemoryRequests []resource.Quantity
    reportedPodReadiness []v1.ConditionStatus
    reportedPodStartTime []metav1.Time
    reportedPodPhase []v1.PodPhase
    reportedPodDeletionTimestamp []bool
    reportedPodMemoryRequests []resource.Quantity
    scaleUpdated bool
    statusUpdated bool
    eventCreated bool
    verifyEvents bool
    useMetricsAPI bool
    metricsTarget []autoscalingv2.MetricSpec
    expectedDesiredReplicas int32
    expectedConditions []autoscalingv1.HorizontalPodAutoscalerCondition
    // Channel with names of HPA objects which we have reconciled.
    @@ -190,14 +190,14 @@ func (tc *testCase) prepareTestClient(t *testing.T) (*fake.Clientset, *metricsfa
    },
    }

    if tc.MemoryTarget > 0 {
    if tc.MemoryTargetUtilisationPercent > 0 {
    obj.Items[0].Spec.Metrics = []autoscalingv2.MetricSpec{
    {
    Type: autoscalingv2.ResourceMetricSourceType,
    Resource: &autoscalingv2.ResourceMetricSource{
    Name: v1.ResourceMemory,
    Target: autoscalingv2.MetricTarget{
    AverageUtilization: &tc.MemoryTarget,
    AverageUtilization: &tc.MemoryTargetUtilisationPercent,
    },
    },
    },
    @@ -446,7 +446,7 @@ func (tc *testCase) prepareTestClient(t *testing.T) (*fake.Clientset, *metricsfa
    defer tc.Unlock()

    metrics := &metricsapi.PodMetricsList{}
    for i, memMiB := range tc.reportedMemoryLevels {
    for i, memMiB := range tc.reportedMemoryPercentageLevels {
    // NB: the list reactor actually does label selector filtering for us,
    // so we have to make sure our results match the label selector
    podMetric := metricsapi.PodMetrics{
    @@ -494,7 +494,7 @@ func (tc *testCase) prepareTestClient(t *testing.T) (*fake.Clientset, *metricsfa
    assert.Equal(t, "pods", getForAction.GetResource().Resource, "the type of object that we requested multiple metrics for should have been pods")
    assert.Equal(t, "qps", getForAction.GetMetricName(), "the metric name requested should have been qps, as specified in the metric spec")

    for i, level := range tc.reportedMemoryLevels {
    for i, level := range tc.reportedMemoryPercentageLevels {
    podMetric := cmapi.MetricValue{
    DescribedObject: v1.ObjectReference{
    Kind: "Pod",
    @@ -546,7 +546,7 @@ func (tc *testCase) prepareTestClient(t *testing.T) (*fake.Clientset, *metricsfa
    Metric: cmapi.MetricIdentifier{
    Name: "qps",
    },
    Value: *resource.NewMilliQuantity(int64(tc.reportedMemoryLevels[0]), resource.DecimalSI),
    Value: *resource.NewMilliQuantity(int64(tc.reportedMemoryPercentageLevels[0]), resource.DecimalSI),
    },
    }

    @@ -568,7 +568,7 @@ func (tc *testCase) prepareTestClient(t *testing.T) (*fake.Clientset, *metricsfa

    assert.Equal(t, "qps", listAction.GetResource().Resource, "the metric name requested should have been qps, as specified in the metric spec")

    for _, level := range tc.reportedMemoryLevels {
    for _, level := range tc.reportedMemoryPercentageLevels {
    metric := emapi.ExternalMetricValue{
    Timestamp: metav1.Time{Time: time.Now()},
    MetricName: "qps",
    @@ -631,7 +631,7 @@ func (tc *testCase) setupController(t *testing.T) (*HorizontalController, inform
    assert.Equal(t, fmt.Sprintf(
    "Computed the desired num of replicas: %d (avgMemoryutil: %d, current replicas: %d)",
    tc.expectedDesiredReplicas,
    (int64(tc.reportedMemoryLevels[0])*100)/tc.reportedMemoryRequests[0].MilliValue(), tc.initialReplicas), obj.Message)
    (int64(tc.reportedMemoryPercentageLevels[0])*100)/tc.reportedMemoryRequests[0].MilliValue(), tc.initialReplicas), obj.Message)
    default:
    assert.False(t, true, fmt.Sprintf("Unexpected event: %s / %s", obj.Reason, obj.Message))
    }
    @@ -699,15 +699,15 @@ func (tc *testCase) runTest(t *testing.T) {

    func TestScaleUpUnreadyMoreScale(t *testing.T) {
    tc := testCase{
    minReplicas: 2,
    maxReplicas: 6,
    initialReplicas: 3,
    expectedDesiredReplicas: 4,
    MemoryTarget: 300,
    reportedMemoryLevels: []uint64{300, 300, 300},
    reportedMemoryRequests: []resource.Quantity{resource.MustParse("300Mi"), resource.MustParse("300Mi"), resource.MustParse("300Mi")},
    reportedPodReadiness: []v1.ConditionStatus{v1.ConditionFalse, v1.ConditionFalse, v1.ConditionFalse},
    useMetricsAPI: true,
    minReplicas: 2,
    maxReplicas: 6,
    initialReplicas: 3,
    expectedDesiredReplicas: 4,
    MemoryTargetUtilisationPercent: 90,
    reportedMemoryPercentageLevels: []uint64{95, 95, 95},
    reportedMemoryRequests: []resource.Quantity{resource.MustParse("100Mi"), resource.MustParse("100Mi"), resource.MustParse("100Mi")},
    reportedPodReadiness: []v1.ConditionStatus{v1.ConditionFalse, v1.ConditionFalse, v1.ConditionFalse},
    useMetricsAPI: true,
    }
    tc.runTest(t)
    }
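
    A rough walk-through of the revised TestScaleUpUnreadyMoreScale numbers, restating what the fixtures above already encode: all three pods are running but unready, so GetResourceReplicas substitutes each pod's full 100Mi request for the reported 95Mi usage before computing the ratio. The snippet below is illustrative arithmetic only (it assumes the math package is imported) and is not part of the test.

    utilization := 100.0                                // percent of request, after the unready override
    target := 90.0                                      // MemoryTargetUtilisationPercent
    usageRatio := utilization / target                  // ~1.11, outside the 0.1 tolerance
    readyPods := 3.0                                    // memory metrics never move pods into ignoredPods
    desired := int32(math.Ceil(usageRatio * readyPods)) // ceil(3.33) = 4 == expectedDesiredReplicas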
  3. mgazza created this gist Jan 15, 2019.
    713 changes: 713 additions & 0 deletions horizontal_test.go
    @@ -0,0 +1,713 @@
    /*
    Copyright 2015 The Kubernetes Authors.
    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at
    http://www.apache.org/licenses/LICENSE-2.0
    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
    */

    package podautoscaler

    import (
    "encoding/json"
    "fmt"
    "sync"
    "testing"
    "time"

    autoscalingv1 "k8s.io/api/autoscaling/v1"
    autoscalingv2 "k8s.io/api/autoscaling/v2beta2"
    "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/api/meta/testrestmapper"
    "k8s.io/apimachinery/pkg/api/resource"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/labels"
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/apimachinery/pkg/runtime/schema"
    "k8s.io/apimachinery/pkg/watch"
    "k8s.io/client-go/informers"
    "k8s.io/client-go/kubernetes/fake"
    scalefake "k8s.io/client-go/scale/fake"
    core "k8s.io/client-go/testing"
    "k8s.io/kubernetes/pkg/api/legacyscheme"
    "k8s.io/kubernetes/pkg/apis/autoscaling"
    "k8s.io/kubernetes/pkg/controller"
    "k8s.io/kubernetes/pkg/controller/podautoscaler/metrics"
    cmapi "k8s.io/metrics/pkg/apis/custom_metrics/v1beta2"
    emapi "k8s.io/metrics/pkg/apis/external_metrics/v1beta1"
    metricsapi "k8s.io/metrics/pkg/apis/metrics/v1beta1"
    metricsfake "k8s.io/metrics/pkg/client/clientset/versioned/fake"
    cmfake "k8s.io/metrics/pkg/client/custom_metrics/fake"
    emfake "k8s.io/metrics/pkg/client/external_metrics/fake"

    "github.com/stretchr/testify/assert"

    _ "k8s.io/kubernetes/pkg/apis/apps/install"
    _ "k8s.io/kubernetes/pkg/apis/autoscaling/install"
    )

    var statusOk = []autoscalingv2.HorizontalPodAutoscalerCondition{
    {Type: autoscalingv2.AbleToScale, Status: v1.ConditionTrue, Reason: "SucceededRescale"},
    {Type: autoscalingv2.ScalingActive, Status: v1.ConditionTrue, Reason: "ValidMetricFound"},
    {Type: autoscalingv2.ScalingLimited, Status: v1.ConditionFalse, Reason: "DesiredWithinRange"},
    }

    // statusOkWithOverrides returns the "ok" status with the given conditions as overridden
    func statusOkWithOverrides(overrides ...autoscalingv2.HorizontalPodAutoscalerCondition) []autoscalingv1.HorizontalPodAutoscalerCondition {
    resv2 := make([]autoscalingv2.HorizontalPodAutoscalerCondition, len(statusOk))
    copy(resv2, statusOk)
    for _, override := range overrides {
    resv2 = setConditionInList(resv2, override.Type, override.Status, override.Reason, override.Message)
    }

    // copy to a v1 slice
    resv1 := make([]autoscalingv1.HorizontalPodAutoscalerCondition, len(resv2))
    for i, cond := range resv2 {
    resv1[i] = autoscalingv1.HorizontalPodAutoscalerCondition{
    Type: autoscalingv1.HorizontalPodAutoscalerConditionType(cond.Type),
    Status: cond.Status,
    Reason: cond.Reason,
    }
    }

    return resv1
    }

    func alwaysReady() bool { return true }

    type fakeResource struct {
    name string
    apiVersion string
    kind string
    }

    type testCase struct {
    sync.Mutex
    minReplicas int32
    maxReplicas int32
    initialReplicas int32

    // Memory target utilization as a percentage of the requested resources.
    MemoryTarget int32
    reportedMemoryLevels []uint64
    reportedMemoryRequests []resource.Quantity
    reportedPodReadiness []v1.ConditionStatus
    reportedPodStartTime []metav1.Time
    reportedPodPhase []v1.PodPhase
    reportedPodDeletionTimestamp []bool
    reportedPodMemoryRequests []resource.Quantity
    scaleUpdated bool
    statusUpdated bool
    eventCreated bool
    verifyEvents bool
    useMetricsAPI bool
    metricsTarget []autoscalingv2.MetricSpec
    expectedDesiredReplicas int32
    expectedConditions []autoscalingv1.HorizontalPodAutoscalerCondition
    // Channel with names of HPA objects which we have reconciled.
    processed chan string

    // Target resource information.
    resource *fakeResource

    // Last scale time
    lastScaleTime *metav1.Time

    // override the test clients
    testClient *fake.Clientset
    testMetricsClient *metricsfake.Clientset
    testCMClient *cmfake.FakeCustomMetricsClient
    testEMClient *emfake.FakeExternalMetricsClient
    testScaleClient *scalefake.FakeScaleClient

    recommendations []timestampedRecommendation
    }

    func init() {
    // set this high so we don't accidentally run into it when testing
    scaleUpLimitFactor = 8
    }

    func (tc *testCase) prepareTestClient(t *testing.T) (*fake.Clientset, *metricsfake.Clientset, *cmfake.FakeCustomMetricsClient, *emfake.FakeExternalMetricsClient, *scalefake.FakeScaleClient) {
    namespace := "test-namespace"
    hpaName := "test-hpa"
    podNamePrefix := "test-pod"
    labelSet := map[string]string{"name": podNamePrefix}
    selector := labels.SelectorFromSet(labelSet).String()

    tc.Lock()

    tc.scaleUpdated = false
    tc.statusUpdated = false
    tc.eventCreated = false
    tc.processed = make(chan string, 100)

    if tc.resource == nil {
    tc.resource = &fakeResource{
    name: "test-rc",
    apiVersion: "v1",
    kind: "ReplicationController",
    }
    }
    tc.Unlock()

    fakeClient := &fake.Clientset{}
    fakeClient.AddReactor("list", "horizontalpodautoscalers", func(action core.Action) (handled bool, ret runtime.Object, err error) {
    tc.Lock()
    defer tc.Unlock()

    obj := &autoscalingv2.HorizontalPodAutoscalerList{
    Items: []autoscalingv2.HorizontalPodAutoscaler{
    {
    ObjectMeta: metav1.ObjectMeta{
    Name: hpaName,
    Namespace: namespace,
    SelfLink: "experimental/v1/namespaces/" + namespace + "/horizontalpodautoscalers/" + hpaName,
    },
    Spec: autoscalingv2.HorizontalPodAutoscalerSpec{
    ScaleTargetRef: autoscalingv2.CrossVersionObjectReference{
    Kind: tc.resource.kind,
    Name: tc.resource.name,
    APIVersion: tc.resource.apiVersion,
    },
    MinReplicas: &tc.minReplicas,
    MaxReplicas: tc.maxReplicas,
    },
    Status: autoscalingv2.HorizontalPodAutoscalerStatus{
    CurrentReplicas: tc.initialReplicas,
    DesiredReplicas: tc.initialReplicas,
    LastScaleTime: tc.lastScaleTime,
    },
    },
    },
    }

    if tc.MemoryTarget > 0 {
    obj.Items[0].Spec.Metrics = []autoscalingv2.MetricSpec{
    {
    Type: autoscalingv2.ResourceMetricSourceType,
    Resource: &autoscalingv2.ResourceMetricSource{
    Name: v1.ResourceMemory,
    Target: autoscalingv2.MetricTarget{
    AverageUtilization: &tc.MemoryTarget,
    },
    },
    },
    }
    }
    if len(tc.metricsTarget) > 0 {
    obj.Items[0].Spec.Metrics = append(obj.Items[0].Spec.Metrics, tc.metricsTarget...)
    }

    if len(obj.Items[0].Spec.Metrics) == 0 {
    // manually add in the defaulting logic
    obj.Items[0].Spec.Metrics = []autoscalingv2.MetricSpec{
    {
    Type: autoscalingv2.ResourceMetricSourceType,
    Resource: &autoscalingv2.ResourceMetricSource{
    Name: v1.ResourceMemory,
    },
    },
    }
    }

    // and... convert to autoscaling v1 to return the right type
    objv1, err := unsafeConvertToVersionVia(obj, autoscalingv1.SchemeGroupVersion)
    if err != nil {
    return true, nil, err
    }

    return true, objv1, nil
    })

    fakeClient.AddReactor("list", "pods", func(action core.Action) (handled bool, ret runtime.Object, err error) {
    tc.Lock()
    defer tc.Unlock()

    obj := &v1.PodList{}

    specifiedMemoryRequests := tc.reportedMemoryRequests != nil

    numPodsToCreate := int(tc.initialReplicas)
    if specifiedMemoryRequests {
    numPodsToCreate = len(tc.reportedMemoryRequests)
    }

    for i := 0; i < numPodsToCreate; i++ {
    podReadiness := v1.ConditionTrue
    if tc.reportedPodReadiness != nil {
    podReadiness = tc.reportedPodReadiness[i]
    }
    var podStartTime metav1.Time
    if tc.reportedPodStartTime != nil {
    podStartTime = tc.reportedPodStartTime[i]
    }

    podPhase := v1.PodRunning
    if tc.reportedPodPhase != nil {
    podPhase = tc.reportedPodPhase[i]
    }

    podDeletionTimestamp := false
    if tc.reportedPodDeletionTimestamp != nil {
    podDeletionTimestamp = tc.reportedPodDeletionTimestamp[i]
    }

    podName := fmt.Sprintf("%s-%d", podNamePrefix, i)

    reportedMemoryRequest := resource.MustParse("100Mi")
    if specifiedMemoryRequests {
    reportedMemoryRequest = tc.reportedMemoryRequests[i]
    }

    pod := v1.Pod{
    Status: v1.PodStatus{
    Phase: podPhase,
    Conditions: []v1.PodCondition{
    {
    Type: v1.PodReady,
    Status: podReadiness,
    LastTransitionTime: podStartTime,
    },
    },
    StartTime: &podStartTime,
    },
    ObjectMeta: metav1.ObjectMeta{
    Name: podName,
    Namespace: namespace,
    Labels: map[string]string{
    "name": podNamePrefix,
    },
    },

    Spec: v1.PodSpec{
    Containers: []v1.Container{
    {
    Resources: v1.ResourceRequirements{
    Requests: v1.ResourceList{
    v1.ResourceMemory: reportedMemoryRequest,
    },
    },
    },
    },
    },
    }
    if podDeletionTimestamp {
    pod.DeletionTimestamp = &metav1.Time{Time: time.Now()}
    }
    obj.Items = append(obj.Items, pod)
    }
    return true, obj, nil
    })

    fakeClient.AddReactor("update", "horizontalpodautoscalers", func(action core.Action) (handled bool, ret runtime.Object, err error) {
    tc.Lock()
    defer tc.Unlock()

    obj := action.(core.UpdateAction).GetObject().(*autoscalingv1.HorizontalPodAutoscaler)
    assert.Equal(t, namespace, obj.Namespace, "the HPA namespace should be as expected")
    assert.Equal(t, hpaName, obj.Name, "the HPA name should be as expected")
    assert.Equal(t, tc.expectedDesiredReplicas, obj.Status.DesiredReplicas, "the desired replica count reported in the object status should be as expected")
    /*if tc.verifyMemoryCurrent {
    if assert.NotNil(t, obj.Status.CurrentMemoryUtilizationPercentage, "the reported Memory utilization percentage should be non-nil") {
    assert.Equal(t, tc.MemoryCurrent, *obj.Status.CurrentMemoryUtilizationPercentage, "the report Memory utilization percentage should be as expected")
    }
    }*/
    var actualConditions []autoscalingv1.HorizontalPodAutoscalerCondition
    if err := json.Unmarshal([]byte(obj.ObjectMeta.Annotations[autoscaling.HorizontalPodAutoscalerConditionsAnnotation]), &actualConditions); err != nil {
    return true, nil, err
    }
    // TODO: it's ok not to sort these because statusOk
    // contains all the conditions, so we'll never be appending.
    // Default to statusOk when missing any specific conditions.
    if tc.expectedConditions == nil {
    tc.expectedConditions = statusOkWithOverrides()
    }
    // clear the message so that we can easily compare
    for i := range actualConditions {
    actualConditions[i].Message = ""
    actualConditions[i].LastTransitionTime = metav1.Time{}
    }
    assert.Equal(t, tc.expectedConditions, actualConditions, "the status conditions should have been as expected")
    tc.statusUpdated = true
    // Every time we reconcile HPA object we are updating status.
    tc.processed <- obj.Name
    return true, obj, nil
    })

    fakeScaleClient := &scalefake.FakeScaleClient{}
    fakeScaleClient.AddReactor("get", "replicationcontrollers", func(action core.Action) (handled bool, ret runtime.Object, err error) {
    tc.Lock()
    defer tc.Unlock()

    obj := &autoscalingv1.Scale{
    ObjectMeta: metav1.ObjectMeta{
    Name: tc.resource.name,
    Namespace: namespace,
    },
    Spec: autoscalingv1.ScaleSpec{
    Replicas: tc.initialReplicas,
    },
    Status: autoscalingv1.ScaleStatus{
    Replicas: tc.initialReplicas,
    Selector: selector,
    },
    }
    return true, obj, nil
    })

    fakeScaleClient.AddReactor("get", "deployments", func(action core.Action) (handled bool, ret runtime.Object, err error) {
    tc.Lock()
    defer tc.Unlock()

    obj := &autoscalingv1.Scale{
    ObjectMeta: metav1.ObjectMeta{
    Name: tc.resource.name,
    Namespace: namespace,
    },
    Spec: autoscalingv1.ScaleSpec{
    Replicas: tc.initialReplicas,
    },
    Status: autoscalingv1.ScaleStatus{
    Replicas: tc.initialReplicas,
    Selector: selector,
    },
    }
    return true, obj, nil
    })

    fakeScaleClient.AddReactor("get", "replicasets", func(action core.Action) (handled bool, ret runtime.Object, err error) {
    tc.Lock()
    defer tc.Unlock()

    obj := &autoscalingv1.Scale{
    ObjectMeta: metav1.ObjectMeta{
    Name: tc.resource.name,
    Namespace: namespace,
    },
    Spec: autoscalingv1.ScaleSpec{
    Replicas: tc.initialReplicas,
    },
    Status: autoscalingv1.ScaleStatus{
    Replicas: tc.initialReplicas,
    Selector: selector,
    },
    }
    return true, obj, nil
    })

    fakeScaleClient.AddReactor("update", "replicationcontrollers", func(action core.Action) (handled bool, ret runtime.Object, err error) {
    tc.Lock()
    defer tc.Unlock()

    obj := action.(core.UpdateAction).GetObject().(*autoscalingv1.Scale)
    replicas := action.(core.UpdateAction).GetObject().(*autoscalingv1.Scale).Spec.Replicas
    assert.Equal(t, tc.expectedDesiredReplicas, replicas, "the replica count of the RC should be as expected")
    tc.scaleUpdated = true
    return true, obj, nil
    })

    fakeScaleClient.AddReactor("update", "deployments", func(action core.Action) (handled bool, ret runtime.Object, err error) {
    tc.Lock()
    defer tc.Unlock()

    obj := action.(core.UpdateAction).GetObject().(*autoscalingv1.Scale)
    replicas := action.(core.UpdateAction).GetObject().(*autoscalingv1.Scale).Spec.Replicas
    assert.Equal(t, tc.expectedDesiredReplicas, replicas, "the replica count of the deployment should be as expected")
    tc.scaleUpdated = true
    return true, obj, nil
    })

    fakeScaleClient.AddReactor("update", "replicasets", func(action core.Action) (handled bool, ret runtime.Object, err error) {
    tc.Lock()
    defer tc.Unlock()

    obj := action.(core.UpdateAction).GetObject().(*autoscalingv1.Scale)
    replicas := action.(core.UpdateAction).GetObject().(*autoscalingv1.Scale).Spec.Replicas
    assert.Equal(t, tc.expectedDesiredReplicas, replicas, "the replica count of the replicaset should be as expected")
    tc.scaleUpdated = true
    return true, obj, nil
    })

    fakeWatch := watch.NewFake()
    fakeClient.AddWatchReactor("*", core.DefaultWatchReactor(fakeWatch, nil))

    fakeMetricsClient := &metricsfake.Clientset{}
    fakeMetricsClient.AddReactor("list", "pods", func(action core.Action) (handled bool, ret runtime.Object, err error) {
    tc.Lock()
    defer tc.Unlock()

    metrics := &metricsapi.PodMetricsList{}
    for i, memMiB := range tc.reportedMemoryLevels {
    // NB: the list reactor actually does label selector filtering for us,
    // so we have to make sure our results match the label selector
    podMetric := metricsapi.PodMetrics{
    ObjectMeta: metav1.ObjectMeta{
    Name: fmt.Sprintf("%s-%d", podNamePrefix, i),
    Namespace: namespace,
    Labels: labelSet,
    },
    Timestamp: metav1.Time{Time: time.Now()},
    Window: metav1.Duration{Duration: time.Minute},
    Containers: []metricsapi.ContainerMetrics{
    {
    Name: "container",
    Usage: v1.ResourceList{
    v1.ResourceCPU: *resource.NewMilliQuantity(
    int64(0),
    resource.DecimalSI),
    v1.ResourceMemory: *resource.NewQuantity(
    int64(memMiB*1024*1024),
    resource.BinarySI),
    },
    },
    },
    }
    metrics.Items = append(metrics.Items, podMetric)
    }

    return true, metrics, nil
    })

    fakeCMClient := &cmfake.FakeCustomMetricsClient{}
    fakeCMClient.AddReactor("get", "*", func(action core.Action) (handled bool, ret runtime.Object, err error) {
    tc.Lock()
    defer tc.Unlock()

    getForAction, wasGetFor := action.(cmfake.GetForAction)
    if !wasGetFor {
    return true, nil, fmt.Errorf("expected a get-for action, got %v instead", action)
    }

    if getForAction.GetName() == "*" {
    metrics := &cmapi.MetricValueList{}

    // multiple objects
    assert.Equal(t, "pods", getForAction.GetResource().Resource, "the type of object that we requested multiple metrics for should have been pods")
    assert.Equal(t, "qps", getForAction.GetMetricName(), "the metric name requested should have been qps, as specified in the metric spec")

    for i, level := range tc.reportedMemoryLevels {
    podMetric := cmapi.MetricValue{
    DescribedObject: v1.ObjectReference{
    Kind: "Pod",
    Name: fmt.Sprintf("%s-%d", podNamePrefix, i),
    Namespace: namespace,
    },
    Timestamp: metav1.Time{Time: time.Now()},
    Metric: cmapi.MetricIdentifier{
    Name: "qps",
    },
    Value: *resource.NewMilliQuantity(int64(level), resource.DecimalSI),
    }
    metrics.Items = append(metrics.Items, podMetric)
    }

    return true, metrics, nil
    }

    name := getForAction.GetName()
    mapper := testrestmapper.TestOnlyStaticRESTMapper(legacyscheme.Scheme)
    metrics := &cmapi.MetricValueList{}
    var matchedTarget *autoscalingv2.MetricSpec
    for i, target := range tc.metricsTarget {
    if target.Type == autoscalingv2.ObjectMetricSourceType && name == target.Object.DescribedObject.Name {
    gk := schema.FromAPIVersionAndKind(target.Object.DescribedObject.APIVersion, target.Object.DescribedObject.Kind).GroupKind()
    mapping, err := mapper.RESTMapping(gk)
    if err != nil {
    t.Logf("unable to get mapping for %s: %v", gk.String(), err)
    continue
    }
    groupResource := mapping.Resource.GroupResource()

    if getForAction.GetResource().Resource == groupResource.String() {
    matchedTarget = &tc.metricsTarget[i]
    }
    }
    }
    assert.NotNil(t, matchedTarget, "this request should have matched one of the metric specs")
    assert.Equal(t, "qps", getForAction.GetMetricName(), "the metric name requested should have been qps, as specified in the metric spec")

    metrics.Items = []cmapi.MetricValue{
    {
    DescribedObject: v1.ObjectReference{
    Kind: matchedTarget.Object.DescribedObject.Kind,
    APIVersion: matchedTarget.Object.DescribedObject.APIVersion,
    Name: name,
    },
    Timestamp: metav1.Time{Time: time.Now()},
    Metric: cmapi.MetricIdentifier{
    Name: "qps",
    },
    Value: *resource.NewMilliQuantity(int64(tc.reportedMemoryLevels[0]), resource.DecimalSI),
    },
    }

    return true, metrics, nil
    })

    fakeEMClient := &emfake.FakeExternalMetricsClient{}

    fakeEMClient.AddReactor("list", "*", func(action core.Action) (handled bool, ret runtime.Object, err error) {
    tc.Lock()
    defer tc.Unlock()

    listAction, wasList := action.(core.ListAction)
    if !wasList {
    return true, nil, fmt.Errorf("expected a list action, got %v instead", action)
    }

    metrics := &emapi.ExternalMetricValueList{}

    assert.Equal(t, "qps", listAction.GetResource().Resource, "the metric name requested should have been qps, as specified in the metric spec")

    for _, level := range tc.reportedMemoryLevels {
    metric := emapi.ExternalMetricValue{
    Timestamp: metav1.Time{Time: time.Now()},
    MetricName: "qps",
    Value: *resource.NewMilliQuantity(int64(level), resource.DecimalSI),
    }
    metrics.Items = append(metrics.Items, metric)
    }

    return true, metrics, nil
    })

    return fakeClient, fakeMetricsClient, fakeCMClient, fakeEMClient, fakeScaleClient
    }

    func (tc *testCase) verifyResults(t *testing.T) {
    tc.Lock()
    defer tc.Unlock()

    assert.Equal(t, tc.initialReplicas != tc.expectedDesiredReplicas, tc.scaleUpdated, "the scale should only be updated if we expected a change in replicas")
    assert.True(t, tc.statusUpdated, "the status should have been updated")
    if tc.verifyEvents {
    assert.Equal(t, tc.initialReplicas != tc.expectedDesiredReplicas, tc.eventCreated, "an event should have been created only if we expected a change in replicas")
    }
    }

    func (tc *testCase) setupController(t *testing.T) (*HorizontalController, informers.SharedInformerFactory) {
    testClient, testMetricsClient, testCMClient, testEMClient, testScaleClient := tc.prepareTestClient(t)
    if tc.testClient != nil {
    testClient = tc.testClient
    }
    if tc.testMetricsClient != nil {
    testMetricsClient = tc.testMetricsClient
    }
    if tc.testCMClient != nil {
    testCMClient = tc.testCMClient
    }
    if tc.testEMClient != nil {
    testEMClient = tc.testEMClient
    }
    if tc.testScaleClient != nil {
    testScaleClient = tc.testScaleClient
    }
    metricsClient := metrics.NewRESTMetricsClient(
    testMetricsClient.MetricsV1beta1(),
    testCMClient,
    testEMClient,
    )

    eventClient := &fake.Clientset{}
    eventClient.AddReactor("create", "events", func(action core.Action) (handled bool, ret runtime.Object, err error) {
    tc.Lock()
    defer tc.Unlock()

    obj := action.(core.CreateAction).GetObject().(*v1.Event)
    if tc.verifyEvents {
    switch obj.Reason {
    case "SuccessfulRescale":
    assert.Equal(t, fmt.Sprintf("New size: %d; reason: cpu resource utilization (percentage of request) above target", tc.expectedDesiredReplicas), obj.Message)
    case "DesiredReplicasComputed":
    assert.Equal(t, fmt.Sprintf(
    "Computed the desired num of replicas: %d (avgMemoryutil: %d, current replicas: %d)",
    tc.expectedDesiredReplicas,
    (int64(tc.reportedMemoryLevels[0])*100)/tc.reportedMemoryRequests[0].MilliValue(), tc.initialReplicas), obj.Message)
    default:
    assert.False(t, true, fmt.Sprintf("Unexpected event: %s / %s", obj.Reason, obj.Message))
    }
    }
    tc.eventCreated = true
    return true, obj, nil
    })

    informerFactory := informers.NewSharedInformerFactory(testClient, controller.NoResyncPeriodFunc())
    defaultDownscalestabilizationWindow := 5 * time.Minute

    hpaController := NewHorizontalController(
    eventClient.Core(),
    testScaleClient,
    testClient.Autoscaling(),
    testrestmapper.TestOnlyStaticRESTMapper(legacyscheme.Scheme),
    metricsClient,
    informerFactory.Autoscaling().V1().HorizontalPodAutoscalers(),
    informerFactory.Core().V1().Pods(),
    controller.NoResyncPeriodFunc(),
    defaultDownscalestabilizationWindow,
    defaultTestingTolerance,
    defaultTestingCpuInitializationPeriod,
    defaultTestingDelayOfInitialReadinessStatus,
    )
    hpaController.hpaListerSynced = alwaysReady
    if tc.recommendations != nil {
    hpaController.recommendations["test-namespace/test-hpa"] = tc.recommendations
    }

    return hpaController, informerFactory
    }

    func hotCpuCreationTime() metav1.Time {
    return metav1.Time{Time: time.Now()}
    }

    func coolCpuCreationTime() metav1.Time {
    return metav1.Time{Time: time.Now().Add(-3 * time.Minute)}
    }

    func (tc *testCase) runTestWithController(t *testing.T, hpaController *HorizontalController, informerFactory informers.SharedInformerFactory) {
    stop := make(chan struct{})
    defer close(stop)
    informerFactory.Start(stop)
    go hpaController.Run(stop)

    tc.Lock()
    if tc.verifyEvents {
    tc.Unlock()
    // We need to wait for events to be broadcasted (sleep for longer than record.sleepDuration).
    time.Sleep(2 * time.Second)
    } else {
    tc.Unlock()
    }
    // Wait for HPA to be processed.
    <-tc.processed
    tc.verifyResults(t)
    }

    func (tc *testCase) runTest(t *testing.T) {
    hpaController, informerFactory := tc.setupController(t)
    tc.runTestWithController(t, hpaController, informerFactory)
    }

    func TestScaleUpUnreadyMoreScale(t *testing.T) {
    tc := testCase{
    minReplicas: 2,
    maxReplicas: 6,
    initialReplicas: 3,
    expectedDesiredReplicas: 4,
    MemoryTarget: 300,
    reportedMemoryLevels: []uint64{300, 300, 300},
    reportedMemoryRequests: []resource.Quantity{resource.MustParse("300Mi"), resource.MustParse("300Mi"), resource.MustParse("300Mi")},
    reportedPodReadiness: []v1.ConditionStatus{v1.ConditionFalse, v1.ConditionFalse, v1.ConditionFalse},
    useMetricsAPI: true,
    }
    tc.runTest(t)
    }