Modified tests to show that unready pods at memory targets do not scale.
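The file below appears to be an adapted copy of the HPA controller's horizontal_test.go from k8s.io/kubernetes/pkg/controller/podautoscaler, with the CPU-based resource metric swapped for memory; TestScaleUpUnreadyMoreScale at the bottom is the case that exercises unready pods against a memory utilization target.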
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package podautoscaler

import (
    "encoding/json"
    "fmt"
    "sync"
    "testing"
    "time"

    autoscalingv1 "k8s.io/api/autoscaling/v1"
    autoscalingv2 "k8s.io/api/autoscaling/v2beta2"
    "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/api/meta/testrestmapper"
    "k8s.io/apimachinery/pkg/api/resource"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/labels"
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/apimachinery/pkg/runtime/schema"
    "k8s.io/apimachinery/pkg/watch"
    "k8s.io/client-go/informers"
    "k8s.io/client-go/kubernetes/fake"
    scalefake "k8s.io/client-go/scale/fake"
    core "k8s.io/client-go/testing"
    "k8s.io/kubernetes/pkg/api/legacyscheme"
    "k8s.io/kubernetes/pkg/apis/autoscaling"
    "k8s.io/kubernetes/pkg/controller"
    "k8s.io/kubernetes/pkg/controller/podautoscaler/metrics"
    cmapi "k8s.io/metrics/pkg/apis/custom_metrics/v1beta2"
    emapi "k8s.io/metrics/pkg/apis/external_metrics/v1beta1"
    metricsapi "k8s.io/metrics/pkg/apis/metrics/v1beta1"
    metricsfake "k8s.io/metrics/pkg/client/clientset/versioned/fake"
    cmfake "k8s.io/metrics/pkg/client/custom_metrics/fake"
    emfake "k8s.io/metrics/pkg/client/external_metrics/fake"

    "github.com/stretchr/testify/assert"

    _ "k8s.io/kubernetes/pkg/apis/apps/install"
    _ "k8s.io/kubernetes/pkg/apis/autoscaling/install"
)
var statusOk = []autoscalingv2.HorizontalPodAutoscalerCondition{
    {Type: autoscalingv2.AbleToScale, Status: v1.ConditionTrue, Reason: "SucceededRescale"},
    {Type: autoscalingv2.ScalingActive, Status: v1.ConditionTrue, Reason: "ValidMetricFound"},
    {Type: autoscalingv2.ScalingLimited, Status: v1.ConditionFalse, Reason: "DesiredWithinRange"},
}
// statusOkWithOverrides returns the "ok" status with the given conditions as overridden
func statusOkWithOverrides(overrides ...autoscalingv2.HorizontalPodAutoscalerCondition) []autoscalingv1.HorizontalPodAutoscalerCondition {
    resv2 := make([]autoscalingv2.HorizontalPodAutoscalerCondition, len(statusOk))
    copy(resv2, statusOk)
    for _, override := range overrides {
        resv2 = setConditionInList(resv2, override.Type, override.Status, override.Reason, override.Message)
    }

    // copy to a v1 slice
    resv1 := make([]autoscalingv1.HorizontalPodAutoscalerCondition, len(resv2))
    for i, cond := range resv2 {
        resv1[i] = autoscalingv1.HorizontalPodAutoscalerCondition{
            Type:   autoscalingv1.HorizontalPodAutoscalerConditionType(cond.Type),
            Status: cond.Status,
            Reason: cond.Reason,
        }
    }
    return resv1
}
func alwaysReady() bool { return true }

type fakeResource struct {
    name       string
    apiVersion string
    kind       string
}
type testCase struct {
    sync.Mutex
    minReplicas     int32
    maxReplicas     int32
    initialReplicas int32

    // Memory target utilization as a percentage of the requested resources.
    MemoryTarget                 int32
    reportedMemoryLevels         []uint64
    reportedMemoryRequests       []resource.Quantity
    reportedPodReadiness         []v1.ConditionStatus
    reportedPodStartTime         []metav1.Time
    reportedPodPhase             []v1.PodPhase
    reportedPodDeletionTimestamp []bool
    reportedPodMemoryRequests    []resource.Quantity
    scaleUpdated                 bool
    statusUpdated                bool
    eventCreated                 bool
    verifyEvents                 bool
    useMetricsAPI                bool
    metricsTarget                []autoscalingv2.MetricSpec
    expectedDesiredReplicas      int32
    expectedConditions           []autoscalingv1.HorizontalPodAutoscalerCondition

    // Channel with names of HPA objects which we have reconciled.
    processed chan string

    // Target resource information.
    resource *fakeResource

    // Last scale time
    lastScaleTime *metav1.Time

    // override the test clients
    testClient        *fake.Clientset
    testMetricsClient *metricsfake.Clientset
    testCMClient      *cmfake.FakeCustomMetricsClient
    testEMClient      *emfake.FakeExternalMetricsClient
    testScaleClient   *scalefake.FakeScaleClient

    recommendations []timestampedRecommendation
}
func init() {
    // set this high so we don't accidentally run into it when testing
    scaleUpLimitFactor = 8
}
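// prepareTestClient builds the fake core, metrics, custom-metrics, external-metrics,
// and scale clients whose reactors serve the HPA object, pods, and metric values
// described by this test case, and record scale and status updates back onto it.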
func (tc *testCase) prepareTestClient(t *testing.T) (*fake.Clientset, *metricsfake.Clientset, *cmfake.FakeCustomMetricsClient, *emfake.FakeExternalMetricsClient, *scalefake.FakeScaleClient) {
    namespace := "test-namespace"
    hpaName := "test-hpa"
    podNamePrefix := "test-pod"
    labelSet := map[string]string{"name": podNamePrefix}
    selector := labels.SelectorFromSet(labelSet).String()

    tc.Lock()
    tc.scaleUpdated = false
    tc.statusUpdated = false
    tc.eventCreated = false
    tc.processed = make(chan string, 100)
    if tc.resource == nil {
        tc.resource = &fakeResource{
            name:       "test-rc",
            apiVersion: "v1",
            kind:       "ReplicationController",
        }
    }
    tc.Unlock()

    fakeClient := &fake.Clientset{}
    fakeClient.AddReactor("list", "horizontalpodautoscalers", func(action core.Action) (handled bool, ret runtime.Object, err error) {
        tc.Lock()
        defer tc.Unlock()

        obj := &autoscalingv2.HorizontalPodAutoscalerList{
            Items: []autoscalingv2.HorizontalPodAutoscaler{
                {
                    ObjectMeta: metav1.ObjectMeta{
                        Name:      hpaName,
                        Namespace: namespace,
                        SelfLink:  "experimental/v1/namespaces/" + namespace + "/horizontalpodautoscalers/" + hpaName,
                    },
                    Spec: autoscalingv2.HorizontalPodAutoscalerSpec{
                        ScaleTargetRef: autoscalingv2.CrossVersionObjectReference{
                            Kind:       tc.resource.kind,
                            Name:       tc.resource.name,
                            APIVersion: tc.resource.apiVersion,
                        },
                        MinReplicas: &tc.minReplicas,
                        MaxReplicas: tc.maxReplicas,
                    },
                    Status: autoscalingv2.HorizontalPodAutoscalerStatus{
                        CurrentReplicas: tc.initialReplicas,
                        DesiredReplicas: tc.initialReplicas,
                        LastScaleTime:   tc.lastScaleTime,
                    },
                },
            },
        }

        if tc.MemoryTarget > 0 {
            obj.Items[0].Spec.Metrics = []autoscalingv2.MetricSpec{
                {
                    Type: autoscalingv2.ResourceMetricSourceType,
                    Resource: &autoscalingv2.ResourceMetricSource{
                        Name: v1.ResourceMemory,
                        Target: autoscalingv2.MetricTarget{
                            AverageUtilization: &tc.MemoryTarget,
                        },
                    },
                },
            }
        }
        if len(tc.metricsTarget) > 0 {
            obj.Items[0].Spec.Metrics = append(obj.Items[0].Spec.Metrics, tc.metricsTarget...)
        }

        if len(obj.Items[0].Spec.Metrics) == 0 {
            // manually add in the defaulting logic
            obj.Items[0].Spec.Metrics = []autoscalingv2.MetricSpec{
                {
                    Type: autoscalingv2.ResourceMetricSourceType,
                    Resource: &autoscalingv2.ResourceMetricSource{
                        Name: v1.ResourceMemory,
                    },
                },
            }
        }

        // and... convert to autoscaling v1 to return the right type
        objv1, err := unsafeConvertToVersionVia(obj, autoscalingv1.SchemeGroupVersion)
        if err != nil {
            return true, nil, err
        }

        return true, objv1, nil
    })
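    // The pods reactor below fabricates one pod per reported memory request (or per
    // initial replica when no requests are specified), taking readiness, start time,
    // phase, and deletion timestamp from the test case.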
| fakeClient.AddReactor("list", "pods", func(action core.Action) (handled bool, ret runtime.Object, err error) { | |
| tc.Lock() | |
| defer tc.Unlock() | |
| obj := &v1.PodList{} | |
| specifiedMemoryRequests := tc.reportedMemoryRequests != nil | |
| numPodsToCreate := int(tc.initialReplicas) | |
| if specifiedMemoryRequests { | |
| numPodsToCreate = len(tc.reportedMemoryRequests) | |
| } | |
| for i := 0; i < numPodsToCreate; i++ { | |
| podReadiness := v1.ConditionTrue | |
| if tc.reportedPodReadiness != nil { | |
| podReadiness = tc.reportedPodReadiness[i] | |
| } | |
| var podStartTime metav1.Time | |
| if tc.reportedPodStartTime != nil { | |
| podStartTime = tc.reportedPodStartTime[i] | |
| } | |
| podPhase := v1.PodRunning | |
| if tc.reportedPodPhase != nil { | |
| podPhase = tc.reportedPodPhase[i] | |
| } | |
| podDeletionTimestamp := false | |
| if tc.reportedPodDeletionTimestamp != nil { | |
| podDeletionTimestamp = tc.reportedPodDeletionTimestamp[i] | |
| } | |
| podName := fmt.Sprintf("%s-%d", podNamePrefix, i) | |
| reportedMemoryRequest := resource.MustParse("100Mi") | |
| if specifiedMemoryRequests { | |
| reportedMemoryRequest = tc.reportedMemoryRequests[i] | |
| } | |
| pod := v1.Pod{ | |
| Status: v1.PodStatus{ | |
| Phase: podPhase, | |
| Conditions: []v1.PodCondition{ | |
| { | |
| Type: v1.PodReady, | |
| Status: podReadiness, | |
| LastTransitionTime: podStartTime, | |
| }, | |
| }, | |
| StartTime: &podStartTime, | |
| }, | |
| ObjectMeta: metav1.ObjectMeta{ | |
| Name: podName, | |
| Namespace: namespace, | |
| Labels: map[string]string{ | |
| "name": podNamePrefix, | |
| }, | |
| }, | |
| Spec: v1.PodSpec{ | |
| Containers: []v1.Container{ | |
| { | |
| Resources: v1.ResourceRequirements{ | |
| Requests: v1.ResourceList{ | |
| v1.ResourceMemory: reportedMemoryRequest, | |
| }, | |
| }, | |
| }, | |
| }, | |
| }, | |
| } | |
| if podDeletionTimestamp { | |
| pod.DeletionTimestamp = &metav1.Time{Time: time.Now()} | |
| } | |
| obj.Items = append(obj.Items, pod) | |
| } | |
| return true, obj, nil | |
| }) | |
| fakeClient.AddReactor("update", "horizontalpodautoscalers", func(action core.Action) (handled bool, ret runtime.Object, err error) { | |
| tc.Lock() | |
| defer tc.Unlock() | |
| obj := action.(core.UpdateAction).GetObject().(*autoscalingv1.HorizontalPodAutoscaler) | |
| assert.Equal(t, namespace, obj.Namespace, "the HPA namespace should be as expected") | |
| assert.Equal(t, hpaName, obj.Name, "the HPA name should be as expected") | |
| assert.Equal(t, tc.expectedDesiredReplicas, obj.Status.DesiredReplicas, "the desired replica count reported in the object status should be as expected") | |
| /*if tc.verifyMemoryCurrent { | |
| if assert.NotNil(t, obj.Status.CurrentMemoryUtilizationPercentage, "the reported Memory utilization percentage should be non-nil") { | |
| assert.Equal(t, tc.MemoryCurrent, *obj.Status.CurrentMemoryUtilizationPercentage, "the report Memory utilization percentage should be as expected") | |
| } | |
| }*/ | |
| var actualConditions []autoscalingv1.HorizontalPodAutoscalerCondition | |
| if err := json.Unmarshal([]byte(obj.ObjectMeta.Annotations[autoscaling.HorizontalPodAutoscalerConditionsAnnotation]), &actualConditions); err != nil { | |
| return true, nil, err | |
| } | |
| // TODO: it's ok not to sort these becaues statusOk | |
| // contains all the conditions, so we'll never be appending. | |
| // Default to statusOk when missing any specific conditions | |
| if tc.expectedConditions == nil { | |
| tc.expectedConditions = statusOkWithOverrides() | |
| } | |
| // clear the message so that we can easily compare | |
| for i := range actualConditions { | |
| actualConditions[i].Message = "" | |
| actualConditions[i].LastTransitionTime = metav1.Time{} | |
| } | |
| assert.Equal(t, tc.expectedConditions, actualConditions, "the status conditions should have been as expected") | |
| tc.statusUpdated = true | |
| // Every time we reconcile HPA object we are updating status. | |
| tc.processed <- obj.Name | |
| return true, obj, nil | |
| }) | |
    fakeScaleClient := &scalefake.FakeScaleClient{}
    fakeScaleClient.AddReactor("get", "replicationcontrollers", func(action core.Action) (handled bool, ret runtime.Object, err error) {
        tc.Lock()
        defer tc.Unlock()

        obj := &autoscalingv1.Scale{
            ObjectMeta: metav1.ObjectMeta{
                Name:      tc.resource.name,
                Namespace: namespace,
            },
            Spec: autoscalingv1.ScaleSpec{
                Replicas: tc.initialReplicas,
            },
            Status: autoscalingv1.ScaleStatus{
                Replicas: tc.initialReplicas,
                Selector: selector,
            },
        }
        return true, obj, nil
    })

    fakeScaleClient.AddReactor("get", "deployments", func(action core.Action) (handled bool, ret runtime.Object, err error) {
        tc.Lock()
        defer tc.Unlock()

        obj := &autoscalingv1.Scale{
            ObjectMeta: metav1.ObjectMeta{
                Name:      tc.resource.name,
                Namespace: namespace,
            },
            Spec: autoscalingv1.ScaleSpec{
                Replicas: tc.initialReplicas,
            },
            Status: autoscalingv1.ScaleStatus{
                Replicas: tc.initialReplicas,
                Selector: selector,
            },
        }
        return true, obj, nil
    })

    fakeScaleClient.AddReactor("get", "replicasets", func(action core.Action) (handled bool, ret runtime.Object, err error) {
        tc.Lock()
        defer tc.Unlock()

        obj := &autoscalingv1.Scale{
            ObjectMeta: metav1.ObjectMeta{
                Name:      tc.resource.name,
                Namespace: namespace,
            },
            Spec: autoscalingv1.ScaleSpec{
                Replicas: tc.initialReplicas,
            },
            Status: autoscalingv1.ScaleStatus{
                Replicas: tc.initialReplicas,
                Selector: selector,
            },
        }
        return true, obj, nil
    })

    fakeScaleClient.AddReactor("update", "replicationcontrollers", func(action core.Action) (handled bool, ret runtime.Object, err error) {
        tc.Lock()
        defer tc.Unlock()

        obj := action.(core.UpdateAction).GetObject().(*autoscalingv1.Scale)
        replicas := action.(core.UpdateAction).GetObject().(*autoscalingv1.Scale).Spec.Replicas
        assert.Equal(t, tc.expectedDesiredReplicas, replicas, "the replica count of the RC should be as expected")
        tc.scaleUpdated = true
        return true, obj, nil
    })

    fakeScaleClient.AddReactor("update", "deployments", func(action core.Action) (handled bool, ret runtime.Object, err error) {
        tc.Lock()
        defer tc.Unlock()

        obj := action.(core.UpdateAction).GetObject().(*autoscalingv1.Scale)
        replicas := action.(core.UpdateAction).GetObject().(*autoscalingv1.Scale).Spec.Replicas
        assert.Equal(t, tc.expectedDesiredReplicas, replicas, "the replica count of the deployment should be as expected")
        tc.scaleUpdated = true
        return true, obj, nil
    })

    fakeScaleClient.AddReactor("update", "replicasets", func(action core.Action) (handled bool, ret runtime.Object, err error) {
        tc.Lock()
        defer tc.Unlock()

        obj := action.(core.UpdateAction).GetObject().(*autoscalingv1.Scale)
        replicas := action.(core.UpdateAction).GetObject().(*autoscalingv1.Scale).Spec.Replicas
        assert.Equal(t, tc.expectedDesiredReplicas, replicas, "the replica count of the replicaset should be as expected")
        tc.scaleUpdated = true
        return true, obj, nil
    })
    fakeWatch := watch.NewFake()
    fakeClient.AddWatchReactor("*", core.DefaultWatchReactor(fakeWatch, nil))
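    // The resource-metrics fake below reports tc.reportedMemoryLevels as MiB of memory
    // usage per pod, while the custom- and external-metrics fakes that follow reuse the
    // same numbers as milli-quantities of a "qps" metric.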
    fakeMetricsClient := &metricsfake.Clientset{}
    fakeMetricsClient.AddReactor("list", "pods", func(action core.Action) (handled bool, ret runtime.Object, err error) {
        tc.Lock()
        defer tc.Unlock()

        metrics := &metricsapi.PodMetricsList{}
        for i, memMiB := range tc.reportedMemoryLevels {
            // NB: the list reactor actually does label selector filtering for us,
            // so we have to make sure our results match the label selector
            podMetric := metricsapi.PodMetrics{
                ObjectMeta: metav1.ObjectMeta{
                    Name:      fmt.Sprintf("%s-%d", podNamePrefix, i),
                    Namespace: namespace,
                    Labels:    labelSet,
                },
                Timestamp: metav1.Time{Time: time.Now()},
                Window:    metav1.Duration{Duration: time.Minute},
                Containers: []metricsapi.ContainerMetrics{
                    {
                        Name: "container",
                        Usage: v1.ResourceList{
                            v1.ResourceCPU: *resource.NewMilliQuantity(
                                int64(0),
                                resource.DecimalSI),
                            v1.ResourceMemory: *resource.NewQuantity(
                                int64(memMiB*1024*1024),
                                resource.BinarySI),
                        },
                    },
                },
            }
            metrics.Items = append(metrics.Items, podMetric)
        }

        return true, metrics, nil
    })
    fakeCMClient := &cmfake.FakeCustomMetricsClient{}
    fakeCMClient.AddReactor("get", "*", func(action core.Action) (handled bool, ret runtime.Object, err error) {
        tc.Lock()
        defer tc.Unlock()

        getForAction, wasGetFor := action.(cmfake.GetForAction)
        if !wasGetFor {
            return true, nil, fmt.Errorf("expected a get-for action, got %v instead", action)
        }

        if getForAction.GetName() == "*" {
            metrics := &cmapi.MetricValueList{}

            // multiple objects
            assert.Equal(t, "pods", getForAction.GetResource().Resource, "the type of object that we requested multiple metrics for should have been pods")
            assert.Equal(t, "qps", getForAction.GetMetricName(), "the metric name requested should have been qps, as specified in the metric spec")

            for i, level := range tc.reportedMemoryLevels {
                podMetric := cmapi.MetricValue{
                    DescribedObject: v1.ObjectReference{
                        Kind:      "Pod",
                        Name:      fmt.Sprintf("%s-%d", podNamePrefix, i),
                        Namespace: namespace,
                    },
                    Timestamp: metav1.Time{Time: time.Now()},
                    Metric: cmapi.MetricIdentifier{
                        Name: "qps",
                    },
                    Value: *resource.NewMilliQuantity(int64(level), resource.DecimalSI),
                }
                metrics.Items = append(metrics.Items, podMetric)
            }

            return true, metrics, nil
        }

        name := getForAction.GetName()
        mapper := testrestmapper.TestOnlyStaticRESTMapper(legacyscheme.Scheme)
        metrics := &cmapi.MetricValueList{}
        var matchedTarget *autoscalingv2.MetricSpec
        for i, target := range tc.metricsTarget {
            if target.Type == autoscalingv2.ObjectMetricSourceType && name == target.Object.DescribedObject.Name {
                gk := schema.FromAPIVersionAndKind(target.Object.DescribedObject.APIVersion, target.Object.DescribedObject.Kind).GroupKind()
                mapping, err := mapper.RESTMapping(gk)
                if err != nil {
                    t.Logf("unable to get mapping for %s: %v", gk.String(), err)
                    continue
                }
                groupResource := mapping.Resource.GroupResource()

                if getForAction.GetResource().Resource == groupResource.String() {
                    matchedTarget = &tc.metricsTarget[i]
                }
            }
        }
        assert.NotNil(t, matchedTarget, "this request should have matched one of the metric specs")
        assert.Equal(t, "qps", getForAction.GetMetricName(), "the metric name requested should have been qps, as specified in the metric spec")

        metrics.Items = []cmapi.MetricValue{
            {
                DescribedObject: v1.ObjectReference{
                    Kind:       matchedTarget.Object.DescribedObject.Kind,
                    APIVersion: matchedTarget.Object.DescribedObject.APIVersion,
                    Name:       name,
                },
                Timestamp: metav1.Time{Time: time.Now()},
                Metric: cmapi.MetricIdentifier{
                    Name: "qps",
                },
                Value: *resource.NewMilliQuantity(int64(tc.reportedMemoryLevels[0]), resource.DecimalSI),
            },
        }

        return true, metrics, nil
    })
    fakeEMClient := &emfake.FakeExternalMetricsClient{}
    fakeEMClient.AddReactor("list", "*", func(action core.Action) (handled bool, ret runtime.Object, err error) {
        tc.Lock()
        defer tc.Unlock()

        listAction, wasList := action.(core.ListAction)
        if !wasList {
            return true, nil, fmt.Errorf("expected a list action, got %v instead", action)
        }

        metrics := &emapi.ExternalMetricValueList{}

        assert.Equal(t, "qps", listAction.GetResource().Resource, "the metric name requested should have been qps, as specified in the metric spec")

        for _, level := range tc.reportedMemoryLevels {
            metric := emapi.ExternalMetricValue{
                Timestamp:  metav1.Time{Time: time.Now()},
                MetricName: "qps",
                Value:      *resource.NewMilliQuantity(int64(level), resource.DecimalSI),
            }
            metrics.Items = append(metrics.Items, metric)
        }

        return true, metrics, nil
    })

    return fakeClient, fakeMetricsClient, fakeCMClient, fakeEMClient, fakeScaleClient
}
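// verifyResults asserts that the scale subresource was updated (and, when verifyEvents
// is set, that an event was created) exactly when the expected replica count differs
// from the initial count, and that the HPA status was always updated.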
func (tc *testCase) verifyResults(t *testing.T) {
    tc.Lock()
    defer tc.Unlock()

    assert.Equal(t, tc.initialReplicas != tc.expectedDesiredReplicas, tc.scaleUpdated, "the scale should only be updated if we expected a change in replicas")
    assert.True(t, tc.statusUpdated, "the status should have been updated")
    if tc.verifyEvents {
        assert.Equal(t, tc.initialReplicas != tc.expectedDesiredReplicas, tc.eventCreated, "an event should have been created only if we expected a change in replicas")
    }
}
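// setupController wires the fake clients (or any overrides supplied on the test case)
// into a HorizontalController backed by shared informers, ready for runTestWithController.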
func (tc *testCase) setupController(t *testing.T) (*HorizontalController, informers.SharedInformerFactory) {
    testClient, testMetricsClient, testCMClient, testEMClient, testScaleClient := tc.prepareTestClient(t)
    if tc.testClient != nil {
        testClient = tc.testClient
    }
    if tc.testMetricsClient != nil {
        testMetricsClient = tc.testMetricsClient
    }
    if tc.testCMClient != nil {
        testCMClient = tc.testCMClient
    }
    if tc.testEMClient != nil {
        testEMClient = tc.testEMClient
    }
    if tc.testScaleClient != nil {
        testScaleClient = tc.testScaleClient
    }
    metricsClient := metrics.NewRESTMetricsClient(
        testMetricsClient.MetricsV1beta1(),
        testCMClient,
        testEMClient,
    )

    eventClient := &fake.Clientset{}
    eventClient.AddReactor("create", "events", func(action core.Action) (handled bool, ret runtime.Object, err error) {
        tc.Lock()
        defer tc.Unlock()

        obj := action.(core.CreateAction).GetObject().(*v1.Event)
        if tc.verifyEvents {
            switch obj.Reason {
            case "SuccessfulRescale":
                assert.Equal(t, fmt.Sprintf("New size: %d; reason: cpu resource utilization (percentage of request) above target", tc.expectedDesiredReplicas), obj.Message)
            case "DesiredReplicasComputed":
                assert.Equal(t, fmt.Sprintf(
                    "Computed the desired num of replicas: %d (avgMemoryutil: %d, current replicas: %d)",
                    tc.expectedDesiredReplicas,
                    (int64(tc.reportedMemoryLevels[0])*100)/tc.reportedMemoryRequests[0].MilliValue(), tc.initialReplicas), obj.Message)
            default:
                assert.False(t, true, fmt.Sprintf("Unexpected event: %s / %s", obj.Reason, obj.Message))
            }
        }
        tc.eventCreated = true
        return true, obj, nil
    })

    informerFactory := informers.NewSharedInformerFactory(testClient, controller.NoResyncPeriodFunc())
    defaultDownscalestabilizationWindow := 5 * time.Minute

    hpaController := NewHorizontalController(
        eventClient.Core(),
        testScaleClient,
        testClient.Autoscaling(),
        testrestmapper.TestOnlyStaticRESTMapper(legacyscheme.Scheme),
        metricsClient,
        informerFactory.Autoscaling().V1().HorizontalPodAutoscalers(),
        informerFactory.Core().V1().Pods(),
        controller.NoResyncPeriodFunc(),
        defaultDownscalestabilizationWindow,
        defaultTestingTolerance,
        defaultTestingCpuInitializationPeriod,
        defaultTestingDelayOfInitialReadinessStatus,
    )
    hpaController.hpaListerSynced = alwaysReady
    if tc.recommendations != nil {
        hpaController.recommendations["test-namespace/test-hpa"] = tc.recommendations
    }

    return hpaController, informerFactory
}
func hotCpuCreationTime() metav1.Time {
    return metav1.Time{Time: time.Now()}
}

func coolCpuCreationTime() metav1.Time {
    return metav1.Time{Time: time.Now().Add(-3 * time.Minute)}
}
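// runTestWithController starts the informers and the controller, waits for one
// reconciliation of the HPA to be reported on tc.processed, then verifies the results.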
func (tc *testCase) runTestWithController(t *testing.T, hpaController *HorizontalController, informerFactory informers.SharedInformerFactory) {
    stop := make(chan struct{})
    defer close(stop)
    informerFactory.Start(stop)
    go hpaController.Run(stop)

    tc.Lock()
    if tc.verifyEvents {
        tc.Unlock()
        // We need to wait for events to be broadcasted (sleep for longer than record.sleepDuration).
        time.Sleep(2 * time.Second)
    } else {
        tc.Unlock()
    }
    // Wait for HPA to be processed.
    <-tc.processed
    tc.verifyResults(t)
}

func (tc *testCase) runTest(t *testing.T) {
    hpaController, informerFactory := tc.setupController(t)
    tc.runTestWithController(t, hpaController, informerFactory)
}
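// TestScaleUpUnreadyMoreScale reconciles a memory-based HPA (target 300% of a 300Mi
// request, reported usage of 300MiB per pod) where all three pods are NotReady, and
// expects the desired replica count to move from 3 to 4.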
func TestScaleUpUnreadyMoreScale(t *testing.T) {
    tc := testCase{
        minReplicas:             2,
        maxReplicas:             6,
        initialReplicas:         3,
        expectedDesiredReplicas: 4,
        MemoryTarget:            300,
        reportedMemoryLevels:    []uint64{300, 300, 300},
        reportedMemoryRequests:  []resource.Quantity{resource.MustParse("300Mi"), resource.MustParse("300Mi"), resource.MustParse("300Mi")},
        reportedPodReadiness:    []v1.ConditionStatus{v1.ConditionFalse, v1.ConditionFalse, v1.ConditionFalse},
        useMetricsAPI:           true,
    }
    tc.runTest(t)
}