
Commit 43d6fbd

Merge pull request #8004 from laoj2/fix-container-status
Also consider containerStatus.resources when handling container resources
2 parents: 87a67e3 + b766a06

14 files changed: +528 -30 lines
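Context for the change: with in-place pod resize (the InPlacePodVerticalScaling feature gate), the resources a container is actually running with are reported in containerStatus.resources and can drift from the pod spec. The diffs below therefore route every request/limit read through a shared helper in vertical-pod-autoscaler/pkg/utils/resources (one of the changed files, not shown in this excerpt). A minimal sketch of the behavior the call sites rely on, assuming status values take precedence over the spec; the real implementation may differ:

package resources

import (
    core "k8s.io/api/core/v1"
)

// ContainerRequestsAndLimits returns the requests and limits for the named
// container, preferring the values the kubelet reports in
// containerStatus.Resources (what the container is actually running with,
// e.g. after an in-place resize) and falling back to the pod spec.
func ContainerRequestsAndLimits(containerName string, pod *core.Pod) (core.ResourceList, core.ResourceList) {
    for _, cs := range pod.Status.ContainerStatuses {
        if cs.Name == containerName && cs.Resources != nil {
            return cs.Resources.Requests, cs.Resources.Limits
        }
    }
    for _, c := range pod.Spec.Containers {
        if c.Name == containerName {
            return c.Resources.Requests, c.Resources.Limits
        }
    }
    return nil, nil
}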

vertical-pod-autoscaler/pkg/admission-controller/resource/pod/patch/resource_updates.go

+5 -4

@@ -26,6 +26,7 @@ import (
     resource_admission "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/admission-controller/resource"
     "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/admission-controller/resource/pod/recommendation"
     vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1"
+    resourcehelpers "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/resources"
     vpa_api_util "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/vpa"
 )

@@ -76,8 +77,8 @@ func (c *resourcesUpdatesPatchCalculator) CalculatePatches(pod *core.Pod, vpa *v
 func getContainerPatch(pod *core.Pod, i int, annotationsPerContainer vpa_api_util.ContainerToAnnotationsMap, containerResources vpa_api_util.ContainerResources) ([]resource_admission.PatchRecord, string) {
     var patches []resource_admission.PatchRecord
     // Add empty resources object if missing.
-    if pod.Spec.Containers[i].Resources.Limits == nil &&
-        pod.Spec.Containers[i].Resources.Requests == nil {
+    requests, limits := resourcehelpers.ContainerRequestsAndLimits(pod.Spec.Containers[i].Name, pod)
+    if limits == nil && requests == nil {
         patches = append(patches, getPatchInitializingEmptyResources(i))
     }

@@ -86,8 +87,8 @@ func getContainerPatch(pod *core.Pod, i int, annotationsPerContainer vpa_api_uti
         annotations = make([]string, 0)
     }

-    patches, annotations = appendPatchesAndAnnotations(patches, annotations, pod.Spec.Containers[i].Resources.Requests, i, containerResources.Requests, "requests", "request")
-    patches, annotations = appendPatchesAndAnnotations(patches, annotations, pod.Spec.Containers[i].Resources.Limits, i, containerResources.Limits, "limits", "limit")
+    patches, annotations = appendPatchesAndAnnotations(patches, annotations, requests, i, containerResources.Requests, "requests", "request")
+    patches, annotations = appendPatchesAndAnnotations(patches, annotations, limits, i, containerResources.Limits, "limits", "limit")

     updatesAnnotation := fmt.Sprintf("container %d: ", i) + strings.Join(annotations, ", ")
     return patches, updatesAnnotation
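For orientation, the addResourceRequestPatch(0, cpu, "1") expectation in the tests below corresponds to a JSON patch against the pod spec. An illustration only, assuming PatchRecord carries the usual op/path/value fields:

// Hypothetical patch for a CPU request recommendation of "1" on container 0
// (field names assumed, not confirmed by this excerpt).
patch := resource_admission.PatchRecord{
    Op:    "add",
    Path:  "/spec/containers/0/resources/requests/cpu",
    Value: "1",
}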

vertical-pod-autoscaler/pkg/admission-controller/resource/pod/patch/resource_updates_test.go

+42 -1

@@ -103,7 +103,7 @@ func addAnnotationRequest(updateResources [][]string, kind string) resource_admi
     return GetAddAnnotationPatch(ResourceUpdatesAnnotation, vpaUpdates)
 }

-func TestClalculatePatches_ResourceUpdates(t *testing.T) {
+func TestCalculatePatches_ResourceUpdates(t *testing.T) {
     tests := []struct {
         name string
         pod *core.Pod

@@ -164,11 +164,32 @@ func TestClalculatePatches_ResourceUpdates(t *testing.T) {
             addAnnotationRequest([][]string{{cpu}}, request),
         },
     },
+    {
+        name: "replacement cpu request recommendation from container status",
+        pod: test.Pod().
+            AddContainer(core.Container{}).
+            AddContainerStatus(test.ContainerStatus().
+                WithCPURequest(resource.MustParse("0")).Get()).Get(),
+        namespace: "default",
+        recommendResources: []vpa_api_util.ContainerResources{
+            {
+                Requests: core.ResourceList{
+                    cpu: resource.MustParse("1"),
+                },
+            },
+        },
+        recommendAnnotations: vpa_api_util.ContainerToAnnotationsMap{},
+        expectPatches: []resource_admission.PatchRecord{
+            addResourceRequestPatch(0, cpu, "1"),
+            addAnnotationRequest([][]string{{cpu}}, request),
+        },
+    },
     {
         name: "two containers",
         pod: &core.Pod{
             Spec: core.PodSpec{
                 Containers: []core.Container{{
+                    Name: "container-1",
                     Resources: core.ResourceRequirements{
                         Requests: core.ResourceList{
                             cpu: resource.MustParse("0"),

@@ -249,6 +270,26 @@ func TestClalculatePatches_ResourceUpdates(t *testing.T) {
             addAnnotationRequest([][]string{{cpu}}, limit),
         },
     },
+    {
+        name: "replacement cpu limit from container status",
+        pod: test.Pod().
+            AddContainer(core.Container{}).
+            AddContainerStatus(test.ContainerStatus().
+                WithCPULimit(resource.MustParse("0")).Get()).Get(),
+        namespace: "default",
+        recommendResources: []vpa_api_util.ContainerResources{
+            {
+                Limits: core.ResourceList{
+                    cpu: resource.MustParse("1"),
+                },
+            },
+        },
+        recommendAnnotations: vpa_api_util.ContainerToAnnotationsMap{},
+        expectPatches: []resource_admission.PatchRecord{
+            addResourceLimitPatch(0, cpu, "1"),
+            addAnnotationRequest([][]string{{cpu}}, limit),
+        },
+    },
     }
     for _, tc := range tests {
         t.Run(tc.name, func(t *testing.T) {

vertical-pod-autoscaler/pkg/admission-controller/resource/pod/recommendation/recommendation_provider.go

+8 -6

@@ -24,6 +24,7 @@ import (

     vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1"
     "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/limitrange"
+    resourcehelpers "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/resources"
     vpa_api_util "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/vpa"
 )

@@ -53,14 +54,15 @@ func GetContainersResources(pod *core.Pod, vpaResourcePolicy *vpa_types.PodResou
     addAll bool, annotations vpa_api_util.ContainerToAnnotationsMap) []vpa_api_util.ContainerResources {
     resources := make([]vpa_api_util.ContainerResources, len(pod.Spec.Containers))
     for i, container := range pod.Spec.Containers {
+        containerRequests, containerLimits := resourcehelpers.ContainerRequestsAndLimits(container.Name, pod)
         recommendation := vpa_api_util.GetRecommendationForContainer(container.Name, &podRecommendation)
         if recommendation == nil {
             if !addAll {
                 klog.V(2).InfoS("No recommendation found for container, skipping", "container", container.Name)
                 continue
             }
             klog.V(2).InfoS("No match found for container, using Pod request", "container", container.Name)
-            resources[i].Requests = container.Resources.Requests
+            resources[i].Requests = containerRequests
         } else {
             resources[i].Requests = recommendation.Target
         }

@@ -70,7 +72,7 @@ func GetContainersResources(pod *core.Pod, vpaResourcePolicy *vpa_types.PodResou
         }
         containerControlledValues := vpa_api_util.GetContainerControlledValues(container.Name, vpaResourcePolicy)
         if containerControlledValues == vpa_types.ContainerControlledValuesRequestsAndLimits {
-            proportionalLimits, limitAnnotations := vpa_api_util.GetProportionalLimit(container.Resources.Limits, container.Resources.Requests, resources[i].Requests, defaultLimit)
+            proportionalLimits, limitAnnotations := vpa_api_util.GetProportionalLimit(containerLimits, containerRequests, resources[i].Requests, defaultLimit)
             if proportionalLimits != nil {
                 resources[i].Limits = proportionalLimits
                 if len(limitAnnotations) > 0 {

@@ -88,19 +90,19 @@ func GetContainersResources(pod *core.Pod, vpaResourcePolicy *vpa_types.PodResou
             resources[i].Limits = core.ResourceList{}
         }

-        cpuRequest, hasCpuRequest := container.Resources.Requests[core.ResourceCPU]
+        cpuRequest, hasCpuRequest := containerRequests[core.ResourceCPU]
         if _, ok := resources[i].Requests[core.ResourceCPU]; !ok && hasCpuRequest {
             resources[i].Requests[core.ResourceCPU] = cpuRequest
         }
-        memRequest, hasMemRequest := container.Resources.Requests[core.ResourceMemory]
+        memRequest, hasMemRequest := containerRequests[core.ResourceMemory]
         if _, ok := resources[i].Requests[core.ResourceMemory]; !ok && hasMemRequest {
             resources[i].Requests[core.ResourceMemory] = memRequest
         }
-        cpuLimit, hasCpuLimit := container.Resources.Limits[core.ResourceCPU]
+        cpuLimit, hasCpuLimit := containerLimits[core.ResourceCPU]
         if _, ok := resources[i].Limits[core.ResourceCPU]; !ok && hasCpuLimit {
             resources[i].Limits[core.ResourceCPU] = cpuLimit
         }
-        memLimit, hasMemLimit := container.Resources.Limits[core.ResourceMemory]
+        memLimit, hasMemLimit := containerLimits[core.ResourceMemory]
         if _, ok := resources[i].Limits[core.ResourceMemory]; !ok && hasMemLimit {
             resources[i].Limits[core.ResourceMemory] = memLimit
         }
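The proportional-limit path above can be sanity-checked against the new test case in the file below: requests and limits are set only in containerStatus (CPU request 1, limit 10; memory request 1M, limit 3M) and the VPA target is 3 CPU / 2M, so the expected limits are 30 CPU / 6M. A standalone sketch of that ratio, assuming GetProportionalLimit scales the limit by newRequest/originalRequest (rounding details may differ):

package main

import (
    "fmt"

    "k8s.io/apimachinery/pkg/api/resource"
)

func main() {
    origRequest := resource.MustParse("1") // CPU request from containerStatus
    origLimit := resource.MustParse("10")  // CPU limit from containerStatus
    newRequest := resource.MustParse("3")  // VPA target

    // newLimit = origLimit * newRequest / origRequest, in milli-units.
    newLimitMilli := origLimit.MilliValue() * newRequest.MilliValue() / origRequest.MilliValue()
    fmt.Println(newLimitMilli) // 30000 milli-CPU, i.e. the expected "30"
}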

vertical-pod-autoscaler/pkg/admission-controller/resource/pod/recommendation/recommendation_provider_test.go

+17 -1

@@ -365,6 +365,7 @@ func TestGetContainersResources(t *testing.T) {
     testCases := []struct {
         name string
         container apiv1.Container
+        containerStatus apiv1.ContainerStatus
         vpa *vpa_types.VerticalPodAutoscaler
         expectedCPU *resource.Quantity
         expectedMem *resource.Quantity

@@ -518,11 +519,26 @@ func TestGetContainersResources(t *testing.T) {
         expectedMemLimit: mustParseResourcePointer("20M"),
         addAll: false,
     },
+    {
+        name: "CPU and memory recommendation, request and limits only set in containerStatus, addAll true",
+        container: test.Container().WithName("container").Get(),
+        containerStatus: test.ContainerStatus().WithName("container").
+            WithCPURequest(resource.MustParse("1")).
+            WithMemRequest(resource.MustParse("1M")).
+            WithCPULimit(resource.MustParse("10")).
+            WithMemLimit(resource.MustParse("3M")).Get(),
+        vpa: test.VerticalPodAutoscaler().WithContainer("container").WithTarget("3", "2M").Get(),
+        expectedCPU: mustParseResourcePointer("3"),
+        expectedMem: mustParseResourcePointer("2M"),
+        expectedCPULimit: mustParseResourcePointer("30"),
+        expectedMemLimit: mustParseResourcePointer("6M"),
+        addAll: true,
+    },
     }

     for _, tc := range testCases {
         t.Run(tc.name, func(t *testing.T) {
-            pod := test.Pod().WithName("pod").AddContainer(tc.container).Get()
+            pod := test.Pod().WithName("pod").AddContainer(tc.container).AddContainerStatus(tc.containerStatus).Get()
             resources := GetContainersResources(pod, tc.vpa.Spec.ResourcePolicy, *tc.vpa.Status.Recommendation, nil, tc.addAll, vpa_api_util.ContainerToAnnotationsMap{})

             cpu, cpuPresent := resources[0].Requests[apiv1.ResourceCPU]

vertical-pod-autoscaler/pkg/recommender/input/oom/observer.go

+6 -1

@@ -25,6 +25,7 @@ import (
     "k8s.io/client-go/tools/cache"

     "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/recommender/model"
+    resourcehelpers "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/resources"

     "k8s.io/klog/v2"
 )

@@ -156,7 +157,11 @@ func (o *observer) OnUpdate(oldObj, newObj interface{}) {
     if oldStatus != nil && containerStatus.RestartCount > oldStatus.RestartCount {
         oldSpec := findSpec(containerStatus.Name, oldPod.Spec.Containers)
         if oldSpec != nil {
-            memory := oldSpec.Resources.Requests[apiv1.ResourceMemory]
+            requests, _ := resourcehelpers.ContainerRequestsAndLimits(containerStatus.Name, oldPod)
+            var memory resource.Quantity
+            if requests != nil {
+                memory = requests[apiv1.ResourceMemory]
+            }
             oomInfo := OomInfo{
                 Timestamp: containerStatus.LastTerminationState.Terminated.FinishedAt.Time.UTC(),
                 Memory: model.ResourceAmount(memory.Value()),
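A Go detail behind the guarded lookup above: apiv1.ResourceList is a map type, and reading a missing key (even from a nil map) returns a zero resource.Quantity, so when neither the spec nor the status records a memory request the OOM sample is stored with Memory 0. The "does not set memory requests" test case below pins down exactly that fallback:

var requests apiv1.ResourceList // nil: no requests recorded anywhere

// Indexing a nil map is safe in Go and yields the zero value.
memory := requests[apiv1.ResourceMemory]
fmt.Println(memory.Value()) // 0, recorded as model.ResourceAmount(0)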

vertical-pod-autoscaler/pkg/recommender/input/oom/observer_test.go

+77 -10

@@ -100,18 +100,85 @@ func TestOOMReceived(t *testing.T) {
     assert.NoError(t, err)
     p2, err := newPod(pod2Yaml)
     assert.NoError(t, err)
-    observer := NewObserver()
-    go observer.OnUpdate(p1, p2)
-
-    info := <-observer.observedOomsChannel
-    container := info.ContainerID
-    assert.Equal(t, "mockNamespace", container.PodID.Namespace)
-    assert.Equal(t, "Pod1", container.PodID.PodName)
-    assert.Equal(t, "Name11", container.ContainerName)
-    assert.Equal(t, model.ResourceAmount(int64(1024)), info.Memory)
     timestamp, err := time.Parse(time.RFC3339, "2018-02-23T13:38:48Z")
     assert.NoError(t, err)
-    assert.Equal(t, timestamp.Unix(), info.Timestamp.Unix())
+
+    testCases := []struct {
+        desc string
+        oldPod *v1.Pod
+        newPod *v1.Pod
+        wantOOMInfo OomInfo
+    }{
+        {
+            desc: "OK",
+            oldPod: p1,
+            newPod: p2,
+            wantOOMInfo: OomInfo{
+                ContainerID: model.ContainerID{
+                    ContainerName: "Name11",
+                    PodID: model.PodID{
+                        Namespace: "mockNamespace",
+                        PodName: "Pod1",
+                    },
+                },
+                Memory: model.ResourceAmount(int64(1024)),
+                Timestamp: timestamp,
+            },
+        },
+        {
+            desc: "Old pod does not set memory requests",
+            oldPod: func() *v1.Pod {
+                oldPod := p1.DeepCopy()
+                oldPod.Spec.Containers[0].Resources.Requests = nil
+                oldPod.Status.ContainerStatuses[0].Resources = nil
+                return oldPod
+            }(),
+            newPod: p2,
+            wantOOMInfo: OomInfo{
+                ContainerID: model.ContainerID{
+                    ContainerName: "Name11",
+                    PodID: model.PodID{
+                        Namespace: "mockNamespace",
+                        PodName: "Pod1",
+                    },
+                },
+                Memory: model.ResourceAmount(int64(0)),
+                Timestamp: timestamp,
+            },
+        },
+        {
+            desc: "Old pod also set memory request in containerStatus, prefer info from containerStatus",
+            oldPod: func() *v1.Pod {
+                oldPod := p1.DeepCopy()
+                oldPod.Status.ContainerStatuses[0].Resources = &v1.ResourceRequirements{
+                    Requests: v1.ResourceList{
+                        v1.ResourceMemory: resource.MustParse("2048"),
+                    },
+                }
+                return oldPod
+            }(),
+            newPod: p2,
+            wantOOMInfo: OomInfo{
+                ContainerID: model.ContainerID{
+                    ContainerName: "Name11",
+                    PodID: model.PodID{
+                        Namespace: "mockNamespace",
+                        PodName: "Pod1",
+                    },
+                },
+                Memory: model.ResourceAmount(int64(2048)),
+                Timestamp: timestamp,
+            },
+        },
+    }
+    for _, tc := range testCases {
+        t.Run(tc.desc, func(t *testing.T) {
+            observer := NewObserver()
+            observer.OnUpdate(tc.oldPod, tc.newPod)
+            info := <-observer.observedOomsChannel
+            assert.Equal(t, tc.wantOOMInfo, info)
+        })
+    }
 }

 func TestMalformedPodReceived(t *testing.T) {

vertical-pod-autoscaler/pkg/updater/priority/priority_processor.go

+3 -1

@@ -24,6 +24,7 @@ import (

     vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1"
     "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/annotations"
+    resourcehelpers "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/resources"
     vpa_api_util "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/vpa"
 )

@@ -65,7 +66,8 @@ func (*defaultPriorityProcessor) GetUpdatePriority(pod *apiv1.Pod, vpa *vpa_type
     totalRecommendedPerResource[resourceName] += recommended.MilliValue()
     lowerBound, hasLowerBound := recommendedRequest.LowerBound[resourceName]
     upperBound, hasUpperBound := recommendedRequest.UpperBound[resourceName]
-    if request, hasRequest := podContainer.Resources.Requests[resourceName]; hasRequest {
+    requests, _ := resourcehelpers.ContainerRequestsAndLimits(podContainer.Name, pod)
+    if request, hasRequest := requests[resourceName]; hasRequest {
         totalRequestPerResource[resourceName] += request.MilliValue()
         if recommended.MilliValue() > request.MilliValue() {
             scaleUp = true

vertical-pod-autoscaler/pkg/updater/priority/priority_processor_test.go

+10

@@ -46,6 +46,16 @@ func TestGetUpdatePriority(t *testing.T) {
         ResourceDiff: 4.0,
         ScaleUp: true,
     },
+}, {
+    name: "simple scale up, resources from containerStatus have higher priority",
+    pod: test.Pod().WithName("POD1").AddContainer(test.Container().WithName(containerName).WithCPURequest(resource.MustParse("10000")).Get()).
+        AddContainerStatus(test.ContainerStatus().WithName(containerName).WithCPURequest(resource.MustParse("2")).Get()).Get(),
+    vpa: test.VerticalPodAutoscaler().WithContainer(containerName).WithTarget("10", "").Get(),
+    expectedPrio: PodPriority{
+        OutsideRecommendedRange: false,
+        ResourceDiff: 4.0,
+        ScaleUp: true,
+    },
 }, {
     name: "simple scale down",
     pod: test.Pod().WithName("POD1").AddContainer(test.Container().WithName(containerName).WithCPURequest(resource.MustParse("4")).Get()).Get(),

vertical-pod-autoscaler/pkg/updater/priority/scaling_direction_pod_eviction_admission.go

+7 -5

@@ -20,6 +20,7 @@ import (
     apiv1 "k8s.io/api/core/v1"

     vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1"
+    resourcehelpers "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/resources"
     vpa_utils "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/vpa"
 )

@@ -50,23 +51,24 @@ func (s *scalingDirectionPodEvictionAdmission) Admit(pod *apiv1.Pod, resources *
         if recommendedResources == nil {
             continue
         }
-        if s.admitContainer(container, recommendedResources, podEvictionRequirements) {
+        containerRequests, _ := resourcehelpers.ContainerRequestsAndLimits(container.Name, pod)
+        if s.admitContainer(containerRequests, recommendedResources, podEvictionRequirements) {
             return true
         }
     }
     return false
 }

-func (s *scalingDirectionPodEvictionAdmission) admitContainer(container apiv1.Container, recommendedResources *vpa_types.RecommendedContainerResources, podEvictionRequirements []*vpa_types.EvictionRequirement) bool {
-    _, foundCPURequests := container.Resources.Requests[apiv1.ResourceCPU]
+func (s *scalingDirectionPodEvictionAdmission) admitContainer(containerRequests apiv1.ResourceList, recommendedResources *vpa_types.RecommendedContainerResources, podEvictionRequirements []*vpa_types.EvictionRequirement) bool {
+    _, foundCPURequests := containerRequests[apiv1.ResourceCPU]
     if !foundCPURequests {
         return true
     }
-    _, foundMemoryRequests := container.Resources.Requests[apiv1.ResourceMemory]
+    _, foundMemoryRequests := containerRequests[apiv1.ResourceMemory]
     if !foundMemoryRequests {
         return true
     }
-    return s.checkEvictionRequirementsForContainer(container.Resources.Requests, recommendedResources.Target, podEvictionRequirements)
+    return s.checkEvictionRequirementsForContainer(containerRequests, recommendedResources.Target, podEvictionRequirements)
 }

 func (s *scalingDirectionPodEvictionAdmission) checkEvictionRequirementsForContainer(requestedResources apiv1.ResourceList, recommendedResources apiv1.ResourceList, evictionRequirements []*vpa_types.EvictionRequirement) bool {
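With the new signature, admitContainer inspects a bare ResourceList, so the "no requests set" escape hatches now also apply when the requests live only in the status. A hypothetical condensation of those early returns, using the helper sketched at the top of this page:

requests, _ := resourcehelpers.ContainerRequestsAndLimits(container.Name, pod)
_, hasCPU := requests[apiv1.ResourceCPU]
_, hasMem := requests[apiv1.ResourceMemory]
// Without both requests no scaling direction can be computed,
// so the container does not block eviction.
admit := !hasCPU || !hasMem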
