Commit d883c8a

Merge pull request #1640 from ingvagabund/referenced-resource-list
ReferencedResourceList: alias for map[v1.ResourceName]*resource.Quantity to avoid the type definition duplication
2 parents: fd9f2b4 + 50dd3b8

9 files changed, +54 -47 lines

pkg/api/types.go (+4)
@@ -18,6 +18,7 @@ package api

 import (
 	v1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 )
@@ -104,3 +105,6 @@ type MetricsCollector struct {
 	// Later, the collection can be extended to other providers.
 	Enabled bool
 }
+
+// ReferencedResourceList is an adaptation of v1.ResourceList with resources as references
+type ReferencedResourceList = map[v1.ResourceName]*resource.Quantity
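
Worth noting for reviewers: the `=` makes ReferencedResourceList a type alias rather than a new defined type, so it is the same type to the compiler as map[v1.ResourceName]*resource.Quantity and none of the call sites below need conversions. A minimal sketch of that interchangeability, assuming sigs.k8s.io/descheduler is on the module path; the printCPU helper is made up for illustration:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"

	"sigs.k8s.io/descheduler/pkg/api"
)

// printCPU is a hypothetical helper that accepts the alias.
func printCPU(usage api.ReferencedResourceList) {
	fmt.Printf("cpu: %vm\n", usage[v1.ResourceCPU].MilliValue())
}

func main() {
	// A plain map of the underlying type; because the alias is declared with
	// `type X = Y`, it assigns and passes with no conversion.
	usage := map[v1.ResourceName]*resource.Quantity{
		v1.ResourceCPU: resource.NewMilliQuantity(1500, resource.DecimalSI),
	}
	printCPU(usage) // cpu: 1500m
}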

pkg/descheduler/metricscollector/metricscollector.go (+9 -8)
@@ -32,6 +32,7 @@ import (
 	"k8s.io/klog/v2"
 	metricsclient "k8s.io/metrics/pkg/client/clientset/versioned"
 	utilptr "k8s.io/utils/ptr"
+	"sigs.k8s.io/descheduler/pkg/api"
 )

 const (
@@ -43,7 +44,7 @@ type MetricsCollector struct {
 	metricsClientset metricsclient.Interface
 	nodeSelector     labels.Selector

-	nodes map[string]map[v1.ResourceName]*resource.Quantity
+	nodes map[string]api.ReferencedResourceList

 	mu sync.RWMutex
 	// hasSynced signals at least one sync succeeded
@@ -55,7 +56,7 @@ func NewMetricsCollector(nodeLister listercorev1.NodeLister, metricsClientset me
 		nodeLister:       nodeLister,
 		metricsClientset: metricsClientset,
 		nodeSelector:     nodeSelector,
-		nodes:            make(map[string]map[v1.ResourceName]*resource.Quantity),
+		nodes:            make(map[string]api.ReferencedResourceList),
 	}
 }

@@ -77,13 +78,13 @@ func weightedAverage(prevValue, value int64) int64 {
 	return int64(math.Round(beta*float64(prevValue) + (1-beta)*float64(value)))
 }

-func (mc *MetricsCollector) AllNodesUsage() (map[string]map[v1.ResourceName]*resource.Quantity, error) {
+func (mc *MetricsCollector) AllNodesUsage() (map[string]api.ReferencedResourceList, error) {
 	mc.mu.RLock()
 	defer mc.mu.RUnlock()

-	allNodesUsage := make(map[string]map[v1.ResourceName]*resource.Quantity)
+	allNodesUsage := make(map[string]api.ReferencedResourceList)
 	for nodeName := range mc.nodes {
-		allNodesUsage[nodeName] = map[v1.ResourceName]*resource.Quantity{
+		allNodesUsage[nodeName] = api.ReferencedResourceList{
 			v1.ResourceCPU:    utilptr.To[resource.Quantity](mc.nodes[nodeName][v1.ResourceCPU].DeepCopy()),
 			v1.ResourceMemory: utilptr.To[resource.Quantity](mc.nodes[nodeName][v1.ResourceMemory].DeepCopy()),
 		}
@@ -92,15 +93,15 @@ func (mc *MetricsCollector) AllNodesUsage() (map[string]map[v1.ResourceName]*res
 	return allNodesUsage, nil
 }

-func (mc *MetricsCollector) NodeUsage(node *v1.Node) (map[v1.ResourceName]*resource.Quantity, error) {
+func (mc *MetricsCollector) NodeUsage(node *v1.Node) (api.ReferencedResourceList, error) {
 	mc.mu.RLock()
 	defer mc.mu.RUnlock()

 	if _, exists := mc.nodes[node.Name]; !exists {
 		klog.V(4).InfoS("unable to find node in the collected metrics", "node", klog.KObj(node))
 		return nil, fmt.Errorf("unable to find node %q in the collected metrics", node.Name)
 	}
-	return map[v1.ResourceName]*resource.Quantity{
+	return api.ReferencedResourceList{
 		v1.ResourceCPU:    utilptr.To[resource.Quantity](mc.nodes[node.Name][v1.ResourceCPU].DeepCopy()),
 		v1.ResourceMemory: utilptr.To[resource.Quantity](mc.nodes[node.Name][v1.ResourceMemory].DeepCopy()),
 	}, nil
@@ -131,7 +132,7 @@ func (mc *MetricsCollector) Collect(ctx context.Context) error {
 		}

 		if _, exists := mc.nodes[node.Name]; !exists {
-			mc.nodes[node.Name] = map[v1.ResourceName]*resource.Quantity{
+			mc.nodes[node.Name] = api.ReferencedResourceList{
 				v1.ResourceCPU:    utilptr.To[resource.Quantity](metrics.Usage.Cpu().DeepCopy()),
 				v1.ResourceMemory: utilptr.To[resource.Quantity](metrics.Usage.Memory().DeepCopy()),
 			}
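
Because the map now carries *resource.Quantity pointers, the accessors above return deep copies re-wrapped into pointers via utilptr.To, so callers cannot mutate the collector's internal state guarded by mc.mu. A minimal standalone sketch of that copy-on-read pattern, with a made-up stored value standing in for the collector's fields:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
	utilptr "k8s.io/utils/ptr"
)

func main() {
	// stored stands in for a quantity held inside the collector.
	stored := resource.NewMilliQuantity(1500, resource.DecimalSI)

	// DeepCopy returns a resource.Quantity value; utilptr.To turns it back
	// into a pointer, so the snapshot owns independent memory.
	snapshot := utilptr.To[resource.Quantity](stored.DeepCopy())

	// Mutating the snapshot leaves the stored value untouched.
	snapshot.Add(*resource.NewMilliQuantity(500, resource.DecimalSI))
	fmt.Println(stored.MilliValue(), snapshot.MilliValue()) // 1500 2000
}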

pkg/descheduler/metricscollector/metricscollector_test.go (+2 -1)
@@ -29,10 +29,11 @@ import (
 	fakeclientset "k8s.io/client-go/kubernetes/fake"
 	fakemetricsclient "k8s.io/metrics/pkg/client/clientset/versioned/fake"

+	"sigs.k8s.io/descheduler/pkg/api"
 	"sigs.k8s.io/descheduler/test"
 )

-func checkCpuNodeUsage(t *testing.T, usage map[v1.ResourceName]*resource.Quantity, millicpu int64) {
+func checkCpuNodeUsage(t *testing.T, usage api.ReferencedResourceList, millicpu int64) {
 	t.Logf("current node cpu usage: %v\n", usage[v1.ResourceCPU].MilliValue())
 	if usage[v1.ResourceCPU].MilliValue() != millicpu {
 		t.Fatalf("cpu node usage expected to be %v, got %v instead", millicpu, usage[v1.ResourceCPU].MilliValue())

pkg/descheduler/node/node.go (+5 -4)
@@ -30,6 +30,7 @@ import (
 	listersv1 "k8s.io/client-go/listers/core/v1"
 	"k8s.io/client-go/util/workqueue"
 	"k8s.io/klog/v2"
+	"sigs.k8s.io/descheduler/pkg/api"
 	podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
 	"sigs.k8s.io/descheduler/pkg/utils"
 )
@@ -244,7 +245,7 @@ func fitsRequest(nodeIndexer podutil.GetPodsAssignedToNodeFunc, pod *v1.Pod, nod
 }

 // nodeAvailableResources returns resources mapped to the quantity available on the node.
-func nodeAvailableResources(nodeIndexer podutil.GetPodsAssignedToNodeFunc, node *v1.Node, resourceNames []v1.ResourceName, podUtilization podutil.PodUtilizationFnc) (map[v1.ResourceName]*resource.Quantity, error) {
+func nodeAvailableResources(nodeIndexer podutil.GetPodsAssignedToNodeFunc, node *v1.Node, resourceNames []v1.ResourceName, podUtilization podutil.PodUtilizationFnc) (api.ReferencedResourceList, error) {
 	podsOnNode, err := podutil.ListPodsOnANode(node.Name, nodeIndexer, nil)
 	if err != nil {
 		return nil, err
@@ -253,7 +254,7 @@ func nodeAvailableResources(nodeIndexer podutil.GetPodsAssignedToNodeFunc, node
 	if err != nil {
 		return nil, err
 	}
-	remainingResources := map[v1.ResourceName]*resource.Quantity{
+	remainingResources := api.ReferencedResourceList{
 		v1.ResourceCPU:    resource.NewMilliQuantity(node.Status.Allocatable.Cpu().MilliValue()-nodeUtilization[v1.ResourceCPU].MilliValue(), resource.DecimalSI),
 		v1.ResourceMemory: resource.NewQuantity(node.Status.Allocatable.Memory().Value()-nodeUtilization[v1.ResourceMemory].Value(), resource.BinarySI),
 		v1.ResourcePods:   resource.NewQuantity(node.Status.Allocatable.Pods().Value()-nodeUtilization[v1.ResourcePods].Value(), resource.DecimalSI),
@@ -273,8 +274,8 @@
 }

 // NodeUtilization returns the resources requested by the given pods. Only resources supplied in the resourceNames parameter are calculated.
-func NodeUtilization(pods []*v1.Pod, resourceNames []v1.ResourceName, podUtilization podutil.PodUtilizationFnc) (map[v1.ResourceName]*resource.Quantity, error) {
-	totalUtilization := map[v1.ResourceName]*resource.Quantity{
+func NodeUtilization(pods []*v1.Pod, resourceNames []v1.ResourceName, podUtilization podutil.PodUtilizationFnc) (api.ReferencedResourceList, error) {
+	totalUtilization := api.ReferencedResourceList{
 		v1.ResourceCPU:    resource.NewMilliQuantity(0, resource.DecimalSI),
 		v1.ResourceMemory: resource.NewQuantity(0, resource.BinarySI),
 		v1.ResourcePods:   resource.NewQuantity(int64(len(pods)), resource.DecimalSI),
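
nodeAvailableResources computes headroom as allocatable minus utilization, building fresh quantities at the matching scale (milli-CPU as DecimalSI, memory bytes as BinarySI). A self-contained sketch of the same arithmetic, with invented capacity and usage numbers in place of a live node object:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	// Invented allocatable capacity and current utilization for one node.
	allocatableCPU := resource.NewMilliQuantity(4000, resource.DecimalSI)       // 4 cores
	allocatableMem := resource.NewQuantity(8*1024*1024*1024, resource.BinarySI) // 8Gi
	usedCPU := resource.NewMilliQuantity(2500, resource.DecimalSI)              // 2.5 cores requested
	usedMem := resource.NewQuantity(3*1024*1024*1024, resource.BinarySI)        // 3Gi requested

	// Same shape as the remainingResources map in the diff above.
	remaining := map[v1.ResourceName]*resource.Quantity{
		v1.ResourceCPU:    resource.NewMilliQuantity(allocatableCPU.MilliValue()-usedCPU.MilliValue(), resource.DecimalSI),
		v1.ResourceMemory: resource.NewQuantity(allocatableMem.Value()-usedMem.Value(), resource.BinarySI),
	}
	fmt.Println(remaining[v1.ResourceCPU].MilliValue()) // 1500
	fmt.Println(remaining[v1.ResourceMemory].String())  // 5Gi
}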

pkg/framework/plugins/nodeutilization/highnodeutilization.go (+1 -2)
@@ -21,7 +21,6 @@ import (
 	"fmt"

 	v1 "k8s.io/api/core/v1"
-	"k8s.io/apimachinery/pkg/api/resource"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/klog/v2"
 	"sigs.k8s.io/descheduler/pkg/api"
@@ -138,7 +137,7 @@ func (h *HighNodeUtilization) Balance(ctx context.Context, nodes []*v1.Node) *fr
 	}

 	// stop if the total available usage has dropped to zero - no more pods can be scheduled
-	continueEvictionCond := func(nodeInfo NodeInfo, totalAvailableUsage map[v1.ResourceName]*resource.Quantity) bool {
+	continueEvictionCond := func(nodeInfo NodeInfo, totalAvailableUsage api.ReferencedResourceList) bool {
 		for name := range totalAvailableUsage {
 			if totalAvailableUsage[name].CmpInt64(0) < 1 {
 				return false
pkg/framework/plugins/nodeutilization/lownodeutilization.go (+1 -2)
@@ -21,7 +21,6 @@ import (
 	"fmt"

 	v1 "k8s.io/api/core/v1"
-	"k8s.io/apimachinery/pkg/api/resource"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/klog/v2"

@@ -168,7 +167,7 @@ func (l *LowNodeUtilization) Balance(ctx context.Context, nodes []*v1.Node) *fra
 	}

 	// stop if node utilization drops below target threshold or any of required capacity (cpu, memory, pods) is moved
-	continueEvictionCond := func(nodeInfo NodeInfo, totalAvailableUsage map[v1.ResourceName]*resource.Quantity) bool {
+	continueEvictionCond := func(nodeInfo NodeInfo, totalAvailableUsage api.ReferencedResourceList) bool {
 		if !isNodeAboveTargetUtilization(nodeInfo.NodeUsage, nodeInfo.thresholds.highResourceThreshold) {
 			return false
 		}
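
Both plugins hand eviction a closure of the continueEvictionCond type (its aliased signature appears in nodeutilization.go below); only the stop condition differs between them. A hedged standalone sketch of wiring such a closure, with NodeInfo reduced to a stub so the snippet compiles on its own:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

// ReferencedResourceList mirrors the alias from pkg/api for this standalone sketch.
type ReferencedResourceList = map[v1.ResourceName]*resource.Quantity

// NodeInfo is a stub; continueEvictionCond mirrors the type from nodeutilization.go.
type NodeInfo struct{}
type continueEvictionCond func(nodeInfo NodeInfo, totalAvailableUsage ReferencedResourceList) bool

func main() {
	// HighNodeUtilization-style condition: keep evicting while every tracked
	// resource still has a positive eviction budget.
	var cond continueEvictionCond = func(_ NodeInfo, total ReferencedResourceList) bool {
		for name := range total {
			if total[name].CmpInt64(0) < 1 { // budget for this resource exhausted
				return false
			}
		}
		return true
	}

	budget := ReferencedResourceList{
		v1.ResourceCPU:  resource.NewMilliQuantity(200, resource.DecimalSI),
		v1.ResourcePods: resource.NewQuantity(0, resource.DecimalSI),
	}
	fmt.Println(cond(NodeInfo{}, budget)) // false: the pod budget is zero
}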

pkg/framework/plugins/nodeutilization/nodeutilization.go (+11 -11)
@@ -37,21 +37,21 @@ import (
 // NodeUsage stores a node's info, pods on it, thresholds and its resource usage
 type NodeUsage struct {
 	node    *v1.Node
-	usage   map[v1.ResourceName]*resource.Quantity
+	usage   api.ReferencedResourceList
 	allPods []*v1.Pod
 }

 type NodeThresholds struct {
-	lowResourceThreshold  map[v1.ResourceName]*resource.Quantity
-	highResourceThreshold map[v1.ResourceName]*resource.Quantity
+	lowResourceThreshold  api.ReferencedResourceList
+	highResourceThreshold api.ReferencedResourceList
 }

 type NodeInfo struct {
 	NodeUsage
 	thresholds NodeThresholds
 }

-type continueEvictionCond func(nodeInfo NodeInfo, totalAvailableUsage map[v1.ResourceName]*resource.Quantity) bool
+type continueEvictionCond func(nodeInfo NodeInfo, totalAvailableUsage api.ReferencedResourceList) bool

 // NodePodsMap is a set of (node, pods) pairs
 type NodePodsMap map[*v1.Node][]*v1.Pod
@@ -94,8 +94,8 @@ func getNodeThresholds(
 		}

 		nodeThresholdsMap[node.Name] = NodeThresholds{
-			lowResourceThreshold:  map[v1.ResourceName]*resource.Quantity{},
-			highResourceThreshold: map[v1.ResourceName]*resource.Quantity{},
+			lowResourceThreshold:  api.ReferencedResourceList{},
+			highResourceThreshold: api.ReferencedResourceList{},
 		}

 		for _, resourceName := range resourceNames {
@@ -206,7 +206,7 @@ func classifyNodes(
 	return lowNodes, highNodes
 }

-func usageToKeysAndValues(usage map[v1.ResourceName]*resource.Quantity) []interface{} {
+func usageToKeysAndValues(usage api.ReferencedResourceList) []interface{} {
 	// log message in one line
 	keysAndValues := []interface{}{}
 	if quantity, exists := usage[v1.ResourceCPU]; exists {
@@ -241,7 +241,7 @@ func evictPodsFromSourceNodes(
 	usageClient usageClient,
 ) {
 	// upper bound on total number of pods/cpu/memory and optional extended resources to be moved
-	totalAvailableUsage := map[v1.ResourceName]*resource.Quantity{}
+	totalAvailableUsage := api.ReferencedResourceList{}
 	for _, resourceName := range resourceNames {
 		totalAvailableUsage[resourceName] = &resource.Quantity{}
 	}
@@ -296,7 +296,7 @@ func evictPods(
 	evictableNamespaces *api.Namespaces,
 	inputPods []*v1.Pod,
 	nodeInfo NodeInfo,
-	totalAvailableUsage map[v1.ResourceName]*resource.Quantity,
+	totalAvailableUsage api.ReferencedResourceList,
 	taintsOfLowNodes map[string][]v1.Taint,
 	podEvictor frameworktypes.Evictor,
 	evictOptions evictions.EvictOptions,
@@ -400,7 +400,7 @@ func sortNodesByUsage(nodes []NodeInfo, ascending bool) {

 // isNodeAboveTargetUtilization checks if a node is overutilized
 // At least one resource has to be above the high threshold
-func isNodeAboveTargetUtilization(usage NodeUsage, threshold map[v1.ResourceName]*resource.Quantity) bool {
+func isNodeAboveTargetUtilization(usage NodeUsage, threshold api.ReferencedResourceList) bool {
 	for name, nodeValue := range usage.usage {
 		// usage.highResourceThreshold[name] < nodeValue
 		if threshold[name].Cmp(*nodeValue) == -1 {
@@ -412,7 +412,7 @@ func isNodeAboveTargetUtilization(usage NodeUsage, threshold map[v1.ResourceName

 // isNodeWithLowUtilization checks if a node is underutilized
 // All resources have to be below the low threshold
-func isNodeWithLowUtilization(usage NodeUsage, threshold map[v1.ResourceName]*resource.Quantity) bool {
+func isNodeWithLowUtilization(usage NodeUsage, threshold api.ReferencedResourceList) bool {
 	for name, nodeValue := range usage.usage {
 		// usage.lowResourceThreshold[name] < nodeValue
 		if threshold[name].Cmp(*nodeValue) == -1 {
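
Both threshold checks lean on resource.Quantity.Cmp, which returns -1, 0, or 1, so threshold[name].Cmp(*nodeValue) == -1 reads as "threshold < usage". A quick self-contained illustration:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	threshold := resource.NewMilliQuantity(800, resource.DecimalSI) // 800m
	usage := resource.NewMilliQuantity(950, resource.DecimalSI)     // 950m

	// Cmp returns -1 if threshold < usage, 0 if equal, 1 if greater,
	// mirroring the "usage above threshold" test in the diff above.
	if threshold.Cmp(*usage) == -1 {
		fmt.Println("usage exceeds threshold") // printed: 800m < 950m
	}
}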

pkg/framework/plugins/nodeutilization/nodeutilization_test.go (+8 -7)
@@ -23,6 +23,7 @@ import (
 	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"sigs.k8s.io/descheduler/pkg/api"
 )

 func BuildTestNodeInfo(name string, apply func(*NodeInfo)) *NodeInfo {
@@ -71,7 +72,7 @@ func TestResourceUsagePercentages(t *testing.T) {
 				},
 			},
 		},
-		usage: map[v1.ResourceName]*resource.Quantity{
+		usage: api.ReferencedResourceList{
 			v1.ResourceCPU:    resource.NewMilliQuantity(1220, resource.DecimalSI),
 			v1.ResourceMemory: resource.NewQuantity(3038982964, resource.BinarySI),
 			v1.ResourcePods:   resource.NewQuantity(11, resource.BinarySI),
@@ -103,21 +104,21 @@ func TestSortNodesByUsage(t *testing.T) {
 			name: "cpu memory pods",
 			nodeInfoList: []NodeInfo{
 				*BuildTestNodeInfo("node1", func(nodeInfo *NodeInfo) {
-					nodeInfo.usage = map[v1.ResourceName]*resource.Quantity{
+					nodeInfo.usage = api.ReferencedResourceList{
 						v1.ResourceCPU:    resource.NewMilliQuantity(1730, resource.DecimalSI),
 						v1.ResourceMemory: resource.NewQuantity(3038982964, resource.BinarySI),
 						v1.ResourcePods:   resource.NewQuantity(25, resource.BinarySI),
 					}
 				}),
 				*BuildTestNodeInfo("node2", func(nodeInfo *NodeInfo) {
-					nodeInfo.usage = map[v1.ResourceName]*resource.Quantity{
+					nodeInfo.usage = api.ReferencedResourceList{
 						v1.ResourceCPU:    resource.NewMilliQuantity(1220, resource.DecimalSI),
 						v1.ResourceMemory: resource.NewQuantity(3038982964, resource.BinarySI),
 						v1.ResourcePods:   resource.NewQuantity(11, resource.BinarySI),
 					}
 				}),
 				*BuildTestNodeInfo("node3", func(nodeInfo *NodeInfo) {
-					nodeInfo.usage = map[v1.ResourceName]*resource.Quantity{
+					nodeInfo.usage = api.ReferencedResourceList{
 						v1.ResourceCPU:    resource.NewMilliQuantity(1530, resource.DecimalSI),
 						v1.ResourceMemory: resource.NewQuantity(5038982964, resource.BinarySI),
 						v1.ResourcePods:   resource.NewQuantity(20, resource.BinarySI),
@@ -130,17 +131,17 @@
 			name: "memory",
 			nodeInfoList: []NodeInfo{
 				*BuildTestNodeInfo("node1", func(nodeInfo *NodeInfo) {
-					nodeInfo.usage = map[v1.ResourceName]*resource.Quantity{
+					nodeInfo.usage = api.ReferencedResourceList{
 						v1.ResourceMemory: resource.NewQuantity(3038982964, resource.BinarySI),
 					}
 				}),
 				*BuildTestNodeInfo("node2", func(nodeInfo *NodeInfo) {
-					nodeInfo.usage = map[v1.ResourceName]*resource.Quantity{
+					nodeInfo.usage = api.ReferencedResourceList{
 						v1.ResourceMemory: resource.NewQuantity(2038982964, resource.BinarySI),
 					}
 				}),
 				*BuildTestNodeInfo("node3", func(nodeInfo *NodeInfo) {
-					nodeInfo.usage = map[v1.ResourceName]*resource.Quantity{
+					nodeInfo.usage = api.ReferencedResourceList{
 						v1.ResourceMemory: resource.NewQuantity(5038982964, resource.BinarySI),
 					}
 				}),