
Commit 5bae930

feature: use contextual logging for plugins

1 parent a31a3b5 commit 5bae930

File tree: 15 files changed, +150 -83 lines

pkg/framework/plugins/defaultevictor/defaultevictor.go

+11 -6

@@ -48,6 +48,7 @@ type constraint func(pod *v1.Pod) error
 // This plugin is only meant to customize other actions (extension points) of the evictor,
 // like filtering, sorting, and other ones that might be relevant in the future
 type DefaultEvictor struct {
+	logger      klog.Logger
 	args        *DefaultEvictorArgs
 	constraints []constraint
 	handle      frameworktypes.Handle
@@ -71,14 +72,16 @@ func New(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plug
 	if !ok {
 		return nil, fmt.Errorf("want args to be of type defaultEvictorFilterArgs, got %T", args)
 	}
+	logger := klog.Background().WithValues("plugin", PluginName)
 
 	ev := &DefaultEvictor{
+		logger: logger,
 		handle: handle,
 		args:   defaultEvictorArgs,
 	}
 
 	if defaultEvictorArgs.EvictFailedBarePods {
-		klog.V(1).InfoS("Warning: EvictFailedBarePods is set to True. This could cause eviction of pods without ownerReferences.")
+		logger.V(1).Info("Warning: EvictFailedBarePods is set to True. This could cause eviction of pods without ownerReferences.")
 		ev.constraints = append(ev.constraints, func(pod *v1.Pod) error {
 			ownerRefList := podutil.OwnerRef(pod)
 			// Enable evictFailedBarePods to evict bare pods in failed phase
@@ -117,7 +120,7 @@ func New(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plug
 			})
 		}
 	} else {
-		klog.V(1).InfoS("Warning: EvictSystemCriticalPods is set to True. This could cause eviction of Kubernetes system pods.")
+		logger.V(1).Info("Warning: EvictSystemCriticalPods is set to True. This could cause eviction of Kubernetes system pods.")
 	}
 	if !defaultEvictorArgs.EvictLocalStoragePods {
 		ev.constraints = append(ev.constraints, func(pod *v1.Pod) error {
@@ -169,7 +172,7 @@ func New(args runtime.Object, handle frameworktypes.Handle) (frameworktypes.Plug
 			}
 
 			if len(pod.OwnerReferences) > 1 {
-				klog.V(5).InfoS("pod has multiple owner references which is not supported for minReplicas check", "size", len(pod.OwnerReferences), "pod", klog.KObj(pod))
+				logger.V(5).Info("pod has multiple owner references which is not supported for minReplicas check", "size", len(pod.OwnerReferences), "pod", klog.KObj(pod))
 				return nil
 			}
 
@@ -218,14 +221,15 @@ func (d *DefaultEvictor) Name() string {
 }
 
 func (d *DefaultEvictor) PreEvictionFilter(pod *v1.Pod) bool {
+	logger := d.logger.WithValues("ExtensionPoint", frameworktypes.PreEvictionFilterExtensionPoint)
 	if d.args.NodeFit {
 		nodes, err := nodeutil.ReadyNodes(context.TODO(), d.handle.ClientSet(), d.handle.SharedInformerFactory().Core().V1().Nodes().Lister(), d.args.NodeSelector)
 		if err != nil {
-			klog.ErrorS(err, "unable to list ready nodes", "pod", klog.KObj(pod))
+			logger.Error(err, "unable to list ready nodes", "pod", klog.KObj(pod))
			return false
 		}
 		if !nodeutil.PodFitsAnyOtherNode(d.handle.GetPodsAssignedToNodeFunc(), pod, nodes) {
-			klog.InfoS("pod does not fit on any other node because of nodeSelector(s), Taint(s), or nodes marked as unschedulable", "pod", klog.KObj(pod))
+			logger.Info("pod does not fit on any other node because of nodeSelector(s), Taint(s), or nodes marked as unschedulable", "pod", klog.KObj(pod))
 			return false
 		}
 		return true
@@ -234,6 +238,7 @@ func (d *DefaultEvictor) PreEvictionFilter(pod *v1.Pod) bool {
 }
 
 func (d *DefaultEvictor) Filter(pod *v1.Pod) bool {
+	logger := d.logger.WithValues("ExtensionPoint", frameworktypes.FilterExtensionPoint)
 	checkErrs := []error{}
 
 	if HaveEvictAnnotation(pod) {
@@ -259,7 +264,7 @@ func (d *DefaultEvictor) Filter(pod *v1.Pod) bool {
 	}
 
 	if len(checkErrs) > 0 {
-		klog.V(4).InfoS("Pod fails the following checks", "pod", klog.KObj(pod), "checks", utilerrors.NewAggregate(checkErrs).Error())
+		logger.V(4).Info("Pod fails the following checks", "pod", klog.KObj(pod), "checks", utilerrors.NewAggregate(checkErrs).Error())
 		return false
 	}
 
pkg/framework/plugins/example/example.go

+4 -1

@@ -46,6 +46,7 @@ var _ fwtypes.DeschedulePlugin = &Example{}
 // Example is our plugin (implementing the DeschedulePlugin interface). This
 // plugin will evict pods that match a regex and are older than a certain age.
 type Example struct {
+	logger    klog.Logger
 	handle    fwtypes.Handle
 	args      *ExampleArgs
 	podFilter podutil.FilterFunc
@@ -61,6 +62,7 @@ func New(args runtime.Object, handle fwtypes.Handle) (fwtypes.Plugin, error) {
 	if !ok {
 		return nil, fmt.Errorf("args must be of type ExampleArgs, got %T", args)
 	}
+	logger := klog.Background().WithValues("plugin", PluginName)
 
 	// we can use the included and excluded namespaces to filter the pods we want
 	// to evict.
@@ -90,6 +92,7 @@ func New(args runtime.Object, handle fwtypes.Handle) (fwtypes.Plugin, error) {
 	}
 
 	return &Example{
+		logger:    logger,
 		handle:    handle,
 		podFilter: podFilter,
 		args:      exampleArgs,
@@ -107,7 +110,7 @@ func (d *Example) Name() string {
 // of nodes we need to process.
 func (d *Example) Deschedule(ctx context.Context, nodes []*v1.Node) *fwtypes.Status {
 	var podsToEvict []*v1.Pod
-	logger := klog.FromContext(ctx)
+	logger := klog.FromContext(klog.NewContext(ctx, d.logger)).WithValues("ExtensionPoint", fwtypes.DescheduleExtensionPoint)
 	logger.Info("Example plugin starting descheduling")
 
 	re, err := regexp.Compile(d.args.Regex)
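
For context-aware extension points such as Deschedule, the stored logger is pushed into the context so that code further down the call chain can recover it with klog.FromContext. A rough, self-contained sketch of that handoff follows; doWork is an illustrative helper under that assumption, not part of the plugin framework.

package main

import (
	"context"

	"k8s.io/klog/v2"
)

// doWork is an illustrative callee: it knows nothing about the plugin but
// still logs with the plugin/ExtensionPoint values because they travel in ctx.
func doWork(ctx context.Context) {
	logger := klog.FromContext(ctx)
	logger.Info("processing nodes")
}

func main() {
	// Plugin-level logger, as built in New().
	pluginLogger := klog.Background().WithValues("plugin", "Example")

	// Inside the extension point: attach the plugin logger to the incoming
	// context, then scope it to the extension point, as the diff does.
	ctx := context.Background()
	logger := klog.FromContext(klog.NewContext(ctx, pluginLogger)).WithValues("ExtensionPoint", "Deschedule")
	logger.Info("Example plugin starting descheduling")

	// Pass the enriched context on so callees inherit the same values.
	doWork(klog.NewContext(ctx, logger))
}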

pkg/framework/plugins/nodeutilization/highnodeutilization.go

+14 -8

@@ -43,6 +43,7 @@ var _ frameworktypes.BalancePlugin = &HighNodeUtilization{}
 // can schedule according to its plugin. Note that CPU/Memory requests are used
 // to calculate nodes' utilization and not the actual resource usage.
 type HighNodeUtilization struct {
+	logger    klog.Logger
 	handle    frameworktypes.Handle
 	args      *HighNodeUtilizationArgs
 	podFilter func(pod *v1.Pod) bool
@@ -63,6 +64,7 @@ func NewHighNodeUtilization(
 			genericArgs,
 		)
 	}
+	logger := klog.Background().WithValues("plugin", HighNodeUtilizationPluginName)
 
 	// this plugins worries only about thresholds but the nodeplugins
 	// package was made to take two thresholds into account, one for low
@@ -102,13 +104,15 @@ func NewHighNodeUtilization(
 	)
 
 	return &HighNodeUtilization{
+		logger:         logger,
 		handle:         handle,
 		args:           args,
 		resourceNames:  resourceNames,
 		highThresholds: highThresholds,
 		criteria:       criteria,
 		podFilter:      podFilter,
 		usageClient: newRequestedUsageClient(
+			logger.WithValues("client", "RequestedUsageClient"),
 			resourceNames,
 			handle.GetPodsAssignedToNodeFunc(),
 		),
@@ -124,6 +128,8 @@ func (h *HighNodeUtilization) Name() string {
 // utilized nodes. The goal here is to concentrate pods in fewer nodes so that
 // less nodes are used.
 func (h *HighNodeUtilization) Balance(ctx context.Context, nodes []*v1.Node) *frameworktypes.Status {
+	logger := klog.FromContext(klog.NewContext(ctx, h.logger)).WithValues("ExtensionPoint", frameworktypes.BalanceExtensionPoint)
+
 	if err := h.usageClient.sync(ctx, nodes); err != nil {
 		return &frameworktypes.Status{
 			Err: fmt.Errorf("error getting node usage: %v", err),
@@ -154,7 +160,7 @@ func (h *HighNodeUtilization) Balance(ctx context.Context, nodes []*v1.Node) *fr
 		// schedulable nodes.
 		func(nodeName string, usage, threshold api.ResourceThresholds) bool {
 			if nodeutil.IsNodeUnschedulable(nodesMap[nodeName]) {
-				klog.V(2).InfoS(
+				logger.V(2).Info(
 					"Node is unschedulable",
 					"node", klog.KObj(nodesMap[nodeName]),
 				)
@@ -173,7 +179,7 @@ func (h *HighNodeUtilization) Balance(ctx context.Context, nodes []*v1.Node) *fr
 	category := []string{"underutilized", "overutilized"}
 	for i := range nodeGroups {
 		for nodeName := range nodeGroups[i] {
-			klog.InfoS(
+			logger.Info(
 				"Node has been classified",
 				"category", category[i],
 				"node", klog.KObj(nodesMap[nodeName]),
@@ -197,18 +203,18 @@ func (h *HighNodeUtilization) Balance(ctx context.Context, nodes []*v1.Node) *fr
 
 	lowNodes, schedulableNodes := nodeInfos[0], nodeInfos[1]
 
-	klog.V(1).InfoS("Criteria for a node below target utilization", h.criteria...)
-	klog.V(1).InfoS("Number of underutilized nodes", "totalNumber", len(lowNodes))
+	logger.V(1).Info("Criteria for a node below target utilization", h.criteria...)
+	logger.V(1).Info("Number of underutilized nodes", "totalNumber", len(lowNodes))
 
 	if len(lowNodes) == 0 {
-		klog.V(1).InfoS(
+		logger.V(1).Info(
 			"No node is underutilized, nothing to do here, you might tune your thresholds further",
 		)
 		return nil
 	}
 
 	if len(lowNodes) <= h.args.NumberOfNodes {
-		klog.V(1).InfoS(
+		logger.V(1).Info(
 			"Number of nodes underutilized is less or equal than NumberOfNodes, nothing to do here",
 			"underutilizedNodes", len(lowNodes),
 			"numberOfNodes", h.args.NumberOfNodes,
@@ -217,12 +223,12 @@ func (h *HighNodeUtilization) Balance(ctx context.Context, nodes []*v1.Node) *fr
 	}
 
 	if len(lowNodes) == len(nodes) {
-		klog.V(1).InfoS("All nodes are underutilized, nothing to do here")
+		logger.V(1).Info("All nodes are underutilized, nothing to do here")
 		return nil
 	}
 
 	if len(schedulableNodes) == 0 {
-		klog.V(1).InfoS("No node is available to schedule the pods, nothing to do here")
+		logger.V(1).Info("No node is available to schedule the pods, nothing to do here")
 		return nil
 	}
 
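
The node-utilization plugins also hand a pre-labelled child logger to their usage clients (the logger.WithValues("client", "RequestedUsageClient") argument above), so client-side log lines identify both the owning plugin and the component. Below is a simplified sketch of that constructor-injection style; sketchUsageClient is hypothetical and far smaller than the real usage client, which takes additional arguments.

package main

import (
	"k8s.io/klog/v2"
)

// sketchUsageClient is a hypothetical stand-in for a usage client; only the
// logger wiring is the point here.
type sketchUsageClient struct {
	logger klog.Logger
}

// newSketchUsageClient receives a logger already scoped by the caller, so it
// never needs to know which plugin owns it.
func newSketchUsageClient(logger klog.Logger) *sketchUsageClient {
	return &sketchUsageClient{logger: logger}
}

func (c *sketchUsageClient) sync() error {
	// Every line carries plugin=... and client=RequestedUsageClient.
	c.logger.Info("syncing node usage")
	return nil
}

func main() {
	pluginLogger := klog.Background().WithValues("plugin", "HighNodeUtilization")
	client := newSketchUsageClient(pluginLogger.WithValues("client", "RequestedUsageClient"))
	_ = client.sync()
}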

pkg/framework/plugins/nodeutilization/lownodeutilization.go

+21 -12

@@ -43,6 +43,7 @@ var _ frameworktypes.BalancePlugin = &LowNodeUtilization{}
 // nodes. Note that CPU/Memory requests are used to calculate nodes'
 // utilization and not the actual resource usage.
 type LowNodeUtilization struct {
+	logger    klog.Logger
 	handle    frameworktypes.Handle
 	args      *LowNodeUtilizationArgs
 	podFilter func(pod *v1.Pod) bool
@@ -66,6 +67,7 @@ func NewLowNodeUtilization(
 			genericArgs,
 		)
 	}
+	logger := klog.Background().WithValues("plugin", LowNodeUtilizationPluginName)
 
 	// resourceNames holds a list of resources for which the user has
 	// provided thresholds for. extendedResourceNames holds those as well
@@ -115,16 +117,18 @@
 	// have the correct one or an error is triggered. XXX MetricsServer is
 	// deprecated, removed once dropped.
 	var usageClient usageClient = newRequestedUsageClient(
+		logger.WithValues("client", "RequestedUsageClient"),
 		extendedResourceNames, handle.GetPodsAssignedToNodeFunc(),
 	)
 	if metrics != nil {
-		usageClient, err = usageClientForMetrics(args, handle, extendedResourceNames)
+		usageClient, err = usageClientForMetrics(logger, args, handle, extendedResourceNames)
 		if err != nil {
 			return nil, err
 		}
 	}
 
 	return &LowNodeUtilization{
+		logger:        logger,
 		handle:        handle,
 		args:          args,
 		underCriteria: underCriteria,
@@ -145,6 +149,8 @@ func (l *LowNodeUtilization) Name() string {
 // utilized nodes to under utilized nodes. The goal here is to evenly
 // distribute pods across nodes.
 func (l *LowNodeUtilization) Balance(ctx context.Context, nodes []*v1.Node) *frameworktypes.Status {
+	logger := klog.FromContext(klog.NewContext(ctx, l.logger)).WithValues("ExtensionPoint", frameworktypes.BalanceExtensionPoint)
+
 	if err := l.usageClient.sync(ctx, nodes); err != nil {
 		return &frameworktypes.Status{
 			Err: fmt.Errorf("error getting node usage: %v", err),
@@ -192,7 +198,7 @@ func (l *LowNodeUtilization) Balance(ctx context.Context, nodes []*v1.Node) *fra
 		// underutilized but aren't schedulable are ignored.
 		func(nodeName string, usage, threshold api.ResourceThresholds) bool {
 			if nodeutil.IsNodeUnschedulable(nodesMap[nodeName]) {
-				klog.V(2).InfoS(
+				logger.V(2).Info(
 					"Node is unschedulable, thus not considered as underutilized",
 					"node", klog.KObj(nodesMap[nodeName]),
 				)
@@ -217,7 +223,7 @@ func (l *LowNodeUtilization) Balance(ctx context.Context, nodes []*v1.Node) *fra
 		for nodeName := range nodeGroups[i] {
 			classifiedNodes[nodeName] = true
 
-			klog.InfoS(
+			logger.Info(
 				"Node has been classified",
 				"category", categories[i],
 				"node", klog.KObj(nodesMap[nodeName]),
@@ -243,7 +249,7 @@ func (l *LowNodeUtilization) Balance(ctx context.Context, nodes []*v1.Node) *fra
 	// log nodes that are appropriately utilized.
 	for nodeName := range nodesMap {
 		if !classifiedNodes[nodeName] {
-			klog.InfoS(
+			logger.Info(
 				"Node is appropriately utilized",
 				"node", klog.KObj(nodesMap[nodeName]),
 				"usage", nodesUsageMap[nodeName],
@@ -255,20 +261,20 @@ func (l *LowNodeUtilization) Balance(ctx context.Context, nodes []*v1.Node) *fra
 	lowNodes, highNodes := nodeInfos[0], nodeInfos[1]
 
 	// log messages for nodes with low and high utilization
-	klog.V(1).InfoS("Criteria for a node under utilization", l.underCriteria...)
-	klog.V(1).InfoS("Number of underutilized nodes", "totalNumber", len(lowNodes))
-	klog.V(1).InfoS("Criteria for a node above target utilization", l.overCriteria...)
-	klog.V(1).InfoS("Number of overutilized nodes", "totalNumber", len(highNodes))
+	logger.V(1).Info("Criteria for a node under utilization", "underCriteria", klog.KObjSlice(l.underCriteria))
+	logger.V(1).Info("Number of underutilized nodes", "totalNumber", len(lowNodes))
+	logger.V(1).Info("Criteria for a node above target utilization", "overCriteria", klog.KObjSlice(l.overCriteria))
+	logger.V(1).Info("Number of overutilized nodes", "totalNumber", len(highNodes))
 
 	if len(lowNodes) == 0 {
-		klog.V(1).InfoS(
+		logger.V(1).Info(
 			"No node is underutilized, nothing to do here, you might tune your thresholds further",
 		)
 		return nil
 	}
 
 	if len(lowNodes) <= l.args.NumberOfNodes {
-		klog.V(1).InfoS(
+		logger.V(1).Info(
 			"Number of nodes underutilized is less or equal than NumberOfNodes, nothing to do here",
 			"underutilizedNodes", len(lowNodes),
 			"numberOfNodes", l.args.NumberOfNodes,
@@ -277,12 +283,12 @@ func (l *LowNodeUtilization) Balance(ctx context.Context, nodes []*v1.Node) *fra
 	}
 
 	if len(lowNodes) == len(nodes) {
-		klog.V(1).InfoS("All nodes are underutilized, nothing to do here")
+		logger.V(1).Info("All nodes are underutilized, nothing to do here")
 		return nil
 	}
 
 	if len(highNodes) == 0 {
-		klog.V(1).InfoS("All nodes are under target utilization, nothing to do here")
+		logger.V(1).Info("All nodes are under target utilization, nothing to do here")
 		return nil
 	}
 
@@ -359,6 +365,7 @@ func validatePrometheusMetricsUtilization(args *LowNodeUtilizationArgs) error {
 // usageClientForMetrics returns the correct usage client based on the
 // metrics source. XXX MetricsServer is deprecated, removed once dropped.
 func usageClientForMetrics(
+	logger klog.Logger,
 	args *LowNodeUtilizationArgs, handle frameworktypes.Handle, resources []v1.ResourceName,
 ) (usageClient, error) {
 	metrics := args.MetricsUtilization
@@ -368,6 +375,7 @@
 			return nil, fmt.Errorf("metrics client not initialized")
 		}
 		return newActualUsageClient(
+			logger.WithValues("client", "ActualUsageClient"),
 			resources,
 			handle.GetPodsAssignedToNodeFunc(),
 			handle.MetricsCollector(),
@@ -378,6 +386,7 @@
 			return nil, fmt.Errorf("prometheus client not initialized")
 		}
 		return newPrometheusUsageClient(
+			logger.WithValues("client", "PrometheusUsageClient"),
 			handle.GetPodsAssignedToNodeFunc(),
 			handle.PrometheusClient(),
 			metrics.Prometheus.Query,
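
Since klog.Logger is an alias for logr.Logger, the effect of these WithValues calls can be observed with any logr sink. The short sketch below uses logr's funcr package purely as an illustration of how the accumulated key/value pairs appear; the exact output format is funcr's, not the descheduler's.

package main

import (
	"fmt"

	"github.com/go-logr/logr/funcr"
)

func main() {
	// A funcr-backed logger prints every entry together with the key/value
	// pairs accumulated via WithValues.
	logger := funcr.New(func(prefix, args string) {
		fmt.Println(prefix, args)
	}, funcr.Options{Verbosity: 4})

	pluginLogger := logger.WithValues("plugin", "LowNodeUtilization")
	balanceLogger := pluginLogger.WithValues("ExtensionPoint", "Balance")

	// Prints something like:
	//  "level"=1 "msg"="Number of underutilized nodes" "plugin"="LowNodeUtilization" "ExtensionPoint"="Balance" "totalNumber"=0
	balanceLogger.V(1).Info("Number of underutilized nodes", "totalNumber", 0)
}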
