Skip to content

Commit 2c72b1a

Browse files
authored
Merge pull request #299 from derailed/popeye/v0.21.2
Popeye v0.21.2
2 parents aa92a01 + 14ae249 commit 2c72b1a

File tree

11 files changed

+59
-11
lines changed

11 files changed

+59
-11
lines changed

Makefile

+1-1
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
NAME := popeye
22
PACKAGE := github.com/derailed/$(NAME)
3-
VERSION := v0.21.1
3+
VERSION := v0.21.2
44
GIT := $(shell git rev-parse --short HEAD)
55
DATE := $(shell date +%FT%T%Z)
66
IMG_NAME := derailed/popeye

README.md

+1-1
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
<img src="https://github.com/derailed/popeye/raw/master/assets/popeye_logo.png" align="right" width="250" height="auto">
22

3-
# Popeye - A Kubernetes Live Cluster Linter
3+
# Popeye: Kubernetes Live Cluster Linter
44

55
Popeye is a utility that scans live Kubernetes clusters and reports potential issues with deployed resources and configurations.
66
As the Kubernetes landscape grows, it is becoming a challenge for a human to track the slew of manifests and policies that orchestrate a cluster.

change_logs/release_v0.21.2.md

+25
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,25 @@
1+
<img src="https://raw.githubusercontent.com/derailed/popeye/master/assets/popeye_logo.png" align="right" width="200" height="auto"/>
2+
3+
# Release v0.21.2
4+
5+
## Notes
6+
7+
Thank you to all who contributed by flushing out issues and enhancements for Popeye! I'll try to mark some of these issues as fixed. But if you don't mind, grab the latest rev and see if we're happier with some of the fixes! If you've filed an issue, please help me verify and close. Your support, kindness, and awesome suggestions to make Popeye better are, as ever, very much noticed and appreciated!
8+
9+
This project offers a GitHub Sponsor button (over here 👆). As you well know, this is not pimped out by big corps with deep pockets. If you feel `Popeye` is saving you cycles diagnosing potential cluster issues, please consider sponsoring this project!! It goes a long way toward keeping our servers' lights on and beers in our fridge.
10+
11+
Also if you dig this tool, please make some noise on social! [@kitesurfer](https://twitter.com/kitesurfer)
12+
13+
---
14+
15+
## Maintenance Release
16+
17+
---
18+
19+
## Resolved Issues
20+
21+
. [#298](https://github.com/derailed/popeye/issues/298) Popeye showing errors for Complete cronjobs
22+
23+
---
24+
25+
<img src="https://raw.githubusercontent.com/derailed/popeye/master/assets/imhotep_logo.png" width="32" height="auto"/>&nbsp; © 2024 Imhotep Software LLC. All materials licensed under [Apache v2.0](http://www.apache.org/licenses/LICENSE-2.0)

internal/dao/ev.go

+5-2
Original file line numberDiff line numberDiff line change
@@ -11,6 +11,7 @@ import (
1111
"github.com/derailed/popeye/internal"
1212
"github.com/derailed/popeye/internal/client"
1313
"github.com/derailed/popeye/types"
14+
"github.com/rs/zerolog/log"
1415
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
1516
)
1617

@@ -26,7 +27,8 @@ type EventInfo struct {
2627
}
2728

2829
func (e EventInfo) IsIssue() bool {
29-
return e.Kind == WarnEvt || !strings.Contains(e.Reason, "Success")
30+
return e.Kind == WarnEvt ||
31+
(e.Reason != "Success" && e.Reason != "SawCompletedJob")
3032
}
3133

3234
type EventInfos []EventInfo
@@ -73,7 +75,8 @@ func EventsFor(ctx context.Context, gvr types.GVR, level, kind, fqn string) (Eve
7375
return nil, err
7476
}
7577
if len(oo) == 0 {
76-
return nil, fmt.Errorf("No events found %s", fqn)
78+
log.Debug().Msgf("No events found %s: %s", gvr, fqn)
79+
return nil, nil
7780
}
7881

7982
tt := oo[0].(*metav1.Table)

internal/issues/assets/codes.yaml

+4
Original file line numberDiff line numberDiff line change
@@ -126,6 +126,10 @@ codes:
126126
407:
127127
message: "%s references %s %q which does not exist"
128128
severity: 3
129+
666:
130+
message: "Lint internal error: %s"
131+
severity: 3
132+
129133

130134
# Pod controllers
131135
500:

internal/issues/codes_test.go

+1-1
Original file line numberDiff line numberDiff line change
@@ -15,7 +15,7 @@ func TestCodesLoad(t *testing.T) {
1515
cc, err := issues.LoadCodes()
1616

1717
assert.Nil(t, err)
18-
assert.Equal(t, 114, len(cc.Glossary))
18+
assert.Equal(t, 115, len(cc.Glossary))
1919
assert.Equal(t, "No liveness probe", cc.Glossary[103].Message)
2020
assert.Equal(t, rules.WarnLevel, cc.Glossary[103].Severity)
2121
}

internal/issues/collector.go

+15-1
Original file line numberDiff line numberDiff line change
@@ -13,6 +13,8 @@ import (
1313
"github.com/rs/zerolog/log"
1414
)
1515

16+
const errCode = 666
17+
1618
// Collector tracks linter issues and codes.
1719
type Collector struct {
1820
*config.Config
@@ -95,8 +97,20 @@ func (c *Collector) AddCode(ctx context.Context, code rules.ID, args ...interfac
9597
// AddErr adds a collection of errors.
9698
func (c *Collector) AddErr(ctx context.Context, errs ...error) {
9799
run := internal.MustExtractRunInfo(ctx)
100+
if c.codes == nil {
101+
for _, e := range errs {
102+
c.addIssue(run.Spec.FQN, New(run.SectionGVR, Root, rules.ErrorLevel, e.Error()))
103+
}
104+
return
105+
}
106+
107+
co, ok := c.codes.Glossary[errCode]
108+
if !ok {
109+
// BOZO!! refact once codes are in!!
110+
panic(fmt.Errorf("no codes found with id %d", errCode))
111+
}
98112
for _, e := range errs {
99-
c.addIssue(run.Spec.FQN, New(run.SectionGVR, Root, rules.ErrorLevel, e.Error()))
113+
c.addIssue(run.Spec.FQN, New(run.SectionGVR, Root, rules.ErrorLevel, co.Format(errCode, e.Error())))
100114
}
101115
}
102116

internal/lint/cronjob.go

+4-2
Original file line numberDiff line numberDiff line change
@@ -91,7 +91,7 @@ func (s *CronJob) checkUtilization(ctx context.Context, over bool, fqn string) {
9191
s.AddErr(ctx, err)
9292
return
9393
}
94-
mx := jobResourceUsage(ctx, s.db, s, jj)
94+
mx := jobResourceUsage(s.db, jj)
9595
if mx.RequestCPU.IsZero() && mx.RequestMEM.IsZero() {
9696
return
9797
}
@@ -105,13 +105,15 @@ func checkEvents(ctx context.Context, ii *issues.Collector, r internal.R, kind,
105105
ee, err := dao.EventsFor(ctx, internal.Glossary[r], kind, object, fqn)
106106
if err != nil {
107107
ii.AddErr(ctx, err)
108+
return
108109
}
110+
109111
for _, e := range ee.Issues() {
110112
ii.AddErr(ctx, errors.New(e))
111113
}
112114
}
113115

114-
func jobResourceUsage(ctx context.Context, dba *db.DB, c Collector, jobs []*batchv1.Job) ConsumptionMetrics {
116+
func jobResourceUsage(dba *db.DB, jobs []*batchv1.Job) ConsumptionMetrics {
115117
var mx ConsumptionMetrics
116118

117119
if len(jobs) == 0 {

internal/lint/dp_test.go

+1-1
Original file line numberDiff line numberDiff line change
@@ -53,6 +53,6 @@ func TestDPLint(t *testing.T) {
5353
assert.Equal(t, 2, len(ii))
5454
assert.Equal(t, `[POP-500] Zero scale detected`, ii[0].Message)
5555
assert.Equal(t, rules.WarnLevel, ii[0].Level)
56-
assert.Equal(t, `no pod selector given`, ii[1].Message)
56+
assert.Equal(t, `[POP-666] Lint internal error: no pod selector given`, ii[1].Message)
5757
assert.Equal(t, rules.ErrorLevel, ii[1].Level)
5858
}

internal/lint/ing_test.go

+1-1
Original file line numberDiff line numberDiff line change
@@ -52,7 +52,7 @@ func TestIngLint(t *testing.T) {
5252
assert.Equal(t, 2, len(ii))
5353
assert.Equal(t, `[POP-1400] Ingress LoadBalancer port reported an error: boom`, ii[0].Message)
5454
assert.Equal(t, rules.ErrorLevel, ii[0].Level)
55-
assert.Equal(t, `Ingress local obj refs not supported`, ii[1].Message)
55+
assert.Equal(t, `[POP-666] Lint internal error: Ingress local obj refs not supported`, ii[1].Message)
5656
assert.Equal(t, rules.ErrorLevel, ii[1].Level)
5757

5858
ii = ing.Outcome()["default/ing6"]

internal/lint/job.go

+1-1
Original file line numberDiff line numberDiff line change
@@ -82,7 +82,7 @@ func (s *Job) checkUtilization(ctx context.Context, over bool, fqn string) {
8282
s.AddErr(ctx, err)
8383
return
8484
}
85-
mx := jobResourceUsage(ctx, s.db, s, jj)
85+
mx := jobResourceUsage(s.db, jj)
8686
if mx.RequestCPU.IsZero() && mx.RequestMEM.IsZero() {
8787
return
8888
}

0 commit comments

Comments
 (0)