Commit 5290137

First cut at metrics for alertmanager sharding operation. (#4149)
* First cut at metrics for alertmanager sharding operation.

  This commit adds a number of metrics to help track the operation of alertmanager sharding, specifically around the handling of state:

  - `cortex_alertmanager_state_fetch_replica_state_total`
  - `cortex_alertmanager_state_fetch_replica_state_failed_total`
  - `cortex_alertmanager_state_initial_sync_total`
  - `cortex_alertmanager_state_initial_sync_completed_total`
  - `cortex_alertmanager_state_initial_sync_duration_seconds`
  - `cortex_alertmanager_state_persist_total`
  - `cortex_alertmanager_state_persist_failed_total`

  Note that this complements the already existing metrics:

  - `cortex_alertmanager_partial_state_merges_total`
  - `cortex_alertmanager_partial_state_merges_failed_total`
  - `cortex_alertmanager_state_replication_total`
  - `cortex_alertmanager_state_replication_failed_total`

  Overly detailed timing metrics have not been included; instead there is a single (non per-user) histogram for the duration of the initial state sync. Timings for storage reads and writes are not included, as they are already provided by the bucket client.

  Signed-off-by: Steve Simpson <[email protected]>

* Update Changelog.

  Signed-off-by: Steve Simpson <[email protected]>

* Review comments.

  Signed-off-by: Steve Simpson <[email protected]>

* Review comments.

  Signed-off-by: Steve Simpson <[email protected]>

* Update Changelog.

  Signed-off-by: Steve Simpson <[email protected]>

* Review comments.

  Signed-off-by: Steve Simpson <[email protected]>
1 parent ba93ac0 commit 5290137

8 files changed (+220 -11 lines)
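
To make the shape of the new metrics concrete, here is a minimal, self-contained Go sketch of the same instrumentation pattern (a tries/failures counter pair plus a single duration histogram). All names in it (exampleSyncMetrics, example_state_sync_*) are hypothetical and are not part of this commit:

    package main

    import (
    	"errors"
    	"math/rand"
    	"time"

    	"github.com/prometheus/client_golang/prometheus"
    	"github.com/prometheus/client_golang/prometheus/promauto"
    )

    // exampleSyncMetrics bundles a "tried it" / "failed it" counter pair with a
    // duration histogram, mirroring the shape of the metrics added here.
    type exampleSyncMetrics struct {
    	total    prometheus.Counter
    	failed   prometheus.Counter
    	duration prometheus.Histogram
    }

    func newExampleSyncMetrics(r prometheus.Registerer) *exampleSyncMetrics {
    	return &exampleSyncMetrics{
    		total: promauto.With(r).NewCounter(prometheus.CounterOpts{
    			Name: "example_state_sync_total",
    			Help: "Number of times we have tried to sync state (hypothetical).",
    		}),
    		failed: promauto.With(r).NewCounter(prometheus.CounterOpts{
    			Name: "example_state_sync_failed_total",
    			Help: "Number of times syncing state has failed (hypothetical).",
    		}),
    		duration: promauto.With(r).NewHistogram(prometheus.HistogramOpts{
    			Name:    "example_state_sync_duration_seconds",
    			Help:    "Time spent syncing state (hypothetical).",
    			Buckets: prometheus.ExponentialBuckets(0.008, 4, 7),
    		}),
    	}
    }

    // sync runs one synthetic attempt and records the counters and the timing.
    func (m *exampleSyncMetrics) sync() error {
    	m.total.Inc()
    	timer := prometheus.NewTimer(m.duration)
    	defer timer.ObserveDuration()

    	time.Sleep(10 * time.Millisecond) // stand-in for real work
    	if rand.Intn(2) == 0 {
    		m.failed.Inc()
    		return errors.New("sync failed")
    	}
    	return nil
    }

    func main() {
    	m := newExampleSyncMetrics(prometheus.NewRegistry())
    	_ = m.sync()
    }

With this shape, a failure ratio can be derived at query time from the two counters, which is why the commit does not add separate ratio or per-step timing metrics.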

CHANGELOG.md (+8)

@@ -6,6 +6,14 @@
 * [CHANGE] Alertmanager: allowed to configure the experimental receivers firewall on a per-tenant basis. The following CLI flags (and their respective YAML config options) have been changed and moved to the limits config section: #4143
   - `-alertmanager.receivers-firewall.block.cidr-networks` renamed to `-alertmanager.receivers-firewall-block-cidr-networks`
   - `-alertmanager.receivers-firewall.block.private-addresses` renamed to `-alertmanager.receivers-firewall-block-private-addresses`
+* [ENHANCEMENT] Alertmanager: introduced new metrics to monitor operation when using `-alertmanager.sharding-enabled`: #4149
+  * `cortex_alertmanager_state_fetch_replica_state_total`
+  * `cortex_alertmanager_state_fetch_replica_state_failed_total`
+  * `cortex_alertmanager_state_initial_sync_total`
+  * `cortex_alertmanager_state_initial_sync_completed_total`
+  * `cortex_alertmanager_state_initial_sync_duration_seconds`
+  * `cortex_alertmanager_state_persist_total`
+  * `cortex_alertmanager_state_persist_failed_total`

 ## 1.9.0 in progress

pkg/alertmanager/alertmanager.go (+1 -1)

@@ -171,7 +171,7 @@ func New(cfg *Config, reg *prometheus.Registry) (*Alertmanager, error) {
         level.Debug(am.logger).Log("msg", "starting tenant alertmanager with ring-based replication")
         state := newReplicatedStates(cfg.UserID, cfg.ReplicationFactor, cfg.Replicator, cfg.Store, am.logger, am.registry)
         am.state = state
-        am.persister = newStatePersister(cfg.PersisterConfig, cfg.UserID, state, cfg.Store, am.logger)
+        am.persister = newStatePersister(cfg.PersisterConfig, cfg.UserID, state, cfg.Store, am.logger, am.registry)
     } else {
         level.Debug(am.logger).Log("msg", "starting tenant alertmanager without replication")
         am.state = &NilPeer{}

pkg/alertmanager/alertmanager_metrics.go (+53 -4)

@@ -47,10 +47,17 @@ type alertmanagerMetrics struct {
     // The alertmanager config hash.
     configHashValue *prometheus.Desc

-    partialMerges       *prometheus.Desc
-    partialMergesFailed *prometheus.Desc
-    replicationTotal    *prometheus.Desc
-    replicationFailed   *prometheus.Desc
+    partialMerges           *prometheus.Desc
+    partialMergesFailed     *prometheus.Desc
+    replicationTotal        *prometheus.Desc
+    replicationFailed       *prometheus.Desc
+    fetchReplicaStateTotal  *prometheus.Desc
+    fetchReplicaStateFailed *prometheus.Desc
+    initialSyncTotal        *prometheus.Desc
+    initialSyncCompleted    *prometheus.Desc
+    initialSyncDuration     *prometheus.Desc
+    persistTotal            *prometheus.Desc
+    persistFailed           *prometheus.Desc
 }

 func newAlertmanagerMetrics() *alertmanagerMetrics {
@@ -168,6 +175,34 @@ func newAlertmanagerMetrics() *alertmanagerMetrics {
             "cortex_alertmanager_state_replication_failed_total",
             "Number of times we have failed to replicate a state to other alertmanagers",
             []string{"user"}, nil),
+        fetchReplicaStateTotal: prometheus.NewDesc(
+            "cortex_alertmanager_state_fetch_replica_state_total",
+            "Number of times we have tried to read and merge the full state from another replica.",
+            nil, nil),
+        fetchReplicaStateFailed: prometheus.NewDesc(
+            "cortex_alertmanager_state_fetch_replica_state_failed_total",
+            "Number of times we have failed to read and merge the full state from another replica.",
+            nil, nil),
+        initialSyncTotal: prometheus.NewDesc(
+            "cortex_alertmanager_state_initial_sync_total",
+            "Number of times we have tried to sync initial state from peers or storage.",
+            nil, nil),
+        initialSyncCompleted: prometheus.NewDesc(
+            "cortex_alertmanager_state_initial_sync_completed_total",
+            "Number of times we have completed syncing initial state for each possible outcome.",
+            []string{"outcome"}, nil),
+        initialSyncDuration: prometheus.NewDesc(
+            "cortex_alertmanager_state_initial_sync_duration_seconds",
+            "Time spent syncing initial state from peers or storage.",
+            nil, nil),
+        persistTotal: prometheus.NewDesc(
+            "cortex_alertmanager_state_persist_total",
+            "Number of times we have tried to persist the running state to storage.",
+            nil, nil),
+        persistFailed: prometheus.NewDesc(
+            "cortex_alertmanager_state_persist_failed_total",
+            "Number of times we have failed to persist the running state to storage.",
+            nil, nil),
     }
 }

@@ -210,6 +245,13 @@ func (m *alertmanagerMetrics) Describe(out chan<- *prometheus.Desc) {
     out <- m.partialMergesFailed
     out <- m.replicationTotal
     out <- m.replicationFailed
+    out <- m.fetchReplicaStateTotal
+    out <- m.fetchReplicaStateFailed
+    out <- m.initialSyncTotal
+    out <- m.initialSyncCompleted
+    out <- m.initialSyncDuration
+    out <- m.persistTotal
+    out <- m.persistFailed
 }

 func (m *alertmanagerMetrics) Collect(out chan<- prometheus.Metric) {
@@ -248,4 +290,11 @@ func (m *alertmanagerMetrics) Collect(out chan<- prometheus.Metric) {
     data.SendSumOfCountersPerUser(out, m.partialMergesFailed, "alertmanager_partial_state_merges_failed_total")
     data.SendSumOfCountersPerUser(out, m.replicationTotal, "alertmanager_state_replication_total")
     data.SendSumOfCountersPerUser(out, m.replicationFailed, "alertmanager_state_replication_failed_total")
+    data.SendSumOfCounters(out, m.fetchReplicaStateTotal, "alertmanager_state_fetch_replica_state_total")
+    data.SendSumOfCounters(out, m.fetchReplicaStateFailed, "alertmanager_state_fetch_replica_state_failed_total")
+    data.SendSumOfCounters(out, m.initialSyncTotal, "alertmanager_state_initial_sync_total")
+    data.SendSumOfCountersWithLabels(out, m.initialSyncCompleted, "alertmanager_state_initial_sync_completed_total", "outcome")
+    data.SendSumOfHistograms(out, m.initialSyncDuration, "alertmanager_state_initial_sync_duration_seconds")
+    data.SendSumOfCounters(out, m.persistTotal, "alertmanager_state_persist_total")
+    data.SendSumOfCounters(out, m.persistFailed, "alertmanager_state_persist_failed_total")
 }

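The Collect method above folds the per-tenant registries into single exported series; the new state metrics carry no `user` label, so their samples are simply summed. The sketch below illustrates that aggregation idea with a hypothetical helper (sumCounterAcrossRegistries) built only on the standard client_golang Gather API, rather than Cortex's own `data.SendSumOf...` helpers:

    package main

    import (
    	"fmt"

    	"github.com/prometheus/client_golang/prometheus"
    	"github.com/prometheus/client_golang/prometheus/promauto"
    )

    // sumCounterAcrossRegistries gathers each per-tenant registry and sums the
    // samples of one counter by name, mimicking the effect of the
    // SendSumOfCounters calls above for the new un-labelled state metrics.
    func sumCounterAcrossRegistries(regs map[string]*prometheus.Registry, name string) (float64, error) {
    	var total float64
    	for _, reg := range regs {
    		mfs, err := reg.Gather()
    		if err != nil {
    			return 0, err
    		}
    		for _, mf := range mfs {
    			if mf.GetName() != name {
    				continue
    			}
    			for _, m := range mf.GetMetric() {
    				total += m.GetCounter().GetValue()
    			}
    		}
    	}
    	return total, nil
    }

    func main() {
    	regs := map[string]*prometheus.Registry{
    		"user-1": prometheus.NewRegistry(),
    		"user-2": prometheus.NewRegistry(),
    	}
    	for _, reg := range regs {
    		c := promauto.With(reg).NewCounter(prometheus.CounterOpts{
    			Name: "alertmanager_state_persist_total",
    			Help: "Number of times we have tried to persist the running state to storage.",
    		})
    		c.Inc()
    	}
    	total, _ := sumCounterAcrossRegistries(regs, "alertmanager_state_persist_total")
    	fmt.Println(total) // 2
    }
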
pkg/alertmanager/alertmanager_metrics_test.go (+61)

@@ -254,6 +254,26 @@ func TestAlertmanagerMetricsStore(t *testing.T) {
         # HELP cortex_alertmanager_silences_snapshot_size_bytes Size of the last silence snapshot in bytes.
         # TYPE cortex_alertmanager_silences_snapshot_size_bytes gauge
         cortex_alertmanager_silences_snapshot_size_bytes 111
+        # HELP cortex_alertmanager_state_fetch_replica_state_failed_total Number of times we have failed to read and merge the full state from another replica.
+        # TYPE cortex_alertmanager_state_fetch_replica_state_failed_total counter
+        cortex_alertmanager_state_fetch_replica_state_failed_total 0
+        # HELP cortex_alertmanager_state_fetch_replica_state_total Number of times we have tried to read and merge the full state from another replica.
+        # TYPE cortex_alertmanager_state_fetch_replica_state_total counter
+        cortex_alertmanager_state_fetch_replica_state_total 0
+        # HELP cortex_alertmanager_state_initial_sync_duration_seconds Time spent syncing initial state from peers or storage.
+        # TYPE cortex_alertmanager_state_initial_sync_duration_seconds histogram
+        cortex_alertmanager_state_initial_sync_duration_seconds_bucket{le="+Inf"} 0
+        cortex_alertmanager_state_initial_sync_duration_seconds_sum 0
+        cortex_alertmanager_state_initial_sync_duration_seconds_count 0
+        # HELP cortex_alertmanager_state_initial_sync_total Number of times we have tried to sync initial state from peers or storage.
+        # TYPE cortex_alertmanager_state_initial_sync_total counter
+        cortex_alertmanager_state_initial_sync_total 0
+        # HELP cortex_alertmanager_state_persist_failed_total Number of times we have failed to persist the running state to storage.
+        # TYPE cortex_alertmanager_state_persist_failed_total counter
+        cortex_alertmanager_state_persist_failed_total 0
+        # HELP cortex_alertmanager_state_persist_total Number of times we have tried to persist the running state to storage.
+        # TYPE cortex_alertmanager_state_persist_total counter
+        cortex_alertmanager_state_persist_total 0
     `))
     require.NoError(t, err)
 }
@@ -517,6 +537,26 @@ func TestAlertmanagerMetricsRemoval(t *testing.T) {
         # HELP cortex_alertmanager_silences_snapshot_size_bytes Size of the last silence snapshot in bytes.
         # TYPE cortex_alertmanager_silences_snapshot_size_bytes gauge
         cortex_alertmanager_silences_snapshot_size_bytes 111
+        # HELP cortex_alertmanager_state_fetch_replica_state_failed_total Number of times we have failed to read and merge the full state from another replica.
+        # TYPE cortex_alertmanager_state_fetch_replica_state_failed_total counter
+        cortex_alertmanager_state_fetch_replica_state_failed_total 0
+        # HELP cortex_alertmanager_state_fetch_replica_state_total Number of times we have tried to read and merge the full state from another replica.
+        # TYPE cortex_alertmanager_state_fetch_replica_state_total counter
+        cortex_alertmanager_state_fetch_replica_state_total 0
+        # HELP cortex_alertmanager_state_initial_sync_duration_seconds Time spent syncing initial state from peers or storage.
+        # TYPE cortex_alertmanager_state_initial_sync_duration_seconds histogram
+        cortex_alertmanager_state_initial_sync_duration_seconds_bucket{le="+Inf"} 0
+        cortex_alertmanager_state_initial_sync_duration_seconds_sum 0
+        cortex_alertmanager_state_initial_sync_duration_seconds_count 0
+        # HELP cortex_alertmanager_state_initial_sync_total Number of times we have tried to sync initial state from peers or storage.
+        # TYPE cortex_alertmanager_state_initial_sync_total counter
+        cortex_alertmanager_state_initial_sync_total 0
+        # HELP cortex_alertmanager_state_persist_failed_total Number of times we have failed to persist the running state to storage.
+        # TYPE cortex_alertmanager_state_persist_failed_total counter
+        cortex_alertmanager_state_persist_failed_total 0
+        # HELP cortex_alertmanager_state_persist_total Number of times we have tried to persist the running state to storage.
+        # TYPE cortex_alertmanager_state_persist_total counter
+        cortex_alertmanager_state_persist_total 0
     `))
     require.NoError(t, err)

@@ -727,6 +767,27 @@ func TestAlertmanagerMetricsRemoval(t *testing.T) {
         # HELP cortex_alertmanager_silences_snapshot_size_bytes Size of the last silence snapshot in bytes.
         # TYPE cortex_alertmanager_silences_snapshot_size_bytes gauge
         cortex_alertmanager_silences_snapshot_size_bytes 11
+
+        # HELP cortex_alertmanager_state_fetch_replica_state_failed_total Number of times we have failed to read and merge the full state from another replica.
+        # TYPE cortex_alertmanager_state_fetch_replica_state_failed_total counter
+        cortex_alertmanager_state_fetch_replica_state_failed_total 0
+        # HELP cortex_alertmanager_state_fetch_replica_state_total Number of times we have tried to read and merge the full state from another replica.
+        # TYPE cortex_alertmanager_state_fetch_replica_state_total counter
+        cortex_alertmanager_state_fetch_replica_state_total 0
+        # HELP cortex_alertmanager_state_initial_sync_duration_seconds Time spent syncing initial state from peers or storage.
+        # TYPE cortex_alertmanager_state_initial_sync_duration_seconds histogram
+        cortex_alertmanager_state_initial_sync_duration_seconds_bucket{le="+Inf"} 0
+        cortex_alertmanager_state_initial_sync_duration_seconds_sum 0
+        cortex_alertmanager_state_initial_sync_duration_seconds_count 0
+        # HELP cortex_alertmanager_state_initial_sync_total Number of times we have tried to sync initial state from peers or storage.
+        # TYPE cortex_alertmanager_state_initial_sync_total counter
+        cortex_alertmanager_state_initial_sync_total 0
+        # HELP cortex_alertmanager_state_persist_failed_total Number of times we have failed to persist the running state to storage.
+        # TYPE cortex_alertmanager_state_persist_failed_total counter
+        cortex_alertmanager_state_persist_failed_total 0
+        # HELP cortex_alertmanager_state_persist_total Number of times we have tried to persist the running state to storage.
+        # TYPE cortex_alertmanager_state_persist_total counter
+        cortex_alertmanager_state_persist_total 0
     `))
     require.NoError(t, err)
 }

pkg/alertmanager/state_persister.go (+25 -4)

@@ -9,6 +9,8 @@ import (
     "github.com/go-kit/kit/log/level"
     "github.com/pkg/errors"
     "github.com/prometheus/alertmanager/cluster/clusterpb"
+    "github.com/prometheus/client_golang/prometheus"
+    "github.com/prometheus/client_golang/prometheus/promauto"

     "github.com/cortexproject/cortex/pkg/alertmanager/alertspb"
     "github.com/cortexproject/cortex/pkg/alertmanager/alertstore"
@@ -53,17 +55,28 @@ type statePersister struct {
     logger log.Logger

     timeout time.Duration
+
+    persistTotal  prometheus.Counter
+    persistFailed prometheus.Counter
 }

 // newStatePersister creates a new state persister.
-func newStatePersister(cfg PersisterConfig, userID string, state PersistableState, store alertstore.AlertStore, l log.Logger) *statePersister {
+func newStatePersister(cfg PersisterConfig, userID string, state PersistableState, store alertstore.AlertStore, l log.Logger, r prometheus.Registerer) *statePersister {

     s := &statePersister{
         state:   state,
         store:   store,
         userID:  userID,
         logger:  l,
         timeout: defaultPersistTimeout,
+        persistTotal: promauto.With(r).NewCounter(prometheus.CounterOpts{
+            Name: "alertmanager_state_persist_total",
+            Help: "Number of times we have tried to persist the running state to remote storage.",
+        }),
+        persistFailed: promauto.With(r).NewCounter(prometheus.CounterOpts{
+            Name: "alertmanager_state_persist_failed_total",
+            Help: "Number of times we have failed to persist the running state to remote storage.",
+        }),
     }

     s.Service = services.NewTimerService(cfg.Interval, s.starting, s.iteration, nil)
@@ -84,15 +97,23 @@ func (s *statePersister) iteration(ctx context.Context) error {
     return nil
 }

-func (s *statePersister) persist(ctx context.Context) error {
+func (s *statePersister) persist(ctx context.Context) (err error) {
     // Only the replica at position zero should write the state.
     if s.state.Position() != 0 {
         return nil
     }

+    s.persistTotal.Inc()
+    defer func() {
+        if err != nil {
+            s.persistFailed.Inc()
+        }
+    }()
+
     level.Debug(s.logger).Log("msg", "persisting state", "user", s.userID)

-    fs, err := s.state.GetFullState()
+    var fs *clusterpb.FullState
+    fs, err = s.state.GetFullState()
     if err != nil {
         return err
     }
@@ -101,7 +122,7 @@ func (s *statePersister) persist(ctx context.Context) error {
     defer cancel()

     desc := alertspb.FullStateDesc{State: fs}
-    if err := s.store.SetFullState(ctx, s.userID, desc); err != nil {
+    if err = s.store.SetFullState(ctx, s.userID, desc); err != nil {
         return err
     }

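The persist() change above relies on a named error return so that a single deferred closure can count every failure path. A small, self-contained sketch of that pattern, with hypothetical names rather than the Cortex code:

    package main

    import (
    	"errors"
    	"fmt"

    	"github.com/prometheus/client_golang/prometheus"
    	"github.com/prometheus/client_golang/prometheus/promauto"
    )

    // trackedPersist is a hypothetical stand-in for statePersister.persist: the
    // named return value `err` lets one deferred closure observe whatever error
    // is eventually returned, so individual return statements need no extra
    // bookkeeping.
    func trackedPersist(total, failed prometheus.Counter, write func() error) (err error) {
    	total.Inc()
    	defer func() {
    		if err != nil {
    			failed.Inc()
    		}
    	}()
    	return write()
    }

    func main() {
    	reg := prometheus.NewRegistry()
    	total := promauto.With(reg).NewCounter(prometheus.CounterOpts{Name: "example_persist_total", Help: "Attempts (hypothetical)."})
    	failed := promauto.With(reg).NewCounter(prometheus.CounterOpts{Name: "example_persist_failed_total", Help: "Failures (hypothetical)."})

    	_ = trackedPersist(total, failed, func() error { return nil })
    	err := trackedPersist(total, failed, func() error { return errors.New("storage unavailable") })
    	fmt.Println(err) // storage unavailable; the failed counter is now 1
    }
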
pkg/alertmanager/state_persister_test.go (+1 -1)

@@ -87,7 +87,7 @@ func makeTestStatePersister(t *testing.T, position int, userID string) (*fakePer
     store := &fakeStore{}
     cfg := PersisterConfig{Interval: 1 * time.Second}

-    s := newStatePersister(cfg, userID, state, store, log.NewNopLogger())
+    s := newStatePersister(cfg, userID, state, store, log.NewNopLogger(), nil)

     require.NoError(t, s.StartAsync(context.Background()))
     t.Cleanup(func() {

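The test passes nil for the new prometheus.Registerer argument; this works because promauto.With accepts a nil Registerer and then skips registration while still returning usable metrics. A small sketch of that behaviour (client_golang only, nothing Cortex-specific):

    package main

    import (
    	"fmt"

    	"github.com/prometheus/client_golang/prometheus"
    	"github.com/prometheus/client_golang/prometheus/promauto"
    )

    func main() {
    	// With a nil Registerer the counter is created but never registered,
    	// which is convenient for unit tests that do not inspect metrics.
    	c := promauto.With(nil).NewCounter(prometheus.CounterOpts{
    		Name: "example_unregistered_total",
    		Help: "A counter created without a registry (hypothetical).",
    	})
    	c.Inc()
    	fmt.Println("counter created and incremented without registration")
    }
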
pkg/alertmanager/state_replication.go (+46)

@@ -23,6 +23,12 @@ import (
 const (
     defaultSettleReadTimeout = 15 * time.Second
     defaultStoreReadTimeout  = 15 * time.Second
+
+    // Initial sync outcome label values.
+    syncFromReplica  = "from-replica"
+    syncFromStorage  = "from-storage"
+    syncUserNotFound = "user-not-found"
+    syncFailed       = "failed"
 )

 // state represents the Alertmanager silences and notification log internal state.
@@ -47,6 +53,11 @@ type state struct {
     partialStateMergesFailed *prometheus.CounterVec
     stateReplicationTotal    *prometheus.CounterVec
     stateReplicationFailed   *prometheus.CounterVec
+    fetchReplicaStateTotal   prometheus.Counter
+    fetchReplicaStateFailed  prometheus.Counter
+    initialSyncTotal         prometheus.Counter
+    initialSyncCompleted     *prometheus.CounterVec
+    initialSyncDuration      prometheus.Histogram

     msgc chan *clusterpb.Part
 }
@@ -81,7 +92,32 @@ func newReplicatedStates(userID string, rf int, re Replicator, st alertstore.Ale
             Name: "alertmanager_state_replication_failed_total",
             Help: "Number of times we have failed to replicate a state to other alertmanagers.",
         }, []string{"key"}),
+        fetchReplicaStateTotal: promauto.With(r).NewCounter(prometheus.CounterOpts{
+            Name: "alertmanager_state_fetch_replica_state_total",
+            Help: "Number of times we have tried to read and merge the full state from another replica.",
+        }),
+        fetchReplicaStateFailed: promauto.With(r).NewCounter(prometheus.CounterOpts{
+            Name: "alertmanager_state_fetch_replica_state_failed_total",
+            Help: "Number of times we have failed to read and merge the full state from another replica.",
+        }),
+        initialSyncTotal: promauto.With(r).NewCounter(prometheus.CounterOpts{
+            Name: "alertmanager_state_initial_sync_total",
+            Help: "Number of times we have tried to sync initial state from peers or remote storage.",
+        }),
+        initialSyncCompleted: promauto.With(r).NewCounterVec(prometheus.CounterOpts{
+            Name: "alertmanager_state_initial_sync_completed_total",
+            Help: "Number of times we have completed syncing initial state for each possible outcome.",
+        }, []string{"outcome"}),
+        initialSyncDuration: promauto.With(r).NewHistogram(prometheus.HistogramOpts{
+            Name:    "alertmanager_state_initial_sync_duration_seconds",
+            Help:    "Time spent syncing initial state from peers or remote storage.",
+            Buckets: prometheus.ExponentialBuckets(0.008, 4, 7),
+        }),
     }
+    s.initialSyncCompleted.WithLabelValues(syncFromReplica)
+    s.initialSyncCompleted.WithLabelValues(syncFromStorage)
+    s.initialSyncCompleted.WithLabelValues(syncUserNotFound)
+    s.initialSyncCompleted.WithLabelValues(syncFailed)

     s.Service = services.NewBasicService(s.starting, s.running, nil)

@@ -154,6 +190,10 @@ func (s *state) GetFullState() (*clusterpb.FullState, error) {
 // starting waits until the alertmanagers are ready (and sets the appropriate internal state when it is).
 // The idea is that we don't want to start working before we get a chance to know most of the notifications and/or silences.
 func (s *state) starting(ctx context.Context) error {
+    s.initialSyncTotal.Inc()
+    timer := prometheus.NewTimer(s.initialSyncDuration)
+    defer timer.ObserveDuration()
+
     level.Info(s.logger).Log("msg", "Waiting for notification and silences to settle...")

     // If the replication factor is <= 1, there is nowhere to obtain the state from.
@@ -166,13 +206,16 @@ func (s *state) starting(ctx context.Context) error {
     readCtx, cancel := context.WithTimeout(ctx, s.settleReadTimeout)
     defer cancel()

+    s.fetchReplicaStateTotal.Inc()
     fullStates, err := s.replicator.ReadFullStateForUser(readCtx, s.userID)
     if err == nil {
         if err = s.mergeFullStates(fullStates); err == nil {
             level.Info(s.logger).Log("msg", "state settled; proceeding")
+            s.initialSyncCompleted.WithLabelValues(syncFromReplica).Inc()
             return nil
         }
     }
+    s.fetchReplicaStateFailed.Inc()

     level.Info(s.logger).Log("msg", "state not settled; trying to read from storage", "err", err)

@@ -183,16 +226,19 @@ func (s *state) starting(ctx context.Context) error {
     fullState, err := s.store.GetFullState(storeReadCtx, s.userID)
     if errors.Is(err, alertspb.ErrNotFound) {
         level.Info(s.logger).Log("msg", "no state for user in storage; proceeding", "user", s.userID)
+        s.initialSyncCompleted.WithLabelValues(syncUserNotFound).Inc()
         return nil
     }
     if err == nil {
         if err = s.mergeFullStates([]*clusterpb.FullState{fullState.State}); err == nil {
             level.Info(s.logger).Log("msg", "state read from storage; proceeding")
+            s.initialSyncCompleted.WithLabelValues(syncFromStorage).Inc()
             return nil
         }
     }

     level.Warn(s.logger).Log("msg", "failed to read state from storage; continuing anyway", "err", err)
+    s.initialSyncCompleted.WithLabelValues(syncFailed).Inc()

     return nil
 }

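Two details of the state_replication.go change are worth noting: the `outcome` label values are pre-created with WithLabelValues so all four series are exported at zero from startup, and the duration histogram uses ExponentialBuckets(0.008, 4, 7). The standalone sketch below (hypothetical metric name, not the Cortex code) shows both; the printed slice is the resulting bucket layout:

    package main

    import (
    	"fmt"

    	"github.com/prometheus/client_golang/prometheus"
    	"github.com/prometheus/client_golang/prometheus/promauto"
    )

    func main() {
    	reg := prometheus.NewRegistry()

    	// Pre-creating each label value exports the series at 0 immediately,
    	// which makes "never happened yet" visible and keeps rate() queries sane.
    	outcomes := promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
    		Name: "example_initial_sync_completed_total",
    		Help: "Completed initial syncs per outcome (hypothetical).",
    	}, []string{"outcome"})
    	for _, o := range []string{"from-replica", "from-storage", "user-not-found", "failed"} {
    		outcomes.WithLabelValues(o)
    	}

    	// Bucket layout used for the initial sync histogram: 7 buckets,
    	// starting at 8ms and growing by 4x each step.
    	fmt.Println(prometheus.ExponentialBuckets(0.008, 4, 7))
    	// [0.008 0.032 0.128 0.512 2.048 8.192 32.768]
    }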