Wait for alertmanagers to complete state sync before becoming ACTIVE. #4161

Merged
merged 2 commits on May 11, 2021
Changes from 1 commit
9 changes: 9 additions & 0 deletions pkg/alertmanager/alertmanager.go
@@ -273,6 +273,15 @@ func New(cfg *Config, reg *prometheus.Registry) (*Alertmanager, error) {
return am, nil
}

// WaitInitialStateSync waits until the replicated state has completed its initial
// sync from the other replicas. It is a no-op when the state does not implement
// services.Service (i.e. when ring-based replication is not in use).
func (am *Alertmanager) WaitInitialStateSync(ctx context.Context) error {
if service, ok := am.state.(services.Service); ok {
if err := service.AwaitRunning(ctx); err != nil {
return errors.Wrap(err, "failed to wait for ring-based replication service")
}
}
return nil
}

// clusterWait returns a function that inspects the current peer state and returns
// a duration of one base timeout for each peer with a higher ID than ourselves.
func clusterWait(position func() int, timeout time.Duration) func() time.Duration {
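Note on the guard above: the wait is only effective when the per-tenant state (am.state) is backed by the ring-based replication service, which implements services.Service; for any other state implementation the type assertion fails and the method returns immediately. Below is a minimal, self-contained sketch of that pattern, using stand-in types (service, syncedState and newSyncedState are illustrative names, not the Cortex services package):

package main

import (
	"context"
	"fmt"
	"time"
)

// service mirrors the one method of services.Service that WaitInitialStateSync
// relies on: block until the service reaches Running or the context is done.
type service interface {
	AwaitRunning(ctx context.Context) error
}

// syncedState is a stand-in for the ring-based replication state: it becomes
// "running" once its initial sync (simulated here by a delay) has finished.
type syncedState struct{ ready chan struct{} }

func newSyncedState(syncDelay time.Duration) *syncedState {
	s := &syncedState{ready: make(chan struct{})}
	go func() {
		time.Sleep(syncDelay)
		close(s.ready)
	}()
	return s
}

func (s *syncedState) AwaitRunning(ctx context.Context) error {
	select {
	case <-s.ready:
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

func main() {
	// The state field may or may not implement the interface; wait only when it does.
	var state interface{} = newSyncedState(100 * time.Millisecond)

	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	if svc, ok := state.(service); ok {
		if err := svc.AwaitRunning(ctx); err != nil {
			fmt.Println("initial state sync did not complete:", err)
			return
		}
	}
	fmt.Println("initial state sync complete")
}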
25 changes: 25 additions & 0 deletions pkg/alertmanager/multitenant.go
@@ -478,6 +478,14 @@ func (am *MultitenantAlertmanager) starting(ctx context.Context) (err error) {
}

if am.cfg.ShardingEnabled {
// Make sure that all the alertmanagers we were initially configured with have
// fetched state from the replicas, before advertising as ACTIVE and letting
// them shut down. This will reduce the possibility that we lose state when
// scaling up or down.
if err := am.waitInitialStateSync(ctx); err != nil {
return err
}

// With the initial sync now completed, we should have loaded all assigned alertmanager
// configurations to this instance. We can switch it to ACTIVE and start serving requests.
if err := am.ringLifecycler.ChangeState(ctx, ring.ACTIVE); err != nil {
return errors.Wrapf(err, "switch instance to %s in the ring", ring.ACTIVE)
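To make the new ordering concrete: the instance has not yet advertised ACTIVE while starting() runs, and it only switches to ACTIVE once the wait above succeeds; if the sync fails or the context is cancelled, starting() returns early and the state change never happens. A stand-alone sketch of that guarantee, where fakeLifecycler and the plain state strings are stand-ins rather than the Cortex ring package:

package main

import (
	"context"
	"errors"
	"fmt"
)

// fakeLifecycler stands in for the ring lifecycler; it records the state the
// instance last advertised.
type fakeLifecycler struct{ state string }

func (l *fakeLifecycler) ChangeState(_ context.Context, s string) error {
	l.state = s
	return nil
}

// starting mirrors the ordering introduced by this hunk: wait for the initial
// state sync first, and only then switch the instance to ACTIVE in the ring.
func starting(ctx context.Context, l *fakeLifecycler, waitSync func(context.Context) error) error {
	if err := waitSync(ctx); err != nil {
		return err // startup fails; the instance never advertises ACTIVE
	}
	return l.ChangeState(ctx, "ACTIVE")
}

func main() {
	// Sync succeeds: the instance becomes ACTIVE.
	l := &fakeLifecycler{state: "JOINING"}
	_ = starting(context.Background(), l, func(context.Context) error { return nil })
	fmt.Println("after successful sync:", l.state)

	// Sync fails: starting returns an error and the instance stays JOINING.
	l = &fakeLifecycler{state: "JOINING"}
	err := starting(context.Background(), l, func(context.Context) error { return errors.New("sync timed out") })
	fmt.Println("after failed sync:", l.state, "err:", err)
}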
@@ -663,6 +671,23 @@ func (am *MultitenantAlertmanager) loadAndSyncConfigs(ctx context.Context, syncR
return nil
}

// waitInitialStateSync waits until every per-tenant Alertmanager created so far
// has completed its initial state sync.
func (am *MultitenantAlertmanager) waitInitialStateSync(ctx context.Context) error {
am.alertmanagersMtx.Lock()
ams := make([]*Alertmanager, 0, len(am.alertmanagers))
for _, userAM := range am.alertmanagers {
ams = append(ams, userAM)
}
am.alertmanagersMtx.Unlock()

for _, userAM := range ams {
if err := userAM.WaitInitialStateSync(ctx); err != nil {
return err
}
}

return nil
}

// stopping runs when MultitenantAlertmanager transitions to Stopping state.
func (am *MultitenantAlertmanager) stopping(_ error) error {
am.alertmanagersMtx.Lock()
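One design choice in waitInitialStateSync worth noting: the per-tenant map is snapshotted while alertmanagersMtx is held, and the potentially slow waits then happen outside the lock, so concurrent operations that need the mutex are not blocked behind the sync. A small, self-contained illustration of that snapshot-under-lock pattern (waiter, manager and waitAll are stand-in names, not Cortex types):

package main

import (
	"context"
	"fmt"
	"sync"
	"time"
)

// waiter stands in for a per-tenant Alertmanager with a blocking wait method.
type waiter struct{ name string }

func (w *waiter) WaitInitialStateSync(ctx context.Context) error {
	select {
	case <-time.After(50 * time.Millisecond): // simulated state sync
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

type manager struct {
	mtx     sync.Mutex
	waiters map[string]*waiter
}

// waitAll snapshots the map under the lock, then blocks outside it, so other
// goroutines (e.g. periodic config syncs) can still take the mutex meanwhile.
func (m *manager) waitAll(ctx context.Context) error {
	m.mtx.Lock()
	ws := make([]*waiter, 0, len(m.waiters))
	for _, w := range m.waiters {
		ws = append(ws, w)
	}
	m.mtx.Unlock()

	for _, w := range ws {
		if err := w.WaitInitialStateSync(ctx); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	m := &manager{waiters: map[string]*waiter{
		"user-1": {name: "user-1"},
		"user-2": {name: "user-2"},
	}}

	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	if err := m.waitAll(ctx); err != nil {
		fmt.Println("wait failed:", err)
		return
	}
	fmt.Println("all tenant alertmanagers finished their initial state sync")
}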