Skip to content

Commit c2ed27e

Browse files
authored
Using subring on getShardedRules when shuffle sharding (#4466)
* Using subring on getShardedRules when shuffle sharding. Signed-off-by: Alan Protasio <[email protected]>
1 parent 523dde1 commit c2ed27e

File tree

3 files changed

+18
-8
lines changed

3 files changed

+18
-8
lines changed

CHANGELOG.md

+1
Original file line numberDiff line numberDiff line change
@@ -39,6 +39,7 @@
3939
* [ENHANCEMENT] Updated Prometheus to include changes from prometheus/prometheus#9083. Now whenever `/labels` API calls include matchers, blocks store is queried for `LabelNames` with matchers instead of `Series` calls which was inefficient. #4380
4040
* [ENHANCEMENT] Exemplars are now emitted for all gRPC calls and many operations tracked by histograms. #4462
4141
* [ENHANCEMENT] New options `-server.http-listen-network` and `-server.grpc-listen-network` allow binding as 'tcp4' or 'tcp6'. #4462
42+
* [ENHANCEMENT] Rulers: Using shuffle sharding subring on GetRules API. #4466
4243
* [BUGFIX] Fixes a panic in the query-tee when comparing result. #4465
4344
* [BUGFIX] Frontend: Fixes @ modifier functions (start/end) when splitting queries by time. #4464
4445
* [BUGFIX] Compactor: compactor will no longer try to compact blocks that are already marked for deletion. Previously compactor would consider blocks marked for deletion within `-compactor.deletion-delay / 2` period as eligible for compaction. #4328

pkg/ruler/ruler.go

+9-3
Original file line numberDiff line numberDiff line change
@@ -651,7 +651,7 @@ func (r *Ruler) GetRules(ctx context.Context) ([]*GroupStateDesc, error) {
651651
}
652652

653653
if r.cfg.EnableSharding {
654-
return r.getShardedRules(ctx)
654+
return r.getShardedRules(ctx, userID)
655655
}
656656

657657
return r.getLocalRules(userID)
@@ -744,8 +744,14 @@ func (r *Ruler) getLocalRules(userID string) ([]*GroupStateDesc, error) {
744744
return groupDescs, nil
745745
}
746746

747-
func (r *Ruler) getShardedRules(ctx context.Context) ([]*GroupStateDesc, error) {
748-
rulers, err := r.ring.GetReplicationSetForOperation(RingOp)
747+
func (r *Ruler) getShardedRules(ctx context.Context, userID string) ([]*GroupStateDesc, error) {
748+
ring := ring.ReadRing(r.ring)
749+
750+
if shardSize := r.limits.RulerTenantShardSize(userID); shardSize > 0 && r.cfg.ShardingStrategy == util.ShardingStrategyShuffle {
751+
ring = r.ring.ShuffleShard(userID, shardSize)
752+
}
753+
754+
rulers, err := ring.GetReplicationSetForOperation(RingOp)
749755
if err != nil {
750756
return nil, err
751757
}

pkg/ruler/ruler_test.go

+8-5
Original file line numberDiff line numberDiff line change
@@ -427,11 +427,14 @@ func TestGetRules(t *testing.T) {
427427
require.NoError(t, err)
428428
require.Equal(t, len(allRulesByUser[u]), len(rules))
429429
if tc.sharding {
430-
mockPoolLClient := r.clientsPool.(*mockRulerClientsPool)
431-
432-
// Right now we are calling all rules even with shuffle sharding
433-
require.Equal(t, int32(len(rulerAddrMap)), mockPoolLClient.numberOfCalls.Load())
434-
mockPoolLClient.numberOfCalls.Store(0)
430+
mockPoolClient := r.clientsPool.(*mockRulerClientsPool)
431+
432+
if tc.shardingStrategy == util.ShardingStrategyShuffle {
433+
require.Equal(t, int32(tc.shuffleShardSize), mockPoolClient.numberOfCalls.Load())
434+
} else {
435+
require.Equal(t, int32(len(rulerAddrMap)), mockPoolClient.numberOfCalls.Load())
436+
}
437+
mockPoolClient.numberOfCalls.Store(0)
435438
}
436439
})
437440
}

0 commit comments

Comments (0)