@@ -105,6 +105,7 @@ type bucketStoreMetrics struct {
	resultSeriesCount    prometheus.Summary
	chunkSizeBytes       prometheus.Histogram
	queriesDropped       prometheus.Counter
+	queriesSeriesDropped prometheus.Counter
	seriesRefetches      prometheus.Counter

	cachedPostingsCompressions *prometheus.CounterVec
@@ -190,6 +191,10 @@ func newBucketStoreMetrics(reg prometheus.Registerer) *bucketStoreMetrics {
		Name: "thanos_bucket_store_queries_dropped_total",
		Help: "Number of queries that were dropped due to the sample limit.",
	})
+	m.queriesSeriesDropped = promauto.With(reg).NewCounter(prometheus.CounterOpts{
+		Name: "thanos_bucket_store_queries_series_dropped_total",
+		Help: "Number of queries that were dropped due to the series limit.",
+	})
	m.seriesRefetches = promauto.With(reg).NewCounter(prometheus.CounterOpts{
		Name: "thanos_bucket_store_series_refetches_total",
		Help: fmt.Sprintf("Total number of cases where %v bytes was not enough was to fetch series from index, resulting in refetch.", maxSeriesSize),
@@ -276,6 +281,8 @@ type BucketStore struct {

	// chunksLimiterFactory creates a new limiter used to limit the number of chunks fetched by each Series() call.
	chunksLimiterFactory ChunksLimiterFactory
+	// seriesLimiterFactory creates a new limiter used to limit the number of touched series by each Series() call.
+	seriesLimiterFactory SeriesLimiterFactory
	partitioner partitioner

	filterConfig *FilterConfig
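The diff uses SeriesLimiter and SeriesLimiterFactory without showing their definitions. A minimal sketch of what they could look like, assuming the new types mirror the existing ChunksLimiter pattern (the exact interface shape is an assumption, not part of this diff):

package store

import "github.com/prometheus/client_golang/prometheus"

// SeriesLimiter is assumed to be a per-request limiter with a single
// Reserve method, like ChunksLimiter.
type SeriesLimiter interface {
	// Reserve claims num series against the limit and errors once it is exceeded.
	Reserve(num uint64) error
}

// SeriesLimiterFactory creates one SeriesLimiter per Series() call, bound
// to the counter that records queries dropped by the limit.
type SeriesLimiterFactory func(failedCounter prometheus.Counter) SeriesLimiter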
@@ -300,6 +307,7 @@ func NewBucketStore(
	queryGate gate.Gate,
	maxChunkPoolBytes uint64,
	chunksLimiterFactory ChunksLimiterFactory,
+	seriesLimiterFactory SeriesLimiterFactory,
	debugLogging bool,
	blockSyncConcurrency int,
	filterConfig *FilterConfig,
@@ -333,6 +341,7 @@ func NewBucketStore(
		filterConfig:                filterConfig,
		queryGate:                   queryGate,
		chunksLimiterFactory:        chunksLimiterFactory,
+		seriesLimiterFactory:        seriesLimiterFactory,
		partitioner:                 gapBasedPartitioner{maxGapSize: partitionerMaxGapSize},
		enableCompatibilityLabel:    enableCompatibilityLabel,
		postingOffsetsInMemSampling: postingOffsetsInMemSampling,
@@ -683,6 +692,7 @@ func blockSeries(
	matchers []*labels.Matcher,
	req *storepb.SeriesRequest,
	chunksLimiter ChunksLimiter,
+	seriesLimiter SeriesLimiter,
) (storepb.SeriesSet, *queryStats, error) {
	ps, err := indexr.ExpandedPostings(matchers)
	if err != nil {
@@ -693,6 +703,11 @@ func blockSeries(
		return storepb.EmptySeriesSet(), indexr.stats, nil
	}

+	// Reserve the number of matched series (expanded postings) against the series limiter.
+	if err := seriesLimiter.Reserve(uint64(len(ps))); err != nil {
+		return nil, nil, errors.Wrap(err, "exceeded series limit")
+	}
+
	// Preload all series index data.
	// TODO(bwplotka): Consider not keeping all series in memory all the time.
	// TODO(bwplotka): Do lazy loading in one step as `ExpandingPostings` method.
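For the Reserve call above to actually fail a query, some implementation must track a running total per request. A sketch of one way to satisfy the interface, assuming an atomic counter and the queriesSeriesDropped metric from the earlier hunk (the Limiter type and NewLimiter constructor here are assumptions for illustration):

package store

import (
	"sync/atomic"

	"github.com/pkg/errors"
	"github.com/prometheus/client_golang/prometheus"
)

// Limiter caps a monotonically growing reservation total at a fixed limit.
type Limiter struct {
	limit    uint64
	reserved uint64 // updated atomically across concurrent Reserve calls

	// failedCounter is incremented the moment the limit is breached.
	failedCounter prometheus.Counter
}

// NewLimiter returns a limiter; limit == 0 disables the check entirely.
func NewLimiter(limit uint64, ctr prometheus.Counter) *Limiter {
	return &Limiter{limit: limit, failedCounter: ctr}
}

// Reserve adds num to the running total and errors once it exceeds the limit.
func (l *Limiter) Reserve(num uint64) error {
	if l.limit == 0 {
		return nil
	}
	if reserved := atomic.AddUint64(&l.reserved, num); reserved > l.limit {
		l.failedCounter.Inc()
		return errors.Errorf("limit %v violated (got %v)", l.limit, reserved)
	}
	return nil
}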
@@ -887,6 +902,7 @@ func (s *BucketStore) Series(req *storepb.SeriesRequest, srv storepb.Store_SeriesServer) (err error) {
		resHints         = &hintspb.SeriesResponseHints{}
		reqBlockMatchers []*labels.Matcher
		chunksLimiter    = s.chunksLimiterFactory(s.metrics.queriesDropped)
+		seriesLimiter    = s.seriesLimiterFactory(s.metrics.queriesSeriesDropped)
	)

	if req.Hints != nil {
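The factory is invoked once per request here, so every Series() call gets a fresh limiter bound to the shared queriesSeriesDropped counter; the cap is scoped to a single query while all breaches still funnel into one metric. A sketch of a factory constructor a caller could hand to NewBucketStore, assuming the Limiter and interface sketches above (NewSeriesLimiterFactory is an assumed name, not shown in this diff):

// NewSeriesLimiterFactory makes a factory that returns one independent
// limiter per request; a zero limit means "no limit".
func NewSeriesLimiterFactory(limit uint64) SeriesLimiterFactory {
	return func(failedCounter prometheus.Counter) SeriesLimiter {
		return NewLimiter(limit, failedCounter)
	}
}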
@@ -942,6 +958,7 @@ func (s *BucketStore) Series(req *storepb.SeriesRequest, srv storepb.Store_SeriesServer) (err error) {
				blockMatchers,
				req,
				chunksLimiter,
+				seriesLimiter,
			)
			if err != nil {
				return errors.Wrapf(err, "fetch series for block %s", b.meta.ULID)