Skip to content

Commit 345ec03

Browse files
alanprot and pracucci authored
max chunks per query limit shared between ingesters and storage gateways (#4260)
* max chunks per query limit shared between ingesters and storage gateways Signed-off-by: Alan Protasio <[email protected]> * Addressing comments Signed-off-by: Alan Protasio <[email protected]> * Addressing comments -2 Signed-off-by: Alan Protasio <[email protected]> * Addressing comments - 3 Signed-off-by: Alan Protasio <[email protected]> Co-authored-by: Marco Pracucci <[email protected]>
1 parent 64fb282 commit 345ec03

File tree

10 files changed

+102
-48
lines changed

10 files changed

+102
-48
lines changed

CHANGELOG.md

+2-1
Original file line numberDiff line numberDiff line change
@@ -2,6 +2,8 @@
22

33
## master / unreleased
44

5+
* [CHANGE] Querier / ruler: Change `-querier.max-fetched-chunks-per-query` configuration to limit to maximum number of chunks that can be fetched in a single query. The number of chunks fetched by ingesters AND long-term storage combined should not exceed the value configured on `-querier.max-fetched-chunks-per-query`. #4260
6+
57
## 1.10.0-rc.0 / 2021-06-28
68

79
* [CHANGE] Enable strict JSON unmarshal for `pkg/util/validation.Limits` struct. The custom `UnmarshalJSON()` will now fail if the input has unknown fields. #4298
@@ -70,7 +72,6 @@
7072
* [BUGFIX] Store-gateway: when blocks sharding is enabled, do not load all blocks in each store-gateway in case of a cold startup, but load only blocks owned by the store-gateway replica. #4271
7173
* [BUGFIX] Memberlist: fix to setting the default configuration value for `-memberlist.retransmit-factor` when not provided. This should improve propagation delay of the ring state (including, but not limited to, tombstones). Note that if the configuration is already explicitly given, this fix has no effect. #4269
7274
* [BUGFIX] Querier: Fix issue where samples in a chunk might get skipped by batch iterator. #4218
73-
7475
## Blocksconvert
7576

7677
* [ENHANCEMENT] Scanner: add support for DynamoDB (v9 schema only). #3828

docs/configuration/config-file-reference.md

+3-5
Original file line numberDiff line numberDiff line change
@@ -4043,11 +4043,9 @@ The `limits_config` configures default and per-tenant limits imposed by Cortex s
40434043
[max_chunks_per_query: <int> | default = 2000000]
40444044
40454045
# Maximum number of chunks that can be fetched in a single query from ingesters
4046-
# and long-term storage: the total number of actual fetched chunks could be 2x
4047-
# the limit, being independently applied when querying ingesters and long-term
4048-
# storage. This limit is enforced in the ingester (if chunks streaming is
4049-
# enabled), querier, ruler and store-gateway. Takes precedence over the
4050-
# deprecated -store.query-chunk-limit. 0 to disable.
4046+
# and long-term storage. This limit is enforced in the querier, ruler and
4047+
# store-gateway. Takes precedence over the deprecated -store.query-chunk-limit.
4048+
# 0 to disable.
40514049
# CLI flag: -querier.max-fetched-chunks-per-query
40524050
[max_fetched_chunks_per_query: <int> | default = 0]
40534051

pkg/distributor/distributor_test.go

+4-2
Original file line numberDiff line numberDiff line change
@@ -912,6 +912,8 @@ func TestDistributor_QueryStream_ShouldReturnErrorIfMaxChunksPerQueryLimitIsReac
912912
shardByAllLabels: true,
913913
limits: limits,
914914
})
915+
916+
ctx = limiter.AddQueryLimiterToContext(ctx, limiter.NewQueryLimiter(0, 0, maxChunksLimit))
915917
defer stopAll(ds, r)
916918

917919
// Push a number of series below the max chunks limit. Each series has 1 sample,
@@ -957,7 +959,7 @@ func TestDistributor_QueryStream_ShouldReturnErrorIfMaxSeriesPerQueryLimitIsReac
957959
ctx := user.InjectOrgID(context.Background(), "user")
958960
limits := &validation.Limits{}
959961
flagext.DefaultValues(limits)
960-
ctx = limiter.AddQueryLimiterToContext(ctx, limiter.NewQueryLimiter(maxSeriesLimit, 0))
962+
ctx = limiter.AddQueryLimiterToContext(ctx, limiter.NewQueryLimiter(maxSeriesLimit, 0, 0))
961963

962964
// Prepare distributors.
963965
ds, _, r, _ := prepare(t, prepConfig{
@@ -1043,7 +1045,7 @@ func TestDistributor_QueryStream_ShouldReturnErrorIfMaxChunkBytesPerQueryLimitIs
10431045
var maxBytesLimit = (seriesToAdd) * responseChunkSize
10441046

10451047
// Update the limiter with the calculated limits.
1046-
ctx = limiter.AddQueryLimiterToContext(ctx, limiter.NewQueryLimiter(0, maxBytesLimit))
1048+
ctx = limiter.AddQueryLimiterToContext(ctx, limiter.NewQueryLimiter(0, maxBytesLimit, 0))
10471049

10481050
// Push a number of series below the max chunk bytes limit. Subtract one for the series added above.
10491051
writeReq = makeWriteRequest(0, seriesToAdd-1, 0)

pkg/distributor/query.go

+4-23
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,6 @@ package distributor
22

33
import (
44
"context"
5-
"fmt"
65
"io"
76
"sort"
87
"time"
@@ -11,7 +10,6 @@ import (
1110
"github.com/prometheus/common/model"
1211
"github.com/prometheus/prometheus/pkg/labels"
1312
"github.com/weaveworks/common/instrument"
14-
"go.uber.org/atomic"
1513

1614
"github.com/cortexproject/cortex/pkg/cortexpb"
1715
ingester_client "github.com/cortexproject/cortex/pkg/ingester/client"
@@ -24,10 +22,6 @@ import (
2422
"github.com/cortexproject/cortex/pkg/util/validation"
2523
)
2624

27-
var (
28-
errMaxChunksPerQueryLimit = "the query hit the max number of chunks limit while fetching chunks from ingesters for %s (limit: %d)"
29-
)
30-
3125
// Query multiple ingesters and returns a Matrix of samples.
3226
func (d *Distributor) Query(ctx context.Context, from, to model.Time, matchers ...*labels.Matcher) (model.Matrix, error) {
3327
var matrix model.Matrix
@@ -86,11 +80,6 @@ func (d *Distributor) QueryExemplars(ctx context.Context, from, to model.Time, m
8680
func (d *Distributor) QueryStream(ctx context.Context, from, to model.Time, matchers ...*labels.Matcher) (*ingester_client.QueryStreamResponse, error) {
8781
var result *ingester_client.QueryStreamResponse
8882
err := instrument.CollectedRequest(ctx, "Distributor.QueryStream", d.queryDuration, instrument.ErrorCode, func(ctx context.Context) error {
89-
userID, err := tenant.TenantID(ctx)
90-
if err != nil {
91-
return err
92-
}
93-
9483
req, err := ingester_client.ToQueryRequest(from, to, matchers)
9584
if err != nil {
9685
return err
@@ -101,7 +90,7 @@ func (d *Distributor) QueryStream(ctx context.Context, from, to model.Time, matc
10190
return err
10291
}
10392

104-
result, err = d.queryIngesterStream(ctx, userID, replicationSet, req)
93+
result, err = d.queryIngesterStream(ctx, replicationSet, req)
10594
if err != nil {
10695
return err
10796
}
@@ -290,10 +279,8 @@ func (d *Distributor) queryIngestersExemplars(ctx context.Context, replicationSe
290279
}
291280

292281
// queryIngesterStream queries the ingesters using the new streaming API.
293-
func (d *Distributor) queryIngesterStream(ctx context.Context, userID string, replicationSet ring.ReplicationSet, req *ingester_client.QueryRequest) (*ingester_client.QueryStreamResponse, error) {
282+
func (d *Distributor) queryIngesterStream(ctx context.Context, replicationSet ring.ReplicationSet, req *ingester_client.QueryRequest) (*ingester_client.QueryStreamResponse, error) {
294283
var (
295-
chunksLimit = d.limits.MaxChunksPerQueryFromIngesters(userID)
296-
chunksCount = atomic.Int32{}
297284
queryLimiter = limiter.QueryLimiterFromContextWithFallback(ctx)
298285
)
299286

@@ -327,14 +314,8 @@ func (d *Distributor) queryIngesterStream(ctx context.Context, userID string, re
327314
}
328315

329316
// Enforce the max chunks limits.
330-
if chunksLimit > 0 {
331-
if count := int(chunksCount.Add(int32(resp.ChunksCount()))); count > chunksLimit {
332-
// We expect to be always able to convert the label matchers back to Prometheus ones.
333-
// In case we fail (unexpected) the error will not include the matchers, but the core
334-
// logic doesn't break.
335-
matchers, _ := ingester_client.FromLabelMatchers(req.Matchers)
336-
return nil, validation.LimitError(fmt.Sprintf(errMaxChunksPerQueryLimit, util.LabelMatchersToString(matchers), chunksLimit))
337-
}
317+
if chunkLimitErr := queryLimiter.AddChunks(resp.ChunksCount()); chunkLimitErr != nil {
318+
return nil, validation.LimitError(chunkLimitErr.Error())
338319
}
339320

340321
for _, series := range resp.Chunkseries {

pkg/querier/blocks_store_queryable.go

+3
Original file line numberDiff line numberDiff line change
@@ -633,6 +633,9 @@ func (q *blocksStoreQuerier) fetchSeriesFromStores(
633633
if chunkBytesLimitErr := queryLimiter.AddChunkBytes(chunksSize); chunkBytesLimitErr != nil {
634634
return validation.LimitError(chunkBytesLimitErr.Error())
635635
}
636+
if chunkLimitErr := queryLimiter.AddChunks(len(s.Chunks)); chunkLimitErr != nil {
637+
return validation.LimitError(chunkLimitErr.Error())
638+
}
636639
}
637640

638641
if w := resp.GetWarning(); w != "" {

pkg/querier/blocks_store_queryable_test.go

+59-3
Original file line numberDiff line numberDiff line change
@@ -51,7 +51,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) {
5151
metricNameLabel = labels.Label{Name: labels.MetricName, Value: metricName}
5252
series1Label = labels.Label{Name: "series", Value: "1"}
5353
series2Label = labels.Label{Name: "series", Value: "2"}
54-
noOpQueryLimiter = limiter.NewQueryLimiter(0, 0)
54+
noOpQueryLimiter = limiter.NewQueryLimiter(0, 0, 0)
5555
)
5656

5757
type valueResult struct {
@@ -454,6 +454,24 @@ func TestBlocksStoreQuerier_Select(t *testing.T) {
454454
queryLimiter: noOpQueryLimiter,
455455
expectedErr: validation.LimitError(fmt.Sprintf(errMaxChunksPerQueryLimit, fmt.Sprintf("{__name__=%q}", metricName), 1)),
456456
},
457+
"max chunks per query limit hit while fetching chunks at first attempt - global limit": {
458+
finderResult: bucketindex.Blocks{
459+
{ID: block1},
460+
{ID: block2},
461+
},
462+
storeSetResponses: []interface{}{
463+
map[BlocksStoreClient][]ulid.ULID{
464+
&storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{
465+
mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, minT, 1),
466+
mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, minT+1, 2),
467+
mockHintsResponse(block1, block2),
468+
}}: {block1, block2},
469+
},
470+
},
471+
limits: &blocksStoreLimitsMock{},
472+
queryLimiter: limiter.NewQueryLimiter(0, 0, 1),
473+
expectedErr: validation.LimitError(fmt.Sprintf(limiter.ErrMaxChunksPerQueryLimit, 1)),
474+
},
457475
"max chunks per query limit hit while fetching chunks during subsequent attempts": {
458476
finderResult: bucketindex.Blocks{
459477
{ID: block1},
@@ -492,6 +510,44 @@ func TestBlocksStoreQuerier_Select(t *testing.T) {
492510
queryLimiter: noOpQueryLimiter,
493511
expectedErr: validation.LimitError(fmt.Sprintf(errMaxChunksPerQueryLimit, fmt.Sprintf("{__name__=%q}", metricName), 3)),
494512
},
513+
"max chunks per query limit hit while fetching chunks during subsequent attempts - global": {
514+
finderResult: bucketindex.Blocks{
515+
{ID: block1},
516+
{ID: block2},
517+
{ID: block3},
518+
{ID: block4},
519+
},
520+
storeSetResponses: []interface{}{
521+
// First attempt returns a client whose response does not include all expected blocks.
522+
map[BlocksStoreClient][]ulid.ULID{
523+
&storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{
524+
mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, minT, 1),
525+
mockHintsResponse(block1),
526+
}}: {block1, block3},
527+
&storeGatewayClientMock{remoteAddr: "2.2.2.2", mockedSeriesResponses: []*storepb.SeriesResponse{
528+
mockSeriesResponse(labels.Labels{metricNameLabel, series2Label}, minT, 2),
529+
mockHintsResponse(block2),
530+
}}: {block2, block4},
531+
},
532+
// Second attempt returns 1 missing block.
533+
map[BlocksStoreClient][]ulid.ULID{
534+
&storeGatewayClientMock{remoteAddr: "3.3.3.3", mockedSeriesResponses: []*storepb.SeriesResponse{
535+
mockSeriesResponse(labels.Labels{metricNameLabel, series1Label}, minT+1, 2),
536+
mockHintsResponse(block3),
537+
}}: {block3, block4},
538+
},
539+
// Third attempt returns the last missing block.
540+
map[BlocksStoreClient][]ulid.ULID{
541+
&storeGatewayClientMock{remoteAddr: "4.4.4.4", mockedSeriesResponses: []*storepb.SeriesResponse{
542+
mockSeriesResponse(labels.Labels{metricNameLabel, series2Label}, minT+1, 3),
543+
mockHintsResponse(block4),
544+
}}: {block4},
545+
},
546+
},
547+
limits: &blocksStoreLimitsMock{},
548+
queryLimiter: limiter.NewQueryLimiter(0, 0, 3),
549+
expectedErr: validation.LimitError(fmt.Sprintf(limiter.ErrMaxChunksPerQueryLimit, 3)),
550+
},
495551
"max series per query limit hit while fetching chunks": {
496552
finderResult: bucketindex.Blocks{
497553
{ID: block1},
@@ -507,7 +563,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) {
507563
},
508564
},
509565
limits: &blocksStoreLimitsMock{},
510-
queryLimiter: limiter.NewQueryLimiter(1, 0),
566+
queryLimiter: limiter.NewQueryLimiter(1, 0, 0),
511567
expectedErr: validation.LimitError(fmt.Sprintf(limiter.ErrMaxSeriesHit, 1)),
512568
},
513569
"max chunk bytes per query limit hit while fetching chunks": {
@@ -525,7 +581,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) {
525581
},
526582
},
527583
limits: &blocksStoreLimitsMock{maxChunksPerQuery: 1},
528-
queryLimiter: limiter.NewQueryLimiter(0, 8),
584+
queryLimiter: limiter.NewQueryLimiter(0, 8, 0),
529585
expectedErr: validation.LimitError(fmt.Sprintf(limiter.ErrMaxChunkBytesHit, 8)),
530586
},
531587
}

pkg/querier/querier.go

+1-1
Original file line numberDiff line numberDiff line change
@@ -225,7 +225,7 @@ func NewQueryable(distributor QueryableWithFilter, stores []QueryableWithFilter,
225225
return nil, err
226226
}
227227

228-
ctx = limiter.AddQueryLimiterToContext(ctx, limiter.NewQueryLimiter(limits.MaxFetchedSeriesPerQuery(userID), limits.MaxFetchedChunkBytesPerQuery(userID)))
228+
ctx = limiter.AddQueryLimiterToContext(ctx, limiter.NewQueryLimiter(limits.MaxFetchedSeriesPerQuery(userID), limits.MaxFetchedChunkBytesPerQuery(userID), limits.MaxChunksPerQuery(userID)))
229229

230230
mint, maxt, err = validateQueryTimeRange(ctx, userID, mint, maxt, limits, cfg.MaxQueryIntoFuture)
231231
if err == errEmptyTimeRange {

pkg/util/limiter/query_limiter.go

+20-5
Original file line numberDiff line numberDiff line change
@@ -15,30 +15,34 @@ import (
1515
type queryLimiterCtxKey struct{}
1616

1717
var (
18-
ctxKey = &queryLimiterCtxKey{}
19-
ErrMaxSeriesHit = "the query hit the max number of series limit (limit: %d series)"
20-
ErrMaxChunkBytesHit = "the query hit the aggregated chunks size limit (limit: %d bytes)"
18+
ctxKey = &queryLimiterCtxKey{}
19+
ErrMaxSeriesHit = "the query hit the max number of series limit (limit: %d series)"
20+
ErrMaxChunkBytesHit = "the query hit the aggregated chunks size limit (limit: %d bytes)"
21+
ErrMaxChunksPerQueryLimit = "the query hit the max number of chunks limit (limit: %d chunks)"
2122
)
2223

2324
type QueryLimiter struct {
2425
uniqueSeriesMx sync.Mutex
2526
uniqueSeries map[model.Fingerprint]struct{}
2627

2728
chunkBytesCount atomic.Int64
29+
chunkCount atomic.Int64
2830

2931
maxSeriesPerQuery int
3032
maxChunkBytesPerQuery int
33+
maxChunksPerQuery int
3134
}
3235

3336
// NewQueryLimiter makes a new per-query limiter. Each query limiter
3437
// is configured using the `maxSeriesPerQuery` limit.
35-
func NewQueryLimiter(maxSeriesPerQuery, maxChunkBytesPerQuery int) *QueryLimiter {
38+
func NewQueryLimiter(maxSeriesPerQuery, maxChunkBytesPerQuery int, maxChunksPerQuery int) *QueryLimiter {
3639
return &QueryLimiter{
3740
uniqueSeriesMx: sync.Mutex{},
3841
uniqueSeries: map[model.Fingerprint]struct{}{},
3942

4043
maxSeriesPerQuery: maxSeriesPerQuery,
4144
maxChunkBytesPerQuery: maxChunkBytesPerQuery,
45+
maxChunksPerQuery: maxChunksPerQuery,
4246
}
4347
}
4448

@@ -52,7 +56,7 @@ func QueryLimiterFromContextWithFallback(ctx context.Context) *QueryLimiter {
5256
ql, ok := ctx.Value(ctxKey).(*QueryLimiter)
5357
if !ok {
5458
// If there's no limiter return a new unlimited limiter as a fallback
55-
ql = NewQueryLimiter(0, 0)
59+
ql = NewQueryLimiter(0, 0, 0)
5660
}
5761
return ql
5862
}
@@ -93,3 +97,14 @@ func (ql *QueryLimiter) AddChunkBytes(chunkSizeInBytes int) error {
9397
}
9498
return nil
9599
}
100+
101+
func (ql *QueryLimiter) AddChunks(count int) error {
102+
if ql.maxChunksPerQuery == 0 {
103+
return nil
104+
}
105+
106+
if ql.chunkCount.Add(int64(count)) > int64(ql.maxChunksPerQuery) {
107+
return fmt.Errorf(fmt.Sprintf(ErrMaxChunksPerQueryLimit, ql.maxChunksPerQuery))
108+
}
109+
return nil
110+
}

pkg/util/limiter/query_limiter_test.go

+4-4
Original file line numberDiff line numberDiff line change
@@ -25,7 +25,7 @@ func TestQueryLimiter_AddSeries_ShouldReturnNoErrorOnLimitNotExceeded(t *testing
2525
labels.MetricName: metricName + "_2",
2626
"series2": "1",
2727
})
28-
limiter = NewQueryLimiter(100, 0)
28+
limiter = NewQueryLimiter(100, 0, 0)
2929
)
3030
err := limiter.AddSeries(cortexpb.FromLabelsToLabelAdapters(series1))
3131
assert.NoError(t, err)
@@ -53,7 +53,7 @@ func TestQueryLimiter_AddSeriers_ShouldReturnErrorOnLimitExceeded(t *testing.T)
5353
labels.MetricName: metricName + "_2",
5454
"series2": "1",
5555
})
56-
limiter = NewQueryLimiter(1, 0)
56+
limiter = NewQueryLimiter(1, 0, 0)
5757
)
5858
err := limiter.AddSeries(cortexpb.FromLabelsToLabelAdapters(series1))
5959
require.NoError(t, err)
@@ -62,7 +62,7 @@ func TestQueryLimiter_AddSeriers_ShouldReturnErrorOnLimitExceeded(t *testing.T)
6262
}
6363

6464
func TestQueryLimiter_AddChunkBytes(t *testing.T) {
65-
var limiter = NewQueryLimiter(0, 100)
65+
var limiter = NewQueryLimiter(0, 100, 0)
6666

6767
err := limiter.AddChunkBytes(100)
6868
require.NoError(t, err)
@@ -84,7 +84,7 @@ func BenchmarkQueryLimiter_AddSeries(b *testing.B) {
8484
}
8585
b.ResetTimer()
8686

87-
limiter := NewQueryLimiter(b.N+1, 0)
87+
limiter := NewQueryLimiter(b.N+1, 0, 0)
8888
for _, s := range series {
8989
err := limiter.AddSeries(cortexpb.FromLabelsToLabelAdapters(s))
9090
assert.NoError(b, err)

pkg/util/validation/limits.go

+2-4
Original file line numberDiff line numberDiff line change
@@ -151,7 +151,7 @@ func (l *Limits) RegisterFlags(f *flag.FlagSet) {
151151
f.IntVar(&l.MaxGlobalMetricsWithMetadataPerUser, "ingester.max-global-metadata-per-user", 0, "The maximum number of active metrics with metadata per user, across the cluster. 0 to disable. Supported only if -distributor.shard-by-all-labels is true.")
152152
f.IntVar(&l.MaxGlobalMetadataPerMetric, "ingester.max-global-metadata-per-metric", 0, "The maximum number of metadata per metric, across the cluster. 0 to disable.")
153153
f.IntVar(&l.MaxChunksPerQueryFromStore, "store.query-chunk-limit", 2e6, "Deprecated. Use -querier.max-fetched-chunks-per-query CLI flag and its respective YAML config option instead. Maximum number of chunks that can be fetched in a single query. This limit is enforced when fetching chunks from the long-term storage only. When running the Cortex chunks storage, this limit is enforced in the querier and ruler, while when running the Cortex blocks storage this limit is enforced in the querier, ruler and store-gateway. 0 to disable.")
154-
f.IntVar(&l.MaxChunksPerQuery, "querier.max-fetched-chunks-per-query", 0, "Maximum number of chunks that can be fetched in a single query from ingesters and long-term storage: the total number of actual fetched chunks could be 2x the limit, being independently applied when querying ingesters and long-term storage. This limit is enforced in the ingester (if chunks streaming is enabled), querier, ruler and store-gateway. Takes precedence over the deprecated -store.query-chunk-limit. 0 to disable.")
154+
f.IntVar(&l.MaxChunksPerQuery, "querier.max-fetched-chunks-per-query", 0, "Maximum number of chunks that can be fetched in a single query from ingesters and long-term storage. This limit is enforced in the querier, ruler and store-gateway. Takes precedence over the deprecated -store.query-chunk-limit. 0 to disable.")
155155
f.IntVar(&l.MaxFetchedSeriesPerQuery, "querier.max-fetched-series-per-query", 0, "The maximum number of unique series for which a query can fetch samples from each ingesters and blocks storage. This limit is enforced in the querier only when running Cortex with blocks storage. 0 to disable")
156156
f.IntVar(&l.MaxFetchedChunkBytesPerQuery, "querier.max-fetched-chunk-bytes-per-query", 0, "The maximum size of all chunks in bytes that a query can fetch from each ingester and storage. This limit is enforced in the querier and ruler only when running Cortex with blocks storage. 0 to disable.")
157157
f.Var(&l.MaxQueryLength, "store.max-query-length", "Limit the query time range (end - start time). This limit is enforced in the query-frontend (on the received query), in the querier (on the query possibly split by the query-frontend) and in the chunks storage. 0 to disable.")
@@ -398,9 +398,7 @@ func (o *Overrides) MaxChunksPerQueryFromStore(userID string) int {
398398
return o.getOverridesForUser(userID).MaxChunksPerQueryFromStore
399399
}
400400

401-
// MaxChunksPerQueryFromIngesters returns the maximum number of chunks allowed per query when fetching
402-
// chunks from ingesters.
403-
func (o *Overrides) MaxChunksPerQueryFromIngesters(userID string) int {
401+
func (o *Overrides) MaxChunksPerQuery(userID string) int {
404402
return o.getOverridesForUser(userID).MaxChunksPerQuery
405403
}
406404

0 commit comments

Comments
 (0)