
Commit e4b1541

Ensured index cache is best effort, refactored tests, validated edge cases.

Fixes #651. Current size also includes slice header.

Signed-off-by: Bartek Plotka <[email protected]>
1 parent da70cb0 commit e4b1541
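The "Current size also includes slice header" note refers to how cached []byte entries are charged against the cache budget: the payload bytes alone understate the real memory cost of an entry. A minimal sketch of that accounting idea, assuming a 64-bit platform; the cachedItemSize helper below is illustrative, not the Thanos code:

```go
package main

import (
	"fmt"
	"unsafe"
)

// sliceHeaderSize is the in-memory cost of the slice header itself
// (pointer + len + cap): 24 bytes on 64-bit platforms.
const sliceHeaderSize = int(unsafe.Sizeof([]byte(nil)))

// cachedItemSize is a hypothetical helper: when charging an entry against the
// cache budget, count the payload plus the slice header, not just len(b).
func cachedItemSize(b []byte) int {
	return len(b) + sliceHeaderSize
}

func main() {
	b := make([]byte, 1000)
	fmt.Println(cachedItemSize(b)) // 1024 on 64-bit platforms
}
```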

File tree

7 files changed: +624 -316 lines


CHANGELOG.md (+3)

@@ -16,6 +16,9 @@ We use *breaking* word for marking changes that are not backward compatible (rel
 
 - [#1070](https://github.com/improbable-eng/thanos/pull/1070) Downsampling works back again. Deferred closer errors are now properly captured.
 
+### Changed
+
+- [#1073](https://github.com/improbable-eng/thanos/pull/1073) Store: index cache for requests. It now calculates the size properly (includes slice header), has anti-deadlock safeguard and reports more metrics.
 
 ## [v0.4.0-rc.0](https://github.com/improbable-eng/thanos/releases/tag/v0.4.0-rc.0) - 2019.04.18
 
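On the "anti-deadlock safeguard" mentioned in this changelog entry, together with the commit's "best effort" wording: the idea is a size-bounded cache that rejects values it could never hold, instead of evicting endlessly while trying to make room. A rough sketch of that behaviour under those assumptions; simpleCache below is illustrative, not the storecache.IndexCache implementation:

```go
package main

import (
	"container/list"
	"fmt"
)

// simpleCache is an illustrative size-bounded cache: entries larger than
// maxItemSizeBytes are rejected outright, so a single huge item can never
// force the cache into endless eviction.
type simpleCache struct {
	maxSizeBytes     int
	maxItemSizeBytes int
	curSizeBytes     int
	order            *list.List        // front = most recently added key
	items            map[string][]byte // key -> cached bytes
}

func newSimpleCache(maxSize, maxItem int) *simpleCache {
	return &simpleCache{
		maxSizeBytes:     maxSize,
		maxItemSizeBytes: maxItem,
		order:            list.New(),
		items:            map[string][]byte{},
	}
}

// Set stores v on a best-effort basis: oversized values are silently dropped
// and older entries are evicted only until the new value fits.
func (c *simpleCache) Set(k string, v []byte) {
	if len(v) > c.maxItemSizeBytes {
		return // best effort: caching is an optimization, not a requirement
	}
	for c.curSizeBytes+len(v) > c.maxSizeBytes && c.order.Len() > 0 {
		oldest := c.order.Back()
		key := oldest.Value.(string)
		c.curSizeBytes -= len(c.items[key])
		delete(c.items, key)
		c.order.Remove(oldest)
	}
	c.items[k] = v
	c.order.PushFront(k)
	c.curSizeBytes += len(v)
}

func main() {
	c := newSimpleCache(100, 50)    // 100-byte cache, 50-byte per-item cap
	c.Set("huge", make([]byte, 80)) // dropped: would never fit under the cap
	c.Set("ok", make([]byte, 40))   // stored
	fmt.Println(len(c.items))       // 1
}
```

This would also be one reading of the maxItemSizeBytes := indexCacheSizeBytes / 2 default introduced in NewBucketStore below: a single postings or series entry bigger than half the cache is presumably not worth caching at all.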

pkg/store/bucket.go (+18 -10)

@@ -26,6 +26,7 @@ import (
 	"github.com/improbable-eng/thanos/pkg/objstore"
 	"github.com/improbable-eng/thanos/pkg/pool"
 	"github.com/improbable-eng/thanos/pkg/runutil"
+	storecache "github.com/improbable-eng/thanos/pkg/store/cache"
 	"github.com/improbable-eng/thanos/pkg/store/storepb"
 	"github.com/improbable-eng/thanos/pkg/strutil"
 	"github.com/improbable-eng/thanos/pkg/tracing"
@@ -182,7 +183,7 @@ type BucketStore struct {
 	metrics    *bucketStoreMetrics
 	bucket     objstore.BucketReader
 	dir        string
-	indexCache *indexCache
+	indexCache *storecache.IndexCache
 	chunkPool  *pool.BytesPool
 
 	// Sets of blocks that have the same labels. They are indexed by a hash over their label set.
@@ -225,10 +226,17 @@ func NewBucketStore(
 		return nil, errors.Errorf("max concurrency value cannot be lower than 0 (got %v)", maxConcurrent)
 	}
 
-	indexCache, err := newIndexCache(reg, indexCacheSizeBytes)
+	// TODO(bwplotka): Add as a flag?
+	maxItemSizeBytes := indexCacheSizeBytes / 2
+
+	indexCache, err := storecache.NewIndexCache(logger, reg, storecache.Opts{
+		MaxSizeBytes:     indexCacheSizeBytes,
+		MaxItemSizeBytes: maxItemSizeBytes,
+	})
 	if err != nil {
 		return nil, errors.Wrap(err, "create index cache")
 	}
+
 	chunkPool, err := pool.NewBytesPool(2e5, 50e6, 2, maxChunkPoolBytes)
 	if err != nil {
 		return nil, errors.Wrap(err, "create chunk pool")
@@ -1058,7 +1066,7 @@ type bucketBlock struct {
 	bucket     objstore.BucketReader
 	meta       *metadata.Meta
 	dir        string
-	indexCache *indexCache
+	indexCache *storecache.IndexCache
 	chunkPool  *pool.BytesPool
 
 	indexVersion int
@@ -1081,7 +1089,7 @@ func newBucketBlock(
 	bkt objstore.BucketReader,
 	id ulid.ULID,
 	dir string,
-	indexCache *indexCache,
+	indexCache *storecache.IndexCache,
 	chunkPool *pool.BytesPool,
 	p partitioner,
 ) (b *bucketBlock, err error) {
@@ -1241,13 +1249,13 @@ type bucketIndexReader struct {
 	block *bucketBlock
 	dec   *index.Decoder
 	stats *queryStats
-	cache *indexCache
+	cache *storecache.IndexCache
 
 	mtx          sync.Mutex
 	loadedSeries map[uint64][]byte
 }
 
-func newBucketIndexReader(ctx context.Context, logger log.Logger, block *bucketBlock, cache *indexCache) *bucketIndexReader {
+func newBucketIndexReader(ctx context.Context, logger log.Logger, block *bucketBlock, cache *storecache.IndexCache) *bucketIndexReader {
 	r := &bucketIndexReader{
 		logger: logger,
 		ctx:    ctx,
@@ -1415,7 +1423,7 @@ func (r *bucketIndexReader) fetchPostings(groups []*postingGroup) error {
 	for i, g := range groups {
 		for j, key := range g.keys {
 			// Get postings for the given key from cache first.
-			if b, ok := r.cache.postings(r.block.meta.ULID, key); ok {
+			if b, ok := r.cache.Postings(r.block.meta.ULID, key); ok {
 				r.stats.postingsTouched++
 				r.stats.postingsTouchedSizeSum += len(b)
 
@@ -1487,7 +1495,7 @@ func (r *bucketIndexReader) fetchPostings(groups []*postingGroup) error {
 
 		// Return postings and fill LRU cache.
 		groups[p.groupID].Fill(p.keyID, fetchedPostings)
-		r.cache.setPostings(r.block.meta.ULID, groups[p.groupID].keys[p.keyID], c)
+		r.cache.SetPostings(r.block.meta.ULID, groups[p.groupID].keys[p.keyID], c)
 
 		// If we just fetched it we still have to update the stats for touched postings.
 		r.stats.postingsTouched++
@@ -1510,7 +1518,7 @@ func (r *bucketIndexReader) PreloadSeries(ids []uint64) error {
 	var newIDs []uint64
 
 	for _, id := range ids {
-		if b, ok := r.cache.series(r.block.meta.ULID, id); ok {
+		if b, ok := r.cache.Series(r.block.meta.ULID, id); ok {
 			r.loadedSeries[id] = b
 			continue
 		}
@@ -1567,7 +1575,7 @@ func (r *bucketIndexReader) loadSeries(ctx context.Context, ids []uint64, start,
 		}
 		c = c[n : n+int(l)]
 		r.loadedSeries[id] = c
-		r.cache.setSeries(r.block.meta.ULID, id, c)
+		r.cache.SetSeries(r.block.meta.ULID, id, c)
 	}
 	return nil
 }
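The newly exported Postings/SetPostings and Series/SetSeries calls in this file follow a plain cache-aside flow: consult the index cache, fall back to the bucket on a miss, then fill the cache on the way out. A stripped-down sketch of that flow; the byteCache interface and fetchFromBucket callback are stand-ins, not the actual storecache or objstore APIs:

```go
package main

import "fmt"

// byteCache is an illustrative stand-in for the index cache used here.
type byteCache interface {
	Get(key string) ([]byte, bool)
	Set(key string, val []byte)
}

type mapCache map[string][]byte

func (m mapCache) Get(key string) ([]byte, bool) { b, ok := m[key]; return b, ok }
func (m mapCache) Set(key string, val []byte)    { m[key] = val }

// getPostings tries the cache first and only falls back to the (simulated)
// bucket fetch on a miss, filling the cache afterwards on a best-effort basis.
func getPostings(c byteCache, key string, fetchFromBucket func(string) ([]byte, error)) ([]byte, error) {
	if b, ok := c.Get(key); ok {
		return b, nil // cache hit: no object storage round trip
	}
	b, err := fetchFromBucket(key)
	if err != nil {
		return nil, err
	}
	c.Set(key, b) // fill for the next reader; a dropped entry only costs a refetch
	return b, nil
}

func main() {
	c := mapCache{}
	fetch := func(key string) ([]byte, error) { return []byte("postings-for-" + key), nil }

	b, err := getPostings(c, `{job="prometheus"}`, fetch)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))

	_, hit := c.Get(`{job="prometheus"}`)
	fmt.Println("cached:", hit) // true
}
```

Because the fill is best effort (as the commit message puts it), a Set that gets dropped only means the next reader pays for another bucket fetch rather than failing the request.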

pkg/store/bucket_e2e_test.go (+5 -1)

@@ -15,6 +15,7 @@ import (
 	"github.com/improbable-eng/thanos/pkg/objstore"
 	"github.com/improbable-eng/thanos/pkg/objstore/objtesting"
 	"github.com/improbable-eng/thanos/pkg/runutil"
+	storecache "github.com/improbable-eng/thanos/pkg/store/cache"
 	"github.com/improbable-eng/thanos/pkg/store/storepb"
 	"github.com/improbable-eng/thanos/pkg/testutil"
 	"github.com/pkg/errors"
@@ -310,7 +311,10 @@ func testBucketStore_e2e(t testing.TB, ctx context.Context, s *storeSuite) {
 		t.Log("Run ", i)
 
 		// Always clean cache before each test.
-		s.store.indexCache, err = newIndexCache(nil, 100)
+		s.store.indexCache, err = storecache.NewIndexCache(log.NewNopLogger(), nil, storecache.Opts{
+			MaxSizeBytes:     100,
+			MaxItemSizeBytes: 100,
+		})
 		testutil.Ok(t, err)
 
 		srv := newStoreSeriesServer(ctx)

pkg/store/cache.go (-216)

This file was deleted.
