@@ -16,12 +16,12 @@ package statistics

 import (
 	"math/rand"
+	"sort"
 	"testing"
 	"time"

 	. "github.com/pingcap/check"
 	"github.com/pingcap/kvproto/pkg/metapb"
-	"github.com/pingcap/kvproto/pkg/pdpb"
 	"github.com/tikv/pd/server/core"
 )

@@ -31,23 +31,9 @@ type testHotPeerCache struct{}

 func (t *testHotPeerCache) TestStoreTimeUnsync(c *C) {
 	cache := NewHotPeerCache(Write)
-	peers := newPeers(3,
-		func(i int) uint64 { return uint64(10000 + i) },
-		func(i int) uint64 { return uint64(i) })
-	meta := &metapb.Region{
-		Id:          1000,
-		Peers:       peers,
-		StartKey:    []byte(""),
-		EndKey:      []byte(""),
-		RegionEpoch: &metapb.RegionEpoch{ConfVer: 6, Version: 6},
-	}
 	intervals := []uint64{120, 60}
 	for _, interval := range intervals {
-		region := core.NewRegionInfo(meta, peers[0],
-			// interval is [0, interval]
-			core.SetReportInterval(interval),
-			core.SetWrittenBytes(interval*100*1024))
-
+		region := buildRegion(Write, 3, interval)
 		checkAndUpdate(c, cache, region, 3)
 		{
 			stats := cache.RegionStats(0)
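
The new tests construct regions through the buildRegion helper, which is defined elsewhere in this file and does not appear in this diff. A minimal sketch of its likely shape, reconstructed from the inline construction deleted above; the RWType parameter, the Read branch, and the exact load values are assumptions:

func buildRegion(kind RWType, peerCount int, interval uint64) *core.RegionInfo {
	// Same peer and meta construction that the deleted lines performed inline.
	peers := newPeers(peerCount,
		func(i int) uint64 { return uint64(10000 + i) },
		func(i int) uint64 { return uint64(i) })
	meta := &metapb.Region{
		Id:          1000,
		Peers:       peers,
		StartKey:    []byte(""),
		EndKey:      []byte(""),
		RegionEpoch: &metapb.RegionEpoch{ConfVer: 6, Version: 6},
	}
	// The report interval is [0, interval].
	if kind == Read {
		return core.NewRegionInfo(meta, peers[0],
			core.SetReportInterval(interval),
			core.SetReadBytes(interval*100*1024))
	}
	return core.NewRegionInfo(meta, peers[0],
		core.SetReportInterval(interval),
		core.SetWrittenBytes(interval*100*1024))
}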
@@ -191,6 +177,37 @@ func checkOp(c *C, ret []*HotPeerStat, storeID uint64, actionType ActionType) {
 	}
 }

+// checkIntervalSum checks whether the interval sums of the peers differ.
+func checkIntervalSum(cache *hotPeerCache, region *core.RegionInfo) bool {
+	var intervalSums []int
+	for _, peer := range region.GetPeers() {
+		oldItem := cache.getOldHotPeerStat(region.GetID(), peer.StoreId)
+		if oldItem != nil {
+			intervalSums = append(intervalSums, int(oldItem.getIntervalSum()))
+		}
+	}
+	sort.Ints(intervalSums)
+	return intervalSums[0] != intervalSums[len(intervalSums)-1]
+}
+
+// checkIntervalSumContinuous checks whether the interval sum of each peer grows continuously.
+func checkIntervalSumContinuous(c *C, intervalSums map[uint64]int, rets []*HotPeerStat, interval uint64) {
+	for _, ret := range rets {
+		if ret.actionType == Remove {
+			delete(intervalSums, ret.StoreID)
+			continue
+		}
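+		// getIntervalSum returns nanoseconds; compare whole seconds.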
+		new := int(ret.getIntervalSum() / 1000000000)
+		if ret.source == direct {
+			if old, ok := intervalSums[ret.StoreID]; ok {
+				c.Assert(new, Equals, (old+int(interval))%RegionHeartBeatReportInterval)
+			}
+		}
+		intervalSums[ret.StoreID] = new
+	}
+}
+
 func schedule(c *C, operator operator, region *core.RegionInfo, targets ...uint64) (srcStore uint64, _ *core.RegionInfo) {
 	switch operator {
 	case transferLeader:
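
The modulo in the assert encodes how the cache accumulates report intervals: each direct heartbeat adds interval seconds to a peer's rolling sum, and the sum wraps once a full RegionHeartBeatReportInterval (60 seconds in PD) has been collected. For a store reporting every 5 seconds, the expected per-store sequence is 5, 10, ..., 55, 0, 5, and so on; a standalone sketch of that expectation:

package main

import "fmt"

func main() {
	const interval, reportInterval = 5, 60 // RegionHeartBeatReportInterval is 60s
	old := 0
	for i := 0; i < 14; i++ {
		cur := (old + interval) % reportInterval
		fmt.Println(cur) // prints 5, 10, ..., 55, 0, 5, 10
		old = cur
	}
}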
@@ -392,29 +408,28 @@ func (t *testHotPeerCache) testMetrics(c *C, interval, byteRate, expectThreshold
 }

 func (t *testHotPeerCache) TestRemoveFromCache(c *C) {
+	peerCount := 3
+	interval := uint64(5)
 	checkers := []check{checkAndUpdate, checkAndUpdateWithOrdering}
 	for _, checker := range checkers {
 		cache := NewHotPeerCache(Write)
-		region := buildRegion(Write, 3, 5)
+		region := buildRegion(Write, peerCount, interval)
 		// prepare
+		intervalSums := make(map[uint64]int)
 		for i := 1; i <= 200; i++ {
-			checker(c, cache, region)
+			rets := checker(c, cache, region)
+			checkIntervalSumContinuous(c, intervalSums, rets, interval)
 		}
 		// make the interval sums of the peers differ
 		checkAndUpdateSkipOne(c, cache, region)
-		var intervalSums []int
-		for _, peer := range region.GetPeers() {
-			oldItem := cache.getOldHotPeerStat(region.GetID(), peer.StoreId)
-			intervalSums = append(intervalSums, int(oldItem.getIntervalSum()))
-		}
-		c.Assert(intervalSums, HasLen, 3)
-		c.Assert(intervalSums[0], Not(Equals), intervalSums[1])
-		c.Assert(intervalSums[0], Not(Equals), intervalSums[2])
+		c.Assert(checkIntervalSum(cache, region), IsTrue)
 		// check whether cold cache is cleared
 		var isClear bool
+		intervalSums = make(map[uint64]int)
 		region = region.Clone(core.SetWrittenBytes(0), core.SetWrittenKeys(0), core.SetWrittenQuery(0))
 		for i := 1; i <= 200; i++ {
-			checker(c, cache, region)
+			rets := checker(c, cache, region)
+			checkIntervalSumContinuous(c, intervalSums, rets, interval)
 			if len(cache.storesOfRegion[region.GetID()]) == 0 {
 				isClear = true
 				break
@@ -435,28 +450,38 @@ func (t *testHotPeerCache) TestRemoveFromCacheRandom(c *C) {
 		region := buildRegion(Write, peerCount, interval)

 		target := uint64(10)
-		movePeer := func() {
+		intervalSums := make(map[uint64]int)
+		step := func(i int) {
 			tmp := uint64(0)
-			tmp, region = schedule(c, removeReplica, region)
-			_, region = schedule(c, addReplica, region, target)
-			target = tmp
+			if i%5 == 0 {
+				tmp, region = schedule(c, removeReplica, region)
+			}
+			rets := checker(c, cache, region)
+			checkIntervalSumContinuous(c, intervalSums, rets, interval)
+			if i%5 == 0 {
+				_, region = schedule(c, addReplica, region, target)
+				target = tmp
+			}
 		}
+
 		// prepare with random peer movements to make the interval sums of the peers differ
-		for i := 1; i <= 200; i++ {
-			if i%5 == 0 {
-				movePeer()
+		for i := 1; i < 200; i++ {
+			step(i)
+			if i > 150 && checkIntervalSum(cache, region) {
+				break
 			}
-			checker(c, cache, region)
+		}
+		if interval < RegionHeartBeatReportInterval {
+			c.Assert(checkIntervalSum(cache, region), IsTrue)
 		}
 		c.Assert(cache.storesOfRegion[region.GetID()], HasLen, peerCount)
+
 		// check whether cold cache is cleared
 		var isClear bool
+		intervalSums = make(map[uint64]int)
 		region = region.Clone(core.SetWrittenBytes(0), core.SetWrittenKeys(0), core.SetWrittenQuery(0))
-		for i := 1; i <= 200; i++ {
-			if i%5 == 0 {
-				movePeer()
-			}
-			checker(c, cache, region)
+		for i := 1; i < 200; i++ {
+			step(i)
 			if len(cache.storesOfRegion[region.GetID()]) == 0 {
 				isClear = true
 				break
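
The step closure replaces the old movePeer helper and interleaves membership changes with the flow check: every fifth iteration it removes a replica, runs the checker against the shrunken region (exercising the Remove action that checkIntervalSumContinuous handles), then adds a replica back on a rotating target store. The assert on storesOfRegion confirms the cache ends up tracking exactly peerCount stores again, and the preparation loop can now exit early once the interval sums have diverged after 150 iterations.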
@@ -468,6 +493,55 @@ func (t *testHotPeerCache) TestRemoveFromCacheRandom(c *C) {
 	}
 }

+func checkCoolDown(c *C, cache *hotPeerCache, region *core.RegionInfo, expect bool) {
+	item := cache.getOldHotPeerStat(region.GetID(), region.GetLeader().GetStoreId())
+	c.Assert(item.IsNeedCoolDownTransferLeader(3), Equals, expect)
+}
+
+func (t *testHotPeerCache) TestCoolDownTransferLeader(c *C) {
+	cache := NewHotPeerCache(Read)
+	region := buildRegion(Read, 3, 60)
+
+	moveLeader := func() {
+		_, region = schedule(c, movePeer, region, 10)
+		checkAndUpdate(c, cache, region)
+		checkCoolDown(c, cache, region, false)
+		_, region = schedule(c, transferLeader, region, 10)
+		checkAndUpdate(c, cache, region)
+		checkCoolDown(c, cache, region, true)
+	}
+	transferLeader := func() {
+		_, region = schedule(c, transferLeader, region)
+		checkAndUpdate(c, cache, region)
+		checkCoolDown(c, cache, region, true)
+	}
+	movePeer := func() {
+		_, region = schedule(c, movePeer, region, 10)
+		checkAndUpdate(c, cache, region)
+		checkCoolDown(c, cache, region, false)
+	}
+	addReplica := func() {
+		_, region = schedule(c, addReplica, region, 10)
+		checkAndUpdate(c, cache, region)
+		checkCoolDown(c, cache, region, false)
+	}
+	removeReplica := func() {
+		_, region = schedule(c, removeReplica, region, 10)
+		checkAndUpdate(c, cache, region)
+		checkCoolDown(c, cache, region, false)
+	}
+	cases := []func(){moveLeader, transferLeader, movePeer, addReplica, removeReplica}
+	for _, runCase := range cases {
+		cache = NewHotPeerCache(Read)
+		region = buildRegion(Read, 3, 60)
+		for i := 1; i <= 200; i++ {
+			checkAndUpdate(c, cache, region)
+		}
+		checkCoolDown(c, cache, region, false)
+		runCase()
+	}
+}
+
 // See issue #4510
 func (t *testHotPeerCache) TestCacheInherit(c *C) {
 	cache := NewHotPeerCache(Read)
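
Note that the transferLeader, movePeer, addReplica, and removeReplica closures above shadow the operator constants of the same names. This compiles because the scope of a short variable declaration in Go begins only after the declaring statement ends, so inside each closure body the identifier still resolves to the package-level operator constant; only the cases slice, evaluated after all the declarations, refers to the closures.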
@@ -509,22 +583,9 @@ func (t *testHotPeerCache) TestCacheInherit(c *C) {

 func BenchmarkCheckRegionFlow(b *testing.B) {
 	cache := NewHotPeerCache(Read)
-	region := core.NewRegionInfo(&metapb.Region{
-		Id: 1,
-		Peers: []*metapb.Peer{
-			{Id: 101, StoreId: 1},
-			{Id: 102, StoreId: 2},
-			{Id: 103, StoreId: 3},
-		},
-	},
-		&metapb.Peer{Id: 101, StoreId: 1},
-	)
-	newRegion := region.Clone(
-		core.WithInterval(&pdpb.TimeInterval{StartTimestamp: 0, EndTimestamp: 10}),
-		core.SetReadBytes(30000*10),
-		core.SetReadKeys(300000*10))
+	region := buildRegion(Read, 3, 10)
 	peerInfos := make([]*core.PeerInfo, 0)
-	for _, peer := range newRegion.GetPeers() {
+	for _, peer := range region.GetPeers() {
 		peerInfo := core.NewPeerInfo(peer, region.GetLoads(), 10)
 		peerInfos = append(peerInfos, peerInfo)
 	}