@@ -63,15 +63,15 @@ namespace {
 
   // Futility margin
   Value futility_margin(Depth d, bool improving) {
-    return Value(147 * (d - improving));
+    return Value(168 * (d - improving));
  }
 
   // Reductions lookup table, initialized at startup
   int Reductions[MAX_MOVES]; // [depth or moveNumber]
 
   Depth reduction(bool i, Depth d, int mn, Value delta, Value rootDelta) {
     int r = Reductions[d] * Reductions[mn];
-    return (r + 1627 - int(delta) * 1024 / int(rootDelta)) / 1024 + (!i && r > 992);
+    return (r + 1463 - int(delta) * 1024 / int(rootDelta)) / 1024 + (!i && r > 1010);
  }
 
  constexpr int futility_move_count(bool improving, Depth depth) {
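For scale, the retuned futility margin grows by 168 per ply (147 before), with one ply forgiven when the evaluation is improving. The following standalone sketch is not part of the patch; Value is reduced to a plain int for readability.

#include <cstdio>

int futility_margin(int d, bool improving) {
    return 168 * (d - improving);   // 147 -> 168 in this patch
}

int main() {
    for (int d = 1; d <= 3; ++d)
        std::printf("depth %d: %d (not improving) / %d (improving)\n",
                    d, futility_margin(d, false), futility_margin(d, true));
    // depth 1: 168 / 0, depth 2: 336 / 168, depth 3: 504 / 336
}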
@@ -80,7 +80,7 @@ namespace {
 
   // History and stats update bonus, based on depth
   int stat_bonus(Depth d) {
-    return std::min((8 * d + 281) * d - 241, 1949);
+    return std::min((9 * d + 270) * d - 311, 2145);
  }
 
  // Add a small random component to draw evaluations to avoid 3-fold blindness
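The history bonus curve is retuned as well: it now starts slightly negative at depth 1 and its cap rises from 1949 to 2145, first reached at depth 8 instead of 7. A quick standalone check of the new curve (plain ints instead of the engine's Depth type, not part of the patch):

#include <algorithm>
#include <cstdio>

int stat_bonus(int d) { return std::min((9 * d + 270) * d - 311, 2145); }

int main() {
    for (int d = 1; d <= 8; ++d)
        std::printf("d=%d -> %d\n", d, stat_bonus(d));
    // -32, 265, 580, 913, 1264, 1633, 2020, 2145 (cap reached at d=8)
}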
@@ -157,7 +157,7 @@ namespace {
 void Search::init() {
 
   for (int i = 1; i < MAX_MOVES; ++i)
-      Reductions[i] = int((21.14 + std::log(Threads.size()) / 2) * std::log(i));
+      Reductions[i] = int((20.81 + std::log(Threads.size()) / 2) * std::log(i));
 }
 
 
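The base constant of the logarithmic reduction table drops from 21.14 to 20.81, shrinking every entry slightly. A standalone sketch (not part of the patch) of what the table looks like for a single-threaded search, where the std::log(Threads.size()) term is zero; the sampled indices are arbitrary:

#include <cmath>
#include <cstdio>

int main() {
    const int threads = 1;                 // assumed thread count for this sketch
    const int samples[] = {2, 4, 10, 64};
    for (int i : samples)
        std::printf("Reductions[%d] = %d\n", i,
                    int((20.81 + std::log(threads) / 2) * std::log(i)));
    // 14, 28, 47, 86 (the old 21.14 gave 14, 29, 48, 87)
}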
@@ -303,10 +303,10 @@ void Thread::search() {
 
   multiPV = std::min(multiPV, rootMoves.size());
 
-  complexityAverage.set(211, 1);
+  complexityAverage.set(202, 1);
 
   trend = SCORE_ZERO;
-  optimism[ us] = Value(33);
+  optimism[ us] = Value(39);
   optimism[~us] = -optimism[us];
 
   int searchAgainCounter = 0;
@@ -349,16 +349,16 @@ void Thread::search() {
           if (rootDepth >= 4)
           {
               Value prev = rootMoves[pvIdx].averageScore;
-              delta = Value(19) + int(prev) * prev / 18321;
+              delta = Value(16) + int(prev) * prev / 19178;
               alpha = std::max(prev - delta,-VALUE_INFINITE);
               beta  = std::min(prev + delta, VALUE_INFINITE);
 
               // Adjust trend and optimism based on root move's previousScore
-              int tr = sigmoid(prev, 4, 11, 92, 119, 1);
+              int tr = sigmoid(prev, 3, 8, 90, 125, 1);
               trend = (us == WHITE ?  make_score(tr, tr / 2)
                                    : -make_score(tr, tr / 2));
 
-              int opt = sigmoid(prev, 9, 18, 115, 12250, 187);
+              int opt = sigmoid(prev, 8, 17, 144, 13966, 183);
               optimism[ us] = Value(opt);
               optimism[~us] = -optimism[us];
           }
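The aspiration half-window now starts at 16 instead of 19 and widens quadratically with the previous average score, divided by a slightly larger constant. The sigmoid parameters for trend and optimism move in the same spirit; the sigmoid helper itself is unchanged and not reproduced here. A standalone sketch of the window term (helper name and sample scores are just for illustration, scores in internal units):

#include <cstdio>

int aspiration_delta(int prev) { return 16 + prev * prev / 19178; }

int main() {
    const int scores[] = {0, 200, 400, 800};
    for (int prev : scores)
        std::printf("prev=%d -> delta=%d\n", prev, aspiration_delta(prev));
    // 16, 18, 24, 49
}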
@@ -459,17 +459,17 @@ void Thread::search() {
               && !Threads.stop
               && !mainThread->stopOnPonderhit)
           {
-              double fallingEval = (66 + 12 * (mainThread->bestPreviousAverageScore - bestValue)
-                                       +  6 * (mainThread->iterValue[iterIdx] - bestValue)) / 809.70;
+              double fallingEval = (69 + 12 * (mainThread->bestPreviousAverageScore - bestValue)
+                                       +  6 * (mainThread->iterValue[iterIdx] - bestValue)) / 781.4;
               fallingEval = std::clamp(fallingEval, 0.5, 1.5);
 
               // If the bestMove is stable over several iterations, reduce time accordingly
-              timeReduction = lastBestMoveDepth + 8 < completedDepth ? 1.73 : 0.94;
-              double reduction = (1.66 + mainThread->previousTimeReduction) / (2.35 * timeReduction);
+              timeReduction = lastBestMoveDepth + 10 < completedDepth ? 1.63 : 0.73;
+              double reduction = (1.56 + mainThread->previousTimeReduction) / (2.20 * timeReduction);
               double bestMoveInstability = 1.073 + std::max(1.0, 2.25 - 9.9 / rootDepth)
                                                  * totBestMoveChanges / Threads.size();
               int complexity = mainThread->complexityAverage.value();
-              double complexPosition = std::clamp(1.0 + (complexity - 293) / 1525.0, 0.5, 1.5);
+              double complexPosition = std::clamp(1.0 + (complexity - 326) / 1618.1, 0.5, 1.5);
 
               double totalTime = Time.optimum() * fallingEval * reduction * bestMoveInstability * complexPosition;
 
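All four retuned time-management factors are multiplied into totalTime. The sketch below is not the engine's code path; it plugs made-up, neutral inputs (no score drop since the last iteration, complexity at the new neutral point) into the two retuned terms to show where the constants and clamps land.

#include <algorithm>
#include <cstdio>

int main() {
    // Hypothetical inputs for illustration only.
    int prevAvgDiff = 0, iterDiff = 0, complexity = 326;

    double fallingEval = (69 + 12 * prevAvgDiff + 6 * iterDiff) / 781.4;
    fallingEval = std::clamp(fallingEval, 0.5, 1.5);   // 0.09 -> clamped to 0.5

    double complexPosition = std::clamp(1.0 + (complexity - 326) / 1618.1, 0.5, 1.5);  // 1.0

    std::printf("fallingEval=%.2f complexPosition=%.2f\n", fallingEval, complexPosition);
}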
@@ -490,7 +490,7 @@ void Thread::search() {
               }
               else if (   Threads.increaseDepth
                        && !mainThread->ponder
-                       && Time.elapsed() > totalTime * 0.49)
+                       && Time.elapsed() > totalTime * 0.43)
                        Threads.increaseDepth = false;
               else
                        Threads.increaseDepth = true;
@@ -766,7 +766,7 @@ namespace {
     // margin and the improving flag are used in various pruning heuristics.
     improvement =   (ss-2)->staticEval != VALUE_NONE ? ss->staticEval - (ss-2)->staticEval
                   : (ss-4)->staticEval != VALUE_NONE ? ss->staticEval - (ss-4)->staticEval
-                  :                                    184;
+                  :                                    175;
 
     improving = improvement > 0;
     complexity = abs(ss->staticEval - (us == WHITE ? eg_value(pos.psq_score()) : -eg_value(pos.psq_score())));
@@ -777,8 +777,8 @@ namespace {
     // If eval is really low check with qsearch if it can exceed alpha, if it can't,
     // return a fail low.
     if (   !PvNode
-        && depth <= 6
-        && eval < alpha - 486 - 314 * depth * depth)
+        && depth <= 7
+        && eval < alpha - 348 - 258 * depth * depth)
     {
         value = qsearch<NonPV>(pos, ss, alpha - 1, alpha);
         if (value < alpha)
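Razoring now applies one ply deeper (depth <= 7) with a smaller margin. A standalone sketch of the threshold below alpha at which the verification qsearch is tried (the helper name is only for this sketch):

#include <cstdio>

int razor_margin(int depth) { return 348 + 258 * depth * depth; }   // was 486 + 314 * d * d

int main() {
    for (int d = 1; d <= 7; ++d)
        std::printf("depth %d: eval must be at least %d below alpha\n", d, razor_margin(d));
    // 606, 1380, 2670, 4476, 6798, 9636, 12990
}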
@@ -791,24 +791,24 @@ namespace {
         && depth < 8
         && eval - futility_margin(depth, improving) - (ss-1)->statScore / 256 >= beta
         && eval >= beta
-        && eval < 22266) // larger than VALUE_KNOWN_WIN, but smaller than TB wins.
+        && eval < 26305) // larger than VALUE_KNOWN_WIN, but smaller than TB wins.
         return eval;
 
     // Step 9. Null move search with verification search (~22 Elo)
     if (   !PvNode
         && (ss-1)->currentMove != MOVE_NULL
-        && (ss-1)->statScore < 15075
+        && (ss-1)->statScore < 14695
         && eval >= beta
         && eval >= ss->staticEval
-        && ss->staticEval >= beta - 18 * depth - improvement / 19 + 215 + complexity / 30
+        && ss->staticEval >= beta - 15 * depth - improvement / 15 + 198 + complexity / 28
         && !excludedMove
         &&  pos.non_pawn_material(us)
         && (ss->ply >= thisThread->nmpMinPly || us != thisThread->nmpColor))
     {
         assert(eval - beta >= 0);
 
         // Null move dynamic reduction based on depth, eval and complexity of position
-        Depth R = std::min(int(eval - beta) / 184, 4) + depth / 3 + 4 - (complexity > 799);
+        Depth R = std::min(int(eval - beta) / 147, 5) + depth / 3 + 4 - (complexity > 753);
 
         ss->currentMove = MOVE_NULL;
         ss->continuationHistory = &thisThread->continuationHistory[0][0][NO_PIECE][0];
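The null-move reduction becomes a bit more aggressive: the eval-to-beta divisor drops from 184 to 147, its cap rises from 4 to 5, and the complexity threshold that cancels one ply falls to 753. A standalone sketch with made-up inputs (helper name and values are illustrative only):

#include <algorithm>
#include <cstdio>

int null_move_reduction(int eval, int beta, int depth, int complexity) {
    return std::min((eval - beta) / 147, 5) + depth / 3 + 4 - (complexity > 753);
}

int main() {
    // Hypothetical node: eval 300 above beta, depth 12, moderate complexity.
    std::printf("R = %d\n", null_move_reduction(420, 120, 12, 400));  // 2 + 4 + 4 - 0 = 10
}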
@@ -844,7 +844,7 @@ namespace {
             }
         }
 
-    probCutBeta = beta + 204 - 52 * improving;
+    probCutBeta = beta + 179 - 46 * improving;
 
     // Step 10. ProbCut (~4 Elo)
     // If we have a good enough capture and a reduced search returns a value
@@ -920,7 +920,7 @@ namespace {
 moves_loop: // When in check, search starts here
 
     // Step 12. A small Probcut idea, when we are in check (~0 Elo)
-    probCutBeta = beta + 401;
+    probCutBeta = beta + 481;
     if (   ss->inCheck
         && !PvNode
         && depth >= 2
@@ -1014,14 +1014,14 @@ namespace {
           if (   !pos.empty(to_sq(move))
               && !givesCheck
               && !PvNode
-              && lmrDepth < 7
+              && lmrDepth < 6
               && !ss->inCheck
-              && ss->staticEval + 424 + 138 * lmrDepth + PieceValue[EG][pos.piece_on(to_sq(move))]
-               + captureHistory[movedPiece][to_sq(move)][type_of(pos.piece_on(to_sq(move)))] / 7 < alpha)
+              && ss->staticEval + 281 + 179 * lmrDepth + PieceValue[EG][pos.piece_on(to_sq(move))]
+               + captureHistory[movedPiece][to_sq(move)][type_of(pos.piece_on(to_sq(move)))] / 6 < alpha)
               continue;
 
           // SEE based pruning (~9 Elo)
-          if (!pos.see_ge(move, Value(-214) * depth))
+          if (!pos.see_ge(move, Value(-203) * depth))
               continue;
       }
       else
@@ -1040,11 +1040,11 @@ namespace {
           // Futility pruning: parent node (~9 Elo)
           if (   !ss->inCheck
               && lmrDepth < 11
-              && ss->staticEval + 147 + 125 * lmrDepth + history / 64 <= alpha)
+              && ss->staticEval + 122 + 138 * lmrDepth + history / 60 <= alpha)
               continue;
 
           // Prune moves with negative SEE (~3 Elo)
-          if (!pos.see_ge(move, Value(-23 * lmrDepth * lmrDepth - 31 * lmrDepth)))
+          if (!pos.see_ge(move, Value(-25 * lmrDepth * lmrDepth - 20 * lmrDepth)))
              continue;
          }
      }
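For quiet moves, both the parent-node futility margin and the SEE cut-off are reshaped rather than simply loosened; the SEE threshold trades a larger quadratic term for a smaller linear one. A standalone sketch of the new threshold per lmrDepth (helper name is for this sketch only):

#include <cstdio>

int see_threshold(int lmrDepth) { return -25 * lmrDepth * lmrDepth - 20 * lmrDepth; }

int main() {
    for (int d = 1; d <= 4; ++d)
        std::printf("lmrDepth %d: prune if SEE below %d\n", d, see_threshold(d));
    // -45, -140, -285, -480 (previously -54, -154, -300, -492)
}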
@@ -1059,15 +1059,15 @@ namespace {
       // a reduced search on all the other moves but the ttMove and if the
       // result is lower than ttValue minus a margin, then we will extend the ttMove.
       if (   !rootNode
-          &&  depth >= 6 + 2 * (PvNode && tte->is_pv())
+          &&  depth >= 4 + 2 * (PvNode && tte->is_pv())
           &&  move == ttMove
           && !excludedMove // Avoid recursive singular search
        /* &&  ttValue != VALUE_NONE Already implicit in the next condition */
           &&  abs(ttValue) < VALUE_KNOWN_WIN
           && (tte->bound() & BOUND_LOWER)
           &&  tte->depth() >= depth - 3)
       {
-          Value singularBeta = ttValue - 4 * depth;
+          Value singularBeta = ttValue - 3 * depth;
           Depth singularDepth = (depth - 1) / 2;
 
           ss->excludedMove = move;
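Singular extensions now kick in two plies earlier and use a tighter margin. The sketch below, with a hypothetical ttValue and depth (not taken from the patch), shows the retuned singularBeta next to the old one:

#include <cstdio>

int main() {
    int ttValue = 120, depth = 9;             // hypothetical inputs
    int singularBeta  = ttValue - 3 * depth;  // 93 (was ttValue - 4 * depth = 84)
    int singularDepth = (depth - 1) / 2;      // 4
    std::printf("singularBeta=%d singularDepth=%d\n", singularBeta, singularDepth);
}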
@@ -1080,7 +1080,7 @@ namespace {
 
               // Avoid search explosion by limiting the number of double extensions
               if (   !PvNode
-                  && value < singularBeta - 52
+                  && value < singularBeta - 26
                   && ss->doubleExtensions <= 8)
                   extension = 2;
           }
@@ -1100,15 +1100,15 @@ namespace {
 
       // Check extensions (~1 Elo)
       else if (   givesCheck
-               && depth > 8
-               && abs(ss->staticEval) > 81)
+               && depth > 9
+               && abs(ss->staticEval) > 71)
           extension = 1;
 
       // Quiet ttMove extensions (~0 Elo)
       else if (   PvNode
               &&  move == ttMove
              &&  move == ss->killers[0]
-              && (*contHist[0])[movedPiece][to_sq(move)] >= 7546)
+              && (*contHist[0])[movedPiece][to_sq(move)] >= 5491)
          extension = 1;
   }
 
@@ -1170,18 +1170,18 @@ namespace {
                          + (*contHist[0])[movedPiece][to_sq(move)]
                          + (*contHist[1])[movedPiece][to_sq(move)]
                          + (*contHist[3])[movedPiece][to_sq(move)]
-                         - 4123;
+                         - 4334;
 
           // Decrease/increase reduction for moves with a good/bad history (~30 Elo)
-          r -= ss->statScore / 17417;
+          r -= ss->statScore / 15914;
 
           // In general we want to cap the LMR depth search at newDepth. But if reductions
           // are really negative and movecount is low, we allow this move to be searched
           // deeper than the first move (this may lead to hidden double extensions).
           int deeper =   r >= -1                   ? 0
-                       : moveCount <= 5            ? 2
-                       : PvNode && depth > 3       ? 1
-                       : cutNode && moveCount <= 7 ? 1
+                       : moveCount <= 4            ? 2
+                       : PvNode && depth > 4       ? 1
+                       : cutNode && moveCount <= 8 ? 1
                        :                             0;
 
           Depth d = std::clamp(newDepth - r, 1, newDepth + deeper);
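The "search deeper than the first move" ladder is tightened at the top (moveCount and PV depth thresholds) and loosened for cut-nodes. A standalone sketch of the retuned ladder with a few hypothetical inputs (helper name and values are only for illustration):

#include <cstdio>

int deeper_amount(int r, int moveCount, bool PvNode, bool cutNode, int depth) {
    return r >= -1                   ? 0
         : moveCount <= 4            ? 2
         : PvNode && depth > 4       ? 1
         : cutNode && moveCount <= 8 ? 1
                                     : 0;
}

int main() {
    std::printf("%d\n", deeper_amount(-2, 3, false, false, 10));  // -> 2
    std::printf("%d\n", deeper_amount(-2, 6, true,  false, 10));  // -> 1
    std::printf("%d\n", deeper_amount( 0, 3, true,  true,  10));  // -> 0 (r not negative enough)
}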
@@ -1190,7 +1190,7 @@ namespace {
 
           // If the son is reduced and fails high it will be re-searched at full depth
           doFullDepthSearch = value > alpha && d < newDepth;
-          doDeeperSearch = value > (alpha + 76 + 11 * (newDepth - d));
+          doDeeperSearch = value > (alpha + 78 + 11 * (newDepth - d));
           didLMR = true;
       }
       else
@@ -1342,7 +1342,7 @@ namespace {
         // or fail low was really bad
         bool extraBonus =    PvNode
                           || cutNode
-                          || bestValue < alpha - 71 * depth;
+                          || bestValue < alpha - 70 * depth;
 
         update_continuation_histories(ss-1, pos.piece_on(prevSq), prevSq, stat_bonus(depth) * (1 + extraBonus));
     }
@@ -1473,7 +1473,7 @@ namespace {
         if (PvNode && bestValue > alpha)
             alpha = bestValue;
 
-        futilityBase = bestValue + 139;
+        futilityBase = bestValue + 118;
     }
 
     const PieceToHistory* contHist[] = { (ss-1)->continuationHistory, (ss-2)->continuationHistory,