// server_selection.prose.operation_count.test.ts
import { expect } from 'chai';
import { setTimeout } from 'timers';
import { promisify } from 'util';
import { CommandStartedEvent } from '../../../src';
import { Collection } from '../../../src/collection';
import { MongoClient } from '../../../src/mongo_client';
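// Failpoint that makes the server block every 'find' command for 500ms on connections
// created with appName 'loadBalancingTest', simulating an overloaded mongos.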
const failPoint = {
configureFailPoint: 'failCommand',
mode: { times: 10000 },
data: {
failCommands: ['find'],
blockConnection: true,
blockTimeMS: 500,
appName: 'loadBalancingTest'
}
};
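// Used for both minPoolSize and maxPoolSize so that each mongos pool saturates at exactly
// this many connections.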
const POOL_SIZE = 100;
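// Runs `count` sequential findOne operations; each call stands in for one of the prose
// test's concurrent threads / tasks.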
async function runTaskGroup(collection: Collection, count: 10 | 100 | 1000) {
for (let i = 0; i < count; ++i) {
await collection.findOne({});
}
}
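// Counts connectionCreated events until both mongoses' pools are full (POOL_SIZE connections
// each), throwing if the pools have not filled after roughly 2.5 seconds.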
async function ensurePoolIsFull(client: MongoClient) {
let connectionCount = 0;
const onConnectionCreated = () => connectionCount++;
client.on('connectionCreated', onConnectionCreated);
// 250ms should be plenty of time to fill the connection pool,
// but just in case we'll loop a couple of times.
for (let i = 0; connectionCount < POOL_SIZE * 2 && i < 10; ++i) {
await promisify(setTimeout)(250);
}
client.removeListener('connectionCreated', onConnectionCreated);
if (connectionCount !== POOL_SIZE * 2) {
throw new Error('Connection pool did not fill up');
}
}
// Step 1: Configure a sharded cluster with two mongoses. Use a 4.2.9 or newer server version.
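// Note: MongoDBMetadataUI is not imported above; it is assumed to be declared globally by the
// driver's test tooling.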
const TEST_METADATA: MongoDBMetadataUI = { requires: { mongodb: '>=4.2.9', topology: 'sharded' } };
describe('operationCount-based Selection Within Latency Window - Prose Test', function () {
let client: MongoClient;
let seeds: Array<string>;
let counts: Record<string, number> = {};
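// Tallies commandStarted events by mongos port so each test can compare how many commands
// were routed to each mongos.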
const updateCount = ({ address }: CommandStartedEvent) => {
const mongosPort = address.split(':')[1];
const count = counts[mongosPort] ?? 0;
counts[mongosPort] = count + 1;
};
beforeEach(async function () {
// Step 3: Create a client with both mongoses' addresses in its seed list, appName="loadBalancingTest", and localThresholdMS=30000.
const uri = this.configuration.url({
appName: 'loadBalancingTest',
localThresholdMS: 30000,
minPoolSize: POOL_SIZE,
maxPoolSize: POOL_SIZE,
monitorCommands: true,
useMultipleMongoses: true
});
client = this.configuration.newClient(uri, {
appName: 'loadBalancingTest',
localThresholdMS: 30000,
minPoolSize: POOL_SIZE,
maxPoolSize: POOL_SIZE,
monitorCommands: true
});
client.on('commandStarted', updateCount);
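// Start watching for connectionCreated events before connecting so that none are missed
// while the pools warm up to minPoolSize.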
const poolIsFullPromise = ensurePoolIsFull(client);
await client.connect();
// Step 4: Using CMAP events, ensure the client's connection pools for both mongoses have been saturated
await poolIsFullPromise;
seeds = client.topology.s.seedlist.map(address => address.toString());
counts = {};
});
afterEach(async function () {
await client.close();
client = undefined;
seeds = [];
});
it('needs to run on exactly two mongoses', TEST_METADATA, function () {
expect(seeds).to.have.lengthOf(2);
});
context('when one mongos is overloaded', function () {
let failCommandClient: MongoClient;
beforeEach(async function () {
// Step 2: Enable the following failpoint against exactly one of the mongoses:
const failingSeed = seeds[0];
failCommandClient = this.configuration.newClient(`mongodb://${failingSeed}/integration_test`);
await failCommandClient.connect();
await failCommandClient.db('admin').command(failPoint);
});
afterEach(async function () {
// Step 7: Disable the failpoint.
await failCommandClient.db('admin').command({
configureFailPoint: 'failCommand',
mode: 'off',
data: {
failCommands: ['find'],
blockConnection: true,
blockTimeMS: 500,
appName: 'loadBalancingTest'
}
});
await failCommandClient.close();
failCommandClient = undefined;
});
it('sends fewer requests to the overloaded server', TEST_METADATA, async function () {
const failingSeed = seeds[0];
const collection = client.db('test-db').collection('collection0');
// Step 5: Start 10 concurrent threads / tasks that each run 10 findOne operations with empty filters using that client.
await Promise.all(Array.from({ length: 10 }, () => runTaskGroup(collection, 10)));
// Step 6: Using command monitoring events, assert that fewer than 25% of the CommandStartedEvents
// occurred on the mongos that the failpoint was enabled on.
const port = failingSeed.split(':')[1];
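// 10 task groups x 10 operations each = 100 findOne commands in total.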
const percentageSentToSlowHost = (counts[port] / 100) * 100;
expect(percentageSentToSlowHost).to.be.lessThan(25);
});
});
it('equally distributes operations when both hosts are fine', TEST_METADATA, async function () {
const collection = client.db('test-db').collection('collection0');
const numberTaskGroups = 10;
const numberOfTasks = 1000;
const totalNumberOfTasks = numberTaskGroups * numberOfTasks;
// This test has proved flaky, and not just for Node. The number of iterations has been
// increased to prevent the test from failing intermittently.
// Step 8: Start 10 concurrent threads / tasks that each run 100 findOne operations with empty filters using that client.
await Promise.all(
Array.from({ length: numberTaskGroups }, () => runTaskGroup(collection, numberOfTasks))
);
// Step 9: Using command monitoring events, assert that each mongos was selected roughly 50% of the time (within +/- 10%).
const [host1, host2] = seeds.map(seed => seed.split(':')[1]);
const percentageToHost1 = (counts[host1] / totalNumberOfTasks) * 100;
const percentageToHost2 = (counts[host2] / totalNumberOfTasks) * 100;
expect(percentageToHost1).to.be.greaterThan(40).and.lessThan(60);
expect(percentageToHost2).to.be.greaterThan(40).and.lessThan(60);
});
it(
'equally distributes operations between both hosts when requests are in parallel',
TEST_METADATA,
async function () {
const collection = client.db('test-db').collection('collection0');
const { insertedId } = await collection.insertOne({ name: 'bumpy' });
const n = 1000;
for (let i = 0; i < n; ++i) {
await collection.findOne({ _id: insertedId });
}
// Step 9: Using command monitoring events, assert that each mongos was selected roughly 50% of the time (within +/- 10%).
const [host1, host2] = seeds.map(seed => seed.split(':')[1]);
const percentageToHost1 = (counts[host1] / n) * 100;
const percentageToHost2 = (counts[host2] / n) * 100;
expect(percentageToHost1).to.be.greaterThan(40).and.lessThan(60);
expect(percentageToHost2).to.be.greaterThan(40).and.lessThan(60);
}
);
});