Commit 8246c77

fix test cases
Signed-off-by: Arthur Schreiber <[email protected]>
1 parent 947d748 commit 8246c77

2 files changed: +79 −70 lines changed

go/pools/smartconnpool/pool.go

Lines changed: 10 additions & 27 deletions
@@ -259,34 +259,23 @@ func (pool *ConnPool[C]) Open(connect Connector[C], refresh RefreshCheck) *ConnP
 // but calling ConnPool.Put is still allowed. This function will not return until all of the pool's
 // connections have been returned or the default PoolCloseTimeout has elapsed
 func (pool *ConnPool[C]) Close() {
-	ctx, cancel := context.WithTimeout(context.Background(), PoolCloseTimeout)
-	defer cancel()
-
-	if err := pool.CloseWithContext(ctx); err != nil {
-		log.Errorf("failed to close pool %q: %v", pool.Name, err)
-	}
-}
-
-// CloseWithContext behaves like Close but allows passing in a Context to time out the
-// pool closing operation
-func (pool *ConnPool[C]) CloseWithContext(ctx context.Context) error {
 	pool.capacityMu.Lock()
 	defer pool.capacityMu.Unlock()
 
 	if pool.close == nil || pool.capacity.Load() == 0 {
 		// already closed
-		return nil
+		return
 	}
 
 	// close all the connections in the pool; if we time out while waiting for
 	// users to return our connections, we still want to finish the shutdown
 	// for the pool
-	err := pool.setCapacity(ctx, 0)
+	pool.setCapacity(0)
 
 	close(pool.close)
 	pool.workers.Wait()
 	pool.close = nil
-	return err
+	return
 }
 
 func (pool *ConnPool[C]) reopen() {
@@ -298,19 +287,14 @@ func (pool *ConnPool[C]) reopen() {
 		return
 	}
 
-	ctx, cancel := context.WithTimeout(context.Background(), PoolCloseTimeout)
-	defer cancel()
-
 	// to re-open the connection pool, first set the capacity to 0 so we close
 	// all the existing connections, as they're now connected to a stale MySQL
 	// instance.
-	if err := pool.setCapacity(ctx, 0); err != nil {
-		log.Errorf("failed to reopen pool %q: %v", pool.Name, err)
-	}
+	pool.setCapacity(0)
 
 	// the second call to setCapacity cannot fail because it's only increasing the number
 	// of connections and doesn't need to shut down any
-	_ = pool.setCapacity(ctx, capacity)
+	pool.setCapacity(capacity)
 }
 
 // IsOpen returns whether the pool is open
@@ -713,23 +697,24 @@ func (pool *ConnPool[C]) getWithSetting(ctx context.Context, setting *Setting) (
 // that means waiting for clients to return connections to the pool.
 // If the given context times out before we've managed to close enough connections
 // an error will be returned.
-func (pool *ConnPool[C]) SetCapacity(ctx context.Context, newcap int64) error {
+func (pool *ConnPool[C]) SetCapacity(newcap int64) {
 	pool.capacityMu.Lock()
 	defer pool.capacityMu.Unlock()
-	return pool.setCapacity(ctx, newcap)
+	pool.setCapacity(newcap)
 }
 
 // setCapacity is the internal implementation for SetCapacity; it must be called
 // with pool.capacityMu being held
-func (pool *ConnPool[C]) setCapacity(ctx context.Context, newcap int64) error {
+func (pool *ConnPool[C]) setCapacity(newcap int64) {
 	if newcap < 0 {
 		panic("negative capacity")
 	}
 
 	oldcap := pool.capacity.Swap(newcap)
 	if oldcap == newcap {
-		return nil
+		return
 	}
+
 	// update the idle count to match the new capacity if necessary
 	// wait for connections to be returned to the pool if we're reducing the capacity.
 	defer pool.setIdleCount()
@@ -775,8 +760,6 @@ func (pool *ConnPool[C]) setCapacity(ctx context.Context, newcap int64) error {
 			pool.closedConn()
 		}
 	}
-
-	return nil
 }
 
 func (pool *ConnPool[C]) closeIdleResources(now time.Time) {
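For callers, the visible effect of the pool.go changes is that SetCapacity and Close no longer take a context or return an error. A minimal before/after sketch of a call site (the pool variable and the capacity value here are illustrative, not part of this commit):

	// before this commit: resizing took a context and could fail
	ctx, cancel := context.WithTimeout(context.Background(), PoolCloseTimeout)
	defer cancel()
	if err := pool.SetCapacity(ctx, 3); err != nil {
		log.Errorf("failed to resize pool %q: %v", pool.Name, err)
	}

	// after this commit: a plain call with nothing to cancel or check
	pool.SetCapacity(3)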

go/pools/smartconnpool/pool_test.go

Lines changed: 69 additions & 43 deletions
@@ -209,15 +209,15 @@ func TestOpen(t *testing.T) {
 	assert.EqualValues(t, 6, state.lastID.Load())
 
 	// SetCapacity
-	err = p.SetCapacity(ctx, 3)
-	require.NoError(t, err)
+	p.SetCapacity(3)
+
 	assert.EqualValues(t, 3, state.open.Load())
 	assert.EqualValues(t, 6, state.lastID.Load())
 	assert.EqualValues(t, 3, p.Capacity())
 	assert.EqualValues(t, 3, p.Available())
 
-	err = p.SetCapacity(ctx, 6)
-	require.NoError(t, err)
+	p.SetCapacity(6)
+
 	assert.EqualValues(t, 6, p.Capacity())
 	assert.EqualValues(t, 6, p.Available())
 
@@ -266,13 +266,8 @@ func TestShrinking(t *testing.T) {
 		require.NoError(t, err)
 		resources[i] = r
 	}
-	done := make(chan bool)
-	go func() {
-		err := p.SetCapacity(ctx, 3)
-		require.NoError(t, err)
 
-		done <- true
-	}()
+	p.SetCapacity(3)
 	expected := map[string]any{
 		"Capacity": 3,
 		"Available": -1, // negative because we've borrowed past our capacity
@@ -294,27 +289,59 @@ func TestShrinking(t *testing.T) {
 			assert.Equal(t, expected, stats)
 		}
 	}
-	// There are already 2 resources available in the pool.
-	// So, returning one should be enough for SetCapacity to complete.
+
+	p.put(resources[0], resources[0].generation)
+	assert.Equal(t, map[string]any{
+		"Capacity": 3,
+		"Available": 0,
+		"Active": 4,
+		"InUse": 3,
+		"WaitCount": 0,
+		"WaitTime": time.Duration(0),
+		"IdleTimeout": 1 * time.Second,
+		"IdleClosed": 0,
+		"MaxLifetimeClosed": 0,
+	}, p.StatsJSON())
+
+	p.put(resources[1], resources[1].generation)
+	assert.Equal(t, map[string]any{
+		"Capacity": 3,
+		"Available": 1,
+		"Active": 4,
+		"InUse": 2,
+		"WaitCount": 0,
+		"WaitTime": time.Duration(0),
+		"IdleTimeout": 1 * time.Second,
+		"IdleClosed": 0,
+		"MaxLifetimeClosed": 0,
+	}, p.StatsJSON())
+
+	p.put(resources[2], resources[2].generation)
+	assert.Equal(t, map[string]any{
+		"Capacity": 3,
+		"Available": 2,
+		"Active": 4,
+		"InUse": 1,
+		"WaitCount": 0,
+		"WaitTime": time.Duration(0),
+		"IdleTimeout": 1 * time.Second,
+		"IdleClosed": 0,
+		"MaxLifetimeClosed": 0,
+	}, p.StatsJSON())
+
 	p.put(resources[3], resources[3].generation)
-	<-done
-	// Return the rest of the resources
-	for i := 0; i < 3; i++ {
-		p.put(resources[i], resources[i].generation)
-	}
-	stats := p.StatsJSON()
-	expected = map[string]any{
+	assert.Equal(t, map[string]any{
 		"Capacity": 3,
 		"Available": 3,
 		"Active": 3,
 		"InUse": 0,
 		"WaitCount": 0,
 		"WaitTime": time.Duration(0),
 		"IdleTimeout": 1 * time.Second,
-		"IdleClosed": 0,
+		"IdleClosed": 1,
 		"MaxLifetimeClosed": 0,
-	}
-	assert.Equal(t, expected, stats)
+	}, p.StatsJSON())
+
 	assert.EqualValues(t, 3, state.open.Load())
 
 	// Ensure no deadlock if SetCapacity is called after we start
@@ -330,36 +357,36 @@ func TestShrinking(t *testing.T) {
 		require.NoError(t, err)
 		resources[i] = r
 	}
+
+	wg := sync.WaitGroup{}
 	// This will wait because pool is empty
+	wg.Add(1)
 	go func() {
 		r, err := p.Get(ctx, nil)
 		require.NoError(t, err)
 		p.put(r, r.generation)
-		done <- true
+		wg.Done()
 	}()
 
-	// This will also wait
-	go func() {
-		err := p.SetCapacity(ctx, 2)
-		require.NoError(t, err)
-		done <- true
-	}()
+	p.SetCapacity(2)
+
 	time.Sleep(10 * time.Millisecond)
 
 	// This should not hang
 	for i := 0; i < 3; i++ {
 		p.put(resources[i], resources[i].generation)
 	}
-	<-done
-	<-done
+	wg.Wait()
+
 	assert.EqualValues(t, 2, p.Capacity())
 	assert.EqualValues(t, 2, p.Available())
 	assert.EqualValues(t, 1, p.Metrics.WaitCount())
 	assert.EqualValues(t, p.Metrics.WaitCount(), len(state.waits))
 	assert.EqualValues(t, 2, state.open.Load())
 
 	// Test race condition of SetCapacity with itself
-	err = p.SetCapacity(ctx, 3)
+	p.SetCapacity(3)
+
 	require.NoError(t, err)
 	for i := 0; i < 3; i++ {
 		var r *Pooled[*TestConn]
@@ -372,35 +399,34 @@ func TestShrinking(t *testing.T) {
 		require.NoError(t, err)
 		resources[i] = r
 	}
+
 	// This will wait because pool is empty
+	wg.Add(1)
 	go func() {
 		r, err := p.Get(ctx, nil)
 		require.NoError(t, err)
 		p.put(r, r.generation)
-		done <- true
+		wg.Done()
 	}()
 	time.Sleep(10 * time.Millisecond)
 
 	// This will wait till we Put
-	go func() {
-		err := p.SetCapacity(ctx, 2)
-		require.NoError(t, err)
-	}()
+	p.SetCapacity(2)
+
 	time.Sleep(10 * time.Millisecond)
-	go func() {
-		err := p.SetCapacity(ctx, 4)
-		require.NoError(t, err)
-	}()
+
+	p.SetCapacity(4)
+
 	time.Sleep(10 * time.Millisecond)
 
 	// This should not hang
 	for i := 0; i < 3; i++ {
 		p.put(resources[i], resources[i].generation)
 	}
-	<-done
+	wg.Wait()
 
 	assert.Panics(t, func() {
-		_ = p.SetCapacity(ctx, -1)
+		p.SetCapacity(-1)
 	})
 
 	assert.EqualValues(t, 4, p.Capacity())
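The test changes mirror that API: SetCapacity is now called inline instead of inside a goroutine paired with a done channel, and the only remaining asynchronous step, the Get that waits for a free slot, is synchronized with a sync.WaitGroup. A condensed sketch of the pattern used above (p, ctx, resources and the put helper are the fixtures assumed by the surrounding test):

	wg := sync.WaitGroup{}
	wg.Add(1)
	go func() {
		defer wg.Done()
		r, err := p.Get(ctx, nil) // blocks until a connection is returned
		require.NoError(t, err)
		p.put(r, r.generation)
	}()

	p.SetCapacity(2) // no context, no error; returns without waiting on borrowed connections

	for i := 0; i < 3; i++ {
		p.put(resources[i], resources[i].generation) // should not hang
	}
	wg.Wait()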
