@@ -13,25 +13,6 @@

 import pytest
 import numpy as np
-import pytest_benchmark
-
-# FIXME: Remove this when rapids_pytest_benchmark.gpubenchmark is available
-# everywhere
-try:
-    from rapids_pytest_benchmark import setFixtureParamNames
-except ImportError:
-    print(
-        "\n\nWARNING: rapids_pytest_benchmark is not installed, "
-        "falling back to pytest_benchmark fixtures.\n"
-    )
-
-    # if rapids_pytest_benchmark is not available, just perform time-only
-    # benchmarking and replace the util functions with nops
-    gpubenchmark = pytest_benchmark.plugin.benchmark
-
-    def setFixtureParamNames(*args, **kwargs):
-        pass
-
 import rmm
 import dask_cudf
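With the rapids_pytest_benchmark fallback gone, every benchmark below takes pytest-benchmark's stock `benchmark` fixture. For reviewers unfamiliar with it, here is a minimal sketch of how that fixture behaves; the `add_one` function and test are hypothetical, not part of this diff:

```python
# Minimal pytest-benchmark sketch (requires the pytest-benchmark plugin).
def add_one(x):
    return x + 1


def test_add_one(benchmark):
    # benchmark(fn, *args, **kwargs) calls fn repeatedly in timed rounds
    # and returns the result of one call, so ordinary assertions still work.
    result = benchmark(add_one, 41)
    assert result == 42
```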
@@ -50,6 +31,7 @@ def setFixtureParamNames(*args, **kwargs):
     pool_allocator,
 )

+
 # duck-type compatible Dataset for RMAT data
 class RmatDataset:
     def __init__(self, scale=4, edgefactor=2, mg=False):
@@ -198,11 +180,6 @@ def reinitRMM(managed_mem, pool_alloc):

 @pytest.fixture(scope="module", params=rmm_fixture_params)
 def rmm_config(request):
-    # Since parameterized fixtures do not assign param names to param values,
-    # manually call the helper to do so. Ensure the order of the name list
-    # passed to it matches if there are >1 params.
-    # If the request only contains n params, only the first n names are set.
-    setFixtureParamNames(request, ["managed_mem", "pool_allocator"])
     reinitRMM(request.param[0], request.param[1])


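The `rmm_config` fixture now passes its two params straight to `reinitRMM`. That helper's body is outside this diff; a plausible sketch, assuming it simply forwards to `rmm.reinitialize` (the `reinit_rmm` name below is hypothetical):

```python
import rmm


# Hedged sketch of a reinitRMM-style helper; the real implementation lives
# elsewhere in this file and may differ.
def reinit_rmm(managed_mem, pool_alloc):
    # rmm.reinitialize tears down and rebuilds RMM's default device memory
    # resource with the requested options.
    rmm.reinitialize(
        managed_memory=managed_mem,  # CUDA managed (unified) memory
        pool_allocator=pool_alloc,  # pool allocator to amortize allocations
    )
```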
@@ -215,7 +192,6 @@ def dataset(request, rmm_config):
     tests/fixtures are done with the Dataset, it has the Dask cluster and
     client torn down (if MG) and all data loaded is freed.
     """
-    setFixtureParamNames(request, ["dataset"])
     dataset = request.param[0]
     client = cluster = None
     # For now, only RmatDataset instances support MG and have a "mg" attr.
@@ -283,8 +259,8 @@ def get_vertex_pairs(G, num_vertices=10):

 ###############################################################################
 # Benchmarks
-def bench_create_graph(gpubenchmark, edgelist):
-    gpubenchmark(
+def bench_create_graph(benchmark, edgelist):
+    benchmark(
         cugraph.from_cudf_edgelist,
         edgelist,
         source="src",
@@ -298,8 +274,8 @@ def bench_create_graph(gpubenchmark, edgelist):
 # results in thousands of rounds before the default threshold is met, so lower
 # the max_time for this benchmark.
 @pytest.mark.benchmark(warmup=True, warmup_iterations=10, max_time=0.005)
-def bench_create_digraph(gpubenchmark, edgelist):
-    gpubenchmark(
+def bench_create_digraph(benchmark, edgelist):
+    benchmark(
         cugraph.from_cudf_edgelist,
         edgelist,
         source="src",
@@ -309,26 +285,26 @@ def bench_create_digraph(gpubenchmark, edgelist):
     )


-def bench_renumber(gpubenchmark, edgelist):
-    gpubenchmark(NumberMap.renumber, edgelist, "src", "dst")
+def bench_renumber(benchmark, edgelist):
+    benchmark(NumberMap.renumber, edgelist, "src", "dst")


-def bench_pagerank(gpubenchmark, transposed_graph):
+def bench_pagerank(benchmark, transposed_graph):
     pagerank = (
         dask_cugraph.pagerank
         if is_graph_distributed(transposed_graph)
         else cugraph.pagerank
     )
-    gpubenchmark(pagerank, transposed_graph)
+    benchmark(pagerank, transposed_graph)


-def bench_bfs(gpubenchmark, graph):
+def bench_bfs(benchmark, graph):
     bfs = dask_cugraph.bfs if is_graph_distributed(graph) else cugraph.bfs
     start = graph.edgelist.edgelist_df["src"][0]
-    gpubenchmark(bfs, graph, start)
+    benchmark(bfs, graph, start)


-def bench_sssp(gpubenchmark, graph):
+def bench_sssp(benchmark, graph):
     if not graph.is_weighted():
         pytest.skip("Skipping: Unweighted Graphs are not supported by SSSP")

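One idiom repeats through the rest of the diff: each benchmark selects the multi-GPU (`dask_cugraph`) or single-GPU (`cugraph`) implementation based on whether the graph is distributed. A condensed sketch of that dispatch, with `is_distributed` standing in for this file's `is_graph_distributed` helper:

```python
# Hedged sketch of the dispatch idiom used by the benchmarks below.
def pick_impl(graph, sg_func, mg_func, is_distributed):
    # Choose the dask-based implementation for distributed (MG) graphs,
    # otherwise the single-GPU one.
    return mg_func if is_distributed(graph) else sg_func
```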
@@ -340,102 +316,102 @@ def bench_sssp(gpubenchmark, graph):

     start = start_col.to_arrow().to_pylist()[0]

-    gpubenchmark(sssp, graph, start)
+    benchmark(sssp, graph, start)


-def bench_jaccard(gpubenchmark, unweighted_graph):
+def bench_jaccard(benchmark, unweighted_graph):
     G = unweighted_graph
     # algo cannot compute neighbors on all nodes without running into OOM
     # this is why we will call jaccard on a subset of nodes
     vert_pairs = get_vertex_pairs(G)
     jaccard = dask_cugraph.jaccard if is_graph_distributed(G) else cugraph.jaccard
-    gpubenchmark(jaccard, G, vert_pairs)
+    benchmark(jaccard, G, vert_pairs)


-def bench_sorensen(gpubenchmark, unweighted_graph):
+def bench_sorensen(benchmark, unweighted_graph):
     G = unweighted_graph
     # algo cannot compute neighbors on all nodes without running into OOM
     # this is why we will call sorensen on a subset of nodes
     vert_pairs = get_vertex_pairs(G)
     sorensen = dask_cugraph.sorensen if is_graph_distributed(G) else cugraph.sorensen
-    gpubenchmark(sorensen, G, vert_pairs)
+    benchmark(sorensen, G, vert_pairs)


-def bench_louvain(gpubenchmark, graph):
+def bench_louvain(benchmark, graph):
     louvain = dask_cugraph.louvain if is_graph_distributed(graph) else cugraph.louvain
-    gpubenchmark(louvain, graph)
+    benchmark(louvain, graph)


-def bench_weakly_connected_components(gpubenchmark, graph):
+def bench_weakly_connected_components(benchmark, graph):
     if is_graph_distributed(graph):
         pytest.skip("distributed graphs are not supported")
     if graph.is_directed():
         G = graph.to_undirected()
     else:
         G = graph
-    gpubenchmark(cugraph.weakly_connected_components, G)
+    benchmark(cugraph.weakly_connected_components, G)


-def bench_overlap(gpubenchmark, unweighted_graph):
+def bench_overlap(benchmark, unweighted_graph):
     G = unweighted_graph
     # algo cannot compute neighbors on all nodes without running into OOM
     # this is why we will call overlap on a subset of nodes
     vertex_pairs = get_vertex_pairs(G)
     overlap = dask_cugraph.overlap if is_graph_distributed(G) else cugraph.overlap
-    gpubenchmark(overlap, G, vertex_pairs)
+    benchmark(overlap, G, vertex_pairs)


-def bench_triangle_count(gpubenchmark, graph):
+def bench_triangle_count(benchmark, graph):
     tc = (
         dask_cugraph.triangle_count
         if is_graph_distributed(graph)
         else cugraph.triangle_count
     )
-    gpubenchmark(tc, graph)
+    benchmark(tc, graph)


-def bench_spectralBalancedCutClustering(gpubenchmark, graph):
+def bench_spectralBalancedCutClustering(benchmark, graph):
     if is_graph_distributed(graph):
         pytest.skip("distributed graphs are not supported")
-    gpubenchmark(cugraph.spectralBalancedCutClustering, graph, 2)
+    benchmark(cugraph.spectralBalancedCutClustering, graph, 2)


 @pytest.mark.skip(reason="Need to guarantee graph has weights, " "not doing that yet")
-def bench_spectralModularityMaximizationClustering(gpubenchmark, graph):
+def bench_spectralModularityMaximizationClustering(benchmark, graph):
     smmc = (
         dask_cugraph.spectralModularityMaximizationClustering
         if is_graph_distributed(graph)
         else cugraph.spectralModularityMaximizationClustering
     )
-    gpubenchmark(smmc, graph, 2)
+    benchmark(smmc, graph, 2)


-def bench_graph_degree(gpubenchmark, graph):
-    gpubenchmark(graph.degree)
+def bench_graph_degree(benchmark, graph):
+    benchmark(graph.degree)


-def bench_graph_degrees(gpubenchmark, graph):
+def bench_graph_degrees(benchmark, graph):
     if is_graph_distributed(graph):
         pytest.skip("distributed graphs are not supported")
-    gpubenchmark(graph.degrees)
+    benchmark(graph.degrees)


-def bench_betweenness_centrality(gpubenchmark, graph):
+def bench_betweenness_centrality(benchmark, graph):
     bc = (
         dask_cugraph.betweenness_centrality
         if is_graph_distributed(graph)
         else cugraph.betweenness_centrality
     )
-    gpubenchmark(bc, graph, k=10, random_state=123)
+    benchmark(bc, graph, k=10, random_state=123)


-def bench_edge_betweenness_centrality(gpubenchmark, graph):
+def bench_edge_betweenness_centrality(benchmark, graph):
     if is_graph_distributed(graph):
         pytest.skip("distributed graphs are not supported")
-    gpubenchmark(cugraph.edge_betweenness_centrality, graph, k=10, seed=123)
+    benchmark(cugraph.edge_betweenness_centrality, graph, k=10, seed=123)


-def bench_uniform_neighbor_sample(gpubenchmark, graph):
+def bench_uniform_neighbor_sample(benchmark, graph):
     uns = (
         dask_cugraph.uniform_neighbor_sample
         if is_graph_distributed(graph)
@@ -455,13 +431,13 @@ def bench_uniform_neighbor_sample(gpubenchmark, graph):
         start_list = start_list.compute()

     fanout_vals = [5, 5, 5]
-    gpubenchmark(uns, graph, start_list=start_list, fanout_vals=fanout_vals)
+    benchmark(uns, graph, start_list=start_list, fanout_vals=fanout_vals)


-def bench_egonet(gpubenchmark, graph):
+def bench_egonet(benchmark, graph):
     egonet = (
         dask_cugraph.ego_graph if is_graph_distributed(graph) else cugraph.ego_graph
     )
     n = 1
     radius = 2
-    gpubenchmark(egonet, graph, n, radius=radius)
+    benchmark(egonet, graph, n, radius=radius)
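With these changes the suite runs on stock pytest-benchmark, so the usual plugin workflow should apply; for example, `pytest --benchmark-only` to run only the benchmarks and `--benchmark-max-time` to bound measurement time (standard pytest-benchmark options; the exact invocation for this repo may differ).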