
Commit 0b50bf9

Removes unused benchmark infra: rapids-pytest-benchmark and asvdb (#4865)
This PR removes `rapids-pytest-benchmark` and references to one of its dependencies, `asvdb`. These packages were used to add GPU memory usage to pytest-benchmark output and to update ASV output files with pytest-benchmark results so the ASV front end could be used on pytest-benchmark suites. Since we no longer use the ASV front end, and the GPU memory measurement mechanism in rapids-pytest-benchmark is outdated, unmaintained, and slower than other available tools, this change removes all references to these packages.

cc @vyasr @robertmaynard

Authors:
- Rick Ratzel (https://github.com/rlratzel)
- Ralph Liu (https://github.com/nv-rliu)

Approvers:
- Don Acosta (https://github.com/acostadon)
- Alex Barghi (https://github.com/alexbarghi-nv)

URL: #4865
1 parent 1088403 commit 0b50bf9
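Across the touched benchmark files the change follows one pattern: the try/except fallback that aliased `gpubenchmark` to pytest-benchmark's fixture is deleted, and the benchmarks take the standard `benchmark` fixture directly. A minimal sketch of the resulting shape, modeled on `bench_create_graph` from `bench_algos.py` in this diff; it assumes pytest-benchmark is installed (it provides the `benchmark` fixture) and that an `edgelist` fixture yields a cudf DataFrame with `src`/`dst` columns:

```python
# Sketch: a cuGraph benchmark using only pytest-benchmark.
# Assumes an "edgelist" fixture yielding a cudf DataFrame with src/dst columns.
import cugraph


def bench_create_graph(benchmark, edgelist):
    # benchmark() runs the target callable over multiple rounds and records
    # wall-clock timings; GPU memory is no longer captured now that
    # rapids-pytest-benchmark is removed.
    benchmark(
        cugraph.from_cudf_edgelist,
        edgelist,
        source="src",
        destination="dst",
    )
```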

23 files changed (+129, -409 lines)


benchmarks/cugraph-dgl/pytest-based/bench_cugraph_dgl_uniform_neighbor_sample.py

Lines changed: 1 addition & 5 deletions
@@ -1,4 +1,4 @@
-# Copyright (c) 2022-2023, NVIDIA CORPORATION.
+# Copyright (c) 2022-2025, NVIDIA CORPORATION.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -22,10 +22,6 @@
 import numpy as np
 import cupy as cp
 
-# Facing issues with rapids-pytest-benchmark plugin
-# pytest-benchmark.
-import pytest_benchmark
-
 from cugraph.generators import rmat
 from cugraph.experimental import datasets
 from cugraph_benchmarking import params

benchmarks/cugraph-service/pytest-based/bench_cgs_uniform_neighbor_sample.py

Lines changed: 3 additions & 15 deletions
@@ -1,4 +1,4 @@
-# Copyright (c) 2022-2023, NVIDIA CORPORATION.
+# Copyright (c) 2022-2025, NVIDIA CORPORATION.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -18,18 +18,6 @@
 import pytest
 import numpy as np
 
-# If the rapids-pytest-benchmark plugin is installed, the "gpubenchmark"
-# fixture will be available automatically. Check that this fixture is available
-# by trying to import rapids_pytest_benchmark, and if that fails, set
-# "gpubenchmark" to the standard "benchmark" fixture provided by
-# pytest-benchmark.
-try:
-    import rapids_pytest_benchmark  # noqa: F401
-except ImportError:
-    import pytest_benchmark
-
-    gpubenchmark = pytest_benchmark.plugin.benchmark
-
 from cugraph_service_client import CugraphServiceClient
 from cugraph_service_client.exceptions import CugraphServiceError
 from cugraph_service_client import RemoteGraph
@@ -178,7 +166,7 @@ def remote_graph_objs(request):
     "with_replacement", [False], ids=lambda v: f"with_replacement={v}"
 )
 def bench_cgs_uniform_neighbor_sample(
-    gpubenchmark, remote_graph_objs, batch_size, fanout, with_replacement
+    benchmark, remote_graph_objs, batch_size, fanout, with_replacement
 ):
     (G, num_verts, uniform_neighbor_sample_func) = remote_graph_objs
 
@@ -188,7 +176,7 @@ def bench_cgs_uniform_neighbor_sample(
     )
     # print(f"\n{uns_args}")
     # FIXME: uniform_neighbor_sample cannot take a np.ndarray for start_list
-    result = gpubenchmark(
+    result = benchmark(
         uniform_neighbor_sample_func,
         G,
         start_list=uns_args["start_list"],

benchmarks/cugraph/pytest-based/README.md

Lines changed: 3 additions & 7 deletions
@@ -13,9 +13,6 @@ directory under the root of the `cuGraph` source tree.
 * cugraph built and installed (or `cugraph` sources and built C++ extensions
   available on `PYTHONPATH`)
 
-* rapids-pytest-benchmark pytest plugin (`conda install -c rapidsai
-  rapids-pytest-benchmark`)
-
 * The benchmark datasets downloaded and installed in <cugraph>/datasets. Run the
   script below from the <cugraph>/datasets directory:
 ```
@@ -25,8 +22,7 @@ cd <cugraph>/datasets
 
 ## Usage (Python)
 ### Python
-* Run `pytest --help` (with the rapids-pytest-benchmark plugin installed) for
-  the full list of options
+* Run `pytest --help` for the full list of options
 
 * See also the `pytest.ini` file in this directory for examples of how to enable
   options by default and define marks
@@ -44,9 +40,9 @@ _**NOTE: these commands must be run from the `<cugraph_root>/benchmarks` directo
 (rapids) user@machine:/cugraph/benchmarks> pytest -x
 ```
 
-* Run all the benchmarks but do not reinit RMM with different configurations
+* Run all the benchmarks and allow RMM to reinit with different configurations
 ```
-(rapids) user@machine:/cugraph/benchmarks> pytest --no-rmm-reinit
+(rapids) user@machine:/cugraph/benchmarks> pytest --allow-rmm-reinit
 ```
 
 * Show what benchmarks would be run with the given options, but do not run them
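The README change replaces `--no-rmm-reinit` with `--allow-rmm-reinit`, flipping the default so benchmarks keep the initial RMM configuration unless explicitly asked to reconfigure it. The flag's actual definition lives in the benchmarks' pytest configuration and is not part of this diff; the following is only a rough, hypothetical sketch of how such an option is typically registered with pytest's standard `pytest_addoption` hook (all names other than the flag itself are illustrative):

```python
# conftest.py -- hypothetical sketch only; not the repo's actual implementation.
import pytest


def pytest_addoption(parser):
    # Register the flag shown in the README. Default is off, so RMM keeps its
    # initial configuration unless the user opts in on the command line.
    parser.addoption(
        "--allow-rmm-reinit",
        action="store_true",
        default=False,
        help="allow benchmarks to reinitialize RMM with different configurations",
    )


@pytest.fixture(scope="session")
def allow_rmm_reinit(request):
    # Fixtures or benchmarks can depend on this to decide whether to call
    # their RMM reinitialization helper.
    return request.config.getoption("--allow-rmm-reinit")
```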

benchmarks/cugraph/pytest-based/bench_algos.py

Lines changed: 41 additions & 65 deletions
@@ -13,25 +13,6 @@
 
 import pytest
 import numpy as np
-import pytest_benchmark
-
-# FIXME: Remove this when rapids_pytest_benchmark.gpubenchmark is available
-# everywhere
-try:
-    from rapids_pytest_benchmark import setFixtureParamNames
-except ImportError:
-    print(
-        "\n\nWARNING: rapids_pytest_benchmark is not installed, "
-        "falling back to pytest_benchmark fixtures.\n"
-    )
-
-    # if rapids_pytest_benchmark is not available, just perfrom time-only
-    # benchmarking and replace the util functions with nops
-    gpubenchmark = pytest_benchmark.plugin.benchmark
-
-    def setFixtureParamNames(*args, **kwargs):
-        pass
-
 
 import rmm
 import dask_cudf
@@ -50,6 +31,7 @@ def setFixtureParamNames(*args, **kwargs):
     pool_allocator,
 )
 
+
 # duck-type compatible Dataset for RMAT data
 class RmatDataset:
     def __init__(self, scale=4, edgefactor=2, mg=False):
@@ -198,11 +180,6 @@ def reinitRMM(managed_mem, pool_alloc):
 
 @pytest.fixture(scope="module", params=rmm_fixture_params)
 def rmm_config(request):
-    # Since parameterized fixtures do not assign param names to param values,
-    # manually call the helper to do so. Ensure the order of the name list
-    # passed to it matches if there are >1 params.
-    # If the request only contains n params, only the first n names are set.
-    setFixtureParamNames(request, ["managed_mem", "pool_allocator"])
     reinitRMM(request.param[0], request.param[1])
 
 
@@ -215,7 +192,6 @@ def dataset(request, rmm_config):
     tests/fixtures are done with the Dataset, it has the Dask cluster and
     client torn down (if MG) and all data loaded is freed.
     """
-    setFixtureParamNames(request, ["dataset"])
     dataset = request.param[0]
     client = cluster = None
     # For now, only RmatDataset instanaces support MG and have a "mg" attr.
@@ -283,8 +259,8 @@ def get_vertex_pairs(G, num_vertices=10):
 
 ###############################################################################
 # Benchmarks
-def bench_create_graph(gpubenchmark, edgelist):
-    gpubenchmark(
+def bench_create_graph(benchmark, edgelist):
+    benchmark(
         cugraph.from_cudf_edgelist,
         edgelist,
         source="src",
@@ -298,8 +274,8 @@ def bench_create_graph(gpubenchmark, edgelist):
 # results in thousands of rounds before the default threshold is met, so lower
 # the max_time for this benchmark.
 @pytest.mark.benchmark(warmup=True, warmup_iterations=10, max_time=0.005)
-def bench_create_digraph(gpubenchmark, edgelist):
-    gpubenchmark(
+def bench_create_digraph(benchmark, edgelist):
+    benchmark(
         cugraph.from_cudf_edgelist,
         edgelist,
         source="src",
@@ -309,26 +285,26 @@ def bench_create_digraph(gpubenchmark, edgelist):
     )
 
 
-def bench_renumber(gpubenchmark, edgelist):
-    gpubenchmark(NumberMap.renumber, edgelist, "src", "dst")
+def bench_renumber(benchmark, edgelist):
+    benchmark(NumberMap.renumber, edgelist, "src", "dst")
 
 
-def bench_pagerank(gpubenchmark, transposed_graph):
+def bench_pagerank(benchmark, transposed_graph):
     pagerank = (
         dask_cugraph.pagerank
         if is_graph_distributed(transposed_graph)
         else cugraph.pagerank
     )
-    gpubenchmark(pagerank, transposed_graph)
+    benchmark(pagerank, transposed_graph)
 
 
-def bench_bfs(gpubenchmark, graph):
+def bench_bfs(benchmark, graph):
     bfs = dask_cugraph.bfs if is_graph_distributed(graph) else cugraph.bfs
     start = graph.edgelist.edgelist_df["src"][0]
-    gpubenchmark(bfs, graph, start)
+    benchmark(bfs, graph, start)
 
 
-def bench_sssp(gpubenchmark, graph):
+def bench_sssp(benchmark, graph):
     if not graph.is_weighted():
         pytest.skip("Skipping: Unweighted Graphs are not supported by SSSP")
 
@@ -340,102 +316,102 @@ def bench_sssp(gpubenchmark, graph):
 
     start = start_col.to_arrow().to_pylist()[0]
 
-    gpubenchmark(sssp, graph, start)
+    benchmark(sssp, graph, start)
 
 
-def bench_jaccard(gpubenchmark, unweighted_graph):
+def bench_jaccard(benchmark, unweighted_graph):
     G = unweighted_graph
     # algo cannot compute neighbors on all nodes without running into OOM
     # this is why we will call jaccard on a subset of nodes
     vert_pairs = get_vertex_pairs(G)
     jaccard = dask_cugraph.jaccard if is_graph_distributed(G) else cugraph.jaccard
-    gpubenchmark(jaccard, G, vert_pairs)
+    benchmark(jaccard, G, vert_pairs)
 
 
-def bench_sorensen(gpubenchmark, unweighted_graph):
+def bench_sorensen(benchmark, unweighted_graph):
     G = unweighted_graph
     # algo cannot compute neighbors on all nodes without running into OOM
    # this is why we will call sorensen on a subset of nodes
     vert_pairs = get_vertex_pairs(G)
     sorensen = dask_cugraph.sorensen if is_graph_distributed(G) else cugraph.sorensen
-    gpubenchmark(sorensen, G, vert_pairs)
+    benchmark(sorensen, G, vert_pairs)
 
 
-def bench_louvain(gpubenchmark, graph):
+def bench_louvain(benchmark, graph):
     louvain = dask_cugraph.louvain if is_graph_distributed(graph) else cugraph.louvain
-    gpubenchmark(louvain, graph)
+    benchmark(louvain, graph)
 
 
-def bench_weakly_connected_components(gpubenchmark, graph):
+def bench_weakly_connected_components(benchmark, graph):
     if is_graph_distributed(graph):
         pytest.skip("distributed graphs are not supported")
     if graph.is_directed():
         G = graph.to_undirected()
     else:
         G = graph
-    gpubenchmark(cugraph.weakly_connected_components, G)
+    benchmark(cugraph.weakly_connected_components, G)
 
 
-def bench_overlap(gpubenchmark, unweighted_graph):
+def bench_overlap(benchmark, unweighted_graph):
     G = unweighted_graph
     # algo cannot compute neighbors on all nodes without running into OOM
     # this is why we will call sorensen on a subset of nodes
     vertex_pairs = get_vertex_pairs(G)
     overlap = dask_cugraph.overlap if is_graph_distributed(G) else cugraph.overlap
-    gpubenchmark(overlap, G, vertex_pairs)
+    benchmark(overlap, G, vertex_pairs)
 
 
-def bench_triangle_count(gpubenchmark, graph):
+def bench_triangle_count(benchmark, graph):
     tc = (
         dask_cugraph.triangle_count
         if is_graph_distributed(graph)
         else cugraph.triangle_count
     )
-    gpubenchmark(tc, graph)
+    benchmark(tc, graph)
 
 
-def bench_spectralBalancedCutClustering(gpubenchmark, graph):
+def bench_spectralBalancedCutClustering(benchmark, graph):
     if is_graph_distributed(graph):
         pytest.skip("distributed graphs are not supported")
-    gpubenchmark(cugraph.spectralBalancedCutClustering, graph, 2)
+    benchmark(cugraph.spectralBalancedCutClustering, graph, 2)
 
 
 @pytest.mark.skip(reason="Need to guarantee graph has weights, " "not doing that yet")
-def bench_spectralModularityMaximizationClustering(gpubenchmark, graph):
+def bench_spectralModularityMaximizationClustering(benchmark, graph):
     smmc = (
         dask_cugraph.spectralModularityMaximizationClustering
         if is_graph_distributed(graph)
         else cugraph.spectralModularityMaximizationClustering
     )
-    gpubenchmark(smmc, graph, 2)
+    benchmark(smmc, graph, 2)
 
 
-def bench_graph_degree(gpubenchmark, graph):
-    gpubenchmark(graph.degree)
+def bench_graph_degree(benchmark, graph):
+    benchmark(graph.degree)
 
 
-def bench_graph_degrees(gpubenchmark, graph):
+def bench_graph_degrees(benchmark, graph):
     if is_graph_distributed(graph):
         pytest.skip("distributed graphs are not supported")
-    gpubenchmark(graph.degrees)
+    benchmark(graph.degrees)
 
 
-def bench_betweenness_centrality(gpubenchmark, graph):
+def bench_betweenness_centrality(benchmark, graph):
     bc = (
         dask_cugraph.betweenness_centrality
         if is_graph_distributed(graph)
         else cugraph.betweenness_centrality
     )
-    gpubenchmark(bc, graph, k=10, random_state=123)
+    benchmark(bc, graph, k=10, random_state=123)
 
 
-def bench_edge_betweenness_centrality(gpubenchmark, graph):
+def bench_edge_betweenness_centrality(benchmark, graph):
     if is_graph_distributed(graph):
         pytest.skip("distributed graphs are not supported")
-    gpubenchmark(cugraph.edge_betweenness_centrality, graph, k=10, seed=123)
+    benchmark(cugraph.edge_betweenness_centrality, graph, k=10, seed=123)
 
 
-def bench_uniform_neighbor_sample(gpubenchmark, graph):
+def bench_uniform_neighbor_sample(benchmark, graph):
     uns = (
         dask_cugraph.uniform_neighbor_sample
         if is_graph_distributed(graph)
@@ -455,13 +431,13 @@ def bench_uniform_neighbor_sample(gpubenchmark, graph):
         start_list = start_list.compute()
 
     fanout_vals = [5, 5, 5]
-    gpubenchmark(uns, graph, start_list=start_list, fanout_vals=fanout_vals)
+    benchmark(uns, graph, start_list=start_list, fanout_vals=fanout_vals)
 
 
-def bench_egonet(gpubenchmark, graph):
+def bench_egonet(benchmark, graph):
     egonet = (
         dask_cugraph.ego_graph if is_graph_distributed(graph) else cugraph.ego_graph
     )
     n = 1
     radius = 2
-    gpubenchmark(egonet, graph, n, radius=radius)
+    benchmark(egonet, graph, n, radius=radius)
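The deleted `setFixtureParamNames` helper only attached readable names to fixture parameters for reporting. If that readability is still wanted without rapids-pytest-benchmark, pytest's own `ids=` argument on a parametrized fixture covers it. A sketch under the assumption that the RMM params remain (managed_mem, pool_allocator) boolean pairs; the param list here is illustrative, since the real one is built by a helper in `bench_algos.py`:

```python
# Sketch: readable fixture-parameter IDs via pytest's ids= argument,
# assuming params are (managed_mem, pool_allocator) boolean pairs.
# The param list is illustrative; reinitRMM is the helper kept in bench_algos.py.
import pytest

rmm_fixture_params = [(False, False), (False, True), (True, False), (True, True)]


@pytest.fixture(
    scope="module",
    params=rmm_fixture_params,
    ids=lambda p: f"managed_mem={p[0]}-pool_allocator={p[1]}",
)
def rmm_config(request):
    # Each test parameterized by this fixture reinitializes RMM with the
    # requested managed-memory / pool-allocator combination.
    reinitRMM(request.param[0], request.param[1])
```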

benchmarks/cugraph/pytest-based/bench_cugraph_uniform_neighbor_sample.py

Lines changed: 3 additions & 15 deletions
@@ -1,4 +1,4 @@
-# Copyright (c) 2022-2024, NVIDIA CORPORATION.
+# Copyright (c) 2022-2025, NVIDIA CORPORATION.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -22,18 +22,6 @@
 import dask_cudf
 import rmm
 
-# If the rapids-pytest-benchmark plugin is installed, the "gpubenchmark"
-# fixture will be available automatically. Check that this fixture is available
-# by trying to import rapids_pytest_benchmark, and if that fails, set
-# "gpubenchmark" to the standard "benchmark" fixture provided by
-# pytest-benchmark.
-try:
-    import rapids_pytest_benchmark  # noqa: F401
-except ImportError:
-    import pytest_benchmark
-
-    gpubenchmark = pytest_benchmark.plugin.benchmark
-
 from cugraph import (
     MultiGraph,
     uniform_neighbor_sample,
@@ -271,7 +259,7 @@ def uns_func(*args, **kwargs):
     "with_replacement", [False], ids=lambda v: f"with_replacement={v}"
 )
 def bench_cugraph_uniform_neighbor_sample(
-    gpubenchmark, graph_objs, batch_size, fanout, with_replacement
+    benchmark, graph_objs, batch_size, fanout, with_replacement
 ):
     (G, num_verts, uniform_neighbor_sample_func) = graph_objs
 
@@ -281,7 +269,7 @@ def bench_cugraph_uniform_neighbor_sample(
     )
     # print(f"\n{uns_args}")
     # FIXME: uniform_neighbor_sample cannot take a np.ndarray for start_list
-    result = gpubenchmark(
+    result = benchmark(
         uniform_neighbor_sample_func,
         G,
         start_list=uns_args["start_list"],
