
[OpPerf] Handle positional arguments #15761

Merged: 11 commits, Aug 19, 2019
Changes from 6 commits
10 changes: 8 additions & 2 deletions benchmark/opperf/rules/default_params.py
@@ -25,6 +25,10 @@
 
 """Default Input Tensor shapes to use for benchmarking"""
 
+# For operators like concat, Elementwisesum, squeeze, stack
+# argument data is passed as variable arg (*args)
+DEFAULT_ARGS = [(1024, 1024)]
+
 # For Unary operators like abs, arccos, arcsin etc..
 DEFAULT_DATA = [(1024, 1024), (10000, 1), (10000, 100)]

@@ -146,7 +150,8 @@
                     "data_4d": DEFAULT_DATA_4d,
                     "dim1": DEFAULT_DIM_1,
                     "dim2": DEFAULT_DIM_2,
-                    "block_size": DEFAULT_BLOCK_SIZE}
+                    "block_size": DEFAULT_BLOCK_SIZE,
+                    "args": DEFAULT_ARGS}
 
 
 # These are names of MXNet operator parameters that is of type NDArray.
@@ -157,4 +162,5 @@
 PARAMS_OF_TYPE_NDARRAY = ["lhs", "rhs", "data", "base", "exp", "sample",
                           "mu", "sigma", "lam", "alpha", "beta", "gamma", "k", "p",
                           "low", "high", "weight", "bias", "moving_mean", "moving_var",
-                          "weight", "weight32", "grad", "mean", "var", "mom", "n", "d", "v", "z", "g", "delta"]
+                          "weight", "weight32", "grad", "mean", "var", "mom", "n", "d",
+                          "v", "z", "g", "delta", "args"]
40 changes: 28 additions & 12 deletions benchmark/opperf/utils/benchmark_utils.py
@@ -28,40 +28,56 @@
 
 def _prepare_op_inputs(inputs, run_backward, dtype, ctx):
     kwargs_list = []
+    args_list = []
 
     for inp in inputs:
         kwargs = {}
         for key, value in inp.items():
-            if key in PARAMS_OF_TYPE_NDARRAY:
+            if key in PARAMS_OF_TYPE_NDARRAY and key=='args':
+                args_list.append(get_mx_ndarray(ctx=ctx, in_tensor=value,
+                                                dtype=dtype,
+                                                initializer=nd.normal,
+                                                attach_grad=run_backward))
+            elif key in PARAMS_OF_TYPE_NDARRAY:
                 kwargs[key] = get_mx_ndarray(ctx=ctx, in_tensor=value,
                                              dtype=dtype,
                                              initializer=nd.normal,
                                              attach_grad=run_backward)
             else:
                 kwargs[key] = value
         kwargs_list.append(kwargs)
 
-    return kwargs_list
+    return args_list, kwargs_list
 
 
-def _run_nd_operator_performance_test(op, inputs, run_backward, warmup, runs, kwargs_list):
+def _run_nd_operator_performance_test(op, inputs, run_backward, warmup, runs, args_list, kwargs_list):
     if run_backward:
         benchmark_helper_func = nd_forward_backward_and_profile
     else:
         benchmark_helper_func = nd_forward_and_profile
 
+    if not args_list:
+        _, _ = benchmark_helper_func(op, warmup, [], **kwargs_list[0])
+    else:
     # Warm up, ignore the profiler output
-    _, _ = benchmark_helper_func(op, warmup, **kwargs_list[0])
+        _, _ = benchmark_helper_func(op, warmup, args_list[0], **kwargs_list[0])
 
     # Run Benchmarks
     op_benchmark_result = {op.__name__: []}
     logging.info("Begin Benchmark - {name}".format(name=op.__name__))
-    for idx, kwargs in enumerate(kwargs_list):
-        _, profiler_output = benchmark_helper_func(op, runs, **kwargs)
+    if not args_list:
+        for idx, kwargs in enumerate(kwargs_list):
+            _, profiler_output = benchmark_helper_func(op, runs, [], **kwargs)
 
-        # Add inputs used for profiling this operator into result
-        profiler_output["inputs"] = inputs[idx]
-        op_benchmark_result[op.__name__].append(profiler_output)
+            # Add inputs used for profiling this operator into result
+            profiler_output["inputs"] = inputs[idx]
+            op_benchmark_result[op.__name__].append(profiler_output)
+    else:
+        for idx, (args,kwargs) in enumerate(zip(args_list,kwargs_list)):
+            _, profiler_output = benchmark_helper_func(op, runs, args, **kwargs)
+
+            # Add inputs used for profiling this operator into result
+            profiler_output["inputs"] = inputs[idx]
+            op_benchmark_result[op.__name__].append(profiler_output)
     logging.info("Complete Benchmark - {name}".format(name=op.__name__))
     return op_benchmark_result
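
To make the new return contract concrete, a small illustrative trace; the behaviour of get_mx_ndarray with a shape tuple and a dtype string is assumed from the surrounding code, and the import path assumes the repository root is on PYTHONPATH.

import mxnet as mx
from benchmark.opperf.utils.benchmark_utils import _prepare_op_inputs

# An "args" entry is materialized as an NDArray and routed to args_list;
# everything else would stay in kwargs_list as keyword tensors.
inputs = [{"args": (1024, 1024)}]
args_list, kwargs_list = _prepare_op_inputs(inputs, run_backward=False,
                                            dtype='float32', ctx=mx.cpu())
# args_list   -> [NDArray of shape (1024, 1024)]
# kwargs_list -> [{}]   (no keyword tensors for this input)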

@@ -98,15 +114,15 @@ def run_performance_test(ops, inputs, run_backward=True,
     List of dictionary of benchmark results. key -> name of the operator, Value is benchmark results.
 
     """
-    kwargs_list = _prepare_op_inputs(inputs, run_backward, dtype, ctx)
+    args_list, kwargs_list = _prepare_op_inputs(inputs, run_backward, dtype, ctx)
 
     if not isinstance(ops, list):
         ops = [ops]
 
     op_benchmark_result = []
     for op in ops:
         if hasattr(mx.nd, op.__name__):
-            benchmark_result = _run_nd_operator_performance_test(op, inputs, run_backward, warmup, runs, kwargs_list)
+            benchmark_result = _run_nd_operator_performance_test(op, inputs, run_backward, warmup, runs, args_list, kwargs_list)
         else:
             raise ValueError("Unknown NDArray operator provided to benchmark. - ", op.__name__)
         op_benchmark_result.append(benchmark_result)
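
A hedged usage sketch of the updated entry point; the choice of mx.nd.squeeze, the keyword values, and the import path are assumptions for illustration, not something this hunk shows.

import mxnet as mx
from benchmark.opperf.utils.benchmark_utils import run_performance_test

# Benchmark an operator whose input tensor is passed positionally.
# The "args" key routes the shape into args_list instead of kwargs_list.
result = run_performance_test(mx.nd.squeeze,
                              inputs=[{"args": (1024, 1024)}],
                              run_backward=False,
                              warmup=10,
                              runs=25)
print(result)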
10 changes: 8 additions & 2 deletions benchmark/opperf/utils/ndarray_utils.py
@@ -47,7 +47,10 @@ def nd_forward_backward_and_profile(op, runs, *args, **kwargs):
     """
     for _ in range(runs):
         with mx.autograd.record():
-            res = op(*args, **kwargs)
+            if not isinstance(args[0],nd.NDArray):
+                res = op(**kwargs)
+            else:
+                res = op(*args, **kwargs)
         res.backward()
     nd.waitall()
     return res
@@ -76,7 +79,10 @@ def nd_forward_and_profile(op, runs, *args, **kwargs):
         any results from NDArray operation execution
     """
     for _ in range(runs):
-        res = op(*args, **kwargs)
+        if not isinstance(args[0],nd.NDArray):
+            res = op(**kwargs)
+        else:
+            res = op(*args, **kwargs)
     nd.waitall()
     return res
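
The empty-list placeholder passed by the benchmark driver is what makes the isinstance check above work: args[0] always exists and is either [] (keyword-only inputs) or an NDArray (positional inputs). A minimal standalone sketch of that convention, using a hypothetical helper name:

import mxnet as mx
from mxnet import nd

def _dispatch_call(op, *args, **kwargs):
    # Hypothetical helper mirroring the dispatch above: the caller always
    # supplies one positional placeholder, so args[0] is either the
    # empty-list sentinel ([]) or a real NDArray.
    if not isinstance(args[0], nd.NDArray):
        return op(**kwargs)        # keyword-only operator, e.g. abs(data=x)
    return op(*args, **kwargs)     # positional operator, e.g. squeeze(x)

x = nd.ones((1, 1024, 1024))
print(_dispatch_call(nd.abs, [], data=x).shape)   # -> (1, 1024, 1024)
print(_dispatch_call(nd.squeeze, x).shape)        # -> (1024, 1024)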