Commit 6cf964a

Improve FC perf when no_bias=False (apache#15033)
* Improve FC perf when no_bias=False
* Add Issue number in comment
* Correct req
1 parent 01cf29d commit 6cf964a

File tree: 3 files changed, +34 -3 lines

  src/operator/nn/fully_connected-inl.h
  src/operator/nn/fully_connected.cc
  tests/python/unittest/test_operator.py

src/operator/nn/fully_connected-inl.h

Lines changed: 13 additions & 1 deletion
@@ -36,6 +36,7 @@
 #include "../elemwise_op_common.h"
 #include "../linalg.h"
 #include "../../common/utils.h"
+#include "../tensor/broadcast_reduce_op.h"
 
 namespace mxnet {
 namespace op {
@@ -169,7 +170,18 @@ void FCBackward(const OpContext &ctx, const FullyConnectedParam &param,
   // gradient of bias
   if (!param.no_bias) {
     Tensor<xpu, 1, DType> gbias = in_grad[fullc::kBias].get<xpu, 1, DType>(s);
-    Assign(gbias, req[fullc::kBias], sum_rows(grad));
+    TBlob grad_blob = TBlob(grad);
+    TBlob gbias_blob = TBlob(gbias);
+    mxnet::TShape x(1, 0);
+    mxnet::TShape small;
+    if (shape_assign(&gbias_blob.shape_, Shape2(param.num_hidden, 1))) {
+      small = gbias_blob.shape_;
+    } else {
+      small = ReduceAxesShapeImpl(grad_blob.shape_, dmlc::optional<mxnet::TShape>(x), true, false);
+    }
+    ReduceAxesComputeImpl<xpu, mshadow::red::sum, false, false,
+        mshadow_op::identity>(ctx, {grad_blob}, {req[fullc::kBias]},
+                              {in_grad[fullc::kBias]}, small);
   }
   // gradient of data
   // Legacy approach shown here for comparison:
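This is the heart of the change: the bias gradient no longer goes through mshadow's sum_rows, but through ReduceAxesComputeImpl, the parallelized reduction kernel behind MXNet's sum operator, with the write request req[fullc::kBias] passed through. Numerically the result is unchanged: a sum of the output gradient over the batch axis. A minimal NumPy sketch of the quantity being computed (shapes are illustrative, not taken from the commit):

import numpy as np

batch, num_hidden = 5, 10
grad = np.random.uniform(size=(batch, num_hidden)).astype(np.float32)

# Bias gradient: reduce the output gradient over the batch axis,
# yielding one partial sum per hidden unit. Both the old sum_rows(grad)
# and the new ReduceAxesComputeImpl path compute this quantity.
gbias = grad.sum(axis=0)                     # shape: (num_hidden,)

# With req == kAddTo, the kernel accumulates into the existing gradient
# buffer instead of overwriting it:
gbias_accum = np.zeros(num_hidden, dtype=np.float32)
gbias_accum += grad.sum(axis=0)              # kAddTo semantics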

src/operator/nn/fully_connected.cc

Lines changed: 0 additions & 2 deletions
@@ -316,11 +316,9 @@ NNVM_REGISTER_OP(_backward_FullyConnected)
   const FullyConnectedParam& params = nnvm::get<FullyConnectedParam>(attrs.parsed);
   return params.no_bias ? 2 : 3;
 })
-#if MXNET_USE_MKLDNN == 1
 .set_attr<FResourceRequest>("FResourceRequest", [](const NodeAttrs& n) {
   return std::vector<ResourceRequest>{ResourceRequest::kTempSpace};
 })
-#endif
 .set_attr<nnvm::TIsBackward>("TIsBackward", true)
 .set_attr<nnvm::FInplaceOption>("FInplaceOption", [](const NodeAttrs& attrs){
   return std::vector<std::pair<int, int> >{{1, 0}};
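Note the removal of the MXNET_USE_MKLDNN guard: the backward op now requests kTempSpace in every build, not only MKL-DNN ones. This is presumably needed because the ReduceAxesComputeImpl-based bias-gradient path above allocates scratch workspace regardless of backend.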

tests/python/unittest/test_operator.py

Lines changed: 21 additions & 0 deletions
@@ -696,6 +696,27 @@ def test_symbol_pow():
     check_symbolic_backward(test, [data_tmp, exp_tmp], [np.ones(shape)], [data_dir, exp_dir])
 
 
+@with_seed()
+def test_fully_connected():
+    data = mx.sym.var("data")
+    fc_weight = mx.sym.var("weight")
+    fc_bias = mx.sym.var("bias")
+    fc = mx.sym.FullyConnected(data=data, weight=fc_weight, bias=fc_bias, num_hidden=10, no_bias=False, name='fc')
+    data = mx.nd.random.uniform(shape=(5, 5, 5, 13), dtype=np.float32)
+    fc_weight = mx.nd.random.uniform(shape=(10, 325), dtype=np.float32)
+    fc_bias = mx.nd.random.uniform(shape=(10), dtype=np.float32)
+    fc_bias2 = mx.nd.random.uniform(shape=(10, 1), dtype=np.float32)
+    data_np = data.asnumpy().reshape(5, 325)
+    fc_weight_np = np.transpose(fc_weight.asnumpy())
+    fc_bias_np = fc_bias.asnumpy()
+    res = np.dot(data_np, fc_weight_np) + fc_bias.asnumpy()
+    check_symbolic_forward(fc, {'data': data_np, 'weight': fc_weight.asnumpy(), 'bias': fc_bias_np}, {'fc_output': res})
+    check_numeric_gradient(fc, {'data': data_np, 'weight': fc_weight.asnumpy(), 'bias': fc_bias_np},
+                           numeric_eps=1e-2, rtol=1e-4, atol=1e-2)
+    # TODO: Fix Bug #15032 when bias has ndim > 1
+    #check_symbolic_forward(fc, {'data': data_np, 'weight': fc_weight.asnumpy(), 'bias': fc_bias2.asnumpy()}, {'fc_output': res})
+
+
 @with_seed()
 def test_pow_fn():
     shape = (3, 4)
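Beyond the committed test, the new bias-gradient path can be spot-checked with the imperative autograd API. The sketch below is not part of the commit; grad_req='add' is used to exercise gradient accumulation (the kAddTo request that the new reduce-based path handles):

import mxnet as mx
import numpy as np

batch, num_hidden, in_dim = 5, 10, 13
x = mx.nd.random.uniform(shape=(batch, in_dim))
w = mx.nd.random.uniform(shape=(num_hidden, in_dim))
b = mx.nd.random.uniform(shape=(num_hidden,))
x.attach_grad()
w.attach_grad()
b.attach_grad(grad_req='add')  # accumulate into b.grad (kAddTo)

with mx.autograd.record():
    y = mx.nd.FullyConnected(data=x, weight=w, bias=b, num_hidden=num_hidden)
y.backward(mx.nd.ones_like(y))

# With an all-ones head gradient, each bias unit receives the sum of the
# incoming gradient over the batch axis, i.e. exactly the batch size.
assert np.allclose(b.grad.asnumpy(), np.full(num_hidden, float(batch)))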
