
Commit 047828a

# This is a combination of 4 commits.
# This is the 1st commit message:

Implements ldexp.

# This is the commit message apache#2:

Remove spaces.

# This is the commit message apache#3:

Change tests.

# This is the commit message apache#4:

Reorganize files.
1 parent 24a5cf0 commit 047828a

File tree

8 files changed (+272, -3 lines)


python/mxnet/ndarray/numpy/_op.py

Lines changed: 36 additions & 1 deletion
@@ -27,7 +27,7 @@
 from ..ndarray import NDArray

 __all__ = ['zeros', 'ones', 'add', 'subtract', 'multiply', 'divide', 'mod', 'power', 'tensordot',
-           'linspace', 'expand_dims', 'tile', 'arange']
+           'linspace', 'expand_dims', 'tile', 'arange', 'ldexp']


 @set_module('mxnet.ndarray.numpy')
@@ -632,3 +632,38 @@ def tile(A, reps):

     """
     return _unary_func_helper(A, _npi.tile, _np.tile, reps=reps)
+
+
+@set_module('mxnet.ndarray.numpy')
+def ldexp(x1, x2, out=None):
+    """
+    ldexp(x1, x2, out=None)
+    Returns x1 * 2**x2, element-wise.
+    The mantissas `x1` and twos exponents `x2` are used to construct
+    floating point numbers ``x1 * 2**x2``.
+    Parameters
+    ----------
+    x1 : ndarray or scalar
+        Array of multipliers.
+    x2 : ndarray or scalar, int
+        Array of twos exponents.
+    out : ndarray, optional
+        A location into which the result is stored. If provided, it must have
+        a shape that the inputs broadcast to. If not, a freshly-allocated array is returned.
+    Returns
+    -------
+    y : ndarray or scalar
+        The result of ``x1 * 2**x2``.
+        This is a scalar if both `x1` and `x2` are scalars.
+    Notes
+    -----
+    Complex dtypes are not supported; they will raise a TypeError.
+    Unlike NumPy, `x2` is allowed to be a float as well as an int.
+    `ldexp` is useful as the inverse of `frexp`; if used by itself it is
+    more clear to simply use the expression ``x1 * 2**x2``.
+    Examples
+    --------
+    >>> np.ldexp(5, np.arange(4))
+    array([ 5., 10., 20., 40.])
+    """
+    return _ufunc_helper(x1, x2, _npi.ldexp, _np.ldexp, _npi.ldexp_scalar, _npi.rldexp_scalar, out)
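
The frontend routes through `_ufunc_helper`: a tensor-tensor call dispatches to `_npi.ldexp`, a scalar second argument to `_npi.ldexp_scalar`, and a scalar first argument to `_npi.rldexp_scalar`. A minimal usage sketch of the three paths (assuming a build that includes this commit; `npx.set_np()` is assumed available to enable numpy semantics):

import mxnet as mx
from mxnet import np, npx

npx.set_np()  # enable NumPy-compatible semantics (assumption: available on this branch)

x1 = np.array([5., 5., 5., 5.])
x2 = np.arange(4)
print(np.ldexp(x1, x2))  # tensor-tensor   -> _npi.ldexp:         [ 5. 10. 20. 40.]
print(np.ldexp(x1, 2))   # scalar exponent -> _npi.ldexp_scalar:  [20. 20. 20. 20.]
print(np.ldexp(5, x2))   # scalar mantissa -> _npi.rldexp_scalar: [ 5. 10. 20. 40.]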

python/mxnet/numpy/multiarray.py

Lines changed: 36 additions & 1 deletion
@@ -44,7 +44,7 @@
 from ..ndarray.numpy import _internal as _npi

 __all__ = ['ndarray', 'empty', 'array', 'zeros', 'ones', 'add', 'subtract', 'multiply', 'divide',
-           'mod', 'power', 'tensordot', 'linspace', 'expand_dims', 'tile', 'arange']
+           'mod', 'power', 'tensordot', 'linspace', 'expand_dims', 'tile', 'arange', 'ldexp']


 # This function is copied from ndarray.py since pylint
@@ -1819,3 +1819,38 @@ def arange(start, stop=None, step=1, dtype=None, ctx=None):
         than `stop`.
     """
     return _mx_nd_np.arange(start, stop, step, dtype, ctx)
+
+
+@set_module('mxnet.numpy')
+def ldexp(x1, x2, out=None):
+    """
+    ldexp(x1, x2, out=None)
+    Returns x1 * 2**x2, element-wise.
+    The mantissas `x1` and twos exponents `x2` are used to construct
+    floating point numbers ``x1 * 2**x2``.
+    Parameters
+    ----------
+    x1 : ndarray or scalar
+        Array of multipliers.
+    x2 : ndarray or scalar, int
+        Array of twos exponents.
+    out : ndarray, optional
+        A location into which the result is stored. If provided, it must have
+        a shape that the inputs broadcast to. If not, a freshly-allocated array is returned.
+    Returns
+    -------
+    y : ndarray or scalar
+        The result of ``x1 * 2**x2``.
+        This is a scalar if both `x1` and `x2` are scalars.
+    Notes
+    -----
+    Complex dtypes are not supported; they will raise a TypeError.
+    Unlike NumPy, `x2` is allowed to be a float as well as an int.
+    `ldexp` is useful as the inverse of `frexp`; if used by itself it is
+    more clear to simply use the expression ``x1 * 2**x2``.
+    Examples
+    --------
+    >>> np.ldexp(5, np.arange(4))
+    array([ 5., 10., 20., 40.])
+    """
+    return _mx_nd_np.ldexp(x1, x2, out)

python/mxnet/symbol/numpy/_symbol.py

Lines changed: 30 additions & 1 deletion
@@ -30,7 +30,7 @@
 from . import _internal as _npi

 __all__ = ['zeros', 'ones', 'add', 'subtract', 'multiply', 'divide', 'mod', 'power', 'tensordot',
-           'linspace', 'expand_dims', 'tile', 'arange']
+           'linspace', 'expand_dims', 'tile', 'arange', 'ldexp']


 def _num_outputs(sym):
@@ -1269,4 +1269,33 @@ def arange(start, stop=None, step=1, dtype=None, ctx=None):
     return _npi.arange(start=start, stop=stop, step=step, dtype=dtype, ctx=ctx)


+@set_module('mxnet.symbol.numpy')
+def ldexp(x1, x2, out=None):
+    """
+    ldexp(x1, x2, out=None)
+    Returns x1 * 2**x2, element-wise.
+    The mantissas `x1` and twos exponents `x2` are used to construct
+    floating point numbers ``x1 * 2**x2``.
+    Parameters
+    ----------
+    x1 : _Symbol
+        Array of multipliers.
+    x2 : _Symbol
+        Array of twos exponents.
+    out : _Symbol or None
+        Dummy parameter to keep the consistency with the ndarray counterpart.
+    Returns
+    -------
+    y : _Symbol
+        The result of ``x1 * 2**x2``.
+    Notes
+    -----
+    Complex dtypes are not supported; they will raise a TypeError.
+    Unlike NumPy, `x2` is allowed to be a float as well as an int.
+    `ldexp` is useful as the inverse of `frexp`; if used by itself it is
+    more clear to simply use the expression ``x1 * 2**x2``.
+    """
+    return _ufunc_helper(x1, x2, _npi.ldexp, _np.ldexp, _npi.ldexp_scalar, _npi.rldexp_scalar, out)
+
+
 _set_np_symbol_class(_Symbol)
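
The symbolic `ldexp` is what a hybridized Gluon block traces into through `F.np.ldexp`; note that `out` is only a dummy parameter here. A short sketch (the block name is illustrative; this mirrors the pattern of the unit test added below):

import mxnet as mx
from mxnet import np, npx
from mxnet.gluon import HybridBlock

npx.set_np()  # assumption: available on this branch

class Ldexp(HybridBlock):  # illustrative name
    def hybrid_forward(self, F, x1, x2):
        # F is mxnet.ndarray when run imperatively, mxnet.symbol once hybridized
        return F.np.ldexp(x1, x2)

blk = Ldexp()
blk.hybridize()  # subsequent calls trace through mxnet.symbol.numpy.ldexp
print(blk(np.array([1., 2.]), np.array([3., 3.])))  # [ 8. 16.]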

src/operator/mshadow_op.h

Lines changed: 11 additions & 0 deletions
@@ -357,6 +357,17 @@ MXNET_UNARY_MATH_OP(reciprocal_cube_root, 1.0f / math::cbrt(a));

 MXNET_UNARY_MATH_OP(reciprocal_cube_root_grad, -1.0f / (3.0f * math::cbrt(a) * math::id(a)));

+/*! \brief used to generate an element of ldexp */
+MXNET_BINARY_MATH_OP(ldexp, math::id(a) * math::pow(2.0f, b));
+
+MXNET_BINARY_MATH_OP(ldexp_grad, math::pow(2.0f, b));
+
+MXNET_BINARY_MATH_OP(ldexp_rgrad, math::id(a) * math::pow(2.0f, b) * math::log(2.0f));
+
+MXNET_BINARY_MATH_OP(rldexp, math::id(b) * math::pow(2.0f, a));  // swap a and b if a is scalar.
+
+MXNET_BINARY_MATH_OP(rldexp_grad, math::id(b) * math::pow(2.0f, a) * math::log(2.0f));
+
 /*! \brief used for generate element of round */
 MXNET_SIMPLE_UNARY_MATH_OP(round);
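
The five kernels are the forward map and its partial derivatives, plus a swapped variant for the scalar-mantissa case. Restated as math (a worked restatement of the macros above, not new behavior):

\[
y = a \cdot 2^{b}, \qquad
\frac{\partial y}{\partial a} = 2^{b} \quad (\texttt{ldexp\_grad}), \qquad
\frac{\partial y}{\partial b} = a \cdot 2^{b} \ln 2 \quad (\texttt{ldexp\_rgrad}).
\]

For the scalar-mantissa path the operands arrive swapped, so \(\texttt{rldexp}\) computes \(y = b \cdot 2^{a}\), and \(\texttt{rldexp\_grad} = b \cdot 2^{a} \ln 2\) is the derivative with respect to the tensor input \(a\).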

src/operator/numpy/np_elemwise_broadcast_op.cc

Lines changed: 74 additions & 0 deletions
@@ -182,5 +182,79 @@ MXNET_OPERATOR_REGISTER_NP_BINARY_SCALAR(_npi_rpower_scalar)
 .set_attr<FCompute>("FCompute<cpu>", BinaryScalarOp::Compute<cpu, mshadow_op::rpower>)
 .set_attr<nnvm::FGradient>("FGradient", ElemwiseGradUseOut{"_backward_rpower_scalar"});

+MXNET_OPERATOR_REGISTER_BINARY_BROADCAST(_npi_ldexp)
+.describe(R"code(
+ldexp(x1, x2, out=None)
+
+Returns x1 * 2**x2, element-wise.
+
+The mantissas `x1` and twos exponents `x2` are used to construct
+floating point numbers ``x1 * 2**x2``.
+
+Parameters
+----------
+x1 : ndarray or scalar
+    Array of multipliers.
+x2 : ndarray or scalar, int
+    Array of twos exponents.
+out : ndarray, optional
+    A location into which the result is stored. If provided, it must have
+    a shape that the inputs broadcast to. If not, a freshly-allocated array is returned.
+
+Returns
+-------
+y : ndarray or scalar
+    The result of ``x1 * 2**x2``.
+    This is a scalar if both `x1` and `x2` are scalars.
+
+Notes
+-----
+Complex dtypes are not supported; they will raise a TypeError.
+Unlike NumPy, `x2` is allowed to be a float as well as an int.
+
+`ldexp` is useful as the inverse of `frexp`; if used by itself it is
+more clear to simply use the expression ``x1 * 2**x2``.
+
+Examples
+--------
+>>> np.ldexp(5, np.arange(4))
+array([ 5., 10., 20., 40.])
+)code" ADD_FILELINE)
+.set_attr<FCompute>("FCompute<cpu>", BinaryBroadcastCompute<cpu, mshadow_op::ldexp>)
+.set_attr<nnvm::FGradient>("FGradient", ElemwiseGradUseIn{"_backward_npi_ldexp"});
+
+MXNET_OPERATOR_REGISTER_NP_BINARY_SCALAR(_npi_ldexp_scalar)
+.set_attr<FCompute>("FCompute<cpu>", BinaryScalarOp::Compute<cpu, mshadow_op::ldexp>)
+.set_attr<nnvm::FGradient>("FGradient", ElemwiseGradUseIn{"_backward_npi_ldexp_scalar"});
+
+MXNET_OPERATOR_REGISTER_NP_BINARY_SCALAR(_npi_rldexp_scalar)
+.set_attr<FCompute>("FCompute<cpu>", BinaryScalarOp::Compute<cpu, mshadow_op::rldexp>)
+.set_attr<nnvm::FGradient>("FGradient", ElemwiseGradUseIn{"_backward_npi_rldexp_scalar"});
+
+NNVM_REGISTER_OP(_backward_npi_ldexp)
+.set_num_inputs(3)
+.set_num_outputs(2)
+.set_attr<nnvm::TIsBackward>("TIsBackward", true)
+.set_attr<nnvm::FInplaceOption>("FInplaceOption",
+  [](const NodeAttrs& attrs){
+    return std::vector<std::pair<int, int> >{{0, 1}};
+  })
+.set_attr<FResourceRequest>("FResourceRequest",
+  [](const NodeAttrs& attrs) {
+    return std::vector<ResourceRequest>{ResourceRequest::kTempSpace};
+  })
+.set_attr<FCompute>("FCompute<cpu>", BinaryBroadcastBackwardUseIn<cpu, mshadow_op::ldexp_grad,
+                                                                  mshadow_op::ldexp_rgrad>);
+
+MXNET_OPERATOR_REGISTER_BINARY(_backward_npi_ldexp_scalar)
+.add_argument("scalar", "float", "scalar value")
+.set_attr_parser([](NodeAttrs *attrs) { attrs->parsed = std::stod(attrs->dict["scalar"]); })
+.set_attr<FCompute>("FCompute<cpu>", BinaryScalarOp::Backward<cpu, mshadow_op::ldexp_grad>);
+
+MXNET_OPERATOR_REGISTER_BINARY(_backward_npi_rldexp_scalar)
+.add_argument("scalar", "float", "scalar value")
+.set_attr_parser([](NodeAttrs *attrs) { attrs->parsed = std::stod(attrs->dict["scalar"]); })
+.set_attr<FCompute>("FCompute<cpu>", BinaryScalarOp::Backward<cpu, mshadow_op::rldexp_grad>);
+
 }  // namespace op
 }  // namespace mxnet
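
Because `_backward_npi_ldexp` consumes both forward inputs (`ElemwiseGradUseIn`, three inputs: the output gradient plus `x1` and `x2`), `ldexp` is differentiable with respect to both arguments. A quick autograd check against the closed-form gradients (a sketch mirroring the unit test added below; the `onp` alias and use of `allclose` are illustrative):

import mxnet as mx
import numpy as onp  # classic NumPy as the reference implementation
from mxnet import np, npx

npx.set_np()  # assumption: available on this branch

x1 = np.array([1., 2., 3.])
x2 = np.array([3., 2., 1.])
x1.attach_grad()
x2.attach_grad()
with mx.autograd.record():
    y = np.ldexp(x1, x2)
y.backward()

# closed form: dy/dx1 = 2**x2, dy/dx2 = x1 * 2**x2 * ln(2)
assert onp.allclose(x1.grad.asnumpy(), onp.power(2.0, x2.asnumpy()))
assert onp.allclose(x2.grad.asnumpy(),
                    x1.asnumpy() * onp.power(2.0, x2.asnumpy()) * onp.log(2.0))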

src/operator/numpy/np_elemwise_broadcast_op.cu

Lines changed: 19 additions & 0 deletions
@@ -78,5 +78,24 @@ NNVM_REGISTER_OP(_npi_maximum_scalar)
 NNVM_REGISTER_OP(_npi_minimum_scalar)
 .set_attr<FCompute>("FCompute<gpu>", BinaryScalarOp::Compute<gpu, mshadow_op::minimum>);

+NNVM_REGISTER_OP(_npi_ldexp)
+.set_attr<FCompute>("FCompute<gpu>", BinaryBroadcastCompute<gpu, mshadow_op::ldexp>);
+
+NNVM_REGISTER_OP(_npi_ldexp_scalar)
+.set_attr<FCompute>("FCompute<gpu>", BinaryScalarOp::Compute<gpu, mshadow_op::ldexp>);
+
+NNVM_REGISTER_OP(_npi_rldexp_scalar)
+.set_attr<FCompute>("FCompute<gpu>", BinaryScalarOp::Compute<gpu, mshadow_op::rldexp>);
+
+NNVM_REGISTER_OP(_backward_npi_ldexp)
+.set_attr<FCompute>("FCompute<gpu>", BinaryBroadcastBackwardUseIn<gpu, mshadow_op::ldexp_grad,
+                                                                  mshadow_op::ldexp_rgrad>);
+
+NNVM_REGISTER_OP(_backward_npi_ldexp_scalar)
+.set_attr<FCompute>("FCompute<gpu>", BinaryScalarOp::Backward<gpu, mshadow_op::ldexp_grad>);
+
+NNVM_REGISTER_OP(_backward_npi_rldexp_scalar)
+.set_attr<FCompute>("FCompute<gpu>", BinaryScalarOp::Backward<gpu, mshadow_op::rldexp_grad>);
+
 }  // namespace op
 }  // namespace mxnet

src/operator/operator_tune.cc

Lines changed: 5 additions & 0 deletions
@@ -362,6 +362,11 @@ IMPLEMENT_BINARY_WORKLOAD_BWD(mxnet::op::mshadow_op::smooth_l1_gradient);  // NOLINT()
 IMPLEMENT_BLANK_WORKLOAD_FWD(mxnet::op::mxnet_op::set_to_int<0>);  // NOLINT()
 IMPLEMENT_BLANK_WORKLOAD_FWD(mxnet::op::mxnet_op::set_to_int<1>);  // NOLINT()
 IMPLEMENT_BLANK_WORKLOAD_FWD(mxnet::op::PopulateFullIdxRspKernel);  // NOLINT()
+IMPLEMENT_BINARY_WORKLOAD_FWD(mxnet::op::mshadow_op::ldexp);  // NOLINT()
+IMPLEMENT_BINARY_WORKLOAD_FWD(mxnet::op::mshadow_op::rldexp);  // NOLINT()
+IMPLEMENT_BINARY_WORKLOAD_BWD(mxnet::op::mshadow_op::ldexp_grad);  // NOLINT()
+IMPLEMENT_BINARY_WORKLOAD_BWD(mxnet::op::mshadow_op::ldexp_rgrad);  // NOLINT()
+IMPLEMENT_BINARY_WORKLOAD_BWD(mxnet::op::mshadow_op::rldexp_grad);  // NOLINT()
 /*!
  * \brief Tuner objects, *not* automatically generated
  */

tests/python/unittest/test_numpy_op.py

Lines changed: 61 additions & 0 deletions
@@ -213,6 +213,67 @@ def test_np_dot():
         assert False


+@with_seed()
+@use_np
+def test_np_ldexp():
+    class TestLdexp(HybridBlock):
+        def __init__(self):
+            super(TestLdexp, self).__init__()
+
+        def hybrid_forward(self, F, x1, x2):
+            return F.np.ldexp(x1, x2)
+
+    def _np_ldexp(x1, x2):
+        return x1 * _np.power(2.0, x2)
+
+    def dldx(x1, x2):
+        grad_a = _np.power(2.0, x2)
+        grad_b = _np_ldexp(x1, x2) * _np.log(2.0)
+        if len(x1) == 1:
+            grad_a = _np.sum(grad_a)
+        if len(x2) == 1:
+            grad_b = _np.sum(grad_b)
+        return [grad_a, grad_b]
+
+    shapes = [
+        ((3, 1), (3, 1)),
+        ((3, 1, 2), (3, 1, 2)),
+        ((1, ), (1, )),
+        ((1, ), (2, )),
+        ((3, ), (1, )),
+        ((3, 0), (3, 0)),  # zero-size shape
+        ((0, 1), (0, 1)),  # zero-size shape
+        ((2, 0, 2), (2, 0, 2)),  # zero-size shape
+    ]
+
+    for hybridize in [True, False]:
+        for shape1, shape2 in shapes:
+            for dtype in [_np.float16, _np.float32, _np.float64]:
+                test_ldexp = TestLdexp()
+                if hybridize:
+                    test_ldexp.hybridize()
+                x1 = rand_ndarray(shape=shape1, dtype=dtype).as_np_ndarray()
+                x1.attach_grad()
+                x2 = rand_ndarray(shape=shape2, dtype=dtype).as_np_ndarray()
+                x2.attach_grad()
+
+                np_out = _np_ldexp(x1.asnumpy(), x2.asnumpy())
+                with mx.autograd.record():
+                    mx_out = test_ldexp(x1, x2)
+                assert mx_out.shape == np_out.shape
+                assert_almost_equal(mx_out.asnumpy(), np_out, rtol=1e-1, atol=1e-1)
+
+                mx_out.backward()
+                np_backward = dldx(x1.asnumpy(), x2.asnumpy())
+                assert_almost_equal(x1.grad.asnumpy(), np_backward[0], atol=1e-1, rtol=1e-1)
+                assert_almost_equal(x2.grad.asnumpy(), np_backward[1], atol=1e-1, rtol=1e-1)
+
+                # Test imperative once again
+                mx_out = np.ldexp(x1, x2)
+                np_out = _np_ldexp(x1.asnumpy(), x2.asnumpy())
+                assert_almost_equal(mx_out.asnumpy(), np_out, rtol=1e-1, atol=1e-1)
+
+
 @with_seed()
 @use_np
 def test_np_sum():
