
Commit 1c714c3: enable for windows
Parent: 153d93f

4 files changed, 20 insertions(+), 29 deletions(-)

src/operator/numpy/np_broadcast_reduce_op.h (9 additions, 2 deletions)

@@ -222,15 +222,15 @@ void NumpyReduceAxesCompute(const nnvm::NodeAttrs& attrs,
                             const std::vector<TBlob>& inputs,
                             const std::vector<OpReqType>& req,
                             const std::vector<TBlob>& outputs) {
+  using namespace mshadow;
   if (req[0] == kNullOp) return;
   const NumpyReduceAxesParam& param = nnvm::get<NumpyReduceAxesParam>(attrs.parsed);
   if (param.initial.has_value()) {
     LOG(FATAL) << "initial is not supported yet";
   }
+  Stream<xpu>* s = ctx.get_stream<xpu>();
   if (inputs[0].shape_.Size() == 0 && outputs[0].shape_.Size() != 0) {
     using namespace mxnet_op;
-    using namespace mshadow;
-    Stream<xpu>* s = ctx.get_stream<xpu>();
     MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
       Kernel<set_zero, xpu>::Launch(s, outputs[0].shape_.Size(), outputs[0].dptr<DType>());
     });
@@ -247,6 +247,13 @@ void NumpyReduceAxesCompute(const nnvm::NodeAttrs& attrs,
     LOG(FATAL) << "Only reduce op: `sum` is supported for boolean ndarrays";
   }
   TVMOpReduce(ctx, inputs[0], param.axis, outputs[0], req[0], reducer_name);
+  if (normalize) {
+    using namespace mshadow::expr;
+    MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, OType, {
+      auto out = outputs[0].FlatTo2D<xpu, OType>(s);
+      out /= scalar<OType>(inputs[0].Size() / outputs[0].Size());
+    });
+  }
   return;
 }
 #endif
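
The new `normalize` branch turns the TVM `sum` reduction into a mean by dividing each output element by the reduction size, i.e. `inputs[0].Size() / outputs[0].Size()`. A minimal sketch of that arithmetic in plain NumPy (illustrative only, not the MXNet internals):

    import numpy as np

    # Reduce a (2, 3, 4) array over axis 1: each output cell folds 3 input elements.
    x = np.random.uniform(size=(2, 3, 4)).astype(np.float32)
    summed = x.sum(axis=1)

    # The commit's normalization factor: input size / output size.
    reduction_size = x.size // summed.size   # 24 // 8 == 3
    mean = summed / reduction_size

    assert np.allclose(mean, x.mean(axis=1))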

src/operator/tensor/broadcast_reduce-inl.cuh (0 additions, 13 deletions)

@@ -619,8 +619,6 @@ void Reduce(Stream<gpu> *s, const TBlob& small, const OpReqType req,
   ReduceImplConfig<ndim> config =
     ConfigureReduceImpl<ndim, DType>(small.shape_, big.shape_, NULL, NULL);
   if (safe_acc) {
-    // TODO(haojin2): Use real-only type swtich for windows temporarily due to CI issues.
-#ifndef _WIN32
     MXNET_ACC_TYPE_SWITCH(mshadow::DataType<DType>::kFlag, DataType, AType, {
       typedef typename std::conditional<safe_acc, AType, DataType>::type AccType;
       MSHADOW_TYPE_SWITCH(small.type_flag_, OType, {
@@ -630,17 +628,6 @@ void Reduce(Stream<gpu> *s, const TBlob& small, const OpReqType req,
         stream, small, req, big, workspace, config);
       });
     });
-#else
-    MXNET_REAL_ACC_TYPE_SWITCH(mshadow::DataType<DType>::kFlag, DataType, AType, {
-      typedef typename std::conditional<safe_acc, AType, DataType>::type AccType;
-      MSHADOW_TYPE_SWITCH(small.type_flag_, OType, {
-        typedef typename std::conditional<safe_acc, OType, DataType>::type OutType;
-        config = ConfigureReduceImpl<ndim, AccType>(small.shape_, big.shape_, NULL, NULL);
-        ReduceImpl<Reducer, ndim, AccType, DataType, OutType, OP>(
-          stream, small, req, big, workspace, config);
-      });
-    });
-#endif
   } else {
     ReduceImpl<Reducer, ndim, DType, DType, DType, OP>(stream, small, req, big, workspace, config);
   }

src/operator/tensor/broadcast_reduce-inl.h (0 additions, 13 deletions)

@@ -241,8 +241,6 @@ void Reduce(Stream<cpu>* s, const TBlob& small, const OpReqType req,
         N, M, req == kAddTo, big.dptr<DType>(), small.dptr<DType>(),
         big.shape_.get<ndim>(), small.shape_.get<ndim>(), rshape, rstride);
   } else {
-    // TODO(haojin2): Use real-only type swtich for windows temporarily due to CI issues.
-#ifndef _WIN32
     MXNET_ACC_TYPE_SWITCH(mshadow::DataType<DType>::kFlag, DataType, AType, {
       typedef typename std::conditional<safe_acc, AType, DataType>::type AccType;
       MSHADOW_TYPE_SWITCH_WITH_BOOL(small.type_flag_, OType, {
@@ -252,17 +250,6 @@ void Reduce(Stream<cpu>* s, const TBlob& small, const OpReqType req,
         big.shape_.get<ndim>(), small.shape_.get<ndim>(), rshape, rstride);
       });
     });
-#else
-    MXNET_REAL_ACC_TYPE_SWITCH(mshadow::DataType<DType>::kFlag, DataType, AType, {
-      typedef typename std::conditional<safe_acc, AType, DataType>::type AccType;
-      MSHADOW_TYPE_SWITCH_WITH_BOOL(small.type_flag_, OType, {
-        typedef typename std::conditional<safe_acc, OType, DataType>::type OutType;
-        seq_reduce_compute<Reducer, ndim, AccType, DataType, OutType, OP>(
-          N, M, req == kAddTo, big.dptr<DataType>(), small.dptr<OutType>(),
-          big.shape_.get<ndim>(), small.shape_.get<ndim>(), rshape, rstride);
-      });
-    });
-#endif
   }
 }
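
With these two deletions, both the GPU and CPU `Reduce` overloads take the `MXNET_ACC_TYPE_SWITCH` path unconditionally, so Windows builds also get the wider accumulation type when `safe_acc` is set, rather than the real-only fallback. A hedged NumPy sketch of why the accumulator type matters (illustrative, not the MXNet macros themselves):

    import numpy as np

    # Accumulating many float16 values in float16 drifts; a float32
    # accumulator (what safe_acc selects) keeps the sum stable.
    x = np.full(10000, 0.1, dtype=np.float16)

    naive = x.sum(dtype=np.float16)  # accumulate in float16
    safe = x.sum(dtype=np.float32)   # accumulate in a wider type

    print(naive)  # noticeably below the true total due to rounding
    print(safe)   # ~999.76, i.e. 10000 * float16(0.1)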

tests/python/unittest/test_numpy_op.py (11 additions, 1 deletion)

@@ -614,6 +614,7 @@ def hybrid_forward(self, F, a, *args, **kwargs):
     def is_int(dtype):
         return 'int' in dtype
 
+    is_windows = sys.platform.startswith('win')
     in_data_dim = random.choice([2, 3, 4])
     shape = rand_shape_nd(in_data_dim, dim=3)
     acc_type = {'float16': 'float32', 'float32': 'float64', 'float64': 'float64',
@@ -667,11 +668,20 @@ def is_int(dtype):
                 test_mean.hybridize()
 
                 if itype == 'bool':
-                    x = np.random.uniform(size=shape) > 0.5
+                    x = np.array(_np.random.uniform(size=shape) > 0.5)
                 else:
                     x = np.random.uniform(-128, 127, size=shape).astype(itype)
 
                 expected_ret = _np.mean(x.asnumpy(), axis=axis, dtype=dtype, keepdims=keepdims)
+
+                if itype == 'bool':
+                    if is_op_runnable() and (not is_windows) and dtype not in ['float16', 'int8']:  # special handling of boolean ndarray
+                        y = test_mean(x)
+                        assert y.shape == expected_ret.shape
+                        assert_almost_equal(y.asnumpy(), expected_ret, rtol=1e-3 if dtype == 'float16' else 1e-3,
+                                            atol=1e-5 if dtype == 'float16' else 1e-5)
+                    continue
+
                 y = test_mean(x)
                 assert y.shape == expected_ret.shape
                 assert_almost_equal(y.asnumpy(), expected_ret, rtol=1e-3 if dtype == 'float16' else 1e-3,
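
The new test branch routes boolean inputs through the TVM `sum`-based mean (the `normalize` branch added above) and skips the case on Windows and for float16/int8 output dtypes. A hedged usage sketch of what the test exercises (assuming an MXNet build where `is_op_runnable()` is true, i.e. the TVM op path is available):

    import numpy as onp
    from mxnet import np  # MXNet's NumPy-compatible ndarray API

    # Mean of a boolean ndarray: a sum reduction divided by
    # input.size / output.size, per np_broadcast_reduce_op.h above.
    x = np.array(onp.random.uniform(size=(2, 3)) > 0.5)
    y = np.mean(x, axis=1, dtype='float32')

    expected = onp.mean(x.asnumpy(), axis=1, dtype='float32')
    assert y.shape == expected.shape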
