Commit 0ac3dc9

Update c style cast to c++ style cast (#2934)
Signed-off-by: Mike Essenmacher <[email protected]>
1 parent c5d3e72 commit 0ac3dc9
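
The commit mechanically replaces C-style casts such as (float)x with the equivalent C++ static_cast. As a minimal sketch of the general pattern, using hypothetical names not taken from the repo:

    // A C-style cast may silently resolve to a const_cast or even a
    // reinterpret_cast; static_cast permits only well-defined value
    // conversions, fails to compile otherwise, and is easy to grep for.
    double ratio(int numerator, int denominator) {
      // Before: (double)numerator / denominator
      return static_cast<double>(numerator) / denominator;
    }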

Some content is hidden: large commits have some content hidden by default, so only a subset of the changed files appears below.

46 files changed: +376 -276 lines changed

src/Accelerators/NNPA/Conversion/ONNXToZHigh/ONNXLegalityCheck.cpp (+5 -4)
@@ -315,7 +315,7 @@ bool meetPoolParamRestrictions(Operation *op, int64_t inputShape,
     return onnxToZHighUnsupportedReport(op, message);
   }
   if (paddingType == "SAME_PADDING") {
-    int64_t reqOutputShape = ceil((float)inputShape / strides);
+    int64_t reqOutputShape = ceil(static_cast<float>(inputShape) / strides);
     if (outputShape != reqOutputShape) {
       std::string message =
           "When the strides (" + std::to_string(strides) +
@@ -329,7 +329,7 @@ bool meetPoolParamRestrictions(Operation *op, int64_t inputShape,
     }
   } else { // VALID_PADDING
     int64_t reqOutputShape =
-        ceil((float)(inputShape - kernelShape + 1) / strides);
+        ceil(static_cast<float>(inputShape - kernelShape + 1) / strides);
     if (outputShape != reqOutputShape) {
       std::string message = "When the strides (" + std::to_string(strides) +
                             ") and the padding type is VALID_PADDING, output "
@@ -1164,7 +1164,7 @@ static bool checkConv2DParamRestrictions(Operation *op, int64_t inputDim,
   }
   if (paddingType == "SAME_PADDING") {
     // height_out restriction.
-    int64_t reqOutputShape = ceil((float)inputDim / stride);
+    int64_t reqOutputShape = ceil(static_cast<float>(inputDim) / stride);
     if (outputDim != reqOutputShape) {
       std::string message =
           "When the strides (" + std::to_string(stride) +
@@ -1189,7 +1189,8 @@ static bool checkConv2DParamRestrictions(Operation *op, int64_t inputDim,
       return onnxToZHighUnsupportedReport(op, message);
     }
     // height_out restriction.
-    int64_t reqOutputShape = ceil((float)(inputDim - kernelDim + 1) / stride);
+    int64_t reqOutputShape =
+        ceil(static_cast<float>(inputDim - kernelDim + 1) / stride);
     if (outputDim != reqOutputShape) {
       std::string message =
           "When the strides (" + std::to_string(stride) +

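All four hunks in this file compute a required output dimension as a ceiling division, where the float cast is what keeps the division from truncating. A self-contained sketch of that behavior, with hypothetical values (not repo code):

    #include <cmath>
    #include <cstdint>

    // With the cast, the division happens in floating point: for
    // inputShape = 7 and strides = 2, ceil(7.0f / 2) = 4. Without it,
    // 7 / 2 is integer division and truncates to 3 before ceil runs.
    int64_t requiredOutputShape(int64_t inputShape, int64_t strides) {
      return static_cast<int64_t>(
          std::ceil(static_cast<float>(inputShape) / strides));
    }
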
src/Accelerators/NNPA/Conversion/ONNXToZHigh/RewriteONNXForZHigh.cpp (+1 -1)
@@ -240,7 +240,7 @@ ArrayAttr getPadsForNNPAConv(PatternRewriter &rewriter, Value ret) {
 // This function is used for padding attribute in Conv.
 DenseElementsAttr insertZerosForNonPaddedDims(
     PatternRewriter &rewriter, ArrayAttr origAttrs, int extensionLength) {
-  int nDims = (int)origAttrs.getValue().size() / 2;
+  int nDims = static_cast<int>(origAttrs.getValue().size()) / 2;
   int nElements = (nDims + extensionLength) * 2;
   SmallVector<int64_t, 4> pads(nElements, 0);
   for (int i = 0; i < nDims; ++i) {
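
This hunk narrows an unsigned value to int. A hedged sketch of the same narrowing in isolation (hypothetical names, not repo code):

    #include <cstddef>

    // size() on a standard container returns an unsigned std::size_t;
    // static_cast<int> makes the narrowing deliberate and visible at
    // the call site, where a C-style cast compiles identically but is
    // harder to audit.
    int numPadDims(std::size_t numPadEntries) {
      return static_cast<int>(numPadEntries) / 2;
    }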

src/Accelerators/NNPA/Conversion/ZHighToZLow/ZHighToZLow.cpp (+11 -10)
@@ -199,7 +199,7 @@ Value insertAllocOrEmitZeroConstant(ArrayRef<IndexExpr> dims,
   // Attribute type: tensor<sizeInBytes x i8>
   int64_t sizeInBytes =
       affine::getIntOrFloatMemRefSizeInBytes(resType).value();
-  char *rawData = (char *)malloc(sizeInBytes);
+  char *rawData = static_cast<char *>(malloc(sizeInBytes));
   assert(rawData && "failed to allocate memory for stickified data");
   memset(rawData, 0, sizeInBytes);
   DenseResourceElementsAttr valueAttr = DenseUI8ResourceElementsAttr::get(
@@ -228,7 +228,7 @@ Value insertShapeMemRefI64(
   MultiDialectBuilder<KrnlBuilder, MathBuilder, MemRefBuilder> create(
       rewriter, loc);
   MemRefType shapeMemRefType = MemRefType::get(
-      {(int64_t)originalDims.size()}, rewriter.getIntegerType(64));
+      {static_cast<int64_t>(originalDims.size())}, rewriter.getIntegerType(64));
   Value shapeMemRef = create.mem.alignedAlloc(shapeMemRefType);
   for (uint64_t i = 0; i < originalDims.size(); ++i) {
     Value dim =
@@ -395,7 +395,7 @@ ZMemRefType convertZTensorToMemRefType(Type type) {
         "wrong concatenated dimension size");
     int64_t s = shape[rank - 1] / 4;
     // ((s + 64 - 1) / 64) * 64;
-    int64_t s_pad = ceil((double)s / 64) * 64;
+    int64_t s_pad = ceil(static_cast<double>(s) / 64) * 64;
     int64_t pad_size = s_pad - s;
     AffineExpr constExprS = getAffineConstantExpr(s, b.getContext());
     if (rank == 2) {
@@ -431,7 +431,8 @@ ZMemRefType convertZTensorToMemRefType(Type type) {
         "in affine_map generation.");
     assert((hidden_size % 3) == 0 && "wrong concatenated dimension size.");
     int64_t s = hidden_size / 3;
-    int64_t s_pad = ceil((float)s / 64) * 64; // ((s + 64 - 1) / 64) * 64;
+    int64_t s_pad =
+        ceil(static_cast<float>(s) / 64) * 64; // ((s + 64 - 1) / 64) * 64;
     int64_t pad_size = s_pad - s;
     AffineExpr constExprS = getAffineConstantExpr(s, b.getContext());
     if (rank == 2) {
@@ -723,7 +724,7 @@ struct ZHighToZLowStickifiedConstantOpLowering : public ConversionPattern {
     // Validate the stickified tensor.
     int64_t memRefSizeInBytes = getMemRefEltSizeInBytes(normalizedType);
     memRefSizeInBytes *= normalizedType.getNumElements();
-    assert((data.size() == (uint64_t)memRefSizeInBytes) &&
+    assert((data.size() == static_cast<uint64_t>(memRefSizeInBytes)) &&
            "The stickified tensor's buffer size and MemRef's size mismatched");

     // Create a KrnlGlobalOp.
@@ -1565,7 +1566,7 @@ struct ZHighToZLowStickifiedConstantOfShapeOpLowering

     // Convert the scalar value to dlfloat16.
     // Use uint16_t as container.
-    float valueF32 = (float)value.getValueAsDouble();
+    float valueF32 = static_cast<float>(value.getValueAsDouble());
     uint16_t valueDLF16;
     fp32_to_dlf16(&valueF32, &valueDLF16, 1);

@@ -1709,10 +1710,10 @@ struct ZHighToZLowDataConversionLowering

     if (enableParallel) {
       int64_t parId;
-      int64_t tripCount =
-          flattenedOutputDims[0].isLiteral()
-              ? std::ceil(flattenedOutputDims[0].getLiteral() / (float)archVL)
-              : -1;
+      int64_t tripCount = flattenedOutputDims[0].isLiteral()
+                              ? std::ceil(flattenedOutputDims[0].getLiteral() /
+                                          static_cast<float>(archVL))
+                              : -1;
       if (findSuitableParallelDimension(lbs, flattenedOutputDims, 0, 1, parId,
               /*min iter for going parallel*/ 1024)) {
         create.krnl.parallel(blockedLoopDef[0]);
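
The first hunk in this file touches a case where C++ genuinely requires a cast: unlike in C, the void * returned by malloc does not convert implicitly to char *. A standalone sketch under that assumption (hypothetical helper, not repo code):

    #include <cassert>
    #include <cstdlib>
    #include <cstring>

    // static_cast<char *> is the checked, searchable way to spell the
    // mandatory void* -> char* conversion after malloc in C++.
    char *zeroedBuffer(std::size_t sizeInBytes) {
      char *rawData = static_cast<char *>(std::malloc(sizeInBytes));
      assert(rawData && "failed to allocate buffer");
      std::memset(rawData, 0, sizeInBytes);
      return rawData;
    }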
