Update C-style cast to C++-style cast #2934

Merged: 4 commits, Sep 9, 2024
Changes from all commits
@@ -315,7 +315,7 @@ bool meetPoolParamRestrictions(Operation *op, int64_t inputShape,
     return onnxToZHighUnsupportedReport(op, message);
   }
   if (paddingType == "SAME_PADDING") {
-    int64_t reqOutputShape = ceil((float)inputShape / strides);
+    int64_t reqOutputShape = ceil(static_cast<float>(inputShape) / strides);
     if (outputShape != reqOutputShape) {
       std::string message =
           "When the strides (" + std::to_string(strides) +
@@ -329,7 +329,7 @@ bool meetPoolParamRestrictions(Operation *op, int64_t inputShape,
     }
   } else { // VALID_PADDING
     int64_t reqOutputShape =
-        ceil((float)(inputShape - kernelShape + 1) / strides);
+        ceil(static_cast<float>(inputShape - kernelShape + 1) / strides);
     if (outputShape != reqOutputShape) {
       std::string message = "When the strides (" + std::to_string(strides) +
                             ") and the padding type is VALID_PADDING, output "
@@ -1164,7 +1164,7 @@ static bool checkConv2DParamRestrictions(Operation *op, int64_t inputDim,
   }
   if (paddingType == "SAME_PADDING") {
     // height_out restriction.
-    int64_t reqOutputShape = ceil((float)inputDim / stride);
+    int64_t reqOutputShape = ceil(static_cast<float>(inputDim) / stride);
     if (outputDim != reqOutputShape) {
       std::string message =
           "When the strides (" + std::to_string(stride) +
@@ -1189,7 +1189,8 @@ static bool checkConv2DParamRestrictions(Operation *op, int64_t inputDim,
       return onnxToZHighUnsupportedReport(op, message);
     }
     // height_out restriction.
-    int64_t reqOutputShape = ceil((float)(inputDim - kernelDim + 1) / stride);
+    int64_t reqOutputShape =
+        ceil(static_cast<float>(inputDim - kernelDim + 1) / stride);
     if (outputDim != reqOutputShape) {
       std::string message =
           "When the strides (" + std::to_string(stride) +
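For context (our note, not part of the patch): in the hunks above the float cast exists only so that the division rounds up instead of truncating. SAME_PADDING requires output = ceil(input / stride) and VALID_PADDING requires output = ceil((input - kernel + 1) / stride). A sketch under those assumptions, with an equivalent pure-integer ceiling division for comparison (the ceilDiv helper is ours, not from the code):

#include <cassert>
#include <cmath>
#include <cstdint>

// Integer ceiling division; for positive operands it matches
// ceil(static_cast<float>(a) / b) without going through floating point.
static int64_t ceilDiv(int64_t a, int64_t b) { return (a + b - 1) / b; }

int main() {
  // Hypothetical pooling configuration, for illustration only.
  int64_t inputShape = 13, kernelShape = 3, strides = 2;

  // SAME_PADDING: required output size is ceil(input / stride).
  int64_t sameOut = std::ceil(static_cast<float>(inputShape) / strides);
  assert(sameOut == ceilDiv(inputShape, strides)); // both are 7

  // VALID_PADDING: required output size is ceil((input - kernel + 1) / stride).
  int64_t validOut =
      std::ceil(static_cast<float>(inputShape - kernelShape + 1) / strides);
  assert(validOut == ceilDiv(inputShape - kernelShape + 1, strides)); // both are 6
  return 0;
}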
@@ -240,7 +240,7 @@ ArrayAttr getPadsForNNPAConv(PatternRewriter &rewriter, Value ret) {
 // This function is used for padding attribute in Conv.
 DenseElementsAttr insertZerosForNonPaddedDims(
     PatternRewriter &rewriter, ArrayAttr origAttrs, int extensionLength) {
-  int nDims = (int)origAttrs.getValue().size() / 2;
+  int nDims = static_cast<int>(origAttrs.getValue().size()) / 2;
   int nElements = (nDims + extensionLength) * 2;
   SmallVector<int64_t, 4> pads(nElements, 0);
   for (int i = 0; i < nDims; ++i) {
21 changes: 11 additions & 10 deletions src/Accelerators/NNPA/Conversion/ZHighToZLow/ZHighToZLow.cpp
@@ -199,7 +199,7 @@ Value insertAllocOrEmitZeroConstant(ArrayRef<IndexExpr> dims,
     // Attribute type: tensor<sizeInBytes x i8>
     int64_t sizeInBytes =
         affine::getIntOrFloatMemRefSizeInBytes(resType).value();
-    char *rawData = (char *)malloc(sizeInBytes);
+    char *rawData = static_cast<char *>(malloc(sizeInBytes));
     assert(rawData && "failed to allocate memory for stickified data");
     memset(rawData, 0, sizeInBytes);
     DenseResourceElementsAttr valueAttr = DenseUI8ResourceElementsAttr::get(
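A note on the malloc cast above (our reading): static_cast is the appropriate C++ cast here because converting malloc's void * result to an object pointer is one of the conversions static_cast is allowed to perform; no reinterpret_cast is needed. A small standalone sketch:

#include <cassert>
#include <cstdlib>
#include <cstring>

int main() {
  const size_t sizeInBytes = 64; // hypothetical size, for illustration only
  // static_cast<char *> converts the void * returned by malloc; the old
  // (char *) C-style cast did exactly the same conversion, just implicitly.
  char *rawData = static_cast<char *>(std::malloc(sizeInBytes));
  assert(rawData && "failed to allocate memory");
  std::memset(rawData, 0, sizeInBytes);
  std::free(rawData);
  return 0;
}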
@@ -228,7 +228,7 @@ Value insertShapeMemRefI64(
   MultiDialectBuilder<KrnlBuilder, MathBuilder, MemRefBuilder> create(
       rewriter, loc);
   MemRefType shapeMemRefType = MemRefType::get(
-      {(int64_t)originalDims.size()}, rewriter.getIntegerType(64));
+      {static_cast<int64_t>(originalDims.size())}, rewriter.getIntegerType(64));
   Value shapeMemRef = create.mem.alignedAlloc(shapeMemRefType);
   for (uint64_t i = 0; i < originalDims.size(); ++i) {
     Value dim =
@@ -395,7 +395,7 @@ ZMemRefType convertZTensorToMemRefType(Type type) {
              "wrong concatenated dimension size");
       int64_t s = shape[rank - 1] / 4;
       // ((s + 64 - 1) / 64) * 64;
-      int64_t s_pad = ceil((double)s / 64) * 64;
+      int64_t s_pad = ceil(static_cast<double>(s) / 64) * 64;
       int64_t pad_size = s_pad - s;
       AffineExpr constExprS = getAffineConstantExpr(s, b.getContext());
       if (rank == 2) {
@@ -431,7 +431,8 @@ ZMemRefType convertZTensorToMemRefType(Type type) {
              "in affine_map generation.");
       assert((hidden_size % 3) == 0 && "wrong concatenated dimension size.");
       int64_t s = hidden_size / 3;
-      int64_t s_pad = ceil((float)s / 64) * 64; // ((s + 64 - 1) / 64) * 64;
+      int64_t s_pad =
+          ceil(static_cast<float>(s) / 64) * 64; // ((s + 64 - 1) / 64) * 64;
       int64_t pad_size = s_pad - s;
       AffineExpr constExprS = getAffineConstantExpr(s, b.getContext());
       if (rank == 2) {
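For context on the two s_pad hunks above: s_pad rounds s up to the next multiple of 64, and the inline comment records the pure-integer form of the same computation. A sketch (ours) checking that the floating-point ceil and the integer expression agree:

#include <cassert>
#include <cmath>
#include <cstdint>

int main() {
  for (int64_t s = 1; s <= 4096; ++s) {
    // Variant used in the code: round up via floating-point ceil.
    int64_t viaCeil = std::ceil(static_cast<double>(s) / 64) * 64;
    // Variant from the inline comment: pure integer arithmetic.
    int64_t viaInt = ((s + 64 - 1) / 64) * 64;
    assert(viaCeil == viaInt);
  }
  return 0;
}

The float variant in the second hunk behaves the same as long as s fits in float's 24-bit mantissa, which holds for realistic hidden sizes.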
@@ -723,7 +724,7 @@ struct ZHighToZLowStickifiedConstantOpLowering : public ConversionPattern {
     // Validate the stickified tensor.
     int64_t memRefSizeInBytes = getMemRefEltSizeInBytes(normalizedType);
     memRefSizeInBytes *= normalizedType.getNumElements();
-    assert((data.size() == (uint64_t)memRefSizeInBytes) &&
+    assert((data.size() == static_cast<uint64_t>(memRefSizeInBytes)) &&
            "The stickified tensor's buffer size and MemRef's size mismatched");
 
     // Create a KrnlGlobalOp.
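A note on the assert above (our inference): data.size() returns an unsigned size_t while memRefSizeInBytes is a signed int64_t, so the cast keeps the comparison between values of the same signedness; static_cast makes that intent visible. A tiny sketch:

#include <cassert>
#include <cstdint>
#include <vector>

int main() {
  std::vector<char> data(128);     // stands in for the stickified buffer
  int64_t memRefSizeInBytes = 128; // hypothetical MemRef size in bytes
  // Cast the signed size to uint64_t so both sides of the comparison
  // have the same signedness.
  assert(data.size() == static_cast<uint64_t>(memRefSizeInBytes) &&
         "buffer size and MemRef size mismatched");
  return 0;
}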
@@ -1565,7 +1566,7 @@ struct ZHighToZLowStickifiedConstantOfShapeOpLowering
 
     // Convert the scalar value to dlfloat16.
     // Use uint16_t as container.
-    float valueF32 = (float)value.getValueAsDouble();
+    float valueF32 = static_cast<float>(value.getValueAsDouble());
     uint16_t valueDLF16;
     fp32_to_dlf16(&valueF32, &valueDLF16, 1);
 
@@ -1709,10 +1710,10 @@ struct ZHighToZLowDataConversionLowering
 
     if (enableParallel) {
       int64_t parId;
-      int64_t tripCount =
-          flattenedOutputDims[0].isLiteral()
-              ? std::ceil(flattenedOutputDims[0].getLiteral() / (float)archVL)
-              : -1;
+      int64_t tripCount = flattenedOutputDims[0].isLiteral()
+                              ? std::ceil(flattenedOutputDims[0].getLiteral() /
+                                          static_cast<float>(archVL))
+                              : -1;
       if (findSuitableParallelDimension(lbs, flattenedOutputDims, 0, 1, parId,
               /*min iter for going parallel*/ 1024)) {
         create.krnl.parallel(blockedLoopDef[0]);