@@ -199,7 +199,7 @@ Value insertAllocOrEmitZeroConstant(ArrayRef<IndexExpr> dims,
   // Attribute type: tensor<sizeInBytes x i8>
   int64_t sizeInBytes =
       affine::getIntOrFloatMemRefSizeInBytes(resType).value();
-  char *rawData = (char *)malloc(sizeInBytes);
+  char *rawData = static_cast<char *>(malloc(sizeInBytes));
   assert(rawData && "failed to allocate memory for stickified data");
   memset(rawData, 0, sizeInBytes);
   DenseResourceElementsAttr valueAttr = DenseUI8ResourceElementsAttr::get(
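A note on this hunk: `static_cast` makes the `void *` to `char *` conversion explicit and lets the compiler reject unrelated conversions that a C-style cast would silently perform. A minimal standalone sketch of the same allocation idiom (the helper name and error message are illustrative, not from this patch):

```cpp
#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <cstring>

// Illustrative helper: allocate and zero a raw byte buffer, mirroring the
// patched code's handling of stickified constant data.
char *allocZeroedBuffer(int64_t sizeInBytes) {
  char *rawData = static_cast<char *>(std::malloc(sizeInBytes));
  assert(rawData && "failed to allocate memory");
  std::memset(rawData, 0, sizeInBytes);
  return rawData;
}
```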
@@ -228,7 +228,7 @@ Value insertShapeMemRefI64(
   MultiDialectBuilder<KrnlBuilder, MathBuilder, MemRefBuilder> create(
       rewriter, loc);
   MemRefType shapeMemRefType = MemRefType::get(
-      {(int64_t)originalDims.size()}, rewriter.getIntegerType(64));
+      {static_cast<int64_t>(originalDims.size())}, rewriter.getIntegerType(64));
   Value shapeMemRef = create.mem.alignedAlloc(shapeMemRefType);
   for (uint64_t i = 0; i < originalDims.size(); ++i) {
     Value dim =
@@ -395,7 +395,7 @@ ZMemRefType convertZTensorToMemRefType(Type type) {
              "wrong concatenated dimension size");
       int64_t s = shape[rank - 1] / 4;
       // ((s + 64 - 1) / 64) * 64;
-      int64_t s_pad = ceil((double)s / 64) * 64;
+      int64_t s_pad = ceil(static_cast<double>(s) / 64) * 64;
       int64_t pad_size = s_pad - s;
       AffineExpr constExprS = getAffineConstantExpr(s, b.getContext());
       if (rank == 2) {
@@ -431,7 +431,8 @@ ZMemRefType convertZTensorToMemRefType(Type type) {
              "in affine_map generation.");
       assert((hidden_size % 3) == 0 && "wrong concatenated dimension size.");
       int64_t s = hidden_size / 3;
-      int64_t s_pad = ceil((float)s / 64) * 64; // ((s + 64 - 1) / 64) * 64;
+      int64_t s_pad =
+          ceil(static_cast<float>(s) / 64) * 64; // ((s + 64 - 1) / 64) * 64;
       int64_t pad_size = s_pad - s;
       AffineExpr constExprS = getAffineConstantExpr(s, b.getContext());
       if (rank == 2) {
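Both `s_pad` hunks implement the round-up-to-a-multiple-of-64 that their trailing comment `((s + 64 - 1) / 64) * 64` spells out in integer form. A hedged sketch, assuming non-negative `s`, showing the two forms agree (the integer form also sidesteps any precision concern that `float` division could raise for very large dimensions):

```cpp
#include <cassert>
#include <cmath>
#include <cstdint>

int64_t padViaCeil(int64_t s) { // form used in the patch
  return static_cast<int64_t>(std::ceil(static_cast<double>(s) / 64) * 64);
}

int64_t padViaInt(int64_t s) { // form from the comment; integer-only
  return ((s + 64 - 1) / 64) * 64;
}

int main() {
  for (int64_t s = 0; s < 4096; ++s)
    assert(padViaCeil(s) == padViaInt(s));
  return 0;
}
```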
@@ -723,7 +724,7 @@ struct ZHighToZLowStickifiedConstantOpLowering : public ConversionPattern {
     // Validate the stickified tensor.
     int64_t memRefSizeInBytes = getMemRefEltSizeInBytes(normalizedType);
     memRefSizeInBytes *= normalizedType.getNumElements();
-    assert((data.size() == (uint64_t)memRefSizeInBytes) &&
+    assert((data.size() == static_cast<uint64_t>(memRefSizeInBytes)) &&
            "The stickified tensor's buffer size and MemRef's size mismatched");
 
     // Create a KrnlGlobalOp.
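The cast in this assert reconciles types: `data.size()` is unsigned while `memRefSizeInBytes` is a signed `int64_t`, so comparing them directly would draw `-Wsign-compare`. A small hypothetical illustration of the same check (the container type and names are assumptions, not the patch's actual types):

```cpp
#include <cassert>
#include <cstdint>
#include <vector>

void validateBuffer(const std::vector<char> &data, int64_t memRefSizeInBytes) {
  // static_cast makes the signed-to-unsigned comparison explicit and clean
  // under -Wsign-compare.
  assert(data.size() == static_cast<uint64_t>(memRefSizeInBytes) &&
         "buffer size and MemRef size mismatched");
}
```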
@@ -1565,7 +1566,7 @@ struct ZHighToZLowStickifiedConstantOfShapeOpLowering
 
     // Convert the scalar value to dlfloat16.
     // Use uint16_t as container.
-    float valueF32 = (float)value.getValueAsDouble();
+    float valueF32 = static_cast<float>(value.getValueAsDouble());
     uint16_t valueDLF16;
     fp32_to_dlf16(&valueF32, &valueDLF16, 1);
 
@@ -1709,10 +1710,10 @@ struct ZHighToZLowDataConversionLowering
 
     if (enableParallel) {
       int64_t parId;
-      int64_t tripCount =
-          flattenedOutputDims[0].isLiteral()
-              ? std::ceil(flattenedOutputDims[0].getLiteral() / (float)archVL)
-              : -1;
+      int64_t tripCount = flattenedOutputDims[0].isLiteral()
+                              ? std::ceil(flattenedOutputDims[0].getLiteral() /
+                                          static_cast<float>(archVL))
+                              : -1;
       if (findSuitableParallelDimension(lbs, flattenedOutputDims, 0, 1, parId,
               /*min iter for going parallel*/ 1024)) {
         create.krnl.parallel(blockedLoopDef[0]);
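The reflowed ternary computes a ceiling division of the literal loop bound by the vector length; casting `archVL` to `float` forces floating-point division so `std::ceil` has a fractional value to round up, and `-1` stands in for a dynamic (non-literal) dimension. A hedged standalone sketch of that computation (the function name and the literal/dynamic flag are illustrative):

```cpp
#include <cmath>
#include <cstdint>

// Ceiling division via float, as in the patch; -1 marks a dynamic dimension.
int64_t estimateTripCount(int64_t literalDim, int64_t archVL, bool isLiteral) {
  return isLiteral ? static_cast<int64_t>(
                         std::ceil(literalDim / static_cast<float>(archVL)))
                   : -1;
}
```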