
Commit ec314b7

Ferdinand Lemaire and hamptonm1 authored
Bump llvm to 29b92d07746fac26cd64c914bc9c5c3833974f6d (#2962)
* bump llvm to 29b92d07746fac26cd64c914bc9c5c3833974f6d

Signed-off-by: Ferdinand Lemaire <[email protected]>

* Revert changes on refs

Signed-off-by: Ferdinand Lemaire <[email protected]>

---------

Signed-off-by: Ferdinand Lemaire <[email protected]>
Co-authored-by: hamptonm1 <[email protected]>
1 parent ccf9552 commit ec314b7

File tree

8 files changed (+93, -97 lines)

docs/BuildOnLinuxOSX.md

+1-1
@@ -15,7 +15,7 @@ Firstly, install MLIR (as a part of LLVM-Project):
 ``` bash
 git clone -n https://github.com/llvm/llvm-project.git
 # Check out a specific branch that is known to work with ONNX-MLIR.
-cd llvm-project && git checkout eaa95a1c2bd38332c1a4e634595f29d22b28ffea && cd ..
+cd llvm-project && git checkout 29b92d07746fac26cd64c914bc9c5c3833974f6d && cd ..
 ```

 [same-as-file]: <> (utils/build-mlir.sh)
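After checking out the pinned commit, the doc hands off to utils/build-mlir.sh to configure and build MLIR. A minimal sketch of that step is below; the cmake options are common defaults for an MLIR build and are assumptions, not a quote of utils/build-mlir.sh.

```bash
# Hedged sketch: build MLIR at the pinned LLVM commit.
# The exact flags live in utils/build-mlir.sh; these are assumed typical values.
mkdir -p llvm-project/build && cd llvm-project/build
cmake -G Ninja ../llvm \
  -DLLVM_ENABLE_PROJECTS=mlir \
  -DLLVM_TARGETS_TO_BUILD="host" \
  -DCMAKE_BUILD_TYPE=Release \
  -DLLVM_ENABLE_ASSERTIONS=ON \
  -DLLVM_ENABLE_RTTI=ON
cmake --build .
cmake --build . --target check-mlir   # optional sanity check of the MLIR build
```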

docs/BuildOnWindows.md

+1-1
@@ -52,7 +52,7 @@ Install MLIR (as a part of LLVM-Project):
 ```shell
 git clone -n https://github.com/llvm/llvm-project.git
 # Check out a specific branch that is known to work with ONNX-MLIR.
-cd llvm-project && git checkout eaa95a1c2bd38332c1a4e634595f29d22b28ffea && cd ..
+cd llvm-project && git checkout 29b92d07746fac26cd64c914bc9c5c3833974f6d && cd ..
 ```

 [same-as-file]: <> (utils/build-mlir.cmd)

test/mlir/conversion/onnx_to_krnl/ControlFlow/Loop.mlir

+15-16
@@ -41,19 +41,19 @@ func.func private @test_loop_simple_main_graph(%arg0: tensor<i64>, %arg1: tensor
 // CHECK-DAG: [[CST_1_2_:%.+]] = arith.constant 1 : index
 // CHECK-DAG: [[CST_1_3_:%.+]] = arith.constant 1 : index
 // CHECK-DAG: [[RES_3_:%.+]] = memref.alloc() {{.*}}: memref<1xi64>
+// CHECK-NOT: separator of consecutive DAGs
+// CHECK-DAG: [[VAR_8_:%.+]] = builtin.unrealized_conversion_cast [[RES_3_]] : memref<1xi64> to tensor<1xi64>
 // CHECK-DAG: [[CST_0_2_:%.+]] = arith.constant 0 : index
 // CHECK-NOT: separator of consecutive DAGs
 // CHECK-DAG: [[LOAD_RES_MEM_:%.+]] = krnl.load [[RES_]]{{.}}[[CST_0_2_]]{{.}} : memref<1xi64>
 // CHECK-DAG: [[LOAD_RES_2_MEM_:%.+]] = krnl.load [[RES_2_]][] : memref<i64>
 // CHECK-NOT: separator of consecutive DAGs
-// CHECK-DAG: [[VAR_10_:%.+]] = arith.addi [[LOAD_RES_MEM_]], [[LOAD_RES_2_MEM_]] : i64
+// CHECK-DAG: [[VAR_11_:%.+]] = arith.addi [[LOAD_RES_MEM_]], [[LOAD_RES_2_MEM_]] : i64
 // CHECK-DAG: [[CST_0_3_:%.+]] = arith.constant 0 : index
-// CHECK: krnl.store [[VAR_10_]], [[RES_3_]]{{.}}[[CST_0_3_]]{{.}} : memref<1xi64>
-// CHECK-DAG: [[VAR_11_:%.+]] = builtin.unrealized_conversion_cast [[RES_3_]] : memref<1xi64> to tensor<1xi64>
+// CHECK: krnl.store [[VAR_11_]], [[RES_3_]]{{.}}[[CST_0_3_]]{{.}} : memref<1xi64>
 // CHECK-DAG: [[VAR_12_:%.+]] = builtin.unrealized_conversion_cast [[PARAM_1_]] : memref<i1> to memref<i1>
-// CHECK-NOT: separator of consecutive DAGs
-// CHECK-DAG: [[VAR_13_:%.+]] = builtin.unrealized_conversion_cast [[VAR_11_]] : tensor<1xi64> to memref<1xi64>
-// CHECK-DAG: [[LOAD_VAR_12_MEM_:%.+]] = krnl.load [[VAR_12_]][] : memref<i1>
+// CHECK-DAG: [[VAR_13_:%.+]] = builtin.unrealized_conversion_cast [[VAR_8_]] : tensor<1xi64> to memref<1xi64>
+// CHECK: [[LOAD_VAR_12_MEM_:%.+]] = krnl.load [[VAR_12_]][] : memref<i1>
 // CHECK: krnl.store [[LOAD_VAR_12_MEM_]], [[RES_1_]][] : memref<i1>
 // CHECK-DAG: [[LOOP_2_:%.+]] = krnl.define_loops 1
 // CHECK-DAG: [[CST_0_4_:%.+]] = arith.constant 0 : index
@@ -111,8 +111,10 @@ func.func @test_loop(%arg0: tensor<i64>, %arg1: tensor<i1>, %arg2: tensor<?xf32>
 // CHECK-DAG: [[VAR_dim_7_:%.+]] = memref.dim [[PARAM_2_]], [[CST_0_1_]] : memref<?xf32>
 // CHECK-DAG: [[CST_0_2_:%.+]] = arith.constant 0 : index
 // CHECK: [[VAR_dim_9_:%.+]] = memref.dim [[PARAM_2_]], [[CST_0_2_]] : memref<?xf32>
-// CHECK: [[VAR_11_:%.+]] = affine.max [[MAP_0_]]([[VAR_dim_7_]], [[VAR_dim_9_]])
-// CHECK-DAG: [[RES_3_:%.+]] = memref.alloc([[VAR_11_]]) {{.*}}: memref<?xf32>
+// CHECK-DAG: [[VAR_11_:%.+]] = affine.max [[MAP_0_]]([[VAR_dim_7_]], [[VAR_dim_9_]])
+// CHECK-DAG: [[CST_1_1_:%.+]] = arith.constant 1 : index
+// CHECK: [[RES_3_:%.+]] = memref.alloc([[VAR_11_]]) {{.*}}: memref<?xf32>
+// CHECK-DAG: [[VAR_12_:%.+]] = builtin.unrealized_conversion_cast [[RES_3_]] : memref<?xf32> to tensor<?xf32>
 // CHECK-DAG: [[LOOP_1_:%.+]] = krnl.define_loops 1
 // CHECK-DAG: [[CST_0_3_:%.+]] = arith.constant 0 : index
 // CHECK-DAG: [[CST_0_4_:%.+]] = arith.constant 0 : index
@@ -123,11 +125,9 @@ func.func @test_loop(%arg0: tensor<i64>, %arg1: tensor<i1>, %arg2: tensor<?xf32>
 // CHECK: [[VAR_20_:%.+]] = arith.addf [[LOAD_PARAM_2_MEM_]], [[LOAD_PARAM_2_MEM_1_]] : f32
 // CHECK: krnl.store [[VAR_20_]], [[RES_3_]]{{.}}[[VAR_17_]]{{.}} : memref<?xf32>
 // CHECK: }
-// CHECK-DAG: [[VAR_13_:%.+]] = builtin.unrealized_conversion_cast [[RES_3_]] : memref<?xf32> to tensor<?xf32>
 // CHECK-DAG: [[VAR_14_:%.+]] = builtin.unrealized_conversion_cast [[PARAM_1_]] : memref<i1> to memref<i1>
-// CHECK-NOT: separator of consecutive DAGs
-// CHECK-DAG: [[VAR_15_:%.+]] = builtin.unrealized_conversion_cast [[VAR_13_]] : tensor<?xf32> to memref<?xf32>
-// CHECK-DAG: [[LOAD_VAR_14_MEM_:%.+]] = krnl.load [[VAR_14_]][] : memref<i1>
+// CHECK-DAG: [[VAR_15_:%.+]] = builtin.unrealized_conversion_cast [[VAR_12_]] : tensor<?xf32> to memref<?xf32>
+// CHECK: [[LOAD_VAR_14_MEM_:%.+]] = krnl.load [[VAR_14_]][] : memref<i1>
 // CHECK: krnl.store [[LOAD_VAR_14_MEM_]], [[RES_1_]][] : memref<i1>
 // CHECK: "krnl.seqstore"([[VAR_15_]], [[RES_]], [[VAR_8_]]) : (memref<?xf32>, memref<?xmemref<?xf32>>, index) -> ()
 // CHECK: }) : () -> ()
@@ -150,11 +150,10 @@ func.func @test_loop(%arg0: tensor<i64>, %arg1: tensor<i1>, %arg2: tensor<?xf32>
 // CHECK: [[VAR_dim_7_1_:%.+]] = memref.dim [[LOAD_RES_1_MEM_1_]], [[CST_0_9_]] : memref<?xf32>
 // CHECK: krnl.iterate([[LOOP_3_]]) with ([[LOOP_3_]] -> [[I_3_:%.+]] = 0 to [[MAP_2_]]([[VAR_dim_7_1_]])){
 // CHECK: [[VAR_11_1_:%.+]] = krnl.get_induction_var_value([[LOOP_3_]]) : (!krnl.loop) -> index
-// CHECK: [[LOOP_1_:%.+]] = krnl.load [[LOAD_RES_1_MEM_1_]]{{.}}[[VAR_11_1_]]{{.}} : memref<?xf32>
-// CHECK: krnl.store [[LOOP_1_]], [[RES_4_]]{{.}}[[VAR_8_1_]], [[VAR_11_1_]]{{.}} : memref<?x?xf32>
+// CHECK: [[VAR_12_1_:%.+]] = krnl.load [[LOAD_RES_1_MEM_1_]]{{.}}[[VAR_11_1_]]{{.}} : memref<?xf32>
+// CHECK: krnl.store [[VAR_12_1_]], [[RES_4_]]{{.}}[[VAR_8_1_]], [[VAR_11_1_]]{{.}} : memref<?x?xf32>
 // CHECK: }
 // CHECK: }) : () -> ()
 // CHECK: }
 // CHECK: return [[RES_4_]] : memref<?x?xf32>
-// CHECK: }
-}
+}

test/mlir/conversion/onnx_to_krnl/NN/Normalization_O3_SIMD_canonicalize.mlir

+2-2
@@ -433,6 +433,7 @@ func.func @layernorm_4D_with_scale_bias_no_SIMD(%arg0: tensor<2x64x31x3xf32>, %a
 // CHECK: }
 // CHECK: }
 // CHECK-DAG: [[RES_41_:%.+]] = memref.alloc() {{.*}}: memref<2x64x31x3xf32>
+// CHECK-DAG: [[VAR_6_:%.+]] = builtin.unrealized_conversion_cast [[RES_41_]] : memref<2x64x31x3xf32> to tensor<2x64x31x3xf32>
 // CHECK-DAG: [[RES_42_:%.+]] = memref.alloc() {{.*}}: memref<3xindex>
 // CHECK: affine.store [[CST_2_]], [[RES_42_]][0] : memref<3xindex>
 // CHECK: affine.store [[CST_64_]], [[RES_42_]][1] : memref<3xindex>
@@ -467,8 +468,7 @@ func.func @layernorm_4D_with_scale_bias_no_SIMD(%arg0: tensor<2x64x31x3xf32>, %a
 // CHECK: krnl.store [[LOAD_VAR_reshape_MEM_6_1_1_1_1_]], [[VAR_reshape_75_]]{{.}}[[VAR_8_5_]]#0, [[VAR_8_5_]]#1, [[VAR_11_12_]]{{.}} : memref<2x64x93xf32>
 // CHECK: }
 // CHECK: }
-// CHECK: [[VAR_7_:%.+]] = builtin.unrealized_conversion_cast [[RES_41_]] : memref<2x64x31x3xf32> to tensor<2x64x31x3xf32>
-// CHECK: onnx.Return [[VAR_7_]] : tensor<2x64x31x3xf32>
+// CHECK: onnx.Return [[VAR_6_]] : tensor<2x64x31x3xf32>
 // CHECK: }
 }

test/mlir/conversion/onnx_to_krnl/Tensor/onnx_lowering_depth_to_space_op.mlir

+37-38
@@ -7,47 +7,46 @@ func.func private @test_depth_to_space_dynamic_dims(%arg0 : tensor<1x?x8x?xf32>)
 %0 = "onnx.DepthToSpace"(%arg0) {blocksize = 4 : si64} : (tensor<1x?x8x?xf32>) -> tensor<1x?x32x?xf32>
 "func.return"(%0) : (tensor<1x?x32x?xf32>) -> ()
 
-// CHECK-DAG: [[MAP_1_:#.+]] = affine_map<()[s0] -> (s0 floordiv 16)>
-// CHECK-DAG: [[MAP_2_:#.+]] = affine_map<()[s0] -> (s0 * 4)>
-// CHECK-LABEL: func private @test_depth_to_space_dynamic_dims
+// CHECK-DAG: [[MAP_0_:#.+]] = affine_map<()[s0] -> (s0 floordiv 16)>
+// CHECK-DAG: [[MAP_1_:#.+]] = affine_map<()[s0] -> (s0 * 4)>
+// CHECK-LABEL: func.func private @test_depth_to_space_dynamic_dims
 // CHECK-SAME: ([[PARAM_0_:%.+]]: memref<1x?x8x?xf32>) -> memref<1x?x32x?xf32> {
-// CHECK-DAG: [[VAR_c3_:%.+]] = arith.constant 3 : index
-// CHECK-DAG: [[VAR_c2_:%.+]] = arith.constant 2 : index
-// CHECK-DAG: [[VAR_c1_:%.+]] = arith.constant 1 : index
-// CHECK-DAG: [[VAR_c0_:%.+]] = arith.constant 0 : index
-// CHECK-DAG: [[VAR_c32_:%.+]] = arith.constant 32 : index
-// CHECK-DAG: [[VAR_c5_:%.+]] = arith.constant 5 : index
-// CHECK-DAG: [[VAR_c4_:%.+]] = arith.constant 4 : index
-// CHECK-DAG: [[VAR_c8_:%.+]] = arith.constant 8 : index
-// CHECK-DAG: [[VAR_5_:%.+]] = builtin.unrealized_conversion_cast [[PARAM_0_]] : memref<1x?x8x?xf32> to tensor<1x?x8x?xf32>
+// CHECK-DAG: [[CST_5_:%.+]] = arith.constant 5 : index
+// CHECK-DAG: [[CST_2_:%.+]] = arith.constant 2 : index
+// CHECK-DAG: [[CST_0_:%.+]] = arith.constant 0 : index
+// CHECK-DAG: [[CST_32_:%.+]] = arith.constant 32 : index
+// CHECK-DAG: [[CST_4_:%.+]] = arith.constant 4 : index
+// CHECK-DAG: [[CST_3_:%.+]] = arith.constant 3 : index
+// CHECK-DAG: [[CST_8_:%.+]] = arith.constant 8 : index
+// CHECK-DAG: [[CST_1_:%.+]] = arith.constant 1 : index
 // CHECK-NOT: separator of consecutive DAGs
-// CHECK-DAG: [[VAR_0_:%.+]] = memref.dim [[PARAM_0_]], [[VAR_c1_]] : memref<1x?x8x?xf32>
-// CHECK-DAG: [[VAR_1_:%.+]] = memref.dim [[PARAM_0_]], [[VAR_c3_]] : memref<1x?x8x?xf32>
+// CHECK-DAG: [[VAR_dim_:%.+]] = memref.dim [[PARAM_0_]], [[CST_1_]] : memref<1x?x8x?xf32>
+// CHECK-DAG: [[VAR_dim_0_:%.+]] = memref.dim [[PARAM_0_]], [[CST_3_]] : memref<1x?x8x?xf32>
 // CHECK-NOT: separator of consecutive DAGs
-// CHECK-DAG: [[VAR_2_:%.+]] = affine.apply [[MAP_1_]](){{.}}[[VAR_0_]]{{.}}
-// CHECK-DAG: [[VAR_3_:%.+]] = affine.apply [[MAP_2_]](){{.}}[[VAR_1_]]{{.}}
+// CHECK-DAG: [[VAR_0_:%.+]] = affine.apply [[MAP_0_]](){{.}}[[VAR_dim_]]{{.}}
+// CHECK-DAG: [[VAR_1_:%.+]] = affine.apply [[MAP_1_]](){{.}}[[VAR_dim_0_]]{{.}}
 // CHECK-DAG: [[RES_:%.+]] = memref.alloc() {{.*}}: memref<6xindex>
-// CHECK: krnl.store [[VAR_c1_]], [[RES_]]{{.}}[[VAR_c0_]]{{.}} : memref<6xindex>
-// CHECK: krnl.store [[VAR_c4_]], [[RES_]]{{.}}[[VAR_c1_]]{{.}} : memref<6xindex>
-// CHECK: krnl.store [[VAR_c4_]], [[RES_]]{{.}}[[VAR_c2_]]{{.}} : memref<6xindex>
-// CHECK: krnl.store [[VAR_2_]], [[RES_]]{{.}}[[VAR_c3_]]{{.}} : memref<6xindex>
-// CHECK: krnl.store [[VAR_c8_]], [[RES_]]{{.}}[[VAR_c4_]]{{.}} : memref<6xindex>
-// CHECK: krnl.store [[VAR_1_]], [[RES_]]{{.}}[[VAR_c5_]]{{.}} : memref<6xindex>
-// CHECK-DAG: [[VAR_6_:%.+]] = builtin.unrealized_conversion_cast [[RES_]] : memref<6xindex> to tensor<6xi64>
-// CHECK: [[VAR_7_:%.+]] = "onnx.Reshape"([[VAR_5_]], [[VAR_6_]]) {allowzero = 0 : si64} : (tensor<1x?x8x?xf32>, tensor<6xi64>) -> tensor<?x?x?x?x?x?xf32>
-// CHECK: [[VAR_8_:%.+]] = builtin.unrealized_conversion_cast [[VAR_7_]] : tensor<?x?x?x?x?x?xf32> to memref<?x?x?x?x?x?xf32>
-// CHECK: [[VAR_9_:%.+]] = memref.cast [[VAR_8_]] : memref<?x?x?x?x?x?xf32> to memref<1x4x4x?x8x?xf32>
-// CHECK: [[VAR_10_:%.+]] = builtin.unrealized_conversion_cast [[VAR_9_]] : memref<1x4x4x?x8x?xf32> to tensor<1x4x4x?x8x?xf32>
-// CHECK-DAG: [[VAR_11_:%.+]] = "onnx.Transpose"([[VAR_10_]]) {perm = [0, 3, 4, 1, 5, 2]} : (tensor<1x4x4x?x8x?xf32>) -> tensor<1x?x8x4x?x4xf32>
+// CHECK: krnl.store [[CST_1_]], [[RES_]]{{.}}[[CST_0_]]{{.}} : memref<6xindex>
+// CHECK: krnl.store [[CST_4_]], [[RES_]]{{.}}[[CST_1_]]{{.}} : memref<6xindex>
+// CHECK: krnl.store [[CST_4_]], [[RES_]]{{.}}[[CST_2_]]{{.}} : memref<6xindex>
+// CHECK: krnl.store [[VAR_0_]], [[RES_]]{{.}}[[CST_3_]]{{.}} : memref<6xindex>
+// CHECK: krnl.store [[CST_8_]], [[RES_]]{{.}}[[CST_4_]]{{.}} : memref<6xindex>
+// CHECK: krnl.store [[VAR_dim_0_]], [[RES_]]{{.}}[[CST_5_]]{{.}} : memref<6xindex>
+// CHECK-DAG: [[VAR_2_:%.+]] = builtin.unrealized_conversion_cast [[RES_]] : memref<6xindex> to tensor<6xi64>
+// CHECK-DAG: [[VAR_3_:%.+]] = builtin.unrealized_conversion_cast [[PARAM_0_]] : memref<1x?x8x?xf32> to tensor<1x?x8x?xf32>
+// CHECK: [[VAR_4_:%.+]] = "onnx.Reshape"([[VAR_3_]], [[VAR_2_]]) {allowzero = 0 : si64} : (tensor<1x?x8x?xf32>, tensor<6xi64>) -> tensor<?x?x?x?x?x?xf32>
+// CHECK: [[VAR_5_:%.+]] = builtin.unrealized_conversion_cast [[VAR_4_]] : tensor<?x?x?x?x?x?xf32> to memref<?x?x?x?x?x?xf32>
+// CHECK: [[VAR_cast_:%.+]] = memref.cast [[VAR_5_]] : memref<?x?x?x?x?x?xf32> to memref<1x4x4x?x8x?xf32>
+// CHECK: [[VAR_6_:%.+]] = builtin.unrealized_conversion_cast [[VAR_cast_]] : memref<1x4x4x?x8x?xf32> to tensor<1x4x4x?x8x?xf32>
+// CHECK-DAG: [[VAR_7_:%.+]] = "onnx.Transpose"([[VAR_6_]]) {perm = [0, 3, 4, 1, 5, 2]} : (tensor<1x4x4x?x8x?xf32>) -> tensor<1x?x8x4x?x4xf32>
 // CHECK-DAG: [[RES_1_:%.+]] = memref.alloc() {{.*}}: memref<4xindex>
-// CHECK: krnl.store [[VAR_c1_]], [[RES_1_]]{{.}}[[VAR_c0_]]{{.}} : memref<4xindex>
-// CHECK: krnl.store [[VAR_2_]], [[RES_1_]]{{.}}[[VAR_c1_]]{{.}} : memref<4xindex>
-// CHECK: krnl.store [[VAR_c32_]], [[RES_1_]]{{.}}[[VAR_c2_]]{{.}} : memref<4xindex>
-// CHECK: krnl.store [[VAR_3_]], [[RES_1_]]{{.}}[[VAR_c3_]]{{.}} : memref<4xindex>
-// CHECK: [[VAR_13_:%.+]] = builtin.unrealized_conversion_cast [[RES_1_]] : memref<4xindex> to tensor<4xi64>
-// CHECK: [[VAR_14_:%.+]] = "onnx.Reshape"([[VAR_11_]], [[VAR_13_]]) {allowzero = 0 : si64} : (tensor<1x?x8x4x?x4xf32>, tensor<4xi64>) -> tensor<?x?x?x?xf32>
-// CHECK: [[VAR_15_:%.+]] = builtin.unrealized_conversion_cast [[VAR_14_]] : tensor<?x?x?x?xf32> to memref<?x?x?x?xf32>
-// CHECK: [[VAR_16_:%.+]] = memref.cast [[VAR_15_]] : memref<?x?x?x?xf32> to memref<1x?x32x?xf32>
-// CHECK: return [[VAR_16_]] : memref<1x?x32x?xf32>
-// CHECK: }
+// CHECK: krnl.store [[CST_1_]], [[RES_1_]]{{.}}[[CST_0_]]{{.}} : memref<4xindex>
+// CHECK: krnl.store [[VAR_0_]], [[RES_1_]]{{.}}[[CST_1_]]{{.}} : memref<4xindex>
+// CHECK: krnl.store [[CST_32_]], [[RES_1_]]{{.}}[[CST_2_]]{{.}} : memref<4xindex>
+// CHECK: krnl.store [[VAR_1_]], [[RES_1_]]{{.}}[[CST_3_]]{{.}} : memref<4xindex>
+// CHECK: [[VAR_8_:%.+]] = builtin.unrealized_conversion_cast [[RES_1_]] : memref<4xindex> to tensor<4xi64>
+// CHECK: [[VAR_9_:%.+]] = "onnx.Reshape"([[VAR_7_]], [[VAR_8_]]) {allowzero = 0 : si64} : (tensor<1x?x8x4x?x4xf32>, tensor<4xi64>) -> tensor<?x?x?x?xf32>
+// CHECK: [[VAR_10_:%.+]] = builtin.unrealized_conversion_cast [[VAR_9_]] : tensor<?x?x?x?xf32> to memref<?x?x?x?xf32>
+// CHECK: [[VAR_cast_2_:%.+]] = memref.cast [[VAR_10_]] : memref<?x?x?x?xf32> to memref<1x?x32x?xf32>
+// CHECK: return [[VAR_cast_2_]] : memref<1x?x32x?xf32>
 }
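The FileCheck expectations above are exercised through onnx-mlir's lit tests once the project is rebuilt against the new LLVM commit. A hedged sketch of how to re-run them follows; the build target and the RUN pipeline in the comments are assumptions based on the project's usual conventions, not taken from this diff.

```bash
# Hedged sketch: re-run the lit tests touched by this commit.
# "check-onnx-lit" is assumed to be the lit target; verify against the build docs.
cd onnx-mlir/build
cmake --build . --target check-onnx-lit

# Each .mlir test drives itself via its own RUN line (not shown in this diff);
# a typical onnx-to-krnl pipeline is assumed to look like:
#   onnx-mlir-opt --shape-inference --convert-onnx-to-krnl --canonicalize \
#       ControlFlow/Loop.mlir | FileCheck ControlFlow/Loop.mlir
```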
