Skip to content

Commit f2bccef

Browse files
flemairen6, hamptonm1, and chentong319
authored
Bump LLVM commit to 20ed5b1f4587 (#2799)
* Bump LLVM commit to 1e6ce5e284f5c0e8d64eee21af727bb164eb3caf and stablehlo to 49dc86c0df9ac3a8a556208674204b0f68d8eb6d

  Signed-off-by: Ferdinand Lemaire <[email protected]>

* Bump LLVM to 20ed5b1f4587 and stablehlo to fd0c20a10

  Revert the changes to properties from stablehlo and fix some references where variable names had changed.

  Signed-off-by: Ferdinand Lemaire <[email protected]>

* Remove Sequence_with_dealloc.mlir from the ran tests because it's unstable

  Signed-off-by: Ferdinand Lemaire <[email protected]>

---------

Signed-off-by: Ferdinand Lemaire <[email protected]>
Signed-off-by: Ferdinand Lemaire <[email protected]>
Co-authored-by: hamptonm1 <[email protected]>
Co-authored-by: Tong Chen <[email protected]>
1 parent 049f8e9 commit f2bccef

File tree

9 files changed

+17
-16
lines changed

9 files changed

+17
-16
lines changed

docs/BuildOnLinuxOSX.md

+1-1
Original file line numberDiff line numberDiff line change
@@ -15,7 +15,7 @@ Firstly, install MLIR (as a part of LLVM-Project):
1515
``` bash
1616
git clone -n https://github.com/llvm/llvm-project.git
1717
# Check out a specific branch that is known to work with ONNX-MLIR.
18-
cd llvm-project && git checkout 7ac7d418ac2b16fd44789dcf48e2b5d73de3e715 && cd ..
18+
cd llvm-project && git checkout 20ed5b1f45871612570d3bd447121ac43e083c6a && cd ..
1919
```
2020

2121
[same-as-file]: <> (utils/build-mlir.sh)

docs/BuildOnWindows.md

+1-1
Original file line numberDiff line numberDiff line change
@@ -52,7 +52,7 @@ Install MLIR (as a part of LLVM-Project):
5252
```shell
5353
git clone -n https://github.com/llvm/llvm-project.git
5454
# Check out a specific branch that is known to work with ONNX-MLIR.
55-
cd llvm-project && git checkout 7ac7d418ac2b16fd44789dcf48e2b5d73de3e715 && cd ..
55+
cd llvm-project && git checkout 20ed5b1f45871612570d3bd447121ac43e083c6a && cd ..
5656
```
5757

5858
[same-as-file]: <> (utils/build-mlir.cmd)

test/mlir/conversion/onnx_to_krnl/Sequence/Sequence_with_dealloc.mlir renamed to test/mlir/conversion/onnx_to_krnl/Sequence/Sequence_with_dealloc.mlir.broken

+1-2
Original file line numberDiff line numberDiff line change
@@ -14,7 +14,6 @@ func.func @test_sequence_erase(%arg0: !onnx.Seq<tensor<?x4x5xf32>>) -> tensor<3x
1414
// CHECK-DAG: [[MAP_0_:#.+]] = affine_map<()[s0] -> (s0 - 1)>
1515
// CHECK-DAG: [[MAP_1_:#.+]] = affine_map<()[s0, s1] -> (s1 + s0)>
1616
// CHECK-DAG: [[MAP_2_:#.+]] = affine_map<()[s0, s1] -> (s0)>
17-
// CHECK-DAG: [[MAP_3_:#.+]] = affine_map<()[s0, s1] -> (s0 + s1)>
1817
// CHECK-LABEL: func.func @test_sequence_erase
1918
// CHECK-SAME: ([[PARAM_0_:%.+]]: memref<?xmemref<?x4x5xf32>>) -> memref<3xi64> {
2019
// CHECK-DAG: [[VAR_0_:%.+]] = "krnl.global"() {name = "constant_{{[0-9]+}}", shape = [], value = dense<0> : tensor<i64>} : () -> memref<i64>
@@ -55,7 +54,7 @@ func.func @test_sequence_erase(%arg0: !onnx.Seq<tensor<?x4x5xf32>>) -> tensor<3x
5554
// CHECK-DAG: [[CST_0_4_:%.+]] = arith.constant 0 : index
5655
// CHECK-NOT: separator of consecutive DAGs
5756
// CHECK-DAG: [[VAR_13_:%.+]] = arith.cmpi slt, [[VAR_12_]], [[CST_0_4_]] : index
58-
// CHECK-DAG: [[VAR_14_:%.+]] = affine.apply [[MAP_3_]](){{.}}[[VAR_12_]], [[VAR_dim_4_]]{{.}}
57+
// CHECK-DAG: [[VAR_14_:%.+]] = affine.apply [[MAP_1_]](){{.}}[[VAR_dim_4_]], [[VAR_12_]]{{.}}
5958
// CHECK: [[VAR_15_:%.+]] = arith.select [[VAR_13_]], [[VAR_14_]], [[VAR_12_]] : index
6059
// CHECK-DAG: [[VAR_16_:%.+]] = "krnl.seqextract"([[VAR_2_]], [[VAR_15_]]) {copy = 1 : ui1} : (memref<?xmemref<?x4x5xf32>>, index) -> memref<?x4x5xf32>
6160
// CHECK-DAG: [[CST_3_:%.+]] = arith.constant 3 : index

test/mlir/conversion/onnx_to_stablehlo/NN/Normalization.mlir

+1-1
Original file line numberDiff line numberDiff line change
@@ -13,7 +13,7 @@ func.func @test_batch_normalization(%arg0 : tensor<1x3x10x10xf32>) -> tensor<1x3
1313
// CHECK-NEXT: [[VAR_1_:%.+]] = stablehlo.constant dense<1.000000e+00> : tensor<3xf32>
1414
// CHECK-NEXT: [[VAR_2_:%.+]] = stablehlo.constant dense<1.000000e+00> : tensor<3xf32>
1515
// CHECK-NEXT: [[VAR_3_:%.+]] = stablehlo.constant dense<1.000000e+00> : tensor<3xf32>
16-
// CHECK-NEXT: [[VAR_4_:%.+]] = "stablehlo.batch_norm_inference"(%arg0, %0, %1, %2, %3) {epsilon = 1.00000007E-5 : f32, feature_index = 1 : i64} : (tensor<1x3x10x10xf32>, tensor<3xf32>, tensor<3xf32>, tensor<3xf32>, tensor<3xf32>) -> tensor<1x3x10x10xf32>
16+
// CHECK-NEXT: [[VAR_4_:%.+]] = "stablehlo.batch_norm_inference"([[PARAM_0_]], [[VAR_0_]], [[VAR_1_]], [[VAR_2_]], [[VAR_3_]]) {epsilon = 1.00000007E-5 : f32, feature_index = 1 : i64} : (tensor<1x3x10x10xf32>, tensor<3xf32>, tensor<3xf32>, tensor<3xf32>, tensor<3xf32>) -> tensor<1x3x10x10xf32>
1717
// CHECK-NEXT: return [[VAR_4_]] : tensor<1x3x10x10xf32>
1818
// CHECK-NEXT: }
1919
}

test/mlir/conversion/onnx_to_stablehlo/Tensor/ArgMax.mlir

+1-1
Original file line numberDiff line numberDiff line change
@@ -11,7 +11,7 @@ func.func @test_argmax_verifier_1(%arg0 : tensor<5x5x1x32xf32>) -> tensor<*xi64>
1111
// CHECK-DAG: [[VAR_1_:%.+]] = stablehlo.constant dense<0> : tensor<i64>
1212
// CHECK-DAG: [[VAR_2_:%.+]] = stablehlo.constant dense<0xFF800000> : tensor<f32>
1313
// CHECK: [[VAR_3_:%.+]] = stablehlo.dynamic_iota [[VAR_0_]], dim = 3 : (tensor<4xindex>) -> tensor<5x5x1x32xi64>
14-
// CHECK: [[VAR_4_:%.+]]:2 = stablehlo.reduce(%arg0 init: %1), (%3 init: %0) across dimensions = [3] : (tensor<5x5x1x32xf32>, tensor<5x5x1x32xi64>, tensor<f32>, tensor<i64>) -> (tensor<5x5x1xf32>, tensor<5x5x1xi64>)
14+
// CHECK: [[VAR_4_:%.+]]:2 = stablehlo.reduce(%arg0 init: [[VAR_2_]]), (%1 init: [[VAR_1_]]) across dimensions = [3] : (tensor<5x5x1x32xf32>, tensor<5x5x1x32xi64>, tensor<f32>, tensor<i64>) -> (tensor<5x5x1xf32>, tensor<5x5x1xi64>)
1515
// CHECK: reducer(%arg1: tensor<f32>, %arg3: tensor<f32>) (%arg2: tensor<i64>, %arg4: tensor<i64>) {
1616
// CHECK: [[VAR_6_:%.+]] = stablehlo.compare GE, %arg1, %arg3, NOTYPE : (tensor<f32>, tensor<f32>) -> tensor<i1>
1717
// CHECK-DAG: [[VAR_7_:%.+]] = stablehlo.select [[VAR_6_]], %arg1, %arg3 : tensor<i1>, tensor<f32>

test/mlir/conversion/onnx_to_stablehlo/Tensor/Pad.mlir

+3-3
Original file line numberDiff line numberDiff line change
@@ -7,7 +7,7 @@ func.func @test_pad_constant(%arg0: tensor<1x3x5x5xf32>) -> tensor<1x3x7x7xf32>
77
%3 = "onnx.Pad"(%arg0, %0, %1, %2) {mode = "constant"} : (tensor<1x3x5x5xf32>, tensor<8xi64>, tensor<f32>, none) -> tensor<1x3x7x7xf32>
88
return %3 : tensor<1x3x7x7xf32>
99
// CHECK-LABEL: func.func @test_pad_constant(%arg0: tensor<1x3x5x5xf32>) -> tensor<1x3x7x7xf32> {
10-
// CHECK-NEXT: %0 = stablehlo.constant dense<2.000000e+00> : tensor<f32>
11-
// CHECK-NEXT: %1 = stablehlo.pad %arg0, %0, low = [0, 0, 1, 1], high = [0, 0, 1, 1], interior = [0, 0, 0, 0] : (tensor<1x3x5x5xf32>, tensor<f32>) -> tensor<1x3x7x7xf32>
12-
// CHECK-NEXT: return %1 : tensor<1x3x7x7xf32>
10+
// CHECK-NEXT: [[CST_:%.+]] = stablehlo.constant dense<2.000000e+00> : tensor<f32>
11+
// CHECK-NEXT: [[PAD_:%.+]] = stablehlo.pad %arg0, [[CST_]], low = [0, 0, 1, 1], high = [0, 0, 1, 1], interior = [0, 0, 0, 0] : (tensor<1x3x5x5xf32>, tensor<f32>) -> tensor<1x3x7x7xf32>
12+
// CHECK-NEXT: return [[PAD_]] : tensor<1x3x7x7xf32>
1313
}

test/mlir/conversion/onnx_to_stablehlo/Tensor/Shape.mlir

+7-5
Original file line numberDiff line numberDiff line change
@@ -4,15 +4,17 @@ func.func @test_shape1(%arg0 : tensor<2x4x8x16xf32>) -> tensor<4xi64> {
44
%0 = "onnx.Shape"(%arg0) : (tensor<2x4x8x16xf32>) -> tensor<4xi64>
55
return %0 : tensor<4xi64>
66
// CHECK: func.func @test_shape1(%arg0: tensor<2x4x8x16xf32>) -> tensor<4xi64> {
7-
// CHECK: %0 = stablehlo.constant dense<[2, 4, 8, 16]> : tensor<4xi64>
8-
// CHECK: return %0 : tensor<4xi64>
7+
// CHECK: [[CST_:%.+]] = stablehlo.constant dense<[2, 4, 8, 16]> : tensor<4xi64>
8+
// CHECK: return [[CST_]] : tensor<4xi64>
99
}
1010

11+
// -----
12+
1113
func.func @test_shape2(%arg0 : tensor<?x4x8x16xf32>) -> tensor<4xi64> {
1214
%0 = "onnx.Shape"(%arg0) : (tensor<?x4x8x16xf32>) -> tensor<4xi64>
1315
return %0 : tensor<4xi64>
1416
// CHECK: func.func @test_shape2(%arg0: tensor<?x4x8x16xf32>) -> tensor<4xi64> {
15-
// CHECK: %0 = shape.shape_of %arg0 : tensor<?x4x8x16xf32> -> tensor<4xindex>
16-
// CHECK: %1 = arith.index_cast %0 : tensor<4xindex> to tensor<4xi64>
17-
// CHECK: return %1 : tensor<4xi64>
17+
// CHECK: [[CST_:%.+]] = shape.shape_of %arg0 : tensor<?x4x8x16xf32> -> tensor<4xindex>
18+
// CHECK: [[CAST_:%.+]] = arith.index_cast [[CST_]] : tensor<4xindex> to tensor<4xi64>
19+
// CHECK: return [[CAST_]] : tensor<4xi64>
1820
}

third_party/stablehlo

Submodule stablehlo updated 98 files

utils/clone-mlir.sh

+1-1
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,3 @@
11
git clone -n https://github.com/llvm/llvm-project.git
22
# Check out a specific branch that is known to work with ONNX-MLIR.
3-
cd llvm-project && git checkout 7ac7d418ac2b16fd44789dcf48e2b5d73de3e715 && cd ..
3+
cd llvm-project && git checkout 20ed5b1f45871612570d3bd447121ac43e083c6a && cd ..

0 commit comments

Comments
 (0)