
Commit 80f63cb

Merge remote-tracking branch 'upstream/main' into dimparam
2 parents: acdbfeb + 328c2f9

8 files changed: +37 −16 lines

src/Compiler/CompilerOptions.cpp

+8 −4

@@ -926,6 +926,10 @@ std::string getTargetArchOption(bool forLLVMToolchain) {
     int64_t zArchNum = getZArchNum(march, mcpu);
     if (zArchNum != -1)
       return "--march=systemz";
+    // On mac, llc --version seem to want aarch64 or arm64.
+    if (march == "apple-m1" || march == "apple-m2" || march == "apple-m3" ||
+        march == "apple-m4")
+      return "--march=arm64";
   }
   return (march != "") ? "--march=" + march : "";
 }
@@ -1413,11 +1417,11 @@ void initCompilerConfig() {
     setLLVMOption(getLLVMOption() + " --enable-unsafe-fp-math");
   }

-  if (march == "z17")
-    march = "arch15";
-
-  if (march == "native")
+  if (march == "native") {
     march = std::string(llvm::sys::getHostCPUName());
+    if (VerboseOutput)
+      llvm::outs() << "Native machine set as \"" << march << "\"\n";
+  }
 }

 } // namespace onnx_mlir
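
A minimal standalone sketch (not the onnx-mlir code itself) of the two behaviors this file change adds: resolving --march=native through LLVM's host-CPU query, and translating Apple Silicon CPU names into the arch string that llc accepts. llvm::sys::getHostCPUName() is the real LLVM API used in the hunk above; the helper names and the prefix check are hypothetical simplifications (the actual change enumerates apple-m1 through apple-m4 explicitly).

// Hypothetical helpers sketching the behavior of the hunks above.
#include "llvm/ADT/StringRef.h"
#include "llvm/TargetParser/Host.h" // llvm::sys::getHostCPUName()
#include <string>

// "--march=native" is replaced by the host CPU name, e.g. "apple-m2" on an M2 Mac.
static std::string resolveNativeMarch(std::string march) {
  if (march == "native")
    march = std::string(llvm::sys::getHostCPUName());
  return march;
}

// llc wants "arm64" (or "aarch64") rather than the Apple CPU names.
static std::string getLLCArchFlag(const std::string &march) {
  if (llvm::StringRef(march).starts_with("apple-m"))
    return "--march=arm64";
  return march.empty() ? "" : "--march=" + march;
}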

src/Dialect/Mlir/VectorMachineSupport.cpp

+4 −2

@@ -48,8 +48,10 @@ namespace onnx_mlir {
     else
       // Default seems to be SSE
       globalVectorMachineSupport = new SSE42x86VectorMachineSupport();
-    // Arm uses arch
-  } else if (arch.compare("aarch64") == 0 || arch.compare("arm64") == 0) {
+    // Arm uses arch, and arch=native returns apple-mXXX.
+  } else if (arch.compare("aarch64") == 0 || arch.compare("arm64") == 0 ||
+             arch.compare("apple-m1") == 0 || arch.compare("apple-m2") == 0 ||
+             arch.compare("apple-m3") == 0 || arch.compare("apple-m4") == 0) {
     // Arm arch
     globalVectorMachineSupport = new NeonVectorMachineSupport();
   } else {
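
A hypothetical one-line predicate (not part of the diff) capturing the intent of the extended test above: once --march=native resolves to an Apple Silicon CPU name ("apple-m1" through "apple-m4"), the NEON path should be selected exactly as for "aarch64"/"arm64". The diff spells out each name; a prefix check is one possible simplification.

#include <string>

// True when the arch string should map to NEON vector machine support.
static bool archUsesNeon(const std::string &arch) {
  return arch == "aarch64" || arch == "arm64" ||
         arch.rfind("apple-m", 0) == 0; // prefix test covers apple-m1..m4
}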

src/Dialect/ONNX/ONNXOps/Canonicalize.td

+1 −1

@@ -707,7 +707,7 @@ def ReorderReluMaxPoolPattern : Pattern<
       $X,
       $auto_pad, $ceil_mode, $dilations, $kernel_shape,
       $pads, $storage_order, $strides,
-      (returnType (GetReturnTypeForMaxPool2D $X))))],
+      (returnType (GetReturnTypeForMaxPool2D $X)), (location $maxpool)), (location $relu))],
   [
     (HasRankOf<4> $X),
     (IsStaticShapeTensor:$maxpool),
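
The two added (location ...) directives make the TableGen-generated rewrite tag the reordered MaxPool and Relu with the source locations of the original ops instead of an unknown or fused location, which is what the new lit test added in this commit checks via --mlir-print-debuginfo. A rough C++ analogue (hypothetical, not the generated code) is simply to thread the old op's Location into whatever builder creates the replacement:

#include "mlir/IR/PatternMatch.h"

// Create a replacement op that reports the same source location as `oldOp`.
// The Location stored in OperationState is what --mlir-print-debuginfo prints.
static mlir::Operation *createAtSameLoc(mlir::PatternRewriter &rewriter,
    mlir::Operation *oldOp, llvm::StringRef newOpName,
    mlir::ValueRange operands, mlir::TypeRange resultTypes) {
  mlir::OperationState state(oldOp->getLoc(), newOpName);
  state.addOperands(operands);
  state.addTypes(resultTypes);
  return rewriter.create(state);
}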

src/Runtime/python/onnxmlirdocker.py

+2 −2

@@ -225,8 +225,8 @@ def getSession(self):
             from PyRuntime import OMExecutionSession
         except ImportError:
             raise ImportError(
-                "Looks like you did not build the PyRuntime target, build it by running `make PyRuntime`."
-                "You may need to set ONNX_MLIR_HOME to `onnx-mlir/build/Debug` since `make PyRuntime` outputs to `build/Debug` by default"
+                "Looks like you did not build the PyRuntimeC target, build it by running `make PyRuntimeC`."
+                "You may need to set ONNX_MLIR_HOME to `onnx-mlir/build/Debug` since `make PyRuntimeC` outputs to `build/Debug` by default"
             )

         return OMExecutionSession(self.compiled_model, self.compile_tag)

(new test file; path not shown in this view)

+15 −0

@@ -0,0 +1,15 @@
+// RUN: onnx-mlir-opt --shape-inference --canonicalize="test-convergence=true" --shape-inference --cse %s -split-input-file --mlir-print-debuginfo | FileCheck %s
+
+
+func.func @test_reorder_relu_maxpool(%arg0: tensor<1x64x32x32xf32>) -> tensor<1x64x16x16xf32> {
+  %0 = "onnx.Relu"(%arg0) : (tensor<1x64x32x32xf32>) -> tensor<1x64x32x32xf32> loc("Relu")
+  %1 = "onnx.MaxPoolSingleOut"(%0) {auto_pad = "NOTSET", ceil_mode = 0 : si64, kernel_shape = [2, 2], onnx_node_name = "onnx.MaxPoolSingleOut_1", storage_order = 0 : si64, strides = [2, 2]} : (tensor<1x64x32x32xf32>) -> tensor<1x64x16x16xf32> loc("MaxPool")
+  return %1 : tensor<1x64x16x16xf32>
+
+// CHECK-LABEL: func @test_reorder_relu_maxpool
+// CHECK: [[VAR_0_:%.+]] = "onnx.MaxPoolSingleOut"(%arg0) {auto_pad = "NOTSET", ceil_mode = 0 : si64, kernel_shape = [2, 2], storage_order = 0 : si64, strides = [2, 2]} : (tensor<1x64x32x32xf32>) -> tensor<1x64x16x16xf32> loc([[LOC_MAX_POOL:#.+]])
+// CHECK: [[VAR_1_:%.+]] = "onnx.Relu"([[VAR_0_]]) : (tensor<1x64x16x16xf32>) -> tensor<1x64x16x16xf32> loc([[LOC_RELU:#.+]])
+// CHECK-DAG: [[LOC_MAX_POOL:#.+]] = loc("MaxPool")
+// CHECK-DAG: [[LOC_RELU:#.+]] = loc("Relu")
+}
+

utils/RunONNXModel.py

+3 −3

@@ -55,9 +55,9 @@
     from PyRuntime import OMExecutionSession
 except ImportError:
     raise ImportError(
-        "Looks like you did not build the PyRuntime target, build it by running"
-        " `make PyRuntime`. You may need to set ONNX_MLIR_HOME to"
-        " `onnx-mlir/build/Debug` since `make PyRuntime` outputs to"
+        "Looks like you did not build the PyRuntimeC target, build it by running"
+        " `make PyRuntimeC`. You may need to set ONNX_MLIR_HOME to"
+        " `onnx-mlir/build/Debug` since `make PyRuntimeC` outputs to"
         " `build/Debug` by default."
     )
utils/onnxmlirrun.py

+2 −2

@@ -82,7 +82,7 @@ def __init__(self, model_path, target="cpu", **kwarg):
             from PyRuntime import OMExecutionSession
         except ImportError:
             raise ImportError(
-                "Looks like you did not build the PyRuntime target, build it by running `make PyRuntime`.You may need to set ONNX_MLIR_HOME to `onnx-mlir/build/Debug` since `make PyRuntime` outputs to `build/Debug` by default"
+                "Looks like you did not build the PyRuntimeC target, build it by running `make PyRuntimeC`.You may need to set ONNX_MLIR_HOME to `onnx-mlir/build/Debug` since `make PyRuntimeC` outputs to `build/Debug` by default"
             )
         # Initialize status
         self.compiled = False
@@ -121,7 +121,7 @@ def loadSession(self):
             from PyRuntime import OMExecutionSession
         except ImportError:
             raise ImportError(
-                "Looks like you did not build the PyRuntime target, build it by running `make PyRuntime`.You may need to set ONNX_MLIR_HOME to `onnx-mlir/build/Debug` since `make PyRuntime` outputs to `build/Debug` by default"
+                "Looks like you did not build the PyRuntimeC target, build it by running `make PyRuntimeC`.You may need to set ONNX_MLIR_HOME to `onnx-mlir/build/Debug` since `make PyRuntimeC` outputs to `build/Debug` by default"
             )

         # Use the generated shared library to create an execution session.

utils/python/transformers/run_gpt2_from_huggingface.py

+2 −2

@@ -34,8 +34,8 @@
     from PyCompileAndRuntime import OMCompileExecutionSession
 except ImportError:
     raise ImportError(
-        "Looks like you did not build the PyRuntime target, build it by running `make PyRuntime`."
-        "You may need to set ONNX_MLIR_HOME to `onnx-mlir/build/Debug` since `make PyRuntime` outputs to `build/Debug` by default"
+        "Looks like you did not build the PyRuntimeC target, build it by running `make PyRuntimeC`."
+        "You may need to set ONNX_MLIR_HOME to `onnx-mlir/build/Debug` since `make PyRuntimeC` outputs to `build/Debug` by default"
     )

 # Information to download onnx models from HuggingFace.
