Merge ORT RHEL changes Into Kyle's RHEL dev #7498

Merged · 2 commits · Aug 5, 2024
56 changes: 36 additions & 20 deletions build.py
@@ -649,7 +649,8 @@ def onnxruntime_cmake_args(images, library_paths):
         ]
 
     # TRITON_ENABLE_GPU is already set for all backends in backend_cmake_args()
-    if FLAGS.enable_gpu:
+    # TODO: TPRD-334 TensorRT extension is not currently supported by our manylinux build
+    if FLAGS.enable_gpu and target_platform() != "rhel":
         cargs.append(
             cmake_backend_enable(
                 "onnxruntime", "TRITON_ENABLE_ONNXRUNTIME_TENSORRT", True
@@ -680,8 +681,11 @@ def onnxruntime_cmake_args(images, library_paths):
             )
         )
 
-    if (target_machine() != "aarch64") and (
-        TRITON_VERSION_MAP[FLAGS.version][3] is not None
+    # TODO: TPRD-333 OpenVino extension is not currently supported by our manylinux build
+    if (
+        (target_machine() != "aarch64")
+        and (target_platform() != "rhel")
+        and (TRITON_VERSION_MAP[FLAGS.version][3] is not None)
     ):
         cargs.append(
             cmake_backend_enable(
@@ -697,7 +701,7 @@ def onnxruntime_cmake_args(images, library_paths):
             )
         )
 
-    if target_platform() == "igpu":
+    if (target_platform() == "igpu") or (target_platform() == "rhel"):
         cargs.append(
             cmake_backend_arg(
                 "onnxruntime",
@@ -847,7 +851,7 @@ def install_dcgm_libraries(dcgm_version, target_machine):
        )
        return ""
    else:
-        # RHEL has the same install instructions for both aarch64 and x86
+        # RHEL has the same install instructions for both aarch64 and x86
        if target_platform() == "rhel":
            if target_machine == "aarch64":
                return """
@@ -856,15 +860,19 @@ def install_dcgm_libraries(dcgm_version, target_machine):
 RUN dnf config-manager --add-repo https://developer.download.nvidia.com/compute/cuda/repos/rhel8/sbsa/cuda-rhel8.repo \\
     && dnf clean expire-cache \\
     && dnf install -y datacenter-gpu-manager-{}
-""".format(dcgm_version, dcgm_version)
+""".format(
+                    dcgm_version, dcgm_version
+                )
             else:
                 return """
 ENV DCGM_VERSION {}
 # Install DCGM. Steps from https://developer.nvidia.com/dcgm#Downloads
 RUN dnf config-manager --add-repo https://developer.download.nvidia.com/compute/cuda/repos/rhel8/x86_64/cuda-rhel8.repo \\
     && dnf clean expire-cache \\
     && dnf install -y datacenter-gpu-manager-{}
-""".format(dcgm_version, dcgm_version)
+""".format(
+                    dcgm_version, dcgm_version
+                )
         else:
             if target_machine == "aarch64":
                 return """
@@ -877,8 +885,8 @@ def install_dcgm_libraries(dcgm_version, target_machine):
     && apt-get update \\
     && apt-get install -y datacenter-gpu-manager=1:{}
 """.format(
-            dcgm_version, dcgm_version
-        )
+                dcgm_version, dcgm_version
+            )
         else:
             return """
 ENV DCGM_VERSION {}
@@ -890,8 +898,9 @@ def install_dcgm_libraries(dcgm_version, target_machine):
     && apt-get update \\
     && apt-get install -y datacenter-gpu-manager=1:{}
 """.format(
-            dcgm_version, dcgm_version
-        )
+                dcgm_version, dcgm_version
+            )
+

def create_dockerfile_buildbase_rhel(ddir, dockerfile_name, argmap):
df = """
@@ -947,7 +956,7 @@ def create_dockerfile_buildbase_rhel(ddir, dockerfile_name, argmap):
             libarchive-devel \\
             libxml2-devel \\
             numactl-devel \\
-            wget
+            wget

RUN pip3 install --upgrade pip \\
&& pip3 install --upgrade \\
@@ -990,6 +999,7 @@ def create_dockerfile_buildbase_rhel(ddir, dockerfile_name, argmap):
     with open(os.path.join(ddir, dockerfile_name), "w") as dfile:
         dfile.write(df)
 
+
def create_dockerfile_buildbase(ddir, dockerfile_name, argmap):
df = """
ARG TRITON_VERSION={}
@@ -1146,10 +1156,12 @@ def create_dockerfile_cibase(ddir, dockerfile_name, argmap):
     with open(os.path.join(ddir, dockerfile_name), "w") as dfile:
         dfile.write(df)
 
-def create_dockerfile_rhel(
+
+def create_dockerfile_rhel(
     ddir, dockerfile_name, argmap, backends, repoagents, caches, endpoints
 ):
-    pass
+    pass
+
 
def create_dockerfile_linux(
ddir, dockerfile_name, argmap, backends, repoagents, caches, endpoints
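
create_dockerfile_rhel lands here as a stub; nothing in this PR calls it, and RHEL images still flow through create_dockerfile_linux. One way the dispatch might eventually look, sketched under that assumption (the call site below is hypothetical, not part of this change):

    # Hypothetical call site; this PR leaves create_dockerfile_rhel unwired.
    if target_platform() == "rhel":
        create_dockerfile_rhel(
            ddir, dockerfile_name, argmap, backends, repoagents, caches, endpoints
        )
    else:
        create_dockerfile_linux(
            ddir, dockerfile_name, argmap, backends, repoagents, caches, endpoints
        )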
@@ -1299,12 +1311,14 @@ def dockerfile_prepare_container_linux(argmap, backends, enable_gpu, target_machine):
         fi \\
         && [ `id -u $TRITON_SERVER_USER` -eq 1000 ] \\
         && [ `id -g $TRITON_SERVER_USER` -eq 1000 ]
-""".format(gpu_enabled=gpu_enabled)
+""".format(
+        gpu_enabled=gpu_enabled
+    )
 
-    # This
+    # This
     if target_platform() == "rhel":
         df += """
-# Common dependencies.
+# Common dependencies.
 RUN yum install -y \\
         git \\
         gperf \\
@@ -1317,7 +1331,7 @@ def dockerfile_prepare_container_linux(argmap, backends, enable_gpu, target_machine):
         patchelf \\
         wget \\
         numactl-devel \\
-        wget
+        wget
"""
else:
df += """
@@ -1343,8 +1357,10 @@ def dockerfile_prepare_container_linux(argmap, backends, enable_gpu, target_machine):
         wget \\
         {backend_dependencies} \\
         && rm -rf /var/lib/apt/lists/*
-""".format(backend_dependencies=backend_dependencies)
-
+""".format(
+        backend_dependencies=backend_dependencies
+    )
+
df += """
# Set TCMALLOC_RELEASE_RATE for users setting LD_PRELOAD with tcmalloc
ENV TCMALLOC_RELEASE_RATE 200
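
As with the gpu_enabled block above, these fragments are ordinary str.format templates: named placeholders such as {backend_dependencies} are filled by keyword, and any literal braces a fragment needed would have to be doubled. A toy example of the pattern (the package list is made up for illustration):

    # Toy fragment in the same style; the package names are illustrative.
    fragment = """
    RUN apt-get update \\
        && apt-get install -y --no-install-recommends {backend_dependencies} \\
        && rm -rf /var/lib/apt/lists/*
    """
    df = fragment.format(backend_dependencies="libre2-dev libb64-dev")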