diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index 8a2ba9f841434e..0a445e2442e1f9 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -111,6 +111,10 @@ jobs: - platforms: linux/amd64 device: cuda suffix: -cuda + + - platforms: linux/arm64 + device: jetson + suffix: -jetson - platforms: linux/amd64 device: openvino diff --git a/machine-learning/Dockerfile b/machine-learning/Dockerfile index 12fb183c953d43..47411b486a9f4f 100644 --- a/machine-learning/Dockerfile +++ b/machine-learning/Dockerfile @@ -1,4 +1,5 @@ ARG DEVICE=cpu +ARG TARGETARCH FROM python:3.11-bookworm@sha256:20c1819af5af3acba0b2b66074a2615e398ceee6842adf03cd7ad5f8d0ee3daf AS builder-cpu @@ -6,6 +7,8 @@ FROM builder-cpu AS builder-openvino FROM builder-cpu AS builder-cuda +FROM builder-cpu AS builder-jetson + FROM builder-cpu AS builder-armnn ENV ARMNN_PATH=/opt/armnn @@ -32,7 +35,11 @@ RUN poetry config installer.max-workers 10 && \ RUN python3 -m venv /opt/venv COPY poetry.lock pyproject.toml ./ -RUN poetry install --sync --no-interaction --no-ansi --no-root --with ${DEVICE} --without dev +RUN if [ "$DEVICE" = "jetson" ]; then \ + # hack to work around poetry not setting the right filename for the wheel https://github.com/python-poetry/poetry/issues/4472 + wget -q -O onnxruntime_gpu-1.18.0-cp311-cp311-manylinux_aarch64.whl https://nvidia.box.com/shared/static/fy55jvniujjbigr4gwkv8z1ma6ipgspg.whl; fi && \ + poetry install --sync --no-interaction --no-ansi --no-root --with ${DEVICE} --without dev && \ + if [ "$DEVICE" = "jetson" ]; then rm onnxruntime_gpu-1.18.0-cp311-cp311-manylinux_aarch64.whl; fi FROM python:3.11-slim-bookworm@sha256:ed4e985674f478c90ce879e9aa224fbb772c84e39b4aed5155b9e2280f131039 AS prod-cpu @@ -55,6 +62,12 @@ COPY --from=builder-cuda /usr/local/bin/python3 /usr/local/bin/python3 COPY --from=builder-cuda /usr/local/lib/python3.11 /usr/local/lib/python3.11 COPY --from=builder-cuda /usr/local/lib/libpython3.11.so 
/usr/local/lib/libpython3.11.so +FROM nvcr.io/nvidia/l4t-cuda:11.4.19-runtime@sha256:fb22ff080631990dda403fd768acb384dc3745a7e516f5ed1dc4c4944898da78 AS prod-jetson + +COPY --from=builder-jetson /usr/local/bin/python3 /usr/local/bin/python3 +COPY --from=builder-jetson /usr/local/lib/python3.11 /usr/local/lib/python3.11 +COPY --from=builder-jetson /usr/local/lib/libpython3.11.so /usr/local/lib/libpython3.11.so + FROM prod-cpu AS prod-armnn ENV LD_LIBRARY_PATH=/opt/armnn @@ -79,7 +92,7 @@ FROM prod-${DEVICE} AS prod ARG DEVICE RUN apt-get update && \ - apt-get install -y --no-install-recommends tini $(if ! [ "$DEVICE" = "openvino" ]; then echo "libmimalloc2.0"; fi) && \ + apt-get install -y --no-install-recommends tini $(if ! { [ "$DEVICE" = "openvino" ] || [ "$DEVICE" = "jetson" ]; }; then echo "libmimalloc2.0"; fi) && \ apt-get autoremove -yqq && \ apt-get clean && \ rm -rf /var/lib/apt/lists/* diff --git a/machine-learning/poetry.lock b/machine-learning/poetry.lock index bd09bd8469e677..6665e7bc18b737 100644 --- a/machine-learning/poetry.lock +++ b/machine-learning/poetry.lock @@ -2003,6 +2003,28 @@ packaging = "*" protobuf = "*" sympy = "*" +[[package]] +name = "onnxruntime-gpu" +version = "1.18.0" +description = "ONNX Runtime is a runtime accelerator for Machine Learning models" +optional = false +python-versions = "*" +files = [ + {file = "onnxruntime_gpu-1.18.0-cp311-cp311-manylinux_aarch64.whl", hash = "sha256:7bdd6c373611235e43c8707fa528539327ff17a969448adf956ddf177d5fc8e7"}, +] + +[package.dependencies] +coloredlogs = "*" +flatbuffers = "*" +numpy = ">=1.26.4" +packaging = "*" +protobuf = "*" +sympy = "*" + +[package.source] +type = "file" +url = "onnxruntime_gpu-1.18.0-cp311-cp311-manylinux_aarch64.whl" + [[package]] name = "onnxruntime-gpu" version = "1.18.1" @@ -2636,7 +2658,6 @@ files = [ {file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"}, {file = 
"PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"}, {file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"}, - {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a08c6f0fe150303c1c6b71ebcd7213c2858041a7e01975da3a99aed1e7a378ef"}, {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"}, {file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"}, {file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"}, @@ -3607,4 +3628,4 @@ testing = ["coverage (>=5.0.3)", "zope.event", "zope.testing"] [metadata] lock-version = "2.0" python-versions = ">=3.10,<4.0" -content-hash = "b2b053886ca1dd3a3305c63caf155b1976dfc4066f72f5d1ecfc42099db34aab" +content-hash = "7f87eff3cd024cae7e0439fa06faf6404670890537077e6245e945e940364c94" diff --git a/machine-learning/pyproject.toml b/machine-learning/pyproject.toml index a69fb33a8d50ed..dd9c0095e73676 100644 --- a/machine-learning/pyproject.toml +++ b/machine-learning/pyproject.toml @@ -4,7 +4,7 @@ version = "1.114.0" description = "" authors = ["Hau Tran "] readme = "README.md" -packages = [{include = "app"}] +packages = [{ include = "app" }] [tool.poetry.dependencies] python = ">=3.10,<4.0" @@ -12,7 +12,7 @@ insightface = ">=0.7.3,<1.0" opencv-python-headless = ">=4.7.0.72,<5.0" pillow = ">=9.5.0,<11.0" fastapi-slim = ">=0.95.2,<1.0" -uvicorn = {extras = ["standard"], version = ">=0.22.0,<1.0"} +uvicorn = { extras = ["standard"], version = ">=0.22.0,<1.0" } pydantic = "^1.10.8" aiocache = ">=0.12.1,<1.0" rich = ">=13.4.2" @@ -45,7 +45,13 @@ onnxruntime = "^1.15.0" 
optional = true [tool.poetry.group.cuda.dependencies] -onnxruntime-gpu = {version = "^1.17.0", source = "cuda12"} +onnxruntime-gpu = { version = "^1.17.0", source = "cuda12", markers = "platform_machine == 'x86_64'" } + +[tool.poetry.group.jetson] +optional = true + +[tool.poetry.group.jetson.dependencies] +onnxruntime-gpu = { python = "3.11", path = "onnxruntime_gpu-1.18.0-cp311-cp311-manylinux_aarch64.whl", markers = "platform_machine == 'aarch64'" } [tool.poetry.group.openvino] optional = true