CMake error when trying to build a project inside a Docker container: can't find the cudart library

920 Views Asked by At

Using an NVIDIA Jetson TX2 running Ubuntu 18.04, with Docker, nvidia-docker2, and l4t-cuda installed on the host system. Main error when compiling:

CMake Error at /usr/local/lib/python3.6/dist-packages/cmake/data/share/cmake-3.25/Modules/FindPackageHandleStandardArgs.cmake:230 (message):
  Could NOT find CUDA (missing: CUDA_CUDART_LIBRARY) (found suitable version
  "10.2", minimum required is "10.2")
Call Stack (most recent call first):
  /usr/local/lib/python3.6/dist-packages/cmake/data/share/cmake-3.25/Modules/FindPackageHandleStandardArgs.cmake:600 (_FPHSA_FAILURE_MESSAGE)
  /usr/local/lib/python3.6/dist-packages/cmake/data/share/cmake-3.25/Modules/FindCUDA.cmake:1266 (find_package_handle_standard_args)
  CMakeLists.txt:17 (find_package)

CMakeLists.txt:

cmake_minimum_required(VERSION 3.5)

project(vision)
enable_testing()

# Variable scopes follow standard rules:
# variables defined here carry over to child scopes, ergo subdirectories.

# Setup ZED libs
find_package(ZED 3 REQUIRED)
include_directories(${ZED_INCLUDE_DIRS})
link_directories(${ZED_LIBRARY_DIR})

# Setup CUDA libs for zed and ai modules.
# NOTE(review): the FindCUDA module is deprecated since CMake 3.10. The
# reported "Could NOT find CUDA (missing: CUDA_CUDART_LIBRARY)" failure means
# libcudart.so is not visible inside the container; either mount/install the
# CUDA runtime in the image, pass -DCUDA_TOOLKIT_ROOT_DIR=/usr/local/cuda,
# or migrate to find_package(CUDAToolkit) (CMake 3.17+).
find_package(CUDA ${ZED_CUDA_VERSION} REQUIRED)
include_directories(${CUDA_INCLUDE_DIRS})
link_directories(${CUDA_LIBRARY_DIRS})

# Setup OpenCV libs
find_package(OpenCV REQUIRED)
include_directories(${OpenCV_INCLUDE_DIRS})  # fixed typo: was ${OpenCV_INLCUDE_DIRS}, which expanded to nothing

# Check if OpenMP is installed
find_package(OpenMP)
# NOTE(review): checkPackage is not a CMake builtin — presumably provided by a
# ZED helper module; confirm it is defined before this point.
checkPackage("OpenMP" "OpenMP not found, please install it to improve performances: 'sudo apt install libomp-dev'")

# TensorRT
set(TENSORRT_ROOT /usr/src/tensorrt/)
find_path(TENSORRT_INCLUDE_DIR NvInfer.h
        HINTS ${TENSORRT_ROOT} PATH_SUFFIXES include/)
message(STATUS "Found TensorRT headers at ${TENSORRT_INCLUDE_DIR}")

set(MODEL_INCLUDE ../code/includes)
set(MODEL_LIB_DIR libs)
set(YAML_INCLUDE ../depends/yaml-cpp/include)
set(YAML_LIB_DIR ../depends/yaml-cpp/libs)

include_directories(${MODEL_INCLUDE} ${YAML_INCLUDE})
link_directories(${MODEL_LIB_DIR} ${YAML_LIB_DIR})

# Setup Darknet libs
#find_library(DARKNET_LIBRARY NAMES dark libdark.so libdarknet.so)
#find_package(dark REQUIRED)

# Setup HTTP libs
find_package(httplib REQUIRED)
find_package(nlohmann_json 3.2.0 REQUIRED)

# System libs
set(SPECIAL_OS_LIBS "pthread")
link_libraries(stdc++fs)

# Compiler settings. Request C++17 portably via CMAKE_CXX_STANDARD instead of
# smuggling -std=c++17 through add_definitions (which is intended for -D
# preprocessor definitions); these variables propagate to subdirectories.
set(CMAKE_CXX_STANDARD 17)
set(CMAKE_CXX_STANDARD_REQUIRED ON)
add_compile_options(-g -O3)

# Add sub directories
add_subdirectory(zed_module)
add_subdirectory(ai_module)
add_subdirectory(http_api_module)
add_subdirectory(executable_module)

option(RUN_TESTS "Build the tests" OFF)

if(RUN_TESTS OR CMAKE_BUILD_TYPE MATCHES Debug)
    add_subdirectory(test)
endif()  # fixed: this endif() was missing, which is itself a fatal configure error

Steps that fail in the Dockerfile, using the image stereolabs/zed:3.7-devel-jetson-jp4.6:


WORKDIR /opt

# Fetch the project sources.
RUN git clone https://github.com/Cruiz102/Vision-Module

# WORKDIR creates the directory if it does not already exist, so the previous
# `RUN mkdir build-debug && cd build-debug` was redundant — and the `cd` had
# no effect anyway, since each RUN starts a fresh shell in the current
# WORKDIR. The `RUN pwd` debug layer is dropped for the same reason.
WORKDIR /opt/Vision-Module/build-debug

# Configure the project (this is the step that fails on FindCUDA).
RUN cmake -DCMAKE_BUILD_TYPE=Release ..

contents of /etc/docker/daemon.json

{
    "runtimes": {
        "nvidia": {
            "path": "/usr/bin/nvidia-container-runtime",
            "runtimeArgs": []
        }
    },
    "default-runtime": "nvidia"
}

Using the Jetson, I've tried using flags to set the toolkit directory, editing daemon.json, reinstalling dependencies, changing Docker images, installing and reinstalling cudart on the host, changing flags, and finishing the build in interactive mode. However, I always get the same error.

1

There are 1 best solutions below

4
On

I looked into Docker some time ago, so I'm not an expert, but as far as I remember, Docker containers are like virtual machines. It doesn't matter whether your PC has CUDA support or whether the libraries are installed on the host — they are not part of your Docker container. And since the base image may not ship with them, this stuff will not be installed right away.

I don't see any code to install it in your container. Are you using an image that has CUDA support? If not, you need to install it via your Dockerfile, not on your host.