diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index 55a74bd8..72e26f87 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -34,10 +34,10 @@ "NVIDIA_DRIVER_CAPABILITIES": "graphics,video,compute,utility,display", // Set the following environment variables to use the same folder name as the host machine. // This is needed to launch container from the workspace folder that is not same as the SDK source root folder. - "HOLOSCAN_PUBLIC_FOLDER": "${env:HOLOSCAN_PUBLIC_FOLDER}", + "HOLOSCAN_PUBLIC_FOLDER": "${localEnv:HOLOSCAN_PUBLIC_FOLDER}", // This is necessary to prevent memory overuse during the SDK build process. // The `CMAKE_BUILD_PARALLEL_LEVEL` environment variable is set by the `run vscode` command. - "CMAKE_BUILD_PARALLEL_LEVEL": "${env:CMAKE_BUILD_PARALLEL_LEVEL}", + "CMAKE_BUILD_PARALLEL_LEVEL": "${localEnv:CMAKE_BUILD_PARALLEL_LEVEL}", }, "mounts": [ "source=/tmp/.X11-unix,target=/tmp/.X11-unix,type=bind,consistency=cached", @@ -64,7 +64,7 @@ "shd101wyy.markdown-preview-enhanced", "cschlosser.doxdocgen", "mine.cpplint", - "benjamin-simmonds.pythoncpp-debug" , // Python/C++ debugging + "benjamin-simmonds.pythoncpp-debug", // Python/C++ debugging ] } }, diff --git a/scripts/debug_python b/.vscode/debug_python similarity index 96% rename from scripts/debug_python rename to .vscode/debug_python index 8aab2e2d..02faf1d3 100755 --- a/scripts/debug_python +++ b/.vscode/debug_python @@ -19,7 +19,7 @@ SCRIPT_DIR=$(dirname "$(readlink -f "$0")") if [ -e ${SCRIPT_DIR}/debug_env.sh ]; then - # User can place debug_env.sh in the same directory as this script (scripts/debug_env.sh would be ignored in git repo) + # User can place debug_env.sh in the same directory as this script (.vscode/debug_env.sh would be ignored in git repo) . 
${SCRIPT_DIR}/debug_env.sh fi diff --git a/.vscode/launch.json b/.vscode/launch.json index 0e8231cc..ee2d522d 100644 --- a/.vscode/launch.json +++ b/.vscode/launch.json @@ -36,7 +36,7 @@ "request": "launch", "program": "/usr/bin/bash", "args": [ - "${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/scripts/debug_python", + "${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/.vscode/debug_python", "${file}", ], "stopAtEntry": false, @@ -98,7 +98,7 @@ "program": "/usr/bin/bash", // https://github.com/catchorg/Catch2/blob/devel/docs/command-line.md#specifying-which-tests-to-run "args": [ - "${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/scripts/debug_python", + "${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/.vscode/debug_python", "${command:cmake.buildDirectory}/examples/aja_capture/python/aja_capture.py", ], "stopAtEntry": false, @@ -128,7 +128,7 @@ "request": "launch", "program": "/usr/bin/bash", "args": [ - "${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/scripts/debug_python", + "${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/.vscode/debug_python", "${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/examples/bring_your_own_model/python/byom.py", ], "stopAtEntry": false, @@ -212,7 +212,7 @@ "request": "launch", "program": "/usr/bin/bash", "args": [ - "${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/scripts/debug_python", + "${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/.vscode/debug_python", "${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/examples/conditions/periodic/python/ping_periodic.py", ], "stopAtEntry": false, @@ -242,7 +242,7 @@ "request": "launch", "program": "/usr/bin/bash", "args": [ - "${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/scripts/debug_python", + "${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/.vscode/debug_python", "${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/examples/cupy_native/matmul.py", ], "stopAtEntry": false, @@ -295,7 +295,7 @@ "request": "launch", "program": "/usr/bin/bash", "args": [ - 
"${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/scripts/debug_python", + "${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/.vscode/debug_python", "${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/examples/flow_tracker/python/flow_tracker.py", ], "stopAtEntry": false, @@ -348,7 +348,7 @@ "request": "launch", "program": "/usr/bin/bash", "args": [ - "${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/scripts/debug_python", + "${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/.vscode/debug_python", "${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/examples/hello_world/python/hello_world.py", ], "stopAtEntry": false, @@ -401,7 +401,7 @@ "request": "launch", "program": "/usr/bin/bash", "args": [ - "${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/scripts/debug_python", + "${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/.vscode/debug_python", "${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/examples/holoviz/python/holoviz_geometry.py", ], "stopAtEntry": false, @@ -431,7 +431,7 @@ "request": "launch", "program": "/usr/bin/bash", "args": [ - "${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/scripts/debug_python", + "${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/.vscode/debug_python", "${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/examples/holoviz/python/holoviz_geometry_3d.py", ], "stopAtEntry": false, @@ -461,7 +461,7 @@ "request": "launch", "program": "/usr/bin/bash", "args": [ - "${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/scripts/debug_python", + "${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/.vscode/debug_python", "${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/examples/holoviz/python/holoviz_views.py", ], "stopAtEntry": false, @@ -518,7 +518,7 @@ "request": "launch", "program": "/usr/bin/bash", "args": [ - "${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/scripts/debug_python", + "${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/.vscode/debug_python", "${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/examples/multithread/python/multithread.py", ], 
"stopAtEntry": false, @@ -552,7 +552,7 @@ "request": "launch", "program": "/usr/bin/bash", "args": [ - "${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/scripts/debug_python", + "${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/.vscode/debug_python", "${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/examples/numpy_native/convolve.py", ], "stopAtEntry": false, @@ -651,7 +651,7 @@ "request": "launch", "program": "/usr/bin/bash", "args": [ - "${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/scripts/debug_python", + "${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/.vscode/debug_python", "${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/examples/ping_custom_op/python/ping_custom_op.py", ], "stopAtEntry": false, @@ -733,7 +733,7 @@ "request": "launch", "program": "/usr/bin/bash", "args": [ - "${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/scripts/debug_python", + "${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/.vscode/debug_python", "${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/examples/ping_distributed/python/ping_distributed.py", "--driver", "--worker", @@ -793,7 +793,7 @@ "request": "launch", "program": "/usr/bin/bash", "args": [ - "${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/scripts/debug_python", + "${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/.vscode/debug_python", "${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/examples/ping_multi_port/python/ping_multi_port.py", ], "stopAtEntry": false, @@ -846,7 +846,7 @@ "request": "launch", "program": "/usr/bin/bash", "args": [ - "${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/scripts/debug_python", + "${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/.vscode/debug_python", "${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/examples/ping_simple/python/ping_simple.py", ], "stopAtEntry": false, @@ -899,7 +899,7 @@ "request": "launch", "program": "/usr/bin/bash", "args": [ - "${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/scripts/debug_python", + 
"${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/.vscode/debug_python", "${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/examples/ping_simple_run_async/python/ping_simple_run_async.py", ], "stopAtEntry": false, @@ -952,7 +952,7 @@ "request": "launch", "program": "/usr/bin/bash", "args": [ - "${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/scripts/debug_python", + "${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/.vscode/debug_python", "${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/examples/ping_vector/python/ping_vector.py", ], "stopAtEntry": false, @@ -1005,7 +1005,7 @@ "request": "launch", "program": "/usr/bin/bash", "args": [ - "${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/scripts/debug_python", + "${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/.vscode/debug_python", "${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/examples/resources/clock/python/ping_clock.py", ], "stopAtEntry": false, @@ -1058,7 +1058,7 @@ "request": "launch", "program": "/usr/bin/bash", "args": [ - "${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/scripts/debug_python", + "${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/.vscode/debug_python", "${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/examples/tensor_interop/python/tensor_interop.py", ], "stopAtEntry": false, @@ -1127,7 +1127,7 @@ "request": "launch", "program": "/usr/bin/bash", "args": [ - "${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/scripts/debug_python", + "${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/.vscode/debug_python", "${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/examples/v4l2_camera/python/v4l2_camera.py", ], "stopAtEntry": false, @@ -1188,7 +1188,7 @@ "request": "launch", "program": "/usr/bin/bash", "args": [ - "${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/scripts/debug_python", + "${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/.vscode/debug_python", "${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/examples/video_replayer/python/video_replayer.py", ], "stopAtEntry": false, @@ -1249,7 +1249,7 
@@ "request": "launch", "program": "/usr/bin/bash", "args": [ - "${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/scripts/debug_python", + "${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/.vscode/debug_python", "${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/examples/video_replayer_distributed/python/video_replayer_distributed.py", ], "stopAtEntry": false, @@ -1317,7 +1317,7 @@ "request": "launch", "program": "/usr/bin/bash", "args": [ - "${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/scripts/debug_python", + "${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/.vscode/debug_python", "-m", "pytest", "-v", diff --git a/.vscode/settings.json b/.vscode/settings.json index 5dba339e..6174db62 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -118,7 +118,10 @@ "charconv": "cpp", "cuchar": "cpp", "propagate_const": "cpp", - "ranges": "cpp" + "ranges": "cpp", + "barrier": "cpp", + "latch": "cpp", + "syncstream": "cpp" }, "git.alwaysSignOff": true, "git.untrackedChanges": "separate", diff --git a/.vscode/tasks.json b/.vscode/tasks.json index 0fdf1b92..50cac519 100644 --- a/.vscode/tasks.json +++ b/.vscode/tasks.json @@ -10,6 +10,7 @@ "env": { "PATH": "${env:HOME}/.local/bin:${env:PATH}", "CUDACXX": "/usr/local/cuda/bin/nvcc", + "CMAKE_BUILD_PARALLEL_LEVEL": "${env:CMAKE_BUILD_PARALLEL_LEVEL}", } }, "presentation": { diff --git a/CMakeLists.txt b/CMakeLists.txt index 47bd06f8..2ced8e43 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -159,6 +159,11 @@ if(HOLOSCAN_BUILD_GXF_EXTENSIONS) add_subdirectory(gxf_extensions) endif() +# ############################################################################## +# # Configure scripts +# ############################################################################## +add_subdirectory(scripts) + # ############################################################################## # # Package project # ############################################################################## @@ -192,6 +197,7 @@ list(APPEND 
HOLOSCAN_INSTALL_TARGETS op_video_stream_recorder op_video_stream_replayer op_v4l2 + spdlog_logger ) if(HOLOSCAN_BUILD_LIBTORCH) @@ -250,17 +256,37 @@ install(FILES ${${HOLOSCAN_PACKAGE_NAME}_BINARY_DIR}/include/holoscan/version_co # Install GXF install(DIRECTORY - ${GXF_core_INCLUDE_DIR}/common - ${GXF_core_INCLUDE_DIR}/gxf/core - ${GXF_core_INCLUDE_DIR}/gxf/cuda - ${GXF_core_INCLUDE_DIR}/gxf/multimedia - ${GXF_core_INCLUDE_DIR}/gxf/network - ${GXF_core_INCLUDE_DIR}/gxf/npp - ${GXF_core_INCLUDE_DIR}/gxf/serialization - ${GXF_core_INCLUDE_DIR}/gxf/std + ${GXF_INCLUDE_DIR}/common + ${GXF_INCLUDE_DIR}/gxf/app + ${GXF_INCLUDE_DIR}/gxf/core + ${GXF_INCLUDE_DIR}/gxf/cuda + ${GXF_INCLUDE_DIR}/gxf/logger + ${GXF_INCLUDE_DIR}/gxf/multimedia + ${GXF_INCLUDE_DIR}/gxf/serialization + ${GXF_INCLUDE_DIR}/gxf/std + ${GXF_INCLUDE_DIR}/gxf/ucx DESTINATION "include/gxf" COMPONENT "holoscan-gxf_libs" ) +foreach(_component ${HOLOSCAN_GXF_COMPONENTS}) + string(TOUPPER "${CMAKE_BUILD_TYPE}" _build_type) + get_target_property(GXF_${_component}_LOCATION GXF::${_component} IMPORTED_LOCATION_${_build_type}) + if(NOT GXF_${_component}_LOCATION) + get_target_property(GXF_${_component}_LOCATION GXF::${_component} IMPORTED_LOCATION) + endif() + if("${_component}" STREQUAL "gxe") + install(FILES ${HOLOSCAN_GXE_LOCATION} + DESTINATION "bin" + PERMISSIONS OWNER_READ OWNER_WRITE OWNER_EXECUTE GROUP_READ GROUP_EXECUTE WORLD_READ WORLD_EXECUTE + COMPONENT "holoscan-gxf_libs" + ) + else() + install(FILES "${GXF_${_component}_LOCATION}" + DESTINATION ${HOLOSCAN_INSTALL_LIB_DIR} + COMPONENT "holoscan-gxf_libs" + ) + endif() +endforeach() # Install CMake script to build GXE applications install(FILES "${CMAKE_SOURCE_DIR}/cmake/modules/GenerateGXEAppInstall.cmake" @@ -286,21 +312,6 @@ DESTINATION "${HOLOSCAN_INSTALL_LIB_DIR}/cmake/holoscan" COMPONENT "holoscan-core" ) -install(FILES "${CMAKE_SOURCE_DIR}/scripts/download_ngc_data" - "${CMAKE_SOURCE_DIR}/scripts/convert_video_to_gxf_entities.py" - 
"${CMAKE_SOURCE_DIR}/scripts/gxf_entity_codec.py" -DESTINATION "${HOLOSCAN_INSTALL_LIB_DIR}/cmake/holoscan" -PERMISSIONS OWNER_EXECUTE OWNER_WRITE OWNER_READ -COMPONENT "holoscan-core" -) - -# Install CMake script to download example data from NGC -install(FILES "${CMAKE_SOURCE_DIR}/scripts/download_example_data" -DESTINATION "examples" -PERMISSIONS OWNER_EXECUTE OWNER_WRITE OWNER_READ -COMPONENT "holoscan-core" -) - # Define docs and hooks set(holoscan_doc_string [=[ libholoscan: Holoscan SDK C++ API @@ -316,19 +327,30 @@ if(NOT TARGET fmt::fmt-header-only) add_library(fmt::fmt-header-only INTERFACE IMPORTED) endif() -set(_GXFlibs core std multimedia cuda network npp serialization behavior_tree) +set(_GXF_components @HOLOSCAN_GXF_COMPONENTS@) -foreach(gxflib IN LISTS _GXFlibs) - if(NOT TARGET GXF::${gxflib}) - add_library(GXF::${gxflib} SHARED IMPORTED) - set_target_properties(GXF::${gxflib} PROPERTIES - IMPORTED_LOCATION "${PACKAGE_PREFIX_DIR}/lib/libgxf_${gxflib}.so" +foreach(gxf_component IN LISTS _GXF_components) + if(NOT TARGET GXF::${gxf_component} AND NOT (${gxf_component} STREQUAL "gxe")) + add_library(GXF::${gxf_component} SHARED IMPORTED) + set_target_properties(GXF::${gxf_component} PROPERTIES + IMPORTED_LOCATION "${PACKAGE_PREFIX_DIR}/lib/libgxf_${gxf_component}.so" IMPORTED_NO_SONAME ON INTERFACE_INCLUDE_DIRECTORIES "${PACKAGE_PREFIX_DIR}/include;${PACKAGE_PREFIX_DIR}/include/gxf" ) endif() endforeach() +if(TARGET GXF::ucx) + # GXF UCX classes publicly depend on UCX headers. + # Workaround to include those headers without explicitly providing UCX targets. 
+ # http://cdash.nvidia.com/viewBuildError.php?buildid=4461 + set_property( + TARGET GXF::ucx + APPEND PROPERTY + INTERFACE_INCLUDE_DIRECTORIES "${PACKAGE_PREFIX_DIR}/include/3rdparty/ucx" + ) +endif() + if(NOT TARGET GXF::gxe) add_executable(GXF::gxe IMPORTED) set_target_properties(GXF::gxe PROPERTIES @@ -342,6 +364,7 @@ set(GXF_EXTENSIONS_DIR "${PACKAGE_PREFIX_DIR}/lib/gxf_extensions") list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_LIST_DIR}") ]=]) +string(CONFIGURE "${holoscan_install_hook_code_string}" holoscan_install_hook_code_string @ONLY) set(holoscan_build_hook_code_string [=[ ]=]) diff --git a/DEVELOP.md b/DEVELOP.md new file mode 100644 index 00000000..df13cc4d --- /dev/null +++ b/DEVELOP.md @@ -0,0 +1,217 @@ +# Developer Resources + +This document aims to guide users with recommended and advanced workflows to build and use Holoscan SDK. This is generally not the simplest way to use the SDK, so make sure to review the [project README](./README.md) before getting started. + +> **⚠️ Disclaimer**: we only recommend building the SDK from source if you are a developer of the SDK, or need to build the SDK with debug symbols or other options not used as part of the published packages. +> - If you want to write your own operator or application, you can use the SDK as a dependency (and contribute to [HoloHub](https://github.com/nvidia-holoscan/holohub)). +> - If you need to make other modifications to the SDK, [file a feature or bug request](https://forums.developer.nvidia.com/c/healthcare/holoscan-sdk/320/all). +> - Refer to the [Holoscan SDK User Guide installation instructions](https://docs.nvidia.com/holoscan/sdk-user-guide/sdk_installation.html#install-the-sdk) for guidance on installing Holoscan SDK from published packages. 
+ +## Table of Contents + +- [Building the SDK from source](#building-the-sdk-from-source) + - [Prerequisites](#prerequisites) + - [(Recommended) using the `run` script](#recommended-using-the-run-script) + - [Cross-compilation](#cross-compilation) + - [(Advanced) Docker + CMake](#advanced-docker--cmake) + - [(Advanced) Local environment + CMake](#advanced-local-environment--cmake) +- [Runtime Container](#runtime-container) +- [Utilities](#utilities) + - [Testing](#testing) + - [Linting](#linting) + - [VSCode](#vscode) + +## Building the SDK from source + +### Prerequisites + +- Prerequisites for each supported platform are documented in [the user guide](https://docs.nvidia.com/holoscan/sdk-user-guide/sdk_installation.html#prerequisites). +- To build and run the SDK in a containerized environment (recommended) you'll need: + - the [NVIDIA Container Toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html) v1.12.2+ + - [Docker](https://docs.docker.com/engine/install/ubuntu/#install-using-the-repository), including the buildx plugin (`docker-buildx-plugin`) + +### (Recommended) Using the `run` script + +Call **`./run build`** within the repository to build the build container and the CMake project. + +- *If you encounter errors during the CMake build, you can execute `./run clear_cache` to remove cache/build/install folders* +- *Execute `./run build --help` for more information* +- *Execute `./run build --dryrun` to see the commands that will be executed* +- *That command can be broken-up in more granular commands also:* + + ```sh + ./run check_system_deps # ensure the system is properly configured for building + ./run build_image # create the build Docker container + ./run build # run the CMake configuration, build, and install steps + ``` + +Call the **`./run launch`** command to start and enter the build container. 
+ +- *You can run from the `install` or `build` tree by passing the working directory as an argument (ex: `./run launch install`)* +- *Execute `./run launch --help` for more information* +- *Execute `./run launch --dryrun` to see the commands that will be executed* +- *Execute `./run launch --run-cmd "..."` to execute a bash command directly in the container* + +Run the [**examples**](./examples#readme) inside the container by running their respective commands listed within each directory README file. + +### Cross-compilation + +While the Dockerfile to build the SDK does not currently support true cross-compilation, you can compile the Holoscan SDK for the developer kits (arm64) from a x86_64 host using an emulation environment. + +1. [Install qemu](https://github.com/multiarch/qemu-user-static) +2. Clear your build cache: `./run clear_cache` +3. Rebuild for `linux/arm64` using `--arch|-a` or `HOLOSCAN_BUILD_ARCH`: + - `./run build --arch arm64` + - `HOLOSCAN_BUILD_ARCH=arm64 ./run build` + +You can then copy the `install` folder generated by CMake to a developer kit with a configured environment or within a container to use for running and developing applications. + +### (Advanced) Docker + CMake + +The [`run`](./run) script mentioned above is helpful to understand how Docker and CMake are configured and run, as commands will be printed when running it or using `--dryrun`. +We recommend looking at those commands if you want to use Docker and CMake manually, and reading the comments inside the script for details about each parameter (specifically the `build()` and `launch()` methods). + +### (Advanced) Local environment + CMake + +> **⚠️ Disclaimer**: this method of building the SDK is not actively tested or maintained. Instructions below might go out of date. + +#### Software Requirements + +To build the Holoscan SDK on a local environment, the following versions of dev dependencies are needed (or tested). 
The last column refers to the stage (`FROM`) in the [Dockerfile](./Dockerfile) where respective commands can be found to build/install these dependencies. + +| Dependency | Min version | Needed by | Dockerfile stage | +|---|---|---|---| +| CUDA | 12.2 | Core SDK | base | +| gRPC | 1.54.2 | Core SDK | grpc-builder | +| UCX | 1.15.0 | Core SDK | ucx-builder | +| GXF | 3.1 | Core SDK | gxf-downloader | +| MOFED | 23.07 | ConnectX | mofed-installer | +| TensorRT | 8.6.1 | Inference operator | base | +| ONNX Runtime | 1.15.1 | Inference operator | onnxruntime-downloader | +| LibTorch | 2.1.0 | Inference operator
(torch plugin) | torch-downloader-[x86_64\|arm64] | +| TorchVision | 0.16.0 | Inference operator
(torch plugin) | torchvision-downloader-[x86_64\|arm64] | +| Vulkan SDK | 1.3.216 | Holoviz operator | vulkansdk-builder | +| Vulkan loader and
validation layers | 1.3.204 | Holoviz operator | dev | +| spirv-tools | 2022.1 | Holoviz operator | dev | +| V4L2 | 1.22.1 | V4L2 operator | dev | +| CMake | 3.24.0 | Build process | build-tools | +| Patchelf | N/A | Build process | build-tools | + +Note: refer to the [Dockerfile](./Dockerfile) for other dependencies which are not needed to build, but might be needed for: + +- runtime (openblas/mkl for torch, egl for headless rendering, cloudpickle for distributed python apps, cupy for some examples...) +- testing (valgrind, pytest, xvfb...) +- utilities (v4l-utils, ...) + +For CMake to find these dependencies, install them in default system paths, or pass `CMAKE_PREFIX_PATH`, `CMAKE_LIBRARY_PATH`, and/or `CMAKE_INCLUDE_PATH` during configuration. + +#### Build example + +```sh +# Configure +cmake -S $source_dir -B $build_dir \ + -G Ninja \ + -D CMAKE_BUILD_TYPE=Release \ + -D CUDAToolkit_ROOT:PATH="/usr/local/cuda" + +# Build +cmake --build $build_dir -j + +# Install +cmake --install $build_dir --prefix $install_dir +``` + +The commands to run the [**examples**](./examples#readme) are then the same as in the dockerized environment, and can be found in the respective source directory READMEs. + +## Runtime Container + +There are multiple containers associated with Holoscan: + +- The **build** container generated by the [top-level Dockerfile](./Dockerfile) is designed to pull dependencies to build and test the SDK itself. The image does not contain the SDK itself, as it is mounted during `docker run` to run the cmake build or run tests. +- The **development** container available at [NGC | Holoscan Container](https://catalog.ngc.nvidia.com/orgs/nvidia/teams/clara-holoscan/containers/holoscan/tags) which includes all the development tools and libraries needed to *build* Holoscan applications. + - This image is ~13 GB when uncompressed. However, once a Holoscan application is created, it does not need all those same development tools just to *run* an application. 
+- To address this, a **runtime** container can now be generated with the [runtime_docker/Dockerfile](./runtime_docker/Dockerfile) which contains only the runtime dependencies of the Holoscan SDK. + - This Dockerfile is based on the [CUDA-base](https://catalog.ngc.nvidia.com/orgs/nvidia/containers/cuda) image, which begins with Ubuntu:22.04 and installs the CUDA runtime and Compat package. + - This image is ~8.7 GB on x86_64, and can be further reduced based on use cases (see below). + +> ⚠️ Disclaimer: Currently iGPU is not supported by the runtime container + +### Generate the runtime container + +The [`run`](./run) script contains the command `build_run_image` to build the runtime Holoscan SDK image: + +```bash +./run build_run_image +``` + +Once this image is built, it can be run exactly as the Holoscan development container on NGC is. Simply follow the 'Running the container' instructions beginning at step #3 at [NGC | Holoscan Container](https://catalog.ngc.nvidia.com/orgs/nvidia/teams/clara-holoscan/containers/holoscan), but replace `${NGC_CONTAINER_IMAGE_PATH}` by `holoscan-sdk-run-[-]` in step #4 (name outputted at the end of the above command). + +### Further Reducing Runtime Size + +If you have a specific application you wish to deploy, you can further reduce this runtime image size in two ways: + +1. **Targeting different stages of the [runtime Dockerfile](./runtime_docker/Dockerfile)**. + 1. add `--cpp` to the command above to not pull in python dependencies. + 2. add `--cpp-no-mkl` to the command above to not pull in MKL (x86_64-only libtorch dependency) in addition to the above. + +2. **Modifying the Dockerfile** + +The [runtime Dockerfile](./runtime_docker/Dockerfile) is thoroughly documented to indicate which dependency is used by which component of the Holoscan SDK. 
If you do not use some of these components (ex: Torch inference backend, ONNX Runtime inference backend, TensorRT inference backend, Python/Cupy, format_converter operator, etc...), comment out the appropriate line in the Dockerfile and run the build command above. + +## Utilities + +Some utilities are available in the [`scripts`](./scripts) folder, others closer to the build process are listed below: + +### Testing + +Existing tests are using GTest for C++ and pytest for Python, and can be found under [tests](./tests/) and [python/tests](./python/tests/) respectively. The Holoscan SDK uses CTest as a framework to build and execute these tests. + +Run the tests using the following command: + +```sh +./run test +``` + +> Note: Run `run test --help` to see additional options. + +### Linting + +Run the following command to run various linting tools on the repository: + +```sh +./run lint # optional: specify directories +``` + +> Note: Run `run lint --help` to see the list of tools that are used. If a lint command fails due to a missing module or executable on your system, you can install it using `python3 -m pip install <module_name>`. + +### Building the User Guide + +The source of the user guide hosted at <https://docs.nvidia.com/holoscan/sdk-user-guide/> is located in [docs](./docs/). It can be built with the following commands: + +- PDF: `./run build_pdf` +- HTML: `./run build_html` (auto-reload: `./run live_html`) + +Run `./run help` for more commands related to the user guide documentation. + +### VSCode + +Visual Studio Code can be utilized to develop the Holoscan SDK. The `.devcontainer` folder holds the configuration for setting up a [development container](https://code.visualstudio.com/docs/remote/containers) with all necessary tools and libraries installed. + +The `./run` script contains `vscode` and `vscode_remote` commands for launching Visual Studio Code in a container or from a remote machine, respectively. + +- To launch Visual Studio Code in a dev container, use `./run vscode`. 
+- To attach to an existing dev container from a remote machine, use `./run vscode_remote`. For more information, refer to the instructions from `./run vscode_remote -h`. + +Once Visual Studio Code is launched, the development container will be built and the recommended extensions will be installed automatically, along with CMake being configured. + +#### Configuring CMake in the Development Container + +For manual configuration of CMake, open the command palette (`Ctrl + Shift + P`) and run the `CMake: Configure` command. + +#### Building the Source Code in the Development Container + +The source code in the development container can be built by either pressing `Ctrl + Shift + B` or executing `Tasks: Run Build Task` from the command palette (`Ctrl + Shift + P`). + +#### Debugging the Source Code in the Development Container + +To debug the source code in the development container, open the `Run and Debug` view (`Ctrl + Shift + D`), select a debug configuration from the dropdown list, and press `F5` to initiate debugging. 
diff --git a/Dockerfile b/Dockerfile index c007ace2..df8f1971 100644 --- a/Dockerfile +++ b/Dockerfile @@ -18,14 +18,16 @@ ############################################################ # Versions ############################################################ -ARG ONNX_RUNTIME_VERSION=1.15.1 +# Dependencies ending in _YY.MM are built or extracted from +# the TensorRT or PyTorch NGC containers of that same version +ARG ONNX_RUNTIME_VERSION=1.15.1_23.08 ARG LIBTORCH_VERSION=2.1.0_23.08 ARG TORCHVISION_VERSION=0.16.0_23.08 ARG VULKAN_SDK_VERSION=1.3.216.0 ARG GRPC_VERSION=1.54.2 ARG UCX_VERSION=1.15.0 -ARG GXF_VERSION=3.1_20240103_6bf4fcd2 -ARG MOFED_VERSION=23.07-0.5.1.2 +ARG GXF_VERSION=4.0_20240409_bc03d9d +ARG MOFED_VERSION=23.10-2.1.3.1 ############################################################ # Base image @@ -82,8 +84,9 @@ ARG ONNX_RUNTIME_VERSION # note: built with CUDA and TensorRT providers WORKDIR /opt/onnxruntime RUN curl -S -L -# -o ort.tgz \ - https://edge.urm.nvidia.com/artifactory/sw-holoscan-thirdparty-generic-local/onnxruntime/onnxruntime-${ONNX_RUNTIME_VERSION}-cuda-12.1-$(uname -m).tar.gz -RUN tar -xf ort.tgz --strip-components 1 + https://edge.urm.nvidia.com/artifactory/sw-holoscan-thirdparty-generic-local/onnxruntime/onnxruntime-${ONNX_RUNTIME_VERSION}-cuda-12.2-$(uname -m).tar.gz +RUN mkdir -p ${ONNX_RUNTIME_VERSION} +RUN tar -xf ort.tgz -C ${ONNX_RUNTIME_VERSION} --strip-components 2 ############################################################ # Libtorch @@ -172,7 +175,7 @@ ARG MOFED_VERSION # only dependencies in the `MOFED_DEPS` variable (parsing the output of `--check-deps-only`) to # remove them in that same layer, to ensure they are not propagated in the final image. 
WORKDIR /opt/nvidia/mofed -ARG MOFED_INSTALL_FLAGS="--upstream-libs --dpdk --with-mft --user-space-only --force --without-fw-update" +ARG MOFED_INSTALL_FLAGS="--dpdk --with-mft --user-space-only --force --without-fw-update" RUN UBUNTU_VERSION=$(cat /etc/lsb-release | grep DISTRIB_RELEASE | cut -d= -f2) \ && OFED_PACKAGE="MLNX_OFED_LINUX-${MOFED_VERSION}-ubuntu${UBUNTU_VERSION}-$(uname -m)" \ && curl -S -# -o ${OFED_PACKAGE}.tgz -L \ @@ -227,7 +230,7 @@ RUN patchelf --set-rpath '$ORIGIN/../lib' bin/* ############################################################ # GXF ############################################################ -FROM base as gxf-downloader +FROM base as gxf-builder ARG GXF_VERSION WORKDIR /opt/nvidia/gxf @@ -235,7 +238,7 @@ RUN if [ $(uname -m) = "aarch64" ]; then ARCH=arm64; else ARCH=x86_64; fi \ && curl -S -# -L -o gxf.tgz \ https://edge.urm.nvidia.com/artifactory/sw-holoscan-thirdparty-generic-local/gxf/gxf_${GXF_VERSION}_holoscan-sdk_${ARCH}.tar.gz RUN mkdir -p ${GXF_VERSION} -RUN tar -xzf gxf.tgz -C ${GXF_VERSION} --strip-components 1 +RUN tar xzf gxf.tgz -C ${GXF_VERSION} --strip-components 1 ############################################################ # Build image (final) @@ -304,9 +307,21 @@ ENV LD_LIBRARY_PATH="${LD_LIBRARY_PATH}:${UCX}/lib" # Copy GXF ARG GXF_VERSION ENV GXF=/opt/nvidia/gxf/${GXF_VERSION} -COPY --from=gxf-downloader ${GXF} ${GXF} +COPY --from=gxf-builder ${GXF} ${GXF} ENV CMAKE_PREFIX_PATH="${CMAKE_PREFIX_PATH}:${GXF}" +# Setup Docker & NVIDIA Container Toolkit's apt repositories to enable DooD +# for packaging & running applications with the CLI +# Ref: Docker installation: https://docs.docker.com/engine/install/ubuntu/ +# DooD (Docker-out-of-Docker): use the Docker (or Moby) CLI in your dev container to connect to +# your host's Docker daemon by bind mounting the Docker Unix socket. 
+RUN install -m 0755 -d /etc/apt/keyrings \ + && curl -fsSL https://download.docker.com/linux/ubuntu/gpg | gpg --dearmor -o /etc/apt/keyrings/docker.gpg \ + && chmod a+r /etc/apt/keyrings/docker.gpg \ + && echo "deb [arch="$(dpkg --print-architecture)" signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu \ + "$(. /etc/os-release && echo "$VERSION_CODENAME")" stable" | \ + tee /etc/apt/sources.list.d/docker.list > /dev/null + # APT INSTALLS # valgrind - static analysis # xvfb - testing on headless systems @@ -319,6 +334,8 @@ ENV CMAKE_PREFIX_PATH="${CMAKE_PREFIX_PATH}:${GXF}" # v4l-utils - V4L2 operator utility # libpng-dev - torchvision dependency # libjpeg-dev - torchvision dependency +# docker-ce-cli - enable Docker DooD for CLI +# docker-buildx-plugin - enable Docker DooD for CLI RUN apt-get update \ && apt-get install --no-install-recommends -y \ valgrind="1:3.18.1-*" \ @@ -338,6 +355,8 @@ RUN apt-get update \ v4l-utils="1.22.1-*" \ libpng-dev="1.6.37-*" \ libjpeg-turbo8-dev="2.1.2-*" \ + docker-ce-cli="5:25.0.3-*" \ + docker-buildx-plugin="0.12.1-*" \ && rm -rf /var/lib/apt/lists/* # PIP INSTALLS @@ -345,14 +364,16 @@ RUN apt-get update \ # requirements.dev.txt # coverage - test coverage of python tests # pytest* - testing -# requirements.txt +# requirements +# pip - 20.3+ needed for PEP 600 +# cupy-cuda - dependency for holoscan python + examples # cloudpickle - dependency for distributed apps # python-on-whales - dependency for holoscan CLI # Jinja2 - dependency for holoscan CLI # packaging - dependency for holoscan CLI # pyyaml - dependency for holoscan CLI # requests - dependency for holoscan CLI -# cupy-cuda - dependency for holoscan python + examples +# psutil - dependency for holoscan CLI RUN if [ $(uname -m) = "x86_64" ]; then \ python3 -m pip install --no-cache-dir \ mkl==2021.1.1 \ @@ -362,6 +383,35 @@ RUN if [ $(uname -m) = "x86_64" ]; then \ # This can be removed once upgrading to an MKL pip wheel that fixes the symlinks 
find /usr/local/lib -maxdepth 1 -type f -regex '.*\/lib\(tbb\|mkl\).*\.so\(\.[0-9]+\.[0-9]+\)?' -exec rm -v {} +; \ fi -COPY python/requirements.dev.txt /tmp -COPY python/requirements.txt /tmp +COPY python/requirements.dev.txt /tmp/requirements.dev.txt +COPY python/requirements.txt /tmp/requirements.txt RUN python3 -m pip install --no-cache-dir -r /tmp/requirements.dev.txt -r /tmp/requirements.txt + +# Creates a home directory for docker-in-docker to store files temporarily in the container, +# necessary when running the holoscan CLI packager +ENV HOME=/home/holoscan +RUN mkdir -p $HOME && chmod 777 $HOME + +############################################################################################ +# Extra stage: igpu build image +# The iGPU CMake build depends on libnvcudla.so as well as libnvdla_compiler.so, which are +# part of the L4T BSP. As such, they should not be in the container, but mounted at runtime +# (which the nvidia container runtime handles). However, we need the symbols at build time +# for the TensorRT libraries to resolve. Since there is no stub library (unlike libcuda.so), +# we need to include them in our builder. We use a separate stage so that `run build` can +# use it if needed, but `run launch` (used to run apps in the container) doesn't need to. 
+############################################################################################ +FROM build as build-igpu +ARG GPU_TYPE +RUN if [ ${GPU_TYPE} = "igpu" ]; then \ + tmp_dir=$(mktemp -d) \ + && curl -S -# -L -o $tmp_dir/l4t_core.deb \ + https://repo.download.nvidia.com/jetson/t234/pool/main/n/nvidia-l4t-core/nvidia-l4t-core_36.1.0-20231206095146_arm64.deb \ + && curl -S -# -L -o $tmp_dir/l4t_cuda.deb \ + https://repo.download.nvidia.com/jetson/t234/pool/main/n/nvidia-l4t-cuda/nvidia-l4t-cuda_36.1.0-20231206095146_arm64.deb \ + && curl -S -# -L -o $tmp_dir/l4t_dla.deb \ + https://repo.download.nvidia.com/jetson/common/pool/main/n/nvidia-l4t-dla-compiler/nvidia-l4t-dla-compiler_36.1.0-20231206095146_arm64.deb \ + && dpkg -x $tmp_dir/l4t_core.deb / \ + && dpkg -x $tmp_dir/l4t_cuda.deb / \ + && dpkg -x $tmp_dir/l4t_dla.deb /; \ + fi diff --git a/FAQ.md b/FAQ.md new file mode 100644 index 00000000..00f7ef0e --- /dev/null +++ b/FAQ.md @@ -0,0 +1,74 @@ +## Troubleshooting the SDK + +### X11: Failed to open display :0 [...] Failed to initialize GLFW + +Enable permissions to your X server from Docker, either: + +- Passing `-u $(id -u):$(id -g)` to `docker run`, or +- Running `xhost +local:docker` on your host + +### GLX: Failed to create context: GLXBadFBConfig + +You may encounter the error message if the Holoscan Application runs on a Virtual Machine (by a Cloud Service Provider) or without a physical display attached. If you want to run applications that use GPU on x11 (e.g., VNC or NoMachine), the following environment variables need to be set before executing the application to offload the rendering to GPU. + +```sh +export __NV_PRIME_RENDER_OFFLOAD=1 +export __GLX_VENDOR_LIBRARY_NAME=nvidia +``` + +### `GXF_ENTITY_COMPONENT_NOT_FOUND` or `GXF_ENTITY_NOT_FOUND` + +Ensure all your application connections in the yaml file (`nvidia::gxf::Connection`) refer to entities or components defined within. 
This can occur when attempting to remove a component and not cleaning up the stale connections. + +### No receiver connected to transmitter of of entity . The entity will never tick + +Ensure your entity or component is not an orphan, but is connected to a `nvidia::gxf::Connection`. + +### AJA device errors + +These errors indicate that you don't have AJA support in your environment. + +```sh +2022-06-09 18:45:13.826 ERROR gxf_extensions/aja/aja_source.cpp@80: Device 0 not found. +2022-06-09 18:45:13.826 ERROR gxf_extensions/aja/aja_source.cpp@251: Failed to open device 0 +``` + +Double check that you have installed the AJA ntv2 driver, loaded the driver after every reboot, and that you have specified `--device /dev/ajantv20:/dev/ajantv20` in the `docker run` command if you’re running a docker container. + +### GXF format converter errors + +These errors may indicate that you need to reconfigure your format converter's num_block number. + +```sh +2022-06-09 18:47:30.769 ERROR gxf_extensions/format_converter/format_converter.cpp@521: Failed to allocate memory for the channel conversion +2022-06-09 18:47:30.769 ERROR gxf_extensions/format_converter/format_converter.cpp@359: Failed to convert tensor format (conversion type:6) +``` + +Try increasing the current num_block number by 1 in the yaml file for all format converter entities. This may happen if your yaml file was configured for running with RDMA and you have decided to disable RDMA. + +### Video device error + +Some of those errors may occur when running the V4L2 codelet: + +``` +Failed to open device, OPEN: No such file or directory +``` + +Ensure you have a video device connected (ex: USB webcam) and listed when running `ls -l /dev/video*`. + +``` +Failed to open device, OPEN: Permission denied +``` + +This means the `/dev/video*` device is not available to the user from within docker. Give `--group-add video` to the `docker run` command. 
+ +### HolovizOp fails on hybrid GPU systems with non-NVIDIA integrated GPU and NVIDIA discrete GPU + +You may encounter an error when trying to run the Holoviz operator on a laptop equipped with an integrated and a discrete GPU. By default these systems will be using the integrated GPU when running an application. The integrated GPU does not provide the capabilities the Holoviz operator needs and the operator will fail. + +The following environment variables need to be set before executing the application to offload the rendering to the discrete GPU. See [PRIME Render Offload](https://download.nvidia.com/XFree86/Linux-x86_64/535.54.03/README/primerenderoffload.html) for more information. + +```sh +export __NV_PRIME_RENDER_OFFLOAD=1 +export __GLX_VENDOR_LIBRARY_NAME=nvidia +``` \ No newline at end of file diff --git a/NOTICE.txt b/NOTICE.txt index c17f2040..7d02d28c 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -30,6 +30,14 @@ Licensed under MIT (https://github.com/ocornut/imgui/blob/master/LICENSE.txt) DLPack (https://github.com/dmlc/dlpack) Licensed under Apache-2.0 (https://github.com/dmlc/dlpack/blob/v0.7/LICENSE) +docker-buildx-plugin (https://github.com/docker/buildx) +Copyright 2013-2017 Docker, Inc. +Licensed under Apache-2.0 (https://github.com/docker/buildx/blob/master/LICENSE) + +docker-ce-cli (https://github.com/docker/cli/) +Copyright 2013-2017 Docker, Inc. +Licensed under Apache-2.0 (https://github.com/docker/cli/blob/master/LICENSE) + expected (https://github.com/TartanLlama/expected) Licensed under CC0-1.0 (https://github.com/TartanLlama/expected/blob/v1.1.0/COPYING) @@ -96,6 +104,10 @@ Copyright (c) 1991-2020 Guido Vollbeding. All Rights Reserved. Copyright (c) 2010 Nokia Corporation Licensed under JPEG (https://github.com/libjpeg-turbo/libjpeg-turbo/blob/main/LICENSE.md) +libnuma (https://github.com/numactl/numactl) +Copyright (C) 1991, 1999 Free Software Foundation, Inc. 
+Licensed under LGPLv2.1 (https://github.com/numactl/numactl/blob/master/LICENSE.LGPL2.1) + libpng (https://packages.ubuntu.com/jammy/libpng-dev) Copyright (c) 1995-2023 The PNG Reference Library Authors. Copyright (c) 2018-2023 Cosmin Truta. @@ -132,6 +144,10 @@ libvulkan1 (https://packages.ubuntu.com/jammy/libvulkan1) 2015-2016 LunarG, Inc Licensed under Apache-2.0 (http://changelogs.ubuntu.com/changelogs/pool/main/v/vulkan-loader/vulkan-loader_1.3.204.1-2/copyright) +magic_enum (https://github.com/Neargye/magic_enum) +Copyright (c) 2019 - 2023 Daniil Goncharov +Licensed under MIT (https://github.com/Neargye/magic_enum/blob/v0.9.3/LICENSE) + Intel® oneAPI Math Kernel Library (https://pypi.org/project/mkl/2021.1.1/) Copyright (c) Intel Corporation Licensed under ISSL (https://www.intel.com/content/www/us/en/developer/articles/license/end-user-license-agreement.html#intel-simplified-software-license) diff --git a/README.md b/README.md index 5ed4e787..32136618 100644 --- a/README.md +++ b/README.md @@ -2,302 +2,55 @@ The **Holoscan SDK** is part of [NVIDIA Holoscan](https://developer.nvidia.com/holoscan-sdk), the AI sensor processing platform that combines hardware systems for low-latency sensor and network connectivity, optimized libraries for data processing and AI, and core microservices to run streaming, imaging, and other applications, from embedded to edge to cloud. It can be used to build streaming AI pipelines for a variety of domains, including Medical Devices, High Performance Computing at the Edge, Industrial Inspection and more. -> In previous releases, the prefix [`Clara`](https://developer.nvidia.com/industries/healthcare) was used to define Holoscan as a platform designed initially for [medical devices](https://www.nvidia.com/en-us/clara/developer-kits/). As Holoscan has grown, its potential to serve other areas has become apparent. 
With version 0.4.0, we're proud to announce that the Holoscan SDK is now officially built to be domain-agnostic and can be used to build sensor AI applications in multiple domains. Note that some of the content of the SDK (sample applications) or the documentation might still appear to be healthcare-specific pending additional updates. Going forward, domain specific content will be hosted on the [HoloHub](https://nvidia-holoscan.github.io/holohub) repository. - ## Table of Contents - [Getting Started](#getting-started) -- [Building the SDK from source](#building-the-sdk-from-source) - - [Prerequisites](#prerequisites) - - [(Recommended) using the `run` script](#recommended-using-the-run-script) - - [Cross-compilation](#cross-compilation) - - [(Advanced) Docker + CMake](#advanced-docker--cmake) - - [(Advanced) Local environment + CMake](#advanced-local-environment--cmake) -- [Runtime Container](#runtime-container) -- [Utilities](#utilities) - - [Testing](#testing) - - [Linting](#linting) - - [VSCode](#vscode) -- [Troubleshooting](#troubleshooting) -- [Repository structure](#repository-structure) +- [Obtaining the Holoscan SDK](#obtaining-the-holoscan-sdk) +- [Troubleshooting and Feedback](#troubleshooting-and-feedback) +- [Additional Notes](#additional-notes) ## Getting Started Visit the Holoscan User Guide to get started with the Holoscan SDK: -## Building the SDK from source - -> **⚠️ Disclaimer**: we only recommend building the SDK from source if you are a developer of the SDK, or need to build the SDK with debug symbols or other options not used as part of the published packages. If you want to write your own operator or application, you can use the SDK as a dependency (and contribute to [HoloHub](https://github.com/nvidia-holoscan/holohub)). If you need to make other modifications to the SDK, [file a feature or bug request](https://forums.developer.nvidia.com/c/healthcare/holoscan-sdk/320/all). 
If that's not the case, prefer installing the SDK from [published packages](https://docs.nvidia.com/holoscan/sdk-user-guide/sdk_installation.html#install-the-sdk). - -### Prerequisites - -- Prerequisites for each supported platform are documented in [the user guide](https://docs.nvidia.com/holoscan/sdk-user-guide/sdk_installation.html#prerequisites). -- To build and run the SDK in a containerized environment (recommended) you'll need: - - the [NVIDIA Container Toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html) v1.12.2+ - - [Docker](https://docs.docker.com/engine/install/ubuntu/#install-using-the-repository), including the buildx plugin (`docker-buildx-plugin`) - -### (Recommended) Using the `run` script - -Call **`./run build`** within the repository to build the build container and the CMake project. - -- *If you encounter errors during the CMake build, you can execute `./run clear_cache` to remove cache/build/install folders* -- *Execute `./run build --help` for more information* -- *Execute `./run build --dryrun` to see the commands that will be executed* -- *That command can be broken-up in more granular commands also:* - - ```sh - ./run check_system_deps # ensure the system is properly configured for building - ./run build_image # create the build Docker container - ./run build # run the CMake configuration, build, and install steps - ``` - -Call the **`./run launch`** command to start and enter the build container. - -- *You can run from the `install` or `build` tree by passing the working directory as an argument (ex: `./run launch install`)* -- *Execute `./run launch --help` for more information* -- *Execute `./run launch --dryrun` to see the commands that will be executed* -- *Execute `./run launch --run-cmd "..."` to execute a bash command directly in the container* - -Run the [**examples**](./examples#readme) inside the container by running their respective commands listed within each directory README file. 
- -### Cross-compilation - -While the Dockerfile to build the SDK does not currently support true cross-compilation, you can compile the Holoscan SDK for the developer kits (arm64) from a x86_64 host using an emulation environment. - -1. [Install qemu](https://github.com/multiarch/qemu-user-static) -2. Clear your build cache: `./run clear_cache` -3. Rebuild for `linux/arm64` using `--arch|-a` or `HOLOSCAN_BUILD_ARCH`: - - `./run build --arch arm64` - - `HOLOSCAN_BUILD_ARCH=arm64 ./run build` - -You can then copy the `install` folder generated by CMake to a developer kit with a configured environment or within a container to use for running and developing applications. - -### (Advanced) Docker + CMake - -The [`run`](./run) script mentioned above is helpful to understand how Docker and CMake are configured and run, as commands will be printed when running it or using `--dryrun`. -We recommend looking at those commands if you want to use Docker and CMake manually, and reading the comments inside the script for details about each parameter (specifically the `build()` and `launch()` methods). - -### (Advanced) Local environment + CMake - -> **⚠️ Disclaimer**: this method of building the SDK is not actively tested or maintained. Instructions below might go out of date. - -#### Dependencies - -To build the Holoscan SDK on a local environment, the following versions of dev dependencies are needed (or tested). The last column refers to the stage (`FROM`) in the [Dockerfile](./Dockerfile) where respective commands can be found to build/install these dependencies. 
- -| Dependency | Min version | Needed by | Dockerfile stage | -|---|---|---|---| -| CUDA | 12.2 | Core SDK | base | -| gRPC | 1.54.2 | Core SDK | grpc-builder | -| UCX | 1.15.0 | Core SDK | ucx-builder | -| GXF | 3.1 | Core SDK | gxf-downloader | -| MOFED | 23.07 | ConnectX | mofed-installer | -| TensorRT | 8.6.1 | Inference operator | base | -| ONNX Runtime | 1.15.1 | Inference operator | onnxruntime-downloader | -| LibTorch | 2.1.0 | Inference operator
(torch plugin) | torch-downloader-[x86_64\|arm64] | -| TorchVision | 0.16.0 | Inference operator
(torch plugin) | torchvision-downloader-[x86_64\|arm64] | -| Vulkan SDK | 1.3.216 | Holoviz operator | vulkansdk-builder | -| Vulkan loader and
validation layers | 1.3.204 | Holoviz operator | dev | -| spirv-tools | 2022.1 | Holoviz operator | dev | -| V4L2 | 1.22.1 | V4L2 operator | dev | -| CMake | 3.24.0 | Build process | build-tools | -| Patchelf | N/A | Build process | build-tools | - -Note: refer to the [Dockerfile](./Dockerfile) for other dependencies which are not needed to build, but might be needed for: - -- runtime (openblas/mkl for torch, egl for headless rendering, cloudpickle for distributed python apps, cupy for some examples...) -- testing (valgrind, pytest, xvfb...) -- utilities (v4l-utils, ...) - -For CMake to find these dependencies, install them in default system paths, or pass `CMAKE_PREFIX_PATH`, `CMAKE_LIBRARY_PATH`, and/or `CMAKE_INCLUDE_PATH` during configuration. - -#### Build example - -```sh -# Configure -cmake -S $source_dir -B $build_dir \ - -G Ninja \ - -D CMAKE_BUILD_TYPE=Release \ - -D CUDAToolkit_ROOT:PATH="/usr/local/cuda" - -# Build -cmake --build $build_dir -j - -# Install -cmake --install $build_dir --prefix $install_dir -``` - -The commands to run the [**examples**](./examples#readme) are then the same as in the dockerized environment, and can be found in the respective source directory READMEs. - -## Runtime Container - -There are multiple containers associated with Holoscan: - -- The **build** container generated by the [top-level Dockerfile](./Dockerfile) is designed to pull dependencies to build and test the SDK itself. The image does not contain the SDK itself, as it is mounted with during `docker run` to run the cmake build or run tests. -- The **development** container available at [NGC | Holoscan Container](https://catalog.ngc.nvidia.com/orgs/nvidia/teams/clara-holoscan/containers/holoscan/tags) which includes all the development tools and libraries needed to *build* Holoscan applications. - - This image is ~13 GB when uncompressed. However, once a Holoscan application is created, it does not need all those same development tools just to *run* an application. 
-- To address this, a **runtime** container can now be generated with the [runtime_docker/Dockerfile](./runtime_docker/Dockerfile) which contains only the runtime dependencies of the Holoscan SDK. - - This Dockerfile is based on the [CUDA-base](https://catalog.ngc.nvidia.com/orgs/nvidia/containers/cuda) image, which begins with Ubuntu:22.04 and installs the CUDA runtime and Compat package. - - This image is ~8.7 GB on x86_64, and can be further reduced based on use cases (see below). - -> ⚠️ Disclaimer: Currently iGPU is not supported by the runtime container - -### Generate the runtime container - -The [`run`](./run) script contains the command `build_run_image` to build the runtime Holoscan SDK image: - -```bash -./run build_run_image -``` - -Once this image is built, it can be run exactly as the Holoscan development container on NGC is. Simply follow the 'Running the container' instructions beginning at step #3 at [NGC | Holoscan Container](https://catalog.ngc.nvidia.com/orgs/nvidia/teams/clara-holoscan/containers/holoscan), but replace `${NGC_CONTAINER_IMAGE_PATH}` by `holoscan-sdk-run-[-]` in step #4 (name outputted at the end of the above command). - -### Further Reducing Runtime Size - -If you have a specific application you wish to deploy, you can further reduce this runtime image size in two ways: - -1. **Targeting different stages of the [runtime Dockerfile](./runtime_docker/Dockerfile)**. - 1. add `--cpp` to the command above to not pull in python dependencies. - 2. add `--cpp-no-mkl` to the command above to not pull in MKL (x86_64-only libtorch dependency) in addition to the above. - -2. **Modifying the Dockerfile** - -The [runtime Dockerfile](./runtime_docker/Dockerfile) is thoroughly documented to indicate which dependency is used by which component of the Holoscan SDK. 
If you do not use some of these components (ex: Torch inference backend, ONNX Runtime inference backend, TensorRT inference backend, Python/Cupy, format_converter operator, etc...), comment out the appropriate line in the Dockerfile and run the build command above. - -## Utilities - -Some utilities are available in the [`scripts`](./scripts) folder, others closer to the built process are listed below: - -### Testing - -Existing tests are using GTest for C++ and pytest for Python, and can be found under [tests](./tests/) and [python/tests](./python/tests/) respectively. The Holoscan SDK uses CTest as a framework to build and execute these tests. - -Run the tests using the following command: - -```sh -./run test -``` - -> Note: Run `run test --help` to see additional options. - -### Linting - -Run the following command to run various linting tools on the repository: - -```sh -./run lint # optional: specify directories -``` - -> Note: Run `run lint --help` to see the list of tools that are used. If a lint command fails due to a missing module or executable on your system, you can install it using `python3 -m pip install `. - -### Building the User Guide - -The source of the user guide hosted at is located in [docs](./docs/). It can be built with the following commands: - -- PDF: `./run build_pdf` -- HTML: `./run build_html` (auto-reload: `./run live_html`) - -Run `./run help` for more commands related to the user guide documentation. - -### VSCode - -Visual Studio Code can be utilized to develop the Holoscan SDK. The `.devcontainer` folder holds the configuration for setting up a [development container](https://code.visualstudio.com/docs/remote/containers) with all necessary tools and libraries installed. - -The `./run` script contains `vscode` and `vscode_remote` commands for launching Visual Studio Code in a container or from a remote machine, respectively. - -- To launch Visual Studio Code in a dev container, use `./run vscode`. 
-- To attach to an existing dev container from a remote machine, use `./run vscode_remote`. For more information, refer to the instructions from `./run vscode_remote -h`. - -Once Visual Studio Code is launched, the development container will be built and the recommended extensions will be installed automatically, along with CMake being configured. - -#### Configuring CMake in the Development Container - -For manual configuration of CMake, open the command palette (`Ctrl + Shift + P`) and run the `CMake: Configure` command. - -#### Building the Source Code in the Development Container - -The source code in the development container can be built by either pressing `Ctrl + Shift + B` or executing `Tasks: Run Build Task` from the command palette (`Ctrl + Shift + P`). - -#### Debugging the Source Code in the Development Container - -To debug the source code in the development container, open the `Run and Debug` view (`Ctrl + Shift + D`), select a debug configuration from the dropdown list, and press `F5` to initiate debugging. - -## Troubleshooting - -### X11: Failed to open display :0 [...] Failed to initialize GLFW - -Enable permissions to your X server from Docker, either: - -- Passing `-u $(id -u):$(id -g)` to `docker run`, or -- Running `xhost +local:docker` on your host - -### GLX: Failed to create context: GLXBadFBConfig - -You may encounter the error message if the Holoscan Application runs on a Virtual Machine (by a Cloud Service Provider) or without a physical display attached. If you want to run applications that use GPU on x11 (e.g., VNC or NoMachine), the following environment variables need to be set before executing the application to offload the rendering to GPU. - -```sh -export __NV_PRIME_RENDER_OFFLOAD=1 -export __GLX_VENDOR_LIBRARY_NAME=nvidia -``` - -### `GXF_ENTITY_COMPONENT_NOT_FOUND` or `GXF_ENTITY_NOT_FOUND` - -Ensure all your application connections in the yaml file (`nvidia::gxf::Connection`) refer to entities or components defined within. 
This can occur when attempting to remove a component and not cleaning up the stale connections. - -### No receiver connected to transmitter of of entity . The entity will never tick - -Ensure your entity or component is not an orphan, but is connected to a `nvidia::gxf::Connection`. - -### AJA device errors - -These errors indicate that you don't have AJA support in your environment. - -```sh -2022-06-09 18:45:13.826 ERROR gxf_extensions/aja/aja_source.cpp@80: Device 0 not found. -2022-06-09 18:45:13.826 ERROR gxf_extensions/aja/aja_source.cpp@251: Failed to open device 0 -``` - -Double check that you have installed the AJA ntv2 driver, loaded the driver after every reboot, and that you have specified `--device /dev/ajantv20:/dev/ajantv20` in the `docker run` command if you’re running a docker container. - -### GXF format converter errors - -These errors may indicate that you need to reconfigure your format converter's num_block number. +The Holoscan User Guide includes: +- An introduction to the NVIDIA Holoscan platform, including the Holoscan C++/Python SDK; +- Requirements and setup steps; +- Detailed SDK documentation, including a developer introduction, examples, and API details. -```sh -2022-06-09 18:47:30.769 ERROR gxf_extensions/format_converter/format_converter.cpp@521: Failed to allocate memory for the channel conversion -2022-06-09 18:47:30.769 ERROR gxf_extensions/format_converter/format_converter.cpp@359: Failed to convert tensor format (conversion type:6) -``` +We also recommend visiting [NVIDIA HoloHub](https://nvidia-holoscan.github.io/holohub/) to view +community projects and reusable components available for your Holoscan project. -Try increasing the current num_block number by 1 in the yaml file for all format converter entities. This may happen if your yaml file was configured for running with RDMA and you have decided to disable RDMA. 
+## Obtaining the Holoscan SDK -### Video device error +The Holoscan User Guide documents several options to install and run the Holoscan SDK: -Some of those errors may occur when running the V4L2 codelet: +- As an [NGC Container 🐋](https://docs.nvidia.com/holoscan/sdk-user-guide/sdk_installation.html#sd-tab-item-2) +- As a [Debian Package 📦️](https://docs.nvidia.com/holoscan/sdk-user-guide/sdk_installation.html#sd-tab-item-3) +- As a [Python Wheel 🐍](https://docs.nvidia.com/holoscan/sdk-user-guide/sdk_installation.html#sd-tab-item-4) -``` -Failed to open device, OPEN: No such file or directory -``` +Visit the [Holoscan User Guide](https://docs.nvidia.com/holoscan/sdk-user-guide/sdk_installation.html#not-sure-what-to-choose) for +guidance to help choose which installation option may be right for your use case. -Ensure you have a video device connected (ex: USB webcam) and listed when running `ls -l /dev/video*`. +If the options above do not support your use case, you may prefer to [build the SDK from source](./DEVELOP.md). -``` -Failed to open device, OPEN: Permission denied -``` +Please review [Holoscan SDK prerequisites](https://docs.nvidia.com/holoscan/sdk-user-guide/sdk_installation.html#prerequisites) +before getting started. -This means the `/dev/video*` device is not available to the user from within docker. Give `--group-add video` to the `docker run` command. +## Troubleshooting and Feedback -### HolovizOp fails on hybrid GPU systems with non-NVIDIA integrated GPU and NVIDIA discrete GPU +We appreciate community discussion and feedback in support of Holoscan platform users and developers. We ask that users: +- Review the [Holoscan SDK Frequently Asked Questions](FAQ.md) document for common solutions and workarounds. +- Direct questions to the [NVIDIA Support Forum](https://forums.developer.nvidia.com/c/healthcare/holoscan-sdk/320/all). +- Enter SDK issues on the [SDK GitHub Issues board](https://github.com/nvidia-holoscan/holoscan-sdk/issues). 
-You may encounter an error when trying to run the Holoviz operator on a laptop equipped with an integrated and a discrete GPU. By default these systems will be using the integrated GPU when running an application. The integrated GPU does not provide the capabilities the Holoviz operator needs and the operator will fail. +## Additional Notes -The following environment variables need to be set before executing the application to offload the rendering to the discrete GPU. See [PRIME Render Offload](https://download.nvidia.com/XFree86/Linux-x86_64/535.54.03/README/primerenderoffload.html) for more information. +### Relation to NVIDIA Clara -```sh -export __NV_PRIME_RENDER_OFFLOAD=1 -export __GLX_VENDOR_LIBRARY_NAME=nvidia -``` +In previous releases, the prefix [`Clara`](https://developer.nvidia.com/industries/healthcare) was used to define Holoscan as a platform designed initially for [medical devices](https://www.nvidia.com/en-us/clara/developer-kits/). As Holoscan has grown, its potential to serve other areas has become apparent. With version 0.4.0, we're proud to announce that the Holoscan SDK is now officially built to be domain-agnostic and can be used to build sensor AI applications in multiple domains. Note that some of the content of the SDK (sample applications) or the documentation might still appear to be healthcare-specific pending additional updates. Going forward, domain specific content will be hosted on the [HoloHub](https://nvidia-holoscan.github.io/holohub) repository. 
-## Repository structure +### Repository structure The repository is organized as such: diff --git a/VERSION b/VERSION index 21e8796a..359a5b95 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -1.0.3 +2.0.0 \ No newline at end of file diff --git a/cmake/deps/glfw_rapids.cmake b/cmake/deps/glfw_rapids.cmake index 0051d3fc..ab2861a4 100644 --- a/cmake/deps/glfw_rapids.cmake +++ b/cmake/deps/glfw_rapids.cmake @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -55,6 +55,7 @@ rapids_cpm_find(GLFW 3.3.7 GITHUB_REPOSITORY glfw/glfw GIT_TAG 3.3.7 OPTIONS + "BUILD_SHARED_LIBS OFF" "CXXOPTS_BUILD_EXAMPLES OFF" "CXXOPTS_BUILD_TESTS OFF" "GLFW_BUILD_TESTS OFF" @@ -63,10 +64,3 @@ rapids_cpm_find(GLFW 3.3.7 "GLFW_INSTALL OFF" EXCLUDE_FROM_ALL ) - -if(GLFW_ADDED) - install(TARGETS glfw - DESTINATION "${HOLOSCAN_INSTALL_LIB_DIR}" - COMPONENT "holoscan-dependencies" - ) -endif() diff --git a/cmake/deps/gxf.cmake b/cmake/deps/gxf.cmake index 9ab649f4..33b9a7ca 100644 --- a/cmake/deps/gxf.cmake +++ b/cmake/deps/gxf.cmake @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -13,13 +13,106 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-find_package(GXF 2.4 MODULE REQUIRED - COMPONENTS +set(HOLOSCAN_GXF_COMPONENTS + # For Holoscan to use and distribute + app core cuda gxe + logger multimedia + sample # dependency of GXF::app serialization std ucx ) + +find_package(GXF 4.0 CONFIG REQUIRED + COMPONENTS ${HOLOSCAN_GXF_COMPONENTS} +) +message(STATUS "Found GXF: ${GXF_DIR}") + +# Workaround: If the GXF distribution implicitly includes an HTTP target dependency +# for other libraries, add it to the list of imports. +# https://jirasw.nvidia.com/browse/NVG-3245 +if(TARGET GXF::http) + list(APPEND HOLOSCAN_GXF_COMPONENTS http) +endif() + +# Copy shared libraries and their headers to the GXF build folder +# to be found alongside Holoscan GXF extensions. + +if(NOT HOLOSCAN_INSTALL_LIB_DIR) + if(DEFINED HOLOSCAN_SDK_PATH) + # Find library directory from HOLOSCAN_SDK_PATH + find_path(HOLOSCAN_INSTALL_LIB_DIR + NAMES libholoscan.so + PATHS ${HOLOSCAN_SDK_PATH}/lib ${HOLOSCAN_SDK_PATH}/lib64 + NO_DEFAULT_PATH + REQUIRED + ) + + # Take only file name from path + get_filename_component(HOLOSCAN_INSTALL_LIB_DIR "${HOLOSCAN_INSTALL_LIB_DIR}" NAME) + else() + message(FATAL_ERROR "Unable to guess HOLOSCAN_INSTALL_LIB_DIR from HOLOSCAN_SDK_PATH") + endif() +endif() + +set(HOLOSCAN_GXF_LIB_DIR "${CMAKE_BINARY_DIR}/${HOLOSCAN_INSTALL_LIB_DIR}") +set(HOLOSCAN_GXF_BIN_DIR "${CMAKE_BINARY_DIR}/bin") +foreach(component ${HOLOSCAN_GXF_COMPONENTS}) + # Copy the GXF library to the build folder so that executables can find shared libraries + get_target_property(GXF_${component}_LOCATION GXF::${component} IMPORTED_LOCATION) + if(NOT GXF_${component}_LOCATION) + string(TOUPPER "${CMAKE_BUILD_TYPE}" _build_type) + get_target_property(GXF_${component}_LOCATION GXF::${component} IMPORTED_LOCATION_${_build_type}) + endif() + if(GXF_${component}_LOCATION) + if(NOT "${component}" STREQUAL "gxe") + file(COPY "${GXF_${component}_LOCATION}" + DESTINATION "${HOLOSCAN_GXF_LIB_DIR}" + FILE_PERMISSIONS OWNER_READ OWNER_WRITE GROUP_READ 
WORLD_READ + ) + get_filename_component(${component}_filename ${GXF_${component}_LOCATION} NAME) + set(HOLOSCAN_GXF_${component}_LOCATION "${HOLOSCAN_GXF_LIB_DIR}/${${component}_filename}") + set_target_properties(GXF::${component} PROPERTIES + IMPORTED_LOCATION_${_build_type} ${HOLOSCAN_GXF_${component}_LOCATION} + IMPORTED_LOCATION ${HOLOSCAN_GXF_${component}_LOCATION} + ) + else() + file(COPY "${GXF_${component}_LOCATION}" + DESTINATION "${HOLOSCAN_GXF_BIN_DIR}" + FILE_PERMISSIONS OWNER_READ OWNER_WRITE OWNER_EXECUTE GROUP_READ GROUP_EXECUTE WORLD_READ WORLD_EXECUTE + ) + set(HOLOSCAN_GXE_LOCATION "${HOLOSCAN_GXF_BIN_DIR}/gxe") + set_target_properties(GXF::gxe PROPERTIES + IMPORTED_LOCATION_${_build_type} ${HOLOSCAN_GXE_LOCATION} + IMPORTED_LOCATION ${HOLOSCAN_GXE_LOCATION} + ) + + # Patch `gxe` executable RUNPATH to find required GXF libraries in the self-contained HSDK installation. + # GXF 4.0 libraries are entirely self-contained and do not require RPATH updates. + find_program(PATCHELF_EXECUTABLE patchelf) + if(PATCHELF_EXECUTABLE) + execute_process( + COMMAND "${PATCHELF_EXECUTABLE}" + "--set-rpath" + "\$ORIGIN:\$ORIGIN/../${HOLOSCAN_INSTALL_LIB_DIR}" + "${HOLOSCAN_GXE_LOCATION}" + ) + else() + message(WARNING "Failed to patch the GXE executable RUNPATH. 
Must set LD_LIBRARY_PATH to use the executable.") + endif() + endif() + else() + message(FATAL_ERROR "No imported location found for GXF::${component}") + endif() +endforeach() + +# Set variables in parent scope for use throughout the Holoscan project +set(GXF_INCLUDE_DIR ${GXF_INCLUDE_DIR} PARENT_SCOPE) +set(HOLOSCAN_GXF_LIB_DIR ${HOLOSCAN_GXF_LIB_DIR} PARENT_SCOPE) +set(HOLOSCAN_GXF_BIN_DIR ${HOLOSCAN_GXF_BIN_DIR} PARENT_SCOPE) +set(HOLOSCAN_GXE_LOCATION ${HOLOSCAN_GXE_LOCATION} PARENT_SCOPE) +set(HOLOSCAN_GXF_COMPONENTS ${HOLOSCAN_GXF_COMPONENTS} PARENT_SCOPE) diff --git a/cmake/deps/magic_enum.cmake b/cmake/deps/magic_enum.cmake new file mode 100644 index 00000000..3f8203b7 --- /dev/null +++ b/cmake/deps/magic_enum.cmake @@ -0,0 +1,43 @@ +# SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# https://docs.rapids.ai/api/rapids-cmake/stable/command/rapids_cpm_find.html +include(${rapids-cmake-dir}/cpm/find.cmake) + +# GXF 4.0 added a dependency on magic_enum + +rapids_cpm_find(magic_enum 0.9.3 + GLOBAL_TARGETS magic_enum + BUILD_EXPORT_SET ${HOLOSCAN_PACKAGE_NAME}-exports + CPM_ARGS + + GITHUB_REPOSITORY Neargye/magic_enum + GIT_TAG v0.9.3 + GIT_SHALLOW TRUE + + EXCLUDE_FROM_ALL +) + +# Set 'magic_enum_SOURCE_DIR' with PARENT_SCOPE so that +# root project can use it to include headers +set(magic_enum_SOURCE_DIR ${magic_enum_SOURCE_DIR} PARENT_SCOPE) + +if(magic_enum_ADDED) + # Install the headers needed for development with the SDK + install(FILES ${magic_enum_SOURCE_DIR}/include/magic_enum.hpp + DESTINATION "include" + COMPONENT "holoscan-dependencies" + ) +endif() diff --git a/cmake/deps/ucx.cmake b/cmake/deps/ucx.cmake index 21bd3244..20fa4f2f 100644 --- a/cmake/deps/ucx.cmake +++ b/cmake/deps/ucx.cmake @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,8 +15,17 @@ find_package(ucx 1.14.0 REQUIRED) -install(DIRECTORY ${UCX_LIBRARIES} +install( + DIRECTORY ${UCX_LIBRARIES} DESTINATION "${HOLOSCAN_INSTALL_LIB_DIR}/.." COMPONENT "holoscan-dependencies" FILES_MATCHING PATTERN "*.so*" ) + +foreach(ucx_target ucm ucp ucs uct) + install( + DIRECTORY ${UCX_INCLUDE_DIRS}/${ucx_target} + DESTINATION "${CMAKE_INSTALL_INCLUDEDIR}/3rdparty/ucx" + COMPONENT "holoscan-dependencies" + ) +endforeach() diff --git a/cmake/modules/FindGXF.cmake b/cmake/modules/FindGXF.cmake deleted file mode 100644 index f07c391b..00000000 --- a/cmake/modules/FindGXF.cmake +++ /dev/null @@ -1,273 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. 
All rights reserved. -# SPDX-License-Identifier: Apache-2.0 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Create GXF imported cmake targets -# -# This module defines GXF_FOUND if all GXF libraries are found or -# if the required libraries (COMPONENTS property in find_package) -# are found. -# -# A new imported target is created for each component (library) -# under the GXF namespace (GXF::${component_name}) -# -# Note: this leverages the find-module paradigm [1]. The config-file paradigm [2] -# is recommended instead in CMake. 
-# [1] https://cmake.org/cmake/help/latest/manual/cmake-packages.7.html#config-file-packages -# [2] https://cmake.org/cmake/help/latest/manual/cmake-packages.7.html#find-module-packages - -# Define environment -if(CMAKE_SYSTEM_PROCESSOR STREQUAL x86_64 OR CMAKE_SYSTEM_PROCESSOR STREQUAL amd64) - set(_internal_GXF_recipe "gxf_x86_64") - set(_public_GXF_recipe "x86_64") -elseif(CMAKE_SYSTEM_PROCESSOR STREQUAL aarch64 OR CMAKE_SYSTEM_PROCESSOR STREQUAL arm64) - set(_internal_GXF_recipe "gxf_jetpack50") - set(_public_GXF_recipe "arm64") -else() - message(FATAL_ERROR "CMAKE_SYSTEM_PROCESSOR=${CMAKE_SYSTEM_PROCESSOR} is not an architecture supported by GXF") -endif() - -if(NOT HOLOSCAN_INSTALL_LIB_DIR) - if(DEFINED HOLOSCAN_SDK_PATH) - # Find library directory from HOLOSCAN_SDK_PATH - find_path(HOLOSCAN_INSTALL_LIB_DIR - NAMES libholoscan.so - PATHS ${HOLOSCAN_SDK_PATH}/lib ${HOLOSCAN_SDK_PATH}/lib64 - NO_DEFAULT_PATH - REQUIRED - ) - - # Take only file name from path - get_filename_component(HOLOSCAN_INSTALL_LIB_DIR "${HOLOSCAN_INSTALL_LIB_DIR}" NAME) - else() - message(FATAL_ERROR "Unable to guess HOLOSCAN_INSTALL_LIB_DIR from HOLOSCAN_SDK_PATH") - endif() -endif() - -# Need PatchELF to update the RPATH of the libs -find_program(PATCHELF_EXECUTABLE patchelf) -if(NOT PATCHELF_EXECUTABLE) - message(FATAL_ERROR "Please specify the PATCHELF executable") -endif() - -# Library names -list(APPEND _GXF_EXTENSIONS - behavior_tree - cuda - multimedia - network - npp - python_codelet - sample - serialization - std - stream - ucx -) - -# Common headers -find_path(GXF_common_INCLUDE_DIR - NAMES common/ - REQUIRED -) -mark_as_advanced(GXF_common_INCLUDE_DIR) -list(APPEND GXF_INCLUDE_DIR_VARS GXF_common_INCLUDE_DIR) - -# Libraries and their headers -list(APPEND _GXF_LIBRARIES ${_GXF_EXTENSIONS} core) - -foreach(component IN LISTS _GXF_LIBRARIES) - # headers - find_path(GXF_${component}_INCLUDE_DIR - NAMES "gxf/${component}/" - ) - mark_as_advanced(GXF_${component}_INCLUDE_DIR) - 
list(APPEND GXF_INCLUDE_DIR_VARS GXF_${component}_INCLUDE_DIR) - - # library - find_library(GXF_${component}_LIBRARY - NAMES "gxf_${component}" - PATH_SUFFIXES - "${_internal_GXF_recipe}/${component}" - "${_public_GXF_recipe}/${component}" - ) - mark_as_advanced(GXF_${component}_LIBRARY) - list(APPEND GXF_LIBRARY_VARS GXF_${component}_LIBRARY) - - # create imported target - if(GXF_${component}_LIBRARY) - if(NOT TARGET GXF::${component}) - # Assume SHARED, though technically UNKNOWN since we don't enforce .so - add_library(GXF::${component} SHARED IMPORTED) - - endif() - - ############################################################################## - # TODO: config/patching/install should not be in this file, only target import - - # Set the internal location to the binary directory - get_filename_component(gxf_component_filename "${GXF_${component}_LIBRARY}" NAME) - set(gxf_component_build_dir "${CMAKE_BINARY_DIR}/${HOLOSCAN_INSTALL_LIB_DIR}") - set(gxf_component_build_path "${gxf_component_build_dir}/${gxf_component_filename}") - - # Copy the GXF library to the build folder - # Needed for permissions to run patchelf for RUNPATH - file(COPY "${GXF_${component}_LIBRARY}" - DESTINATION "${gxf_component_build_dir}" - FILE_PERMISSIONS OWNER_READ OWNER_WRITE GROUP_READ WORLD_READ - ) - - # Patch RUNPATH - list(APPEND _GXF_LIB_RPATH "\$ORIGIN" "\$ORIGIN/gxf_extensions") - if(CMAKE_SYSTEM_PROCESSOR STREQUAL aarch64 OR CMAKE_SYSTEM_PROCESSOR STREQUAL arm64) - # The video encoder/decoder libraries need an extra path for aarch64 - # To find the right l4t libraries - if(component STREQUAL videoencoderio - OR component STREQUAL videoencoder - OR component STREQUAL videodecoderio - OR component STREQUAL videodecoder) - list(APPEND _GXF_LIB_RPATH "/usr/lib/aarch64-linux-gnu/tegra/") - endif() - endif() - list(JOIN _GXF_LIB_RPATH ":" _GXF_LIB_RPATH) - execute_process(COMMAND - "${PATCHELF_EXECUTABLE}" - "--set-rpath" - "${_GXF_LIB_RPATH}" - "${gxf_component_build_path}" - ) - 
unset(_GXF_LIB_RPATH) - - # Install the GXF library - # Use the build location since RUNPATH has changed - install(FILES "${gxf_component_build_path}" - DESTINATION "${HOLOSCAN_INSTALL_LIB_DIR}" - COMPONENT "holoscan-gxf_libs" - ) - ############################################################################## - - # Include dirs - list(APPEND GXF_${component}_INCLUDE_DIRS ${GXF_common_INCLUDE_DIR}) - if(GXF_${component}_INCLUDE_DIR) - list(APPEND GXF_${component}_INCLUDE_DIRS ${GXF_${component}_INCLUDE_DIR}) - endif() - - set_target_properties(GXF::${component} PROPERTIES - IMPORTED_LOCATION "${gxf_component_build_path}" - - # Without this, make and ninja's behavior is different. - # GXF's shared libraries doesn't seem to set soname. - # (https://gitlab.kitware.com/cmake/cmake/-/issues/22307) - IMPORTED_NO_SONAME ON - INTERFACE_INCLUDE_DIRECTORIES "${GXF_${component}_INCLUDE_DIRS}" - ) - - set(GXF_${component}_FOUND TRUE) - else() - set(GXF_${component}_FOUND FALSE) - endif() -endforeach() - -unset(_GXF_EXTENSIONS) -unset(_GXF_LIBRARIES) - -# Find version -if(GXF_core_INCLUDE_DIR) - # Note: "kGxfCoreVersion \"(.*)\"$" does not work with a simple string - # REGEX (doesn't stop and EOL, neither $ nor \n), so we first extract - # the line with file(STRINGS), then the version with string(REGEX) - file(STRINGS "${GXF_core_INCLUDE_DIR}/gxf/core/gxf.h" _GXF_VERSION_LINE - REGEX "kGxfCoreVersion" - ) - string(REGEX MATCH "kGxfCoreVersion \"(.*)\"" _ ${_GXF_VERSION_LINE}) - set(GXF_VERSION ${CMAKE_MATCH_1}) - unset(_GXF_VERSION_LINE) -endif() - -# GXE -find_program(GXF_gxe_PATH - NAMES gxe - PATH_SUFFIXES - "${_internal_GXF_recipe}/gxe" - "${_public_GXF_recipe}/gxe" -) - -if(GXF_gxe_PATH) - if(NOT TARGET GXF::gxe) - add_executable(GXF::gxe IMPORTED) - endif() - - ############################################################################## - # TODO: config/patching/install should not be in this file, only target import - - # Set the internal location to the binary 
directory - # This is need for RPATH to work - set(GXE_BUILD_DIR "${CMAKE_BINARY_DIR}/bin") - set(GXE_BUILD_PATH "${GXE_BUILD_DIR}/gxe") - - # Copy gxe binary to the build folder - # Needed for permissions to run patchelf for RUNPATH - file(COPY "${GXF_gxe_PATH}" - DESTINATION "${GXE_BUILD_DIR}" - FILE_PERMISSIONS OWNER_READ OWNER_WRITE OWNER_EXECUTE GROUP_READ GROUP_EXECUTE WORLD_READ WORLD_EXECUTE - ) - - # Patch RUNPATH so that it can find libgxf_core.so library. - execute_process(COMMAND - "${PATCHELF_EXECUTABLE}" - "--set-rpath" - "\$ORIGIN:\$ORIGIN/../${HOLOSCAN_INSTALL_LIB_DIR}" - "${GXE_BUILD_PATH}" - ) - - # Install GXE - # Use the build location since RUNPATH has changed - install(FILES "${GXE_BUILD_PATH}" - DESTINATION "bin" - PERMISSIONS OWNER_READ OWNER_WRITE OWNER_EXECUTE GROUP_READ GROUP_EXECUTE WORLD_READ WORLD_EXECUTE - COMPONENT "holoscan-gxf_bins" - ) - ############################################################################## - - set_target_properties(GXF::gxe PROPERTIES - IMPORTED_LOCATION "${GXE_BUILD_PATH}" - ) - - set(GXF_gxe_FOUND TRUE) -else() - set(GXF_gxe_FOUND FALSE) -endif() - -# Generate GXF_FOUND -include(FindPackageHandleStandardArgs) - -if(GXF_FIND_COMPONENTS) - # ... based on requested components/libraries - find_package_handle_standard_args(GXF - FOUND_VAR GXF_FOUND - VERSION_VAR GXF_VERSION - HANDLE_COMPONENTS # Looks for GXF_${component}_FOUND - ) -else() - # ... need all the libraries - find_package_handle_standard_args(GXF - FOUND_VAR GXF_FOUND - VERSION_VAR GXF_VERSION - REQUIRED_VARS ${GXF_INCLUDE_DIR_VARS} ${GXF_LIBRARY_VARS} GXF_gxe_PATH - ) -endif() - -# Clean -unset(_internal_GXF_recipe) -unset(_public_GXF_recipe) diff --git a/cmake/modules/GenerateGXEApp.cmake b/cmake/modules/GenerateGXEApp.cmake index 1a62edbc..1c0e1328 100644 --- a/cmake/modules/GenerateGXEApp.cmake +++ b/cmake/modules/GenerateGXEApp.cmake @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. 
All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -32,6 +32,10 @@ function(_get_lib_file_path location target) if(imported) get_target_property(lib ${target} IMPORTED_LOCATION) + if(NOT lib) + string(TOUPPER "${CMAKE_BUILD_TYPE}" _BUILD_TYPE) + get_target_property(lib ${target} IMPORTED_LOCATION_${_BUILD_TYPE}) + endif() else() set(lib $) endif() @@ -170,6 +174,10 @@ function(create_gxe_application) COMPONENT "${GXE_APP_COMPONENT}" ) + # GXE apps are expected to be run from the top of the build/install directory + # to find `gxe_executable`. + file(RELATIVE_PATH gxe_executable ${CMAKE_BINARY_DIR} ${HOLOSCAN_GXE_LOCATION}) + # Create bash script set(GXE_APP_EXECUTABLE "${CMAKE_CURRENT_BINARY_DIR}/${GXE_APP_NAME}") file(GENERATE @@ -177,7 +185,7 @@ function(create_gxe_application) CONTENT "#!/usr/bin/env bash export LD_LIBRARY_PATH=$(pwd):$(pwd)/${HOLOSCAN_INSTALL_LIB_DIR}:\${LD_LIBRARY_PATH} -./bin/gxe --app ${GXE_APP_YAML_RELATIVE_PATH} --manifest ${GXE_APP_MANIFEST_RELATIVE_PATH} $@ +${gxe_executable} --app ${GXE_APP_YAML_RELATIVE_PATH} --manifest ${GXE_APP_MANIFEST_RELATIVE_PATH} $@ " FILE_PERMISSIONS OWNER_READ OWNER_WRITE OWNER_EXECUTE GROUP_READ GROUP_EXECUTE WORLD_READ WORLD_EXECUTE ) diff --git a/cmake/modules/cpack/NOTICE.txt b/cmake/modules/cpack/NOTICE.txt index b3fba64c..38355b6b 100644 --- a/cmake/modules/cpack/NOTICE.txt +++ b/cmake/modules/cpack/NOTICE.txt @@ -69,6 +69,10 @@ jq (https://github.com/jqlang/jq) Copyright (C) 2012 Stephen Dolan authors. 
Licensed under MIT (https://github.com/jqlang/jq/raw/master/COPYING) +magic_enum (https://github.com/Neargye/magic_enum) +Copyright (c) 2019 - 2023 Daniil Goncharov +Licensed under MIT (https://github.com/Neargye/magic_enum/blob/v0.9.3/LICENSE) + AJA NTV2 SDK (https://github.com/ibstewart/ntv2) Copyright (c) 2021 AJA Video Systems Licensed under MIT (https://github.com/ibstewart/ntv2/blob/holoscan-v0.2.0/LICENSE) diff --git a/cmake/setup_dependencies.cmake b/cmake/setup_dependencies.cmake index d3658c51..7ccb6131 100644 --- a/cmake/setup_dependencies.cmake +++ b/cmake/setup_dependencies.cmake @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -39,14 +39,15 @@ superbuild_depend(expected_rapids) superbuild_depend(fmt_rapids) superbuild_depend(glfw_rapids) superbuild_depend(grpc) -superbuild_depend(gxf) superbuild_depend(hwloc) +superbuild_depend(magic_enum) superbuild_depend(spdlog_rapids) superbuild_depend(tensorrt) superbuild_depend(threads) superbuild_depend(ucx) superbuild_depend(v4l2) superbuild_depend(yaml-cpp_rapids) +superbuild_depend(gxf) # Testing dependencies if(HOLOSCAN_BUILD_TESTS) diff --git a/docs/Dockerfile b/docs/Dockerfile index 55d68f34..da82c18c 100644 --- a/docs/Dockerfile +++ b/docs/Dockerfile @@ -28,7 +28,7 @@ ARG DEBIAN_FRONTEND=noninteractive # Deadsnakes repo is added then package index files are updated # software-properties-common - Needed to use `add-apt-repository` # build-essential - Adds GNU/g++ compiler collection -# curl - Used to download Doxygen and Node.js +# curl - Used to download Doxygen # python3-pip - Needed for pip installs RUN apt-get update \ && apt-get install -y --no-install-recommends \ @@ -36,6 +36,7 @@ RUN apt-get update \ curl \ 
python3-pip \ gnupg \ + graphviz \ && rm -rf /var/lib/apt/lists/* # Install up to date doxygen for better C++ parsing with a few cases like @@ -48,21 +49,6 @@ RUN cd /tmp/ \ && cd .. \ && rm -rf doxygen* -# Install Node.js 20 using DEB packages -# https://github.com/nodesource/distributions#debian-and-ubuntu-based-distributions -RUN mkdir -p /etc/apt/keyrings \ - && curl -fsSL https://deb.nodesource.com/gpgkey/nodesource-repo.gpg.key \ - | gpg --dearmor -o /etc/apt/keyrings/nodesource.gpg \ - && echo "deb [signed-by=/etc/apt/keyrings/nodesource.gpg] https://deb.nodesource.com/node_20.x nodistro main" \ - | tee /etc/apt/sources.list.d/nodesource.list \ - && apt-get update \ - && apt-get install -y --no-install-recommends \ - nodejs \ - && rm -rf /var/lib/apt/lists/* - -# Install Mermaid CLI -RUN npm install --production -g @mermaid-js/mermaid-cli - # Install Python dependencies # Pinned additional sphinxcontrib-* extensions to specific versions to avoid following error: # "The sphinxcontrib.* extension used by this project needs at least Sphinx v5.0;" @@ -88,37 +74,14 @@ RUN python3 -m pip install --no-cache-dir \ FROM $BASE_IMAGE as docs-html # Copy over installed denpendencies from docs-base -COPY --from=docs-base /usr/bin/curl /usr/bin/curl -COPY --from=docs-base /usr/bin/node /usr/bin/node -COPY --from=docs-base /usr/bin/npx /usr/bin/npx -COPY --from=docs-base /usr/bin/npm /usr/bin/npm -COPY --from=docs-base /usr/bin/mmdc /usr/bin/mmdc -COPY --from=docs-base /usr/bin/corepack /usr/bin/corepack +COPY --from=docs-base /usr/bin/dot /usr/bin/dot COPY --from=docs-base /usr/local/bin/doxygen /usr/local/bin/doxygen COPY --from=docs-base /usr/local/lib/python3.10/dist-packages /usr/local/lib/python3.10/dist-packages COPY --from=docs-base /usr/local/bin/sphinx-build /usr/local/bin/sphinx-build COPY --from=docs-base /usr/local/bin/sphinx-autobuild /usr/local/bin/sphinx-autobuild -COPY --from=docs-base /usr/lib/node_modules /usr/lib/node_modules COPY --from=docs-base 
/usr/lib/python3/dist-packages /usr/lib/python3/dist-packages -COPY --from=docs-base /usr/include/node /usr/include/node -COPY --from=docs-base /usr/share/doc/nodejs /usr/share/doc/nodejs - -# Below logic needed due to copy cmds being used instead of local installs -# ------------------------------------------------------------------------ -# Update npm executable to invoke the npm module's CLI script and pass the -# current Node.js process as an argument -RUN echo '#!/usr/bin/env node' > /usr/bin/npm \ - && echo "require('/usr/lib/node_modules/npm/lib/cli.js')(process)" >> /usr/bin/npm && \ - # Update mmdc (mermaid-cli) executable to set the current process title to 'mmdc', - # invoke the mermaid module's CLI function, and print any error if one is encountered - echo '#!/usr/bin/env node' > /usr/bin/mmdc \ - && echo "process.title = 'mmdc'; \ - import('/usr/lib/node_modules/@mermaid-js/mermaid-cli/src/index.js') \ - .then(({ cli, error }) => cli().catch((exception) => error(exception instanceof Error ? exception.stack : exception))) \ - .catch((err) => { \ - console.error(err); \ - process.exit(1); \ - });" >> /usr/bin/mmdc +COPY --from=docs-base /usr/share/fonts /usr/share/fonts +COPY --from=docs-base /lib/x86_64-linux-gnu/ /lib/x86_64-linux-gnu/ ################################################################# # PDF docs image that installs pdf/latex dependencies to the base @@ -137,6 +100,3 @@ RUN apt-get update \ libgbm1 \ libasound2 \ && rm -rf /var/lib/apt/lists/* - -# Add configuration for for puppeteer -RUN echo '{"args": ["--no-sandbox"]}' >> /usr/bin/puppeteer-config.json diff --git a/docs/aja_setup.rst b/docs/aja_setup.rst index 35ff5112..c16184f7 100644 --- a/docs/aja_setup.rst +++ b/docs/aja_setup.rst @@ -14,14 +14,14 @@ applications as sysmem to GPU copies are eliminated from the processing pipeline. The following instructions describe the steps required to setup and use an AJA -device with RDMA support on Holoscan Developer Kits. 
Note that the AJA NTV2 +device with RDMA support on NVIDIA Developer Kits with a PCIe slot. Note that the AJA NTV2 SDK support for Holoscan includes all of the `AJA Developer Products`_, though the following instructions have only been verified for the `Corvid 44 12G BNC`_ and `KONA HDMI`_ products, specifically. .. Note:: - The addition of an AJA device to a Holoscan Developer Kit is + The addition of an AJA device to a NVIDIA Developer Kit is optional. The Holoscan SDK has elements that can be run with an AJA device with the additional features mentioned above, but those elements can also run without AJA. For example, there are Holoscan sample applications that have diff --git a/docs/api/holoscan_cpp_api.md b/docs/api/holoscan_cpp_api.md index 17cdfe64..86f08ee7 100644 --- a/docs/api/holoscan_cpp_api.md +++ b/docs/api/holoscan_cpp_api.md @@ -149,6 +149,7 @@ - {ref}`exhale_class_classholoscan_1_1Receiver` - {ref}`exhale_class_classholoscan_1_1SerializationBuffer` - {ref}`exhale_class_classholoscan_1_1StdComponentSerializer` +- {ref}`exhale_class_classholoscan_1_1StdEntitySerializer` - {ref}`exhale_class_classholoscan_1_1Transmitter` - {ref}`exhale_class_classholoscan_1_1UcxComponentSerializer` - {ref}`exhale_class_classholoscan_1_1UcxEntitySerializer` @@ -156,10 +157,10 @@ - {ref}`exhale_class_classholoscan_1_1UcxSerializationBuffer` - {ref}`exhale_class_classholoscan_1_1UcxTransmitter` - {ref}`exhale_class_classholoscan_1_1UnboundedAllocator` -- {ref}`exhale_class_classholoscan_1_1VideoStreamSerializer` #### Schedulers +- {ref}`exhale_class_classholoscan_1_1EventBasedScheduler` - {ref}`exhale_class_classholoscan_1_1GreedyScheduler` - {ref}`exhale_class_classholoscan_1_1MultiThreadScheduler` diff --git a/docs/cli/cli.md b/docs/cli/cli.md index 43f3766d..6304bd5f 100644 --- a/docs/cli/cli.md +++ b/docs/cli/cli.md @@ -6,7 +6,7 @@ ## Synopsis -`holoscan` [](#cli-help) [](#cli-log-level) {[package](./package.md),[run](./run.md),[version](./version.md)} +`holoscan` 
[](#cli-help) [](#cli-log-level) {[package](./package.md),[run](./run.md),[version](./version.md),[nics](./nics.md)} ## Positional Arguments diff --git a/docs/cli/package.md b/docs/cli/package.md index 5faea3dd..07d626ff 100755 --- a/docs/cli/package.md +++ b/docs/cli/package.md @@ -6,7 +6,7 @@ ## Synopsis -`holoscan package` [](#cli-help) [](#cli-log-level) [](#cli-package-config) [](#cli-package-docs) [](#cli-package-models) [](#cli-package-platform) [](#cli-package-platform-config) [](#cli-package-timeout) [](#cli-package-version) [](#cli-package-base-image) [](#cli-package-build-image) [](#cli-package-build-cache) [](#cli-package-cmake-args) [](#cli-package-no-cache) [](#cli-package-sdk) [](#cli-package-sdk-version) [](#cli-package-holoscan-sdk-file) [](#cli-package-monai-deploy-sdk-file) [](#cli-package-output) [](#cli-package-tag) [](#cli-package-username) [](#cli-package-uid) [](#cli-package-gid) [](#cli-package-application) +`holoscan package` [](#cli-help) [](#cli-log-level) [](#cli-package-config) [](#cli-package-docs) [](#cli-package-models) [](#cli-package-platform) [](#cli-package-platform-config) [](#cli-package-timeout) [](#cli-package-version) [](#cli-package-base-image) [](#cli-package-build-image) [](#cli-package-build-cache) [](#cli-package-cmake-args) [](#cli-package-no-cache) [](#cli-package-sdk) [](#cli-package-source) [](#cli-package-sdk-version) [](#cli-package-holoscan-sdk-file) [](#cli-package-monai-deploy-sdk-file) [](#cli-package-output) [](#cli-package-tag) [](#cli-package-username) [](#cli-package-uid) [](#cli-package-gid) [](#cli-package-application) ## Examples @@ -120,7 +120,6 @@ A comma-separated list of platform types to generate. Each platform value specif `PLATFORM` must be one of: `clara-agx-devkit`, `igx-orin-devkit`, `jetson-agx-orin-devkit`, `x64-workstation`. 
-- `clara-agx-devkit`: Clara AGX DevKit - `igx-orin-devkit`: IGX Orin DevKit - `jetson-agx-orin-devkit`: Orin AGX DevKit - `x64-workstation`: systems with a [x86-64](https://en.wikipedia.org/wiki/X86-64) processor(s) @@ -168,7 +167,7 @@ Optionally specifies the build container image for building C++ applications. It ### `[--build-cache BUILD_CACHE]` -Specifies a directory path for storing Docker cache. Defaults to `~/.holoscan_build_cache`. +Specifies a directory path for storing Docker cache. Defaults to `~/.holoscan_build_cache`. If the `$HOME` directory is inaccessible, the CLI uses the `/tmp` directory. (#cli-package-cmake-args)= @@ -194,6 +193,14 @@ Do not use cache when building image. SDK for building the application: Holoscan or MONAI-Deploy. `SDK` must be one of: holoscan, monai-deploy. +(#cli-package-source)= + +### `[--source URL|FILE]` + +Override the artifact manifest source with a securely hosted file or from the local file system. + +E.g. https://my.domain.com/my-file.json + (#cli-package-sdk-version)= ### `[--sdk-version SDK_VERSION]` @@ -259,3 +266,9 @@ It is recommended to use the default value of `1000` when packaging an applicati ### `[--gid GID]` Optional *group ID* to be associated with the user created with `--username` with default of `1000`. + +(#cli-package-source)= + +### `[--source PATH|URL]` + +Overrides the default manifest file source. This value can be a local file path or a HTTPS url. 
\ No newline at end of file diff --git a/docs/cli/run.md b/docs/cli/run.md index 24b2a010..5233bff6 100755 --- a/docs/cli/run.md +++ b/docs/cli/run.md @@ -21,7 +21,7 @@ spec: ## Synopsis -`holoscan run` [](#cli-help) [](#cli-log-level) [](#cli-run-address) [](#cli-run-driver) [](#cli-run-input) [](#cli-run-output) [](#cli-run-fragments) [](#cli-run-worker) [](#cli-run-worker-address) [](#cli-run-config) [](#cli-run-network) [](#cli-run-nic) [](#cli-run-use-all-nics) [](#cli-run-render) [](#cli-run-quiet) [](#cli-run-shm-size)[](#cli-run-terminal) [](#cli-run-device) [](#cli-run-uid) [](#cli-run-gid)[](#cli-run-image-tag) +`holoscan run` [](#cli-help) [](#cli-log-level) [](#cli-run-address) [](#cli-run-driver) [](#cli-run-input) [](#cli-run-output) [](#cli-run-fragments) [](#cli-run-worker) [](#cli-run-worker-address) [](#cli-run-config) [](#cli-run-network) [](#cli-run-nic) [](#cli-run-use-all-nics) [](#cli-run-render) [](#cli-run-quiet) [](#cli-run-shm-size)[](#cli-run-terminal) [](#cli-run-device) [](#cli-run-gpu) [](#cli-run-uid) [](#cli-run-gid)[](#cli-run-image-tag) ## Examples @@ -75,6 +75,11 @@ When specified, a directory mount is set up to the value defined in the environm Ensure that the directory on the host is accessible by the current user or the user specified with [--uid](#cli-run-uid). ::: +:::{note} +Use the host system path when running applications inside Docker (DooD). +::: + + (#cli-run-output)= ### `[--output|-o OUTPUT]` @@ -212,6 +217,24 @@ holoscan run --render --device ajantv0 video1 -- my-application-image:1.0 ::: +(#cli-run-gpu)= + +### `[--gpu]` + +Override the value of the `NVIDIA_VISIBLE_DEVICES` environment variable with the default value set to +the value defined in the [package manifest file](./hap.md#package-manifest) or `all` if undefined. + +Refer to the [GPU Enumeration](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/docker-specialized.html#gpu-enumeration) +page for all available options. 
+ +:::{note} +The default value is `nvidia.com/igpu=0` when running a HAP built for iGPU on a system with both iGPU and dGPU, +::: + +:::{note} +A single integer value translates to the device index, not the number of GPUs. +::: + (#cli-run-uid)= ### `[--uid UID]` diff --git a/docs/components/schedulers.md b/docs/components/schedulers.md index fc8adad1..1a8b43e5 100644 --- a/docs/components/schedulers.md +++ b/docs/components/schedulers.md @@ -5,9 +5,10 @@ The Scheduler component is a critical part of the system responsible for governi The Holoscan SDK offers multiple schedulers that can cater to various use cases. These schedulers are: 1. [Greedy Scheduler](#greedy-scheduler): This basic single-threaded scheduler tests conditions in a greedy manner. It is suitable for simple use cases and provides predictable execution. However, it may not be ideal for large-scale applications as it may incur significant overhead in condition execution. -2. [MultiThread Scheduler](#multithreadscheduler): The MultiThread Scheduler is designed to handle complex execution patterns in large-scale applications. This scheduler consists of a dispatcher thread that monitors the status of each operator and dispatches it to a thread pool of worker threads responsible for executing them. Once execution is complete, worker threads enqueue the operator back on the dispatch queue. The MultiThread Scheduler offers superior performance and scalability over the Greedy Scheduler. +2. [MultiThread Scheduler](#multithreadscheduler): The multithread scheduler is designed to handle complex execution patterns in large-scale applications. This scheduler consists of a dispatcher thread that monitors the status of each operator and dispatches it to a thread pool of worker threads responsible for executing them. Once execution is complete, worker threads enqueue the operator back on the dispatch queue. The multithread scheduler offers superior performance and scalability over the greedy scheduler. +3. 
[Event-Based Scheduler](#eventbasedscheduler): The event-based scheduler is also a multi-thread scheduler, but as the name indicates it is event-based rather than polling based. Instead of having a thread that constantly polls for the execution readiness of each operator, it instead waits for an event to be received which indicates that an operator is ready to execute. The event-based scheduler will have a lower latency than using the multi-thread scheduler with a long polling interval (`check_recession_period_ms`), but without the high CPU usage seen for a multi-thread scheduler with a very short polling interval. -It is essential to select the appropriate scheduler for the use case at hand to ensure optimal performance and efficient resource utilization. +It is essential to select the appropriate scheduler for the use case at hand to ensure optimal performance and efficient resource utilization. Since most parameters of the schedulers overlap, it is easy to switch between them to test which may be most performant for a given application. :::{note} Detailed APIs can be found here: {ref}`C++ `/{py:mod}`Python `). @@ -24,9 +25,16 @@ The greedy scheduler has a few parameters that the user can configure. - This scheduler also has a boolean parameter, `stop_on_deadlock` that controls whether the application will terminate if a deadlock occurs. A deadlock occurs when all operators are in a `WAIT` state, but there is no periodic condition pending to break out of this state. This parameter is `true` by default. - When setting the `stop_on_deadlock_timeout` parameter, the scheduler will wait this amount of time (in ms) before determining that it is in deadlock and should stop. It will reset if a job comes in during the wait. A negative value means no stop on deadlock. This parameter only applies when `stop_on_deadlock=true`. -## MultiThreadScheduler +## Multithread Scheduler -The multithread scheduler has several parameters that the user can configure. 
These are a superset of the parameters available for the GreedyScheduler (described in the section above). Only the parameters unique to the multithread scheduler are described here. +The multithread scheduler has several parameters that the user can configure. These are a superset of the parameters available for the `GreedyScheduler` (described in the section above). Only the parameters unique to the multithread scheduler are described here. The multi-thread scheduler uses a dedicated thread to poll the status of operators and schedule any that are ready to execute. This will lead to high CPU usage by this polling thread when `check_recession_period_ms` is close to 0. - The number of worker threads used by the scheduler can be set via `worker_thread_number`, which defaults to `1`. This should be set based on a consideration of both the workflow and the available hardware. For example, the topology of the computation graph will determine how many operators it may be possible to run in parallel. Some operators may potentially launch multiple threads internally, so some amount of performance profiling may be required to determine optimal parameters for a given workflow. - The value of `check_recession_period_ms` controls how long the scheduler will sleep before checking a given condition again. In other words, this is the polling interval for operators that are in a `WAIT` state. The default value for this parameter is `5` ms. + + +## Event-Based Scheduler + +The event-based scheduler is also a multi-thread scheduler, but it is event-based rather than polling based. As such, there is no `check_recession_period_ms` parameter, and this scheduler will not have the high CPU usage that can occur when polling at a short interval. Instead, the scheduler only wakes up when an event is received indicating that an operator is ready to execute. The parameters of this scheduler are a superset of the parameters available for the `GreedyScheduler` (described above). 
Only the parameters unique to the event-based scheduler are described here. + +- The number of worker threads used by the scheduler can be set via `worker_thread_number`, which defaults to `1`. This should be set based on a consideration of both the workflow and the available hardware. For example, the topology of the computation graph will determine how many operators it may be possible to run in parallel. Some operators may potentially launch multiple threads internally, so some amount of performance profiling may be required to determine optimal parameters for a given workflow. diff --git a/docs/conf.py b/docs/conf.py index 9e976a2f..c684f39c 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -34,7 +34,7 @@ # -- Project information ----------------------------------------------------- project = "Holoscan SDK" -copyright = "2022-2023, NVIDIA" # noqa: A001 +copyright = "2022-2024, NVIDIA" # noqa: A001 author = "NVIDIA" # The full version, including alpha/beta/rc tags @@ -57,13 +57,14 @@ "exhale", "myst_parser", "numpydoc", + "sphinx.ext.graphviz", "sphinx.ext.autosectionlabel", # https://docs.readthedocs.io/en/stable/guides/cross-referencing-with-sphinx.html#automatically-label-sections # noqa: E501 "sphinx.ext.autodoc", # needed for Python API docs (provides automodule) "sphinx.ext.autosummary", # needed for Python API docs (provides autosummary) - "sphinxcontrib.mermaid", # https://sphinxcontrib-mermaid-demo.readthedocs.io/en/latest/ "sphinx_design", # https://sphinx-design.readthedocs.io/en/latest/ ] + # Make sure the target is unique autosectionlabel_prefix_document = True # Set the maximum depth of the section label @@ -80,6 +81,9 @@ # Enabling to be consistent with prior documentation numfig = True +# -- Options for graphviz output --------------------------------------------- +graphviz_output_format = "svg" + # -- Options for HTML output ------------------------------------------------- # The theme to use for HTML and HTML Help pages. 
See the documentation for @@ -211,21 +215,6 @@ autosummary_generate = False -# -- Options for sphinx-mermaid ------------------------------------------------ -# Reference: https://github.com/mgaitan/sphinxcontrib-mermaid/issues/44 -# (we eventually use mmdc's PDF generation capability instead) - -mermaid_version = "9.2.2" - -# Use PDF diagram for latex PDF generation (with cropping the generated PDF) -if tags.has("noapi"): # noqa: F821 - mermaid_cmd = "/usr/bin/mmdc" - # 'docker/docs-builder/Dockerfile' creates 'puppeteer-config.json' - mermaid_params = ["-p", "/usr/bin/puppeteer-config.json"] - # pdfcrop is installed in the docker image (by 'texlive-extra-utils') - mermaid_pdfcrop = "pdfcrop" - mermaid_output_format = "pdf" - # -- Options for Sphinx -------------------------------------------------------- # Tell sphinx what the primary language being documented is. diff --git a/docs/deployment_stack.md b/docs/deployment_stack.md index f6a93149..335d52f8 100644 --- a/docs/deployment_stack.md +++ b/docs/deployment_stack.md @@ -3,14 +3,14 @@ NVIDIA Holoscan accelerates deployment of production-quality applications by providing a set of **OpenEmbedded** build recipes and reference configurations that can be leveraged to customize and build Holoscan-compatible Linux4Tegra (L4T) -embedded board support packages (BSP) on Holoscan Developer Kits. +embedded board support packages (BSP) on the NVIDIA IGX Developer Kits. [Holoscan OpenEmbedded/Yocto recipes](https://github.com/nvidia-holoscan/meta-tegra-holoscan) add -OpenEmbedded recipes and sample build configurations to build BSPs for NVIDIA Holoscan Developer Kits +OpenEmbedded recipes and sample build configurations to build BSPs for the NVIDIA IGX Developer Kit that feature support for discrete GPUs (dGPU), AJA Video Systems I/O boards, and the Holoscan SDK. -These BSPs are built on a developer's host machine and are then flashed onto a Holoscan Developer Kit -using provided scripts. 
+These BSPs are built on a developer's host machine and are then flashed onto the NVIDIA IGX +Developer Kit using provided scripts. There are two options available to set up a build environment and start building Holoscan BSP images using OpenEmbedded. diff --git a/docs/emergent_setup.md b/docs/emergent_setup.md index 02cfac6b..585e27c9 100644 --- a/docs/emergent_setup.md +++ b/docs/emergent_setup.md @@ -1,18 +1,12 @@ (emergent-vision-tech)= # Emergent Vision Technologies (EVT) -Thanks to a collaboration with [Emergent Vision Technologies](https://emergentvisiontec.com/), the Holoscan SDK now supports EVT high-speed cameras. - -:::{note} -The addition of an EVT camera to the Holoscan Developer Kits -is optional. The Holoscan SDK has an application that can be run with the EVT camera, -but there are other applications that can be run without EVT camera. -::: +Thanks to a collaboration with [Emergent Vision Technologies](https://emergentvisiontec.com/), the Holoscan SDK now supports EVT high-speed cameras on NVIDIA Developer Kits equipped with a [ConnectX NIC](https://www.nvidia.com/en-us/networking/ethernet-adapters/) using the [Rivermax SDK](https://developer.nvidia.com/networking/rivermax). (emergent-hw-install)= ## Installing EVT Hardware -The EVT cameras can be connected to Holoscan Developer Kits though [Mellanox ConnectX SmartNIC](https://www.nvidia.com/en-us/networking/ethernet-adapters/), with the most simple connection method being a single cable between a camera and the devkit. +The EVT cameras can be connected to NVIDIA Developer Kits through a [Mellanox ConnectX SmartNIC](https://www.nvidia.com/en-us/networking/ethernet-adapters/), with the most simple connection method being a single cable between a camera and the devkit. 
For 25 GigE cameras that use the SFP28 interface, this can be achieved by using [SFP28](https://store.nvidia.com/en-us/networking/store/product/MCP2M00-A001E30N/NVIDIAMCP2M00A001E30NDACCableEthernet25GbESFP281m/) cable with [QSFP28 to SFP28 adaptor](https://store.nvidia.com/en-us/networking/store/product/MAM1Q00A-QSA28/NVIDIAMAM1Q00AQSA28CableAdapter100Gbsto25GbsQSFP28toSFP28/). :::{note} diff --git a/docs/examples/byom.md b/docs/examples/byom.md index b6fc7072..0f3ac04f 100644 --- a/docs/examples/byom.md +++ b/docs/examples/byom.md @@ -61,15 +61,14 @@ You can also follow along using your own dataset by adjusting the operator param The video stream replayer supports reading video files that are encoded as gxf entities. These files are provided with the ultrasound dataset as the `ultrasound_256x256.gxf_entities` and `ultrasound_256x256.gxf_index` files. :::{note} -To use your own video data, you can use the `convert_video_to_gxf_entities.py` script from [here](https://github.com/nvidia-holoscan/holoscan-sdk/tree/main/scripts#convert_video_to_gxf_entitiespy) to encode your video. - +To use your own video data, you can use the `convert_video_to_gxf_entities.py` script (installed in `/opt/nvidia/holoscan/bin` or [on GitHub](https://github.com/nvidia-holoscan/holoscan-sdk/tree/main/scripts#convert_video_to_gxf_entitiespy)) to encode your video. Note that - using this script - the metadata in the generated GXF tensor files will indicate that the data should be copied to the GPU on read. ::: ### Input model Currently, the inference operators in Holoscan are able to load [ONNX models](https://onnx.ai/), or [TensorRT](https://developer.nvidia.com/tensorrt) engine files built for the GPU architecture on which you will be running the model. TensorRT engines are automatically generated from ONNX by the operators when the applications run. -If you are converting your model from PyTorch to ONNX, chances are your input is NCHW and will need to be converted to NHWC. 
We provide an example [transformation script on Github](https://github.com/nvidia-holoscan/holoscan-sdk/tree/main/scripts#graph_surgeonpy) named `graph_surgeon.py`. You may need to modify the dimensions as needed before modifying your model. +If you are converting your model from PyTorch to ONNX, chances are your input is NCHW and will need to be converted to NHWC. We provide an example transformation script named `graph_surgeon.py`, installed in `/opt/nvidia/holoscan/bin` or available [on GitHub](https://github.com/nvidia-holoscan/holoscan-sdk/tree/main/scripts#graph_surgeonpy). You may need to modify the dimensions as needed before modifying your model. :::{tip} To get a better understanding of your model, and if this step is necessary, websites such as [netron.app](https://netron.app/) can be used. diff --git a/docs/examples/ping_custom_op.md b/docs/examples/ping_custom_op.md index 083bc40f..f750d55c 100644 --- a/docs/examples/ping_custom_op.md +++ b/docs/examples/ping_custom_op.md @@ -19,28 +19,18 @@ The example source code and run instructions can be found in the [examples](http Here is the diagram of the operators and workflow used in this example. 
-```{mermaid} +```{digraph} custom_op :align: center :caption: A linear workflow with new custom operator -%%{init: {"theme": "base", "themeVariables": { "fontSize": "16px"}} }%% + rankdir="LR" + node [shape=record]; -classDiagram - direction LR - - PingTxOp --|> PingMxOp : out...in - PingMxOp --|> PingRxOp : out...in - - class PingTxOp { - out(out) int - } - class PingMxOp { - [in]in: int - out(out) int - } - class PingRxOp { - [in]in: int - } + tx [label="PingTxOp| |out(out) : int"]; + mx [label="PingMxOp| [in]in : int | out(out) : int "]; + rx [label="PingRxOp| [in]in : int | "]; + tx -> mx [label="out...in"] + mx -> rx [label="out...in"] ``` Compared to the previous example, we are adding a new **PingMxOp** operator between the diff --git a/docs/examples/ping_multi_port.md b/docs/examples/ping_multi_port.md index 977c6f99..df56538c 100644 --- a/docs/examples/ping_multi_port.md +++ b/docs/examples/ping_multi_port.md @@ -17,33 +17,20 @@ The example source code and run instructions can be found in the [examples](http Here is the diagram of the operators and workflow used in this example. 
-```{mermaid} +```{digraph} ping_multi_port :align: center :caption: A workflow with multiple inputs and outputs -%%{init: {"theme": "base", "themeVariables": { "fontSize": "16px"}} }%% - -classDiagram - direction LR - - PingTxOp --|> PingMxOp : out1...in1 - PingTxOp --|> PingMxOp : out2...in2 - PingMxOp --|> PingRxOp : out1...receivers - PingMxOp --|> PingRxOp : out2...receivers - - class PingTxOp { - out1(out) ValueData - out2(out) ValueData - } - class PingMxOp { - [in]in1 : ValueData - [in]in2 : ValueData - out1(out) ValueData - out2(out) ValueData - } - class PingRxOp { - [in]receivers : ValueData - } + rankdir="LR" + node [shape=record]; + + tx [label="PingTxOp| |out1(out) : ValueData\nout2(out) : ValueData"]; + mx [label="PingMxOp|[in]in1 : ValueData\n[in]in2 : ValueData|out1(out) : ValueData\nout2(out) : ValueData"]; + rx [label="PingRxOp|[in]receivers : ValueData | "]; + tx -> mx [label="out1...in1"] + tx -> mx [label="out2...in2"] + mx -> rx [label="out1...receivers"] + mx -> rx [label="out2...receivers"] ``` In this example, `PingTxOp` sends a stream of odd integers to the `out1` port, and even integers to the `out2` port. `PingMxOp` receives these values using `in1` and `in2` ports, multiplies them by a constant factor, then forwards them to a single port - `receivers` - on `PingRxOp`. @@ -494,6 +481,6 @@ Running the application should give you output similar to the following in your ``` :::{note} -Depending on your log level you may see more or fewer messages. The output above was generated using the default value of `INFO`. +Depending on your log level you may see more or fewer messages. The output above was generated using the default value of `INFO`. Refer to the {ref}`Logging` section for more details on how to set the log level. 
::: diff --git a/docs/examples/ping_simple.md b/docs/examples/ping_simple.md index 8546ba9d..a8134b75 100644 --- a/docs/examples/ping_simple.md +++ b/docs/examples/ping_simple.md @@ -16,23 +16,17 @@ The example source code and run instructions can be found in the [examples](http Here is a example workflow involving two operators that are connected linearly. -```{mermaid} +```{digraph} ping_simple :align: center :caption: A linear workflow -%%{init: {"theme": "base", "themeVariables": { "fontSize": "16px"}} }%% + rankdir="LR" -classDiagram - direction LR + node [shape=record]; - PingTxOp --|> PingRxOp : out...in - - class PingTxOp { - out(out) int - } - class PingRxOp { - [in]in : int - } + tx [label="PingTxOp| |out(out) : int"]; + rx [label="PingRxOp|[in]in : int | "]; + tx -> rx [label="out...in"] ``` In this example, the source operator **PingTxOp** produces integers from 1 to 10 and passes it to the sink operator **PingRxOp** which prints the integers to standard output. diff --git a/docs/examples/video_replayer.md b/docs/examples/video_replayer.md index f1011042..46945749 100644 --- a/docs/examples/video_replayer.md +++ b/docs/examples/video_replayer.md @@ -19,23 +19,17 @@ The example source code and run instructions can be found in the [examples](http Here is the diagram of the operators and workflow used in this example. 
-```{mermaid} +```{digraph} video_replayer :align: center :caption: Workflow to load and display video from a file -%%{init: {"theme": "base", "themeVariables": { "fontSize": "16px"}} }%% + rankdir="LR" + node [shape=record]; -classDiagram - direction LR + replayer [label="VideoStreamReplayerOp| |output(out) : Tensor"]; + viz [label="HolovizOp|[in]receivers : Tensor | "]; - VideoStreamReplayerOp --|> HolovizOp : output...receivers - - class VideoStreamReplayerOp { - output(out) Tensor - } - class HolovizOp { - [in]receivers : Tensor - } + replayer -> viz [label="output...receivers"] ``` We connect the "output" port of the replayer operator to the "receivers" port of the Holoviz @@ -43,7 +37,7 @@ operator. ## Video Stream Replayer Operator -The built-in video stream replayer operator can be used to replay a video stream that has been encoded as gxf entities. You can use the [convert_video_to_gxf_entities.py](https://github.com/nvidia-holoscan/holoscan-sdk/tree/main/scripts#convert_video_to_gxf_entitiespy) script to encode a video file as gxf entities for use by this operator. +The built-in video stream replayer operator can be used to replay a video stream that has been encoded as gxf entities. You can use the `convert_video_to_gxf_entities.py` script (installed in `/opt/nvidia/holoscan/bin` or available [on GitHub](https://github.com/nvidia-holoscan/holoscan-sdk/tree/main/scripts#convert_video_to_gxf_entitiespy)) to encode a video file as gxf entities for use by this operator. This operator processes the encoded file sequentially and supports realtime, faster than realtime, or slower than realtime playback of prerecorded data. The input data can optionally be repeated to loop forever or only for a specified count. For more details, see {ref}`operators-video-stream-replayer`. 
diff --git a/docs/examples/video_replayer_distributed.md b/docs/examples/video_replayer_distributed.md index 4c0cff47..f77062f8 100644 --- a/docs/examples/video_replayer_distributed.md +++ b/docs/examples/video_replayer_distributed.md @@ -18,23 +18,17 @@ The example source code and run instructions can be found in the [examples](http Here is the diagram of the operators and workflow used in this example. -```{mermaid} +```{digraph} video_replayer_distributed :align: center :caption: Workflow to load and display video from a file -%%{init: {"theme": "base", "themeVariables": { "fontSize": "16px"}} }%% + rankdir="LR" + node [shape=record]; -classDiagram - direction LR + replayer [label="VideoStreamReplayerOp| |output(out) : Tensor"]; + viz [label="HolovizOp|[in]receivers : Tensor | "]; - VideoStreamReplayerOp --|> HolovizOp : output...receivers - - class VideoStreamReplayerOp { - output(out) Tensor - } - class HolovizOp { - [in]receivers : Tensor - } + replayer -> viz [label="output...receivers"] ``` This is the same workflow as the [single fragment video replayer](./video_replayer.md), each operator is assigned to a separate fragment and there is now a network connection between the fragments. diff --git a/docs/holoscan_create_app.md b/docs/holoscan_create_app.md index 013fbd7a..61d8850e 100644 --- a/docs/holoscan_create_app.md +++ b/docs/holoscan_create_app.md @@ -86,21 +86,21 @@ It is also possible to instead launch the application asynchronously (i.e. non-b `````{tab-set} ````{tab-item} C++ -This can be done simply by replacing the call to {cpp:func}`run()` with {cpp:func}`run_async()` which returns a `std::future`. Calling `future.wait()` will block until the application has finished running. +This can be done simply by replacing the call to {cpp:func}`run()` with {cpp:func}`run_async()` which returns a `std::future`. Calling `future.get()` will block until the application has finished running and throw an exception if a runtime error occurred during execution. 
```{code-block} cpp :emphasize-lines: 3-4 :name: holoscan-app-skeleton-cpp-async int main() { auto app = holoscan::make_application(); - future = app->run_async(); - future.wait(); + auto future = app->run_async(); + future.get(); return 0; } ``` ```` ````{tab-item} Python -This can be done simply by replacing the call to {py:func}`run()` with {py:func}`run_async()` which returns a Python `concurrent.futures.Future`. Calling `future.result()` will block until the application has finished running. +This can be done simply by replacing the call to {py:func}`run()` with {py:func}`run_async()` which returns a Python `concurrent.futures.Future`. Calling `future.result()` will block until the application has finished running and raise an exception if a runtime error occurred during execution. ```{code-block} python :emphasize-lines: 3-4 :name: holoscan-app-skeleton-python-async @@ -521,7 +521,7 @@ def compose(self): ```` :::{note} -Python operators that wrap an underlying C++ operator currently do not accept resources as positional arguments. Instead one needs to call the {py:func}`add_arg()` method after the object has been constructed to add the resource. +Python operators that wrap an underlying C++ operator currently do not accept resources as positional arguments. Instead one needs to call the {py:func}`add_arg()` method after the object has been constructed to add the resource. ::: (configuring-app-scheduler)= @@ -532,7 +532,7 @@ The [scheduler](./components/schedulers.md) controls how the application schedul The default scheduler is a single-threaded [`GreedyScheduler`](./components/schedulers.md#greedy-scheduler). An application can be configured to use a different scheduler `Scheduler` ({cpp:class}`C++ `/{py:class}`Python `) or change the parameters from the default scheduler, using the `scheduler()` function ({cpp:func}`C++ `/{py:func}`Python `). 
-For example, if an application needs to run multiple operators in parallel, a [`MultiThreadScheduler`](./components/schedulers.md#multithreadscheduler) can instead be used. +For example, if an application needs to run multiple operators in parallel, the [`MultiThreadScheduler`](./components/schedulers.md#multithreadscheduler) or [`EventBasedScheduler`](./components/schedulers.md#eventbasedscheduler) can instead be used. The difference between the two is that the MultiThreadScheduler is based on actively polling operators to determine if they are ready to execute, while the EventBasedScheduler will instead wait for an event indicating that an operator is ready to execute. The code snippet belows shows how to set and configure a non-default scheduler: @@ -545,7 +545,7 @@ The code snippet belows shows how to set and configure a non-default scheduler: :name: holoscan-config-scheduler-cpp auto app = holoscan::make_application(); -auto scheduler = app->make_scheduler( +auto scheduler = app->make_scheduler( "myscheduler", Arg("worker_thread_number", 4), Arg("stop_on_deadlock", true) @@ -562,7 +562,7 @@ app->run(); :name: holoscan-config-scheduler-python app = App() -scheduler = holoscan.schedulers.MultiThreadScheduler( +scheduler = holoscan.schedulers.EventBasedScheduler( app, name="myscheduler", worker_thread_number=4, @@ -600,17 +600,14 @@ instantiation and execution order of the operators. The simplest form of a workflow would be a single operator. -```{mermaid} +```{digraph} myop :align: center :caption: A one-operator workflow -%%{init: {"theme": "base", "themeVariables": { "fontSize": "16px"}} }%% + rankdir="LR" + node [shape=record]; -classDiagram - direction LR - - class MyOp { - } + myop [label="MyOp| | "]; ``` The graph above shows an **Operator** ({cpp:class}`C++ `/{py:class}`Python `) (named `MyOp`) that has neither inputs nor output ports. 
@@ -658,28 +655,18 @@ class App(Application): Here is an example workflow where the operators are connected linearly: -```{mermaid} +```{digraph} linear_workflow :align: center :caption: A linear workflow -%%{init: {"theme": "base", "themeVariables": { "fontSize": "16px"}} }%% - -classDiagram - direction LR + rankdir="LR" + node [shape=record]; - SourceOp --|> ProcessOp : output...input - ProcessOp --|> SinkOp : output...input - - class SourceOp { - output(out) Tensor - } - class ProcessOp { - [in]input : Tensor - output(out) Tensor - } - class SinkOp { - [in]input : Tensor - } + sourceop [label="SourceOp| |output(out) : Tensor"]; + processop [label="ProcessOp| [in]input : Tensor | output(out) : Tensor "]; + sinkop [label="SinkOp| [in]input : Tensor | "]; + sourceop -> processop [label="output...input"] + processop -> sinkop [label="output...input"] ``` In this example, **SourceOp** produces a message and passes it to **ProcessOp**. **ProcessOp** produces another message and passes it to **SinkOp**. 
@@ -738,53 +725,27 @@ class App(Application): You can design a complex workflow like below where some operators have multi-inputs and/or multi-outputs: -```{mermaid} +```{digraph} complex_workflow :align: center :caption: A complex workflow (multiple inputs and outputs) -%%{init: {"theme": "base", "themeVariables": { "fontSize": "16px"}} }%% - -classDiagram - direction TB - - Reader1 --|> Processor1 : image...{image1,image2}\nmetadata...metadata - Reader2 --|> Processor2 : roi...roi - Processor1 --|> Processor2 : image...image - Processor2 --|> Processor3 : image...image - Processor2 --|> Notifier : image...image - Processor1 --|> Writer : image...image - Processor3 --|> Writer : seg_image...seg_image - - class Reader1 { - image(out) - metadata(out) - } - class Reader2 { - roi(out) - } - class Processor1 { - [in]image1 - [in]image2 - [in]metadata - image(out) - } - class Processor2 { - [in]image - [in]roi - image(out) - } - class Processor3 { - [in]image - seg_image(out) - } - class Writer { - [in]image - [in]seg_image - } - class Notifier { - [in]image - } - + node [shape=record]; + + reader1 [label="{Reader1| |image(out)\nmetadata(out)}"]; + reader2 [label="{Reader2| |roi(out)}"]; + processor1 [label="{Processor1|[in]image1\n[in]image2\n[in]metadata|image(out)}"]; + processor2 [label="{Processor2|[in]image\n[in]roi|image(out)}"]; + processor3 [label="{Processor3|[in]image|seg_image(out)}"]; + writer [label="{Writer|[in]image\n[in]seg_image| }"]; + notifier [label="{Notifier|[in]image| }"]; + + reader1->processor1 [label="image...{image1,image2}\nmetadata...metadata"] + reader2->processor2 [label="roi...roi"] + processor1->processor2 [label="image...image"] + processor1->writer [label="image...image"] + processor2->notifier [label="image...image"] + processor2->processor3 [label="image...image"] + processor3->writer [label="seg_image...seg_image"] ``` diff --git a/docs/holoscan_create_distributed_app.md b/docs/holoscan_create_distributed_app.md index 
f11e359f..2e698de0 100644 --- a/docs/holoscan_create_distributed_app.md +++ b/docs/holoscan_create_distributed_app.md @@ -238,7 +238,7 @@ You can set environment variables to modify the default actions of services and - **HOLOSCAN_HEALTH_CHECK_PORT** : designates the port number on which the Health Checking Service is launched. It must be an integer value representing a valid port number. If unspecified, it defaults to `8777`. -- **HOLOSCAN_DISTRIBUTED_APP_SCHEDULER** : controls which scheduler is used for distributed applications. It can be set to either `greedy` or `multithread`. If unspecified, the default scheduler is `multithread`. +- **HOLOSCAN_DISTRIBUTED_APP_SCHEDULER** : controls which scheduler is used for distributed applications. It can be set to either `greedy`, `multi_thread` or `event_based`. `multithread` is also allowed as a synonym for `multi_thread` for backwards compatibility. If unspecified, the default scheduler is `multi_thread`. - **HOLOSCAN_STOP_ON_DEADLOCK** : can be used in combination with `HOLOSCAN_DISTRIBUTED_APP_SCHEDULER` to control whether or not the application will automatically stop on deadlock. Values of "True", "1" or "ON" will be interpreted as true (enable stop on deadlock). It is true if unspecified. This environment variable is only used when `HOLOSCAN_DISTRIBUTED_APP_SCHEDULER` is explicitly set. @@ -294,6 +294,120 @@ A table of the types that have codecs pre-registered so that they can be seriali | GXF-specific types | nvidia::gxf::TimeStamp, nvidia::gxf::EndOfStream | +:::{warning} +If an operator transmitting both CPU and GPU tensors is to be used in distributed applications, the same output port cannot mix both GPU and CPU tensors. CPU and GPU tensor outputs should be placed on separate output ports. This is a limitation of the underlying UCX library being used for zero-copy tensor serialization between operators. 
+ +As a concrete example, assume an operator, `MyOperator` with a single output port named "out" defined in its setup method. If the output port is only ever going to connect to other operators within a fragment, but never across fragments then it is okay to have a `TensorMap` with a mixture of host and device arrays on that single port. + +`````{tab-set} +````{tab-item} C++ + +```cpp +void MyOperator::setup(OperatorSpec& spec) { + spec.output("out"); +} + +void MyOperator::compute(InputContext& op_input, OutputContext& op_output, ExecutionContext& context) { + + // omitted: some computation resulting in multiple holoscan::Tensors + // (two on CPU ("cpu_coords_tensor" and "cpu_metric_tensor") and one on device ("gpu_tensor")). + + TensorMap out_message; + + // insert all tensors in one TensorMap (mixing CPU and GPU tensors is okay when ports only connect within a Fragment) + out_message.insert({"coordinates", cpu_coords_tensor}); + out_message.insert({"metrics", cpu_metric_tensor}); + out_message.insert({"mask", gpu_tensor}); + + op_output.emit(out_message, "out"); +} + +``` + +```` +````{tab-item} Python + +```python +class MyOperator: + + def setup(self, spec: OperatorSpec): + spec.output("out") + + + def compute(self, op_input, op_output, context): + # Omitted: assume some computation resulting in three holoscan::Tensor or tensor-like + # objects. Two on CPU ("cpu_coords_tensor" and "cpu_metric_tensor") and one on device + # ("gpu_tensor"). + + # mixing CPU and GPU tensors in a single dict is okay only for within-Fragment connections + op_output.emit( + dict( + coordinates=cpu_coords_tensor, + metrics=cpu_metrics_tensor, + mask=gpu_tensor, + ), + "out" + ) +``` +````` + +However, this mixing of CPU and GPU arrays on a single port will not work for distributed apps and instead separate ports should be used if it is necessary for an operator to communicate across fragments. 
+ +`````{tab-set} +````{tab-item} C++ + +```cpp +void MyOperator::setup(OperatorSpec& spec) { + spec.output("out_host"); + spec.output("out_device"); +} + +void MyOperator::compute(InputContext& op_input, OutputContext& op_output, ExecutionContext& context) { + + // some computation resulting in holoscan::Tensors, two on CPU ("cpu_coordinates_tensor", "cpu_metrics_tensor") and one on device ("gpu_tensor"). + TensorMap out_message_host; + TensorMap out_message_device; + + // put all CPU tensors on one port + out_message_host.insert({"coordinates", cpu_coordinates_tensor}); + out_message_host.insert({"metrics", cpu_metrics_tensor}); + op_output.emit(out_message_host, "out_host"); + + // put all GPU tensors on another + out_message_device.insert({"mask", gpu_tensor}); + op_output.emit(out_message_device, "out_device"); +} +``` + +```` +````{tab-item} Python + +```python +class MyOperator: + + def setup(self, spec: OperatorSpec): + spec.output("out_host") + spec.output("out_device") + + + def compute(self, op_input, op_output, context): + # Omitted: assume some computation resulting in three holoscan::Tensor or tensor-like + # objects. Two on CPU ("cpu_coords_tensor" and "cpu_metric_tensor") and one on device + # ("gpu_tensor"). + + # split CPU and GPU tensors across ports for compatibility with inter-fragment communication + op_output.emit( + dict(coordinates=cpu_coords_tensor, metrics=cpu_metric_tensor), + "out_host" + ) + op_output.emit(dict(mask=gpu_tensor), "out_device") +``` + +```` +````` +::: + + ### Python For the Python API, any array-like object supporting the [DLPack](https://dmlc.github.io/dlpack/latest/) interface, [`__array_interface__`](https://numpy.org/doc/stable/reference/arrays.interface.html) or [`__cuda_array_interface__`](https://numba.readthedocs.io/en/stable/cuda/cuda_array_interface.html) will be transmitted using {py:class}`~holoscan.core.Tensor` serialization. This is done to avoid data copies for performance reasons. 
Objects of type `list[holoscan.HolovizOp.InputSpec]` will be sent using the underlying C++ serializer for `std::vector`. All other Python objects will be serialized to/from a `std::string` using the [cloudpickle](https://github.com/cloudpipe/cloudpickle) library. diff --git a/docs/holoscan_create_operator.md b/docs/holoscan_create_operator.md index 0d5c6598..f7a9f485 100644 --- a/docs/holoscan_create_operator.md +++ b/docs/holoscan_create_operator.md @@ -37,21 +37,20 @@ We will cover how to use {ref}`Conditions compute - compute --> compute - compute --> stop + start [label="start"] + compute [label="compute"] + stop [label="stop"] + start -> compute + compute -> compute + compute -> stop ``` We can override the default behavior of the operator by implementing the above methods. The following example shows how to implement a custom operator that overrides start, stop and compute methods. @@ -689,7 +688,7 @@ components: parameters: allocator: allocator - name: entity_serializer - type: nvidia::holoscan::stream_playback::VideoStreamSerializer # inheriting from nvidia::gxf::EntitySerializer + type: nvidia::gxf::StdEntitySerializer parameters: component_serializers: [component_serializer] - type: MyRecorder @@ -738,7 +737,7 @@ void MyRecorderOp::initialize() { // Set up prerequisite parameters before calling GXFOperator::initialize() auto frag = fragment(); auto serializer = - frag->make_resource("serializer"); + frag->make_resource("serializer"); add_arg(Arg("serializer") = serializer); GXFOperator::initialize(); @@ -765,7 +764,7 @@ components: parameters: allocator: allocator - name: entity_serializer - type: nvidia::holoscan::stream_playback::VideoStreamSerializer # inheriting from nvidia::gxf::EntitySerializer + type: nvidia::gxf::StdEntitySerializer parameters: component_serializers: [component_serializer] - type: MyRecorder @@ -781,7 +780,7 @@ components: ``` :::{note} -The Holoscan C++ API already provides the {cpp:class}`holoscan::VideoStreamSerializer` class 
which wraps the `nvidia::holoscan::stream_playback::VideoStreamSerializer` GXF component, used here as `serializer`. +The Holoscan C++ API already provides the {cpp:class}`holoscan::StdEntitySerializer` class which wraps the `nvidia::gxf::StdEntitySerializer` GXF component, used here as `serializer`. ::: #### Building your GXF operator @@ -812,28 +811,19 @@ Supporting Tensor Interoperability Consider the following example, where `GXFSendTensorOp` and `GXFReceiveTensorOp` are GXF operators, and where `ProcessTensorOp` is a C++ native operator: -```{mermaid} +```{digraph} interop :align: center :caption: The tensor interoperability between C++ native operator and GXF operator -%%{init: {"theme": "base", "themeVariables": { "fontSize": "16px"}} }%% - -classDiagram - direction LR + rankdir="LR" + node [shape=record]; - GXFSendTensorOp --|> ProcessTensorOp : signal...in - ProcessTensorOp --|> GXFReceiveTensorOp : out...signal + source [label="GXFSendTensorOp| |signal(out) : Tensor"]; + process [label="ProcessTensorOp| [in]in : TensorMap | out(out) : TensorMap "]; + sink [label="GXFReceiveTensorOp| [in]signal : Tensor | "]; - class GXFSendTensorOp { - signal(out) Tensor - } - class ProcessTensorOp { - [in]in : TensorMap - out(out) TensorMap - } - class GXFReceiveTensorOp { - [in]signal : Tensor - } + source->process [label="signal...in"] + process->sink [label="out...signal"] ``` The following code shows how to implement `ProcessTensorOp`'s `compute()` method as a C++ native operator communicating with GXF operators. Focus on the use of the `holoscan::gxf::Entity`: @@ -919,21 +909,21 @@ We will cover how to use {py:mod}`Conditions ` in the {ref} Typically, the `start()` and the `stop()` functions are only called once during the application's lifecycle. However, if the scheduling conditions are met again, the operator can be scheduled for execution, and the `start()` method will be called again. 
-```{mermaid} +```{digraph} lifecycle2 :align: center :caption: The sequence of method calls in the lifecycle of a Holoscan Operator -%%{init: {"theme": "base", "themeVariables": { "fontSize": "16px"}} }%% + rankdir="LR" -flowchart LR - start(start) - stop(stop) - compute(compute) + node [shape=Mrecord]; - start --> compute - compute --> compute - compute --> stop + start [label="start"] + compute [label="compute"] + stop [label="stop"] + start -> compute + compute -> compute + compute -> stop ``` We can override the default behavior of the operator by implementing the above methods. The following example shows how to implement a custom operator that overrides start, stop and compute methods. @@ -1572,28 +1562,19 @@ As described in the {ref}`Interoperability between GXF and native C++ operators< Consider the following example, where `VideoStreamReplayerOp` and `HolovizOp` are Python wrapped C++ operators, and where `ImageProcessingOp` is a Python native operator: -```{mermaid} +```{digraph} interop2 :align: center :caption: The tensor interoperability between Python native operator and C++\-based Python GXF operator -%%{init: {"theme": "base", "themeVariables": { "fontSize": "16px"}} }%% + rankdir="LR" + node [shape=record]; -classDiagram - direction LR + video [label="VideoStreamReplayerOp| |output_tensor(out) : Tensor"]; + processop [label="ImageProcessingOp| [in]input_tensor : dict[str,Tensor] | output_tensor(out) : dict[str,Tensor]"]; + viz [label="HolovizOp| [in]receivers : Tensor | "]; - VideoStreamReplayerOp --|> ImageProcessingOp : output_tensor...input_tensor - ImageProcessingOp --|> HolovizOp : output_tensor...receivers - - class VideoStreamReplayerOp { - output_tensor(out) Tensor - } - class ImageProcessingOp { - [in]input_tensor : dict[str,Tensor] - output_tensor(out) dict[str,Tensor] - } - class HolovizOp { - [in]receivers : Tensor - } + video->processop [label="output_tensor...input_tensor"] + processop->viz [label="output_tensor...receivers"] ``` The 
following code shows how to implement `ImageProcessingOp`'s `compute()` method as a Python native operator communicating with C++ operators: diff --git a/docs/holoscan_logging.md b/docs/holoscan_logging.md index 0872ce42..0e5f6c26 100644 --- a/docs/holoscan_logging.md +++ b/docs/holoscan_logging.md @@ -69,11 +69,6 @@ Under the hood, Holoscan SDK uses GXF to execute the computation graph. By defau For distributed applications, it can sometimes be useful to also enable additional logging for the UCX library used to transmit data between fragments. This can be done by setting the UCX environment variable `UCX_LOG_LEVEL` to one of: fatal, error, warn, info, debug, trace, req, data, async, func, poll. These have the behavior as described here: [UCX log levels](https://github.com/openucx/ucx/blob/v1.14.0/src/ucs/config/types.h#L16C1-L31). ::: -#### Precedence -If the `HOLOSCAN_LOG_LEVEL` environment variable is set, this setting is used to set the logging level. If the -environment variable is not set, then the application setting is used if available. If not, the SDK default setting -of INFO is used as the logging level. - ## Logger Format When a message is printed out, the default message format shows the message severity level, filename:linenumber, and @@ -144,9 +139,15 @@ For more details on custom formatting and details of each flag, please see the [ Additionally, at runtime, the user can also set the `HOLOSCAN_LOG_FORMAT` environment variable to modify the logger format. The accepted string pattern is the same as the string pattern for the `set_log_pattern()` api mentioned above. -:::{note} -If the `HOLOSCAN_LOG_FORMAT` environment variable is set, this setting is used to set the logger format. If the environment variable is not set, then the application setting is used if available. If not, the SDK default message format is used. 
-::: +#### Precedence of Logger Level and Logger Format + +The `HOLOSCAN_LOG_LEVEL` environment variable takes precedence and overrides the application settings, such as `Logger::set_log_level()` ({cpp:func}`C++ `/{py:func}`Python `). + +When `HOLOSCAN_LOG_LEVEL` is set, it determines the logging level. If this environment variable is unset, the application settings are used if they are available. Otherwise, the SDK's default logging level of INFO is applied. + +Similarly, the `HOLOSCAN_LOG_FORMAT` environment variable takes precedence and overrides the application settings, such as `Logger::set_log_pattern()` ({cpp:func}`C++ `/{py:func}`Python `). + +When `HOLOSCAN_LOG_FORMAT` is set, it determines the logging format. If this environment variable is unset, the application settings are used if they are available. Otherwise, the SDK's default logging format depending on the current log level (`FULL` format for `DEBUG` and `TRACE` log levels. `DEFAULT` format for other log levels) is applied. ## Calling the Logger in Your Application diff --git a/docs/holoscan_operators_extensions.md b/docs/holoscan_operators_extensions.md index 2734e1ba..350829a8 100644 --- a/docs/holoscan_operators_extensions.md +++ b/docs/holoscan_operators_extensions.md @@ -47,9 +47,7 @@ ___ ## Extensions The Holoscan SDK also includes some GXF extensions with GXF codelets, which are typically wrapped as operators, or present for legacy reasons. In addition to the core GXF extensions (std, cuda, serialization, multimedia) listed [here](gxf/doc/index.md), the Holoscan SDK includes the following GXF extensions: -- [bayer_demosaic](#bayer-demosaic) - [gxf_holoscan_wrapper](#gxf-holoscan-wrapper) -- [stream_playback](#stream-playback) - [ucx_holoscan](#ucx-holoscan) ### GXF Holoscan Wrapper @@ -58,16 +56,6 @@ The `gxf_holoscan_wrapper` extension includes the `holoscan::gxf::OperatorWrappe Learn more about it in the [Using Holoscan Operators in GXF Applications](gxf/gxf_wrap_holoscan_op.md) section. 
-### Stream Playback - -The `stream_playback` extension includes the `nvidia::holoscan::stream_playback::VideoStreamSerializer` entity serializer to/from a Tensor Object. -This extension does not include any codelets: reading and writing video stream (gxf entity files) from the disk was implemented as native operators with `VideoStreamRecorderOp` and `VideoStreamReplayerOp`, though they leverage the `VideoStreamSerializer` from this extension. - -:::{note} -The `VideoStreamSerializer` codelet is based on the `nvidia::gxf::StdEntitySerializer` with the addition of a `repeat` feature. -(If the `repeat` parameter is `true` and the frame count is out of the maximum frame index, unnecessary warning messages are printed with `nvidia::gxf::StdEntitySerializer`.) -::: - (ucx-holoscan)= ### UCX (Holoscan) @@ -81,4 +69,4 @@ ___ ### HoloHub -Visit the HoloHub repository to find a collection of additional Holoscan operators and extensions. +Visit the [HoloHub repository](https://github.com/nvidia-holoscan/holohub) to find a collection of additional Holoscan operators and extensions. diff --git a/docs/holoscan_packager.md b/docs/holoscan_packager.md index a48741ee..23ca55f1 100644 --- a/docs/holoscan_packager.md +++ b/docs/holoscan_packager.md @@ -52,34 +52,43 @@ Ensure the following are installed in the environment where you want to run the The Holoscan CLI is installed as part of the Holoscan SDK and can be called with the following instructions depending on your installation: -**If installed as a python wheel** + +`````{tab-set} +````{tab-item} Python Wheel - In a virtual environment: the `holoscan` CLI should already be in the PATH -- Globally: ensure that `$HOME/.local/bin` is added to your `PATH`. Run the following command make it available across sessions: +- System python: ensure that `$HOME/.local/bin` is added to your `PATH`. 
If using bash, the following command will make it persist across sessions: ```bash echo 'export PATH=$HOME/.local/bin:$PATH' >> ~/.bashrc ``` -**If installed as a debian package** +```` +````{tab-item} Debian Package -Ensure that `/opt/nvidia/holoscan/` is added to your `PATH`. Run the following command make it available across sessions: +Ensure that `/opt/nvidia/holoscan/` is added to your `PATH`. If using bash, the following command will make it persist across sessions: ```bash echo 'alias holoscan=/opt/nvidia/holoscan/bin/holoscan' >> ~/.bashrc ``` -**If built or installed from source (local only)** +```` +````{tab-item} From source + +If building the SDK from source and starting the build container with `run launch`, the `holoscan` CLI should already be in the PATH. -Ensure that `${BUILD_OR_INSTALL_DIR}/bin` is added to your `PATH`. Run the following command make it available across sessions: +If building bare-metal (advanced), ensure that `/bin` is added to your `PATH`. If using bash, the following command will make it persist across sessions: ```bash -echo 'alias holoscan=${BUILD_OR_INSTALL_DIR}/bin/holoscan' >> ~/.bashrc +echo 'alias holoscan=/bin/holoscan' >> ~/.bashrc ``` -:::{warning} -The Holoscan CLI is not available inside the [NGC Container](https://catalog.ngc.nvidia.com/orgs/nvidia/teams/clara-holoscan/containers/holoscan) nor the development container (from source). -::: +```` + +````{tab-item} NGC Container +The NGC container has the CLI installed already, no additional steps are required. 
+```` +````` ## Package an application diff --git a/docs/latency_tool.rst b/docs/latency_tool.rst index bf5c72f4..793cd02a 100644 --- a/docs/latency_tool.rst +++ b/docs/latency_tool.rst @@ -3,13 +3,13 @@ Video Pipeline Latency Tool =========================== -The Holoscan Developer Kits excel as a high-performance computing platform +The NVIDIA Developer Kits excel as a high-performance computing platform by combining high-bandwidth video I/O components and the compute capabilities of an NVIDIA GPU to meet the needs of the most demanding video processing and inference applications. For many video processing applications located at the edge--especially -those designed to augment medical instruments and aid live medical +those designed to augment medical instruments and aid live medical procedures--minimizing the latency added between image capture and display, often referred to as the end-to-end latency, is of the utmost importance. @@ -22,7 +22,7 @@ capture and display is incorporated as this usually involves external capture hardware (e.g. cameras and other sensors) and displays. In order to establish a baseline measurement of the minimal end-to-end latency -that can be achieved with the Holoscan Developer Kits and various video I/O +that can be achieved with the NVIDIA Developer Kits and various video I/O hardware and software components, the Holoscan SDK includes a sample latency measurement tool. @@ -32,7 +32,7 @@ Requirements Hardware ^^^^^^^^ -The latency measurement tool requires the use of a Holoscan Developer Kit in +The latency measurement tool requires the use of a NVIDIA Developer Kit in dGPU mode, and operates by having an output component generate a sequence of known video frames that are then transferred back to an input component using a physical loopback cable. @@ -203,7 +203,7 @@ GPU To Onboard HDMI Capture Card In this configuration, a DisplayPort to HDMI cable is connected from the GPU to the onboard HDMI capture card. 
This configuration supports the :ref:`OpenGL ` and :ref:`GStreamer ` producers, and -the :ref:`V4L2 ` and :ref:`GStreamer ` consumers. +the :ref:`V4L2 ` and :ref:`GStreamer ` consumers. .. figure:: images/latency_setup_gpu_to_onboard_hdmi.jpg :align: center @@ -273,7 +273,7 @@ back to an input **consumer** component using a physical loopback cable. Timestamps are compared throughout the life of the frame to measure the overall latency that the frame sees during this process, and these results are summarized when all of the frames have been received and the measurement -completes. See `Producers`_, `Consumers`_, and `Example Configurations`_ for +completes. See `Producers`_, `Consumers`_, and `Example Configurations`_ for more details. Frame Measurements @@ -678,7 +678,7 @@ V4L2 (Onboard HDMI Capture Card) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ This consumer (:code:`v4l2`) uses the V4L2 API directly in order to capture -frames using the HDMI capture card that is onboard the Holoscan Developer Kits. +frames using the HDMI capture card that is onboard some of the NVIDIA Developer Kits. V4L2 Consumer Notes: diff --git a/docs/overview.md b/docs/overview.md index 14675e6e..540451dc 100644 --- a/docs/overview.md +++ b/docs/overview.md @@ -36,7 +36,7 @@ Packaging and deploying applications is a complex problem that can require large As highlighted in the relevant technologies section, the soul of the Holoscan project is to achieve peak performance by leveraging hardware and software developed at NVIDIA or provided by third-parties. To validate this, Holoscan provides performance tools to help users and developers track their application performance. They currently include: -- a {ref}`Video Pipeline Latency Measurement Tool ` to measure and estimate the total end-to-end latency of a video streaming application including the video capture, processing, and output using various hardware and software components that are supported by the Holoscan Developer Kits. 
+- a {ref}`Video Pipeline Latency Measurement Tool ` to measure and estimate the total end-to-end latency of a video streaming application including the video capture, processing, and output using various hardware and software components that are supported by the NVIDIA Developer Kits. - the [Data Flow Tracking](./flow_tracking.md) feature to profile your application and analyze the data flow between operators in its graph. 8. **Documentation** diff --git a/docs/relevant_technologies.md b/docs/relevant_technologies.md index e0306c76..168ada47 100644 --- a/docs/relevant_technologies.md +++ b/docs/relevant_technologies.md @@ -13,7 +13,7 @@ The Holoscan SDK relies on multiple core technologies to achieve low latency and (gpudirect_rdma)= ## Rivermax and GPUDirect RDMA -The Holoscan Developer Kits can be used along with the [NVIDIA Rivermax SDK](https://developer.nvidia.com/networking/rivermax) to provide an extremely efficient network connection using the onboard [ConnectX](https://www.nvidia.com/en-us/networking/ethernet-adapters/) network adapter that is further optimized for GPU workloads by using [GPUDirect](https://developer.nvidia.com/gpudirect) for RDMA. This technology avoids unnecessary memory copies and CPU overhead by copying data directly to or from pinned GPU memory, and supports both the integrated GPU or the discrete GPU. +The NVIDIA Developer Kits equipped with a [ConnectX network adapter](https://www.nvidia.com/en-us/networking/ethernet-adapters/) can be used along with the [NVIDIA Rivermax SDK](https://developer.nvidia.com/networking/rivermax) to provide an extremely efficient network connection that is further optimized for GPU workloads by using [GPUDirect](https://developer.nvidia.com/gpudirect) for RDMA. This technology avoids unnecessary memory copies and CPU overhead by copying data directly to or from pinned GPU memory, and supports both the integrated GPU or the discrete GPU. 
:::{note} NVIDIA is also committed to supporting hardware vendors enable RDMA within their own drivers, an example of which is provided by the {ref}`aja_video_systems` as part of a partnership with @@ -38,7 +38,7 @@ GXF will be mentioned in many places across this user guide, including a {ref}`d (tensorrt)= ## TensorRT Optimized Inference -[NVIDIA TensorRT](https://developer.nvidia.com/tensorrt) is a deep learning inference framework based on CUDA that provided the highest optimizations to run on NVIDIA GPUs, including the Holoscan Developer Kits. +[NVIDIA TensorRT](https://developer.nvidia.com/tensorrt) is a deep learning inference framework based on CUDA that provided the highest optimizations to run on NVIDIA GPUs, including the NVIDIA Developer Kits. The {ref}`inference module` leverages TensorRT among other backends, and provides the ability to execute multiple inferences in parallel. diff --git a/docs/sdk_installation.md b/docs/sdk_installation.md index 19639950..7205b595 100644 --- a/docs/sdk_installation.md +++ b/docs/sdk_installation.md @@ -11,14 +11,14 @@ An alternative for the [IGX Orin Developer Kit](https://www.nvidia.com/en-us/edg ## Prerequisites `````{tab-set} -````{tab-item} Holoscan Developer Kits (aarch64) +````{tab-item} NVIDIA Developer Kits Set up your developer kit: Developer Kit | User Guide | OS | GPU Mode ------------- | ---------- | --- | --- [NVIDIA IGX Orin][igx] | [Guide][igx-guide] | [IGX Software][igx-sw] 1.0 DP | iGPU **or*** dGPU -[NVIDIA Jetson AGX Orin and Orin Nano][jetson-orin] | [Guide][jetson-guide] | [JetPack][jp] 6.0 | iGPU +[NVIDIA Jetson AGX Orin and Orin Nano][jetson-orin] | [Guide][jetson-guide] | [JetPack][jp] 6.0 | iGPU [NVIDIA Clara AGX][clara-agx]
_Only supporting the NGC container_ | [Guide][clara-guide] | [HoloPack][sdkm] 1.2 | iGPU **or*** dGPU [clara-agx]: https://www.nvidia.com/en-gb/clara/intelligent-medical-instruments @@ -35,14 +35,33 @@ Developer Kit | User Guide | OS | GPU Mode _* iGPU and dGPU can be used concurrently on a single developer kit in dGPU mode. See [details here](./use_igpu_with_dgpu.md)._ ```` -````{tab-item} x86_64 - -You'll need the following to use the Holoscan SDK on x86_64: -- OS: Ubuntu 22.04 (GLIBC >= 2.35) -- NVIDIA discrete GPU (dGPU) - - Ampere or above recommended for best performance - - [Quadro/NVIDIA RTX](https://www.nvidia.com/en-gb/design-visualization/desktop-graphics/) necessary for RDMA support - - Tested with [NVIDIA Quadro RTX 6000](https://www.nvidia.com/content/dam/en-zz/Solutions/design-visualization/quadro-product-literature/quadro-rtx-6000-us-nvidia-704093-r4-web.pdf) and [NVIDIA RTX A6000](https://www.nvidia.com/en-us/design-visualization/rtx-a6000/) +````{tab-item} NVIDIA SuperChips + +This version of the Holoscan SDK was tested on the Grace-Hopper SuperChip (GH200) with Ubuntu 22.04. Follow setup instructions [here](https://docs.nvidia.com/grace-ubuntu-install-guide.pdf). + +:::{attention} +Display is not supported on SBSA/superchips. You can however do headless rendering with [HoloViz](./visualization.md#holoviz-operator) for example. +::: + +```` +````{tab-item} x86_64 Workstations + +Supported x86_64 distributions: + +OS | NGC Container | Debian/RPM package | Python wheel | Build from source +-- | ------------- | -------------- | ------------ | ----------------- +**Ubuntu 22.04** | Yes | Yes | Yes | Yes +**RHEL 9.x** | Yes | No | No | No¹ +**Other Linux distros** | No² | No | No³ | No¹ + +¹ Not formally tested or supported, but expected to work if building bare metal with the adequate dependencies.
+² Not formally tested or supported, but expected to work if [supported by the NVIDIA container-toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/supported-platforms.html).<br>
+³ Not formally tested or supported, but expected to work if the glibc version of the distribution is 2.35 or above.<br>
+ +NVIDIA discrete GPU (dGPU) requirements: +- Ampere or above recommended for best performance +- [Quadro/NVIDIA RTX](https://www.nvidia.com/en-gb/design-visualization/desktop-graphics/) necessary for GPUDirect RDMA support +- Tested with [NVIDIA Quadro RTX 6000](https://www.nvidia.com/content/dam/en-zz/Solutions/design-visualization/quadro-product-literature/quadro-rtx-6000-us-nvidia-704093-r4-web.pdf) and [NVIDIA RTX A6000](https://www.nvidia.com/en-us/design-visualization/rtx-a6000/) - [NVIDIA dGPU drivers](https://docs.nvidia.com/datacenter/tesla/tesla-installation-notes): 535 or above ```` @@ -60,7 +79,7 @@ We provide multiple ways to install and run the Holoscan SDK: `````{tab-set} ````{tab-item} NGC Container -- **dGPU** (x86_64, IGX Orin dGPU, Clara AGX dGPU) +- **dGPU** (x86_64, IGX Orin dGPU, Clara AGX dGPU, GH200) ```bash docker pull nvcr.io/nvidia/clara-holoscan/holoscan:v1.0.3-dgpu ``` @@ -74,6 +93,7 @@ See details and usage instructions on [NGC][container]. - **IGX Orin**: Ensure the [compute stack is pre-installed](https://docs.nvidia.com/igx-orin/user-guide/latest/base-os.html#installing-the-compute-stack). - **Jetson**: Install the latest [CUDA keyring package](https://docs.nvidia.com/cuda/cuda-installation-guide-linux/#network-repo-installation-for-ubuntu) for `ubuntu2204/arm64`. +- **GH200**: Install the latest [CUDA keyring package](https://docs.nvidia.com/cuda/cuda-installation-guide-linux/#network-repo-installation-for-ubuntu) for `ubuntu2204/sbsa`. - **x86_64**: Install the latest [CUDA keyring package](https://docs.nvidia.com/cuda/cuda-installation-guide-linux/#network-repo-installation-for-ubuntu) for `ubuntu2204/x86_64`. Then, install the holoscan SDK: @@ -97,7 +117,7 @@ pip install holoscan See details and troubleshooting on [PyPI][pypi]. 
:::{note} -For x86_64, ensure that the CUDA Runtime is installed, whether through [the CUDA Toolkit debian installation](https://developer.nvidia.com/cuda-12-2-2-download-archive?target_os=Linux&target_arch=x86_64&Distribution=Ubuntu&target_version=22.04) or with `python3 -m pip install nvidia-cuda-runtime-cu12`. +For x86_64, ensure that the [CUDA Runtime is installed](https://developer.nvidia.com/cuda-12-2-2-download-archive?target_os=Linux&target_arch=x86_64&Distribution=Ubuntu&target_version=22.04). ::: ```` @@ -108,16 +128,14 @@ For x86_64, ensure that the CUDA Runtime is installed, whether through [the CUDA ### Not sure what to choose? -- The [**Holoscan container image on NGC**][container] it the safest way to ensure all the dependencies are present with the expected versions (including Torch and ONNX Runtime). It is the simplest way to run the embedded examples, while still allowing you to create your own C++ and Python Holoscan application on top of it. These benefits come at a cost: +- The [**Holoscan container image on NGC**][container] it the safest way to ensure all the dependencies are present with the expected versions (including Torch and ONNX Runtime), and should work on most Linux distributions. It is the simplest way to run the embedded examples, while still allowing you to create your own C++ and Python Holoscan application on top of it. These benefits come at a cost: - large image size from the numerous (some of them optional) dependencies. If you need a lean runtime image, see {ref}`section below`. - standard inconvenience that exist when using Docker, such as more complex run instructions for proper configuration. - - supporting the CLI require more work than the other solutions at this time. -- If you are confident in your ability to manage dependencies on your own in your host environment, the **Holoscan Debian package** should provide all the capabilities needed to use the Holoscan SDK. 
-- If you are not interested in the C++ API but just need to work in Python, or want to use a different version than Python 3.10, you can use the [**Holoscan python wheels**][pypi] on PyPI. While they are the easiest solution to install the SDK, it might require the most work to setup your environment with extra dependencies based on your needs. +- If you are confident in your ability to manage dependencies on your own in your host environment, the **Holoscan Debian package** should provide all the capabilities needed to use the Holoscan SDK, assuming you are on Ubuntu 22.04. +- If you are not interested in the C++ API but just need to work in Python, or want to use a different version than Python 3.10, you can use the [**Holoscan python wheels**][pypi] on PyPI. While they are the easiest solution to install the SDK, it might require the most work to setup your environment with extra dependencies based on your needs. Finally, they are only formally supported on Ubuntu 22.04, though should support other linux distributions with glibc 2.35 or above. | | NGC dev Container | Debian Package | Python Wheels | |---|:---:|:---:|:---:| -| | | | | | Runtime libraries | **Included** | **Included** | **Included** | | Python module | 3.10 | 3.10 | **3.8 to 3.11** | | C++ headers and
CMake config | **Included** | **Included** | N/A | @@ -131,7 +149,7 @@ For x86_64, ensure that the CUDA Runtime is installed, whether through [the CUDA | [Torch][torch] support [^7] | **Included** | require manual [^8]
installation | require manual [^8]
installation | | [ONNX Runtime][ort] support [^9] | **Included** | require manual [^10]
installation | require manual [^10]
installation | | [MOFED][mofed] support [^11] | **User space included**
Install kernel drivers on the host | require manual
installation | require manual
installation | -| [CLI] support | needs docker dind
with buildx plugin
on top of the image | needs docker w/
buildx plugin | needs docker w/
buildx plugin | +| [CLI] support | **Included** | needs docker w/
buildx plugin | needs docker w/
buildx plugin | [examples]: https://github.com/nvidia-holoscan/holoscan-sdk/blob/main/examples#readme [data]: https://catalog.ngc.nvidia.com/orgs/nvidia/teams/clara-holoscan/collections/clara_holoscan @@ -153,7 +171,7 @@ For x86_64, ensure that the CUDA Runtime is installed, whether through [the CUDA [^8]: To install LibTorch and TorchVision, either build them from source, download our [pre-built packages](https://edge.urm.nvidia.com/artifactory/sw-holoscan-thirdparty-generic-local/), or copy them from the holoscan container (in `/opt`). [^9]: ONNXRuntime 1.15.1+ needed for the Inference operator. Note that ONNX models are also supported through the TensoRT backend of the Inference Operator. [^10]: To install ONNXRuntime, either build it from source, download our [pre-built package](https://edge.urm.nvidia.com/artifactory/sw-holoscan-thirdparty-generic-local/) with CUDA 12 and TensoRT execution provider support, or copy it from the holoscan container (in `/opt/onnxruntime`). -[^11]: Tested with MOFED 23.07 +[^11]: Tested with MOFED 23.10 ### Need more control over the SDK? diff --git a/docs/set_up_gpudirect_rdma.md b/docs/set_up_gpudirect_rdma.md index ca5a458f..55fbd039 100644 --- a/docs/set_up_gpudirect_rdma.md +++ b/docs/set_up_gpudirect_rdma.md @@ -19,7 +19,7 @@ The following steps are required to ensure your ConnectX can be used for RDMA ov ### 1. 
Install MOFED drivers -Ensure the Mellanox OFED drivers version 23.07 or above are installed: +Ensure the Mellanox OFED drivers version 23.10 or above are installed: ```bash cat /sys/module/mlx5_core/version @@ -30,7 +30,7 @@ If not installed, or an older version, install the appropriate version from the ```bash # You can choose different versions/OS or download directly from the # Download Center in the webpage linked above -MOFED_VERSION="23.07-0.5.1.2" +MOFED_VERSION="23.10-2.1.3.1" OS="ubuntu22.04" MOFED_PACKAGE="MLNX_OFED_LINUX-${MOFED_VERSION}-${OS}-$(uname -m)" wget --progress=dot:giga https://www.mellanox.com/downloads/ofed/MLNX_OFED-${MOFED_VERSION}/${MOFED_PACKAGE}.tgz diff --git a/docs/use_igpu_with_dgpu.md b/docs/use_igpu_with_dgpu.md index 5062c6e5..21d4c3b9 100644 --- a/docs/use_igpu_with_dgpu.md +++ b/docs/use_igpu_with_dgpu.md @@ -1,6 +1,6 @@ -# Use both Integrated and Discrete GPUs on Holoscan developer kits +# Use both Integrated and Discrete GPUs on NVIDIA Developer Kits -Holoscan developer kits like the [NVIDIA IGX Orin](https://www.nvidia.com/en-us/edge-computing/products/igx/) or the [NVIDIA Clara AGX](https://www.nvidia.com/en-gb/clara/intelligent-medical-instruments/) have both a discrete GPU (dGPU - optional on IGX Orin) and an integrated GPU (iGPU - Tegra SoC). +NVIDIA Developer Kits like the [NVIDIA IGX Orin](https://www.nvidia.com/en-us/edge-computing/products/igx/) or the [NVIDIA Clara AGX](https://www.nvidia.com/en-gb/clara/intelligent-medical-instruments/) have both a discrete GPU (dGPU - optional on IGX Orin) and an integrated GPU (iGPU - Tegra SoC). 
As of this release, when these developer kits are flashed to leverage the dGPU, there are two limiting factors preventing the use of the iGPU: diff --git a/examples/CMakeLists.min.txt.in b/examples/CMakeLists.min.txt.in index 89a57a7d..d1e50e95 100644 --- a/examples/CMakeLists.min.txt.in +++ b/examples/CMakeLists.min.txt.in @@ -18,7 +18,11 @@ project(holoscan_examples) # Finds the package holoscan find_package(holoscan REQUIRED CONFIG - PATHS "/opt/nvidia/holoscan" "/workspace/holoscan-sdk/install") + PATHS "/opt/nvidia/holoscan" "/workspace/holoscan-sdk/install" + "/workspace/holoscan-sdk/install-x86_64" + "/workspace/holoscan-sdk/install-aarch64-dgpu" + "/workspace/holoscan-sdk/install-aarch64-igpu" + ) # Enable testing include(CTest) diff --git a/examples/CMakeLists.txt b/examples/CMakeLists.txt index 0d172fe4..2c1f7944 100644 --- a/examples/CMakeLists.txt +++ b/examples/CMakeLists.txt @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
# SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -50,13 +50,38 @@ endforeach() configure_file(CMakeLists.min.txt.in CMakeLists.min.txt @ONLY) install(FILES "${CMAKE_CURRENT_BINARY_DIR}/CMakeLists.min.txt" - RENAME "CMakeLists.txt" - DESTINATION examples - COMPONENT holoscan-examples + RENAME "CMakeLists.txt" + DESTINATION examples + COMPONENT holoscan-examples ) install(FILES README.md - DESTINATION examples - COMPONENT holoscan-examples + DESTINATION examples + COMPONENT holoscan-examples ) + +# Install CMake script to download example data from NGC +install(FILES ${CMAKE_SOURCE_DIR}/scripts/download_example_data + DESTINATION examples + PERMISSIONS OWNER_EXECUTE OWNER_WRITE OWNER_READ + COMPONENT holoscan-examples +) + +# Files for testing +install(FILES testing/run_example_tests + DESTINATION examples/testing + COMPONENT holoscan-examples + PERMISSIONS OWNER_EXECUTE OWNER_WRITE OWNER_READ +) + +install(DIRECTORY ${CMAKE_SOURCE_DIR}/tests/data/validation_frames + DESTINATION examples/testing + COMPONENT holoscan-examples +) + +install(FILES ${CMAKE_SOURCE_DIR}/tests/recorder.hpp + DESTINATION examples/testing + COMPONENT holoscan-examples +) + endif() diff --git a/examples/README.md b/examples/README.md index e36c1a48..150bdd31 100644 --- a/examples/README.md +++ b/examples/README.md @@ -1,8 +1,45 @@ # Holoscan SDK Examples -This directory contains examples to help users learn how to use the Holoscan SDK for development. See [HoloHub](https://nvidia-holoscan.github.io/holohub) to find additional reference applications. +This directory contains examples to help users learn how to use the Holoscan SDK for development. +See [HoloHub](https://nvidia-holoscan.github.io/holohub) to find additional reference applications. -## Core +## Build instructions + +- **From source**: See the [building guide](../DEVELOP.md) +- **Python wheels**: Download the python examples from GitHub, no building necessary. 
+- **NGC container or debian package**: the python examples and pre-built C++ examples are already included under `/opt/nvidia/holoscan/examples`. You can rebuild the C++ examples like so: + + ```sh + export src_dir="/opt/nvidia/holoscan/examples/" # Add "/cpp" to build a specific example + export build_dir="/opt/nvidia/holoscan/examples/build" # Or the path of your choice + cmake -S $src_dir -B $build_dir + cmake --build $build_dir -j + ``` + +## Run instructions + +See the README of each example for specific run instructions based on your installation type. + +## Test instructions + +- **From source**: See the [building guide](../DEVELOP.md#testing) +- **Python wheels**: not available +- **NGC container or debian package**: + - Running the following command will run the examples and compare the results with expected baselines. + + ```sh + ctest --test-dir $build_dir + ``` + + - To group building and testing: + + ```sh + /opt/nvidia/holoscan/examples/testing/run_example_tests + ``` + +## Example list + +### Core The following examples demonstrate the basics of the Holoscan core API, and are ideal for new users starting with the SDK: @@ -40,22 +77,21 @@ The following examples illustrate the use of specific resource classes that can ## Inference * [**Bring-Your-Own-Model**](bring_your_own_model): create a simple inference pipeline for ML applications - -## Working with third-party frameworks +### Working with third-party frameworks The following examples demonstrate how to seamlessly leverage third-party frameworks in holoscan applications: * [**NumPy native**](numpy_native): signal processing on the CPU using numpy arrays * [**CuPy native**](cupy_native): basic computation on the GPU using cupy arrays -## Sensors +### Sensors The following examples demonstrate how sensors can be used as input streams to your holoscan applications: * [**v4l2 camera**](v4l2_camera): for USB and HDMI input, such as USB cameras or HDMI output of laptop * [**AJA capture**](aja_capture):
for AJA capture cards -## GXF and Holoscan +### GXF and Holoscan * [**Tensor interop**](tensor_interop): use the `Entity` message to pass tensors to/from Holoscan operators wrapping GXF codelets in Holoscan applications * [**Wrap operator as GXF extension**](wrap_operator_as_gxf_extension): wrap Holoscan native operators as GXF codelets to use in GXF applications diff --git a/examples/bring_your_own_model/README.md b/examples/bring_your_own_model/README.md index 6211223c..e60cbe9d 100644 --- a/examples/bring_your_own_model/README.md +++ b/examples/bring_your_own_model/README.md @@ -27,7 +27,7 @@ through how to modify the python example code to run the application with an ult ``` * **using deb package install**: ```bash - # [Prerequisite] Download NGC dataset above to `DATA_DIR` + /opt/nvidia/holoscan/examples/download_example_data export HOLOSCAN_INPUT_PATH= export PYTHONPATH=/opt/nvidia/holoscan/python/lib # Need to enable write permission in the model directory to write the engine file (use with caution) diff --git a/examples/conditions/asynchronous/README.md b/examples/conditions/asynchronous/README.md index 7483df37..90415441 100644 --- a/examples/conditions/asynchronous/README.md +++ b/examples/conditions/asynchronous/README.md @@ -10,7 +10,8 @@ There are two operators involved in this example: The transmit operator will be asynchronous if `async_transmit: true` in `ping_async.yaml`. The receive operator will be asynchronous if `async_receive: true` in `ping_async.yaml`. -The multi-threaded scheduler will be used if `multithreaded: true` in `ping_async.yaml`. + +The scheduler to be used can be set via the `scheduler` entry in `ping_async.yaml`. It defaults to `event_based` (an event-based multi-thread scheduler), but can also be set to either `multi_thread` (polling-based) or `greedy` (single thread). 
*Visit the [SDK User Guide](https://docs.nvidia.com/holoscan/sdk-user-guide/components/conditions.html) to learn more about the Asynchronous Condition.* diff --git a/examples/conditions/asynchronous/cpp/CMakeLists.min.txt b/examples/conditions/asynchronous/cpp/CMakeLists.min.txt index 7633569a..0d25e198 100644 --- a/examples/conditions/asynchronous/cpp/CMakeLists.min.txt +++ b/examples/conditions/asynchronous/cpp/CMakeLists.min.txt @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the \"License\"); @@ -33,6 +33,14 @@ target_link_libraries(ping_async holoscan::ops::ping_tx ) +# Copy config file to the build tree +add_custom_target(ping_async_yaml + COMMAND ${CMAKE_COMMAND} -E copy "${CMAKE_CURRENT_SOURCE_DIR}/ping_async.yaml" ${CMAKE_CURRENT_BINARY_DIR} + DEPENDS "ping_async.yaml" + BYPRODUCTS "ping_async.yaml" +) +add_dependencies(ping_async ping_async_yaml) + # Testing if(BUILD_TESTING) add_test(NAME EXAMPLE_CPP_PING_ASYNC_TEST diff --git a/examples/conditions/asynchronous/cpp/ping_async.cpp b/examples/conditions/asynchronous/cpp/ping_async.cpp index bb364352..0e9f1f65 100644 --- a/examples/conditions/asynchronous/cpp/ping_async.cpp +++ b/examples/conditions/asynchronous/cpp/ping_async.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,6 +15,8 @@ * limitations under the License. 
*/ +#include + #include #include #include @@ -72,13 +74,22 @@ int main(int argc, char** argv) { app->set_async_receive(async_receive); app->set_async_transmit(async_transmit); - bool multithreaded = app->from_config("multithreaded").as(); - if (multithreaded) { + std::string scheduler = app->from_config("scheduler").as(); + holoscan::ArgList scheduler_args{holoscan::Arg("stop_on_deadlock", true), + holoscan::Arg("stop_on_deadlock_timeout", 500L)}; + if (scheduler == "multi_thread") { // use MultiThreadScheduler instead of the default GreedyScheduler - app->scheduler(app->make_scheduler( - "multithread-scheduler", - holoscan::Arg("stop_on_deadlock", true), - holoscan::Arg("stop_on_deadlock_timeout", 500L))); + app->scheduler(app->make_scheduler("MTS", scheduler_args)); + } else if (scheduler == "event_based") { + // use EventBasedScheduler instead of the default GreedyScheduler + app->scheduler(app->make_scheduler("EBS", scheduler_args)); + } else if (scheduler == "greedy") { + app->scheduler(app->make_scheduler("GS", scheduler_args)); + } else if (scheduler != "default") { + throw std::runtime_error(fmt::format( + "unrecognized scheduler option '{}', should be one of {'multi_thread', 'event_based', " + "'greedy', 'default'}", + scheduler)); } // run the application diff --git a/examples/conditions/asynchronous/cpp/ping_async.yaml b/examples/conditions/asynchronous/cpp/ping_async.yaml index 7b7bd900..a5d6b1b9 100644 --- a/examples/conditions/asynchronous/cpp/ping_async.yaml +++ b/examples/conditions/asynchronous/cpp/ping_async.yaml @@ -1,5 +1,5 @@ %YAML 1.2 -# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
# SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -18,4 +18,4 @@ async_receive: true async_transmit: false -multithreaded: true +scheduler: event_based # multi_thread, event_based or greedy diff --git a/examples/hello_world/cpp/hello_world.cpp b/examples/hello_world/cpp/hello_world.cpp index e8277316..38e943b9 100644 --- a/examples/hello_world/cpp/hello_world.cpp +++ b/examples/hello_world/cpp/hello_world.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -26,8 +26,7 @@ class HelloWorldOp : public Operator { HelloWorldOp() = default; - void setup(OperatorSpec& spec) override { - } + void setup(OperatorSpec& spec) override {} void compute(InputContext& op_input, OutputContext& op_output, ExecutionContext& context) override { @@ -39,7 +38,6 @@ class HelloWorldOp : public Operator { } // namespace holoscan::ops - class HelloWorldApp : public holoscan::Application { public: void compose() override { diff --git a/examples/holoviz/README.md b/examples/holoviz/README.md index 01db36ad..ba35c76d 100644 --- a/examples/holoviz/README.md +++ b/examples/holoviz/README.md @@ -29,8 +29,8 @@ The following dataset is used by this example: ``` * **using deb package install**: ```bash - # [Prerequisite] Download NGC dataset above to `DATA_DIR` - export HOLOSCAN_INPUT_PATH= + /opt/nvidia/holoscan/examples/download_example_data + export HOLOSCAN_INPUT_PATH=/opt/nvidia/holoscan/data python3 -m pip install numpy python3 -m pip install cupy-cuda12x export PYTHONPATH=/opt/nvidia/holoscan/python/lib diff --git a/examples/holoviz/cpp/CMakeLists.min.txt b/examples/holoviz/cpp/CMakeLists.min.txt index c0bb99d7..c49b345f 100644 --- 
a/examples/holoviz/cpp/CMakeLists.min.txt +++ b/examples/holoviz/cpp/CMakeLists.min.txt @@ -14,7 +14,7 @@ # limitations under the License. cmake_minimum_required(VERSION 3.20) -project(holoscan_hello_world CXX) +project(holoviz_examples_cpp CXX) # Finds the package holoscan find_package(holoscan REQUIRED CONFIG @@ -33,12 +33,63 @@ target_link_libraries(holoviz_geometry # Testing if(BUILD_TESTING) + set(RECORDING_DIR ${CMAKE_CURRENT_BINARY_DIR}/recording_output) + set(SOURCE_VIDEO_BASENAME holoviz_geometry_output) + set(VALIDATION_FRAMES_DIR ${CMAKE_SOURCE_DIR}/testing/validation_frames/holoviz_geometry/) + + file(MAKE_DIRECTORY ${RECORDING_DIR}) + + # Patch the current example to enable recording the rendering window + add_custom_command(OUTPUT holoviz_geometry_test.cpp + PRE_LINK + COMMAND patch -u -o holoviz_geometry_test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/holoviz_geometry.cpp + ${CMAKE_SOURCE_DIR}/testing/validation_frames/holoviz_geometry/cpp_holoviz_geometry.patch + ) + + # Create the test executable + add_executable(holoviz_geometry_test + holoviz_geometry_test.cpp + ) + + target_include_directories(holoviz_geometry_test + PRIVATE ${CMAKE_SOURCE_DIR}/testing) + + target_compile_definitions(holoviz_geometry_test + PRIVATE RECORD_OUTPUT RECORDING_DIR="${RECORDING_DIR}" + PRIVATE SOURCE_VIDEO_BASENAME="${SOURCE_VIDEO_BASENAME}" + ) + + target_link_libraries(holoviz_geometry_test + PRIVATE + holoscan::core + holoscan::ops::holoviz + holoscan::ops::video_stream_replayer + holoscan::ops::video_stream_recorder + holoscan::ops::format_converter + ) + + # Add the test and make sure it runs add_test(NAME EXAMPLE_CPP_HOLOVIZ_GEOMETRY_TEST - COMMAND ${CMAKE_CURRENT_BINARY_DIR}/holoviz_geometry --count 10 + COMMAND ${CMAKE_CURRENT_BINARY_DIR}/holoviz_geometry_test --count 10 WORKING_DIRECTORY ${CMAKE_BINARY_DIR} ) set_tests_properties(EXAMPLE_CPP_HOLOVIZ_GEOMETRY_TEST PROPERTIES PASS_REGULAR_EXPRESSION "Received camera pose:" PASS_REGULAR_EXPRESSION "Reach end of file or 
playback count reaches to the limit. Stop ticking." ) + + # Add a test to check the validity of the frames + add_test(NAME EXAMPLE_CPP_HOLOVIZ_GEOMETRY_RENDER_TEST + COMMAND python3 ${CMAKE_SOURCE_DIR}/../bin/video_validation.py + --source_video_dir ${RECORDING_DIR} + --source_video_basename ${SOURCE_VIDEO_BASENAME} + --output_dir ${RECORDING_DIR} + --validation_frames_dir ${VALIDATION_FRAMES_DIR} + WORKING_DIRECTORY ${CMAKE_BINARY_DIR} + ) + + set_tests_properties(EXAMPLE_CPP_HOLOVIZ_GEOMETRY_RENDER_TEST PROPERTIES + DEPENDS EXAMPLE_CPP_HOLOVIZ_GEOMETRY_TEST + PASS_REGULAR_EXPRESSION "Valid video output!" + ) endif() diff --git a/examples/holoviz/cpp/CMakeLists.txt b/examples/holoviz/cpp/CMakeLists.txt index 4ee8d796..5af45cfb 100644 --- a/examples/holoviz/cpp/CMakeLists.txt +++ b/examples/holoviz/cpp/CMakeLists.txt @@ -75,6 +75,9 @@ if(HOLOSCAN_BUILD_TESTS) holoviz_geometry_test.cpp ) + target_include_directories(holoviz_geometry_test + PRIVATE ${CMAKE_SOURCE_DIR}/tests) + target_compile_definitions(holoviz_geometry_test PRIVATE RECORD_OUTPUT RECORDING_DIR="${RECORDING_DIR}" PRIVATE SOURCE_VIDEO_BASENAME="${SOURCE_VIDEO_BASENAME}" diff --git a/examples/holoviz/cpp/holoviz_geometry.cpp b/examples/holoviz/cpp/holoviz_geometry.cpp index 5778b18e..d527f55a 100644 --- a/examples/holoviz/cpp/holoviz_geometry.cpp +++ b/examples/holoviz/cpp/holoviz_geometry.cpp @@ -240,6 +240,15 @@ class HolovizGeometryApp : public holoscan::Application { void compose() override { using namespace holoscan; + ArgList args; + auto data_directory = std::getenv("HOLOSCAN_INPUT_PATH"); + if (data_directory != nullptr && data_directory[0] != '\0') { + auto video_directory = std::filesystem::path(data_directory); + video_directory /= "racerx"; + args.add(Arg("directory", video_directory.string())); + HOLOSCAN_LOG_INFO("Using video from {}", video_directory.string()); + } + // Define the replayer, geometry source and holoviz operators auto replayer = make_operator("replayer", @@ -248,7 +257,8 @@ 
class HolovizGeometryApp : public holoscan::Application { Arg("frame_rate", 0.f), Arg("repeat", true), Arg("realtime", true), - Arg("count", count_)); + Arg("count", count_), + args); auto source = make_operator("source"); diff --git a/examples/holoviz/python/CMakeLists.min.txt b/examples/holoviz/python/CMakeLists.min.txt new file mode 100644 index 00000000..5831ef1b --- /dev/null +++ b/examples/holoviz/python/CMakeLists.min.txt @@ -0,0 +1,149 @@ +# SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# Holoviz Geometry Testing +if(BUILD_TESTING) + + # Testing holoviz_geometry + set(RECORDING_DIR ${CMAKE_CURRENT_BINARY_DIR}/recording_output) + set(SOURCE_VIDEO_BASENAME python_holoviz_geometry_output) + set(VALIDATION_FRAMES_DIR ${CMAKE_SOURCE_DIR}/testing/validation_frames/holoviz_geometry/) + + file(MAKE_DIRECTORY ${RECORDING_DIR}) + + # Patch the current example to enable recording the rendering window + add_custom_command(OUTPUT holoviz_geometry_test.py + PRE_LINK + COMMAND patch -u -o holoviz_geometry_test.py ${CMAKE_CURRENT_SOURCE_DIR}/holoviz_geometry.py + ${CMAKE_SOURCE_DIR}/testing/validation_frames/holoviz_geometry/python_holoviz_geometry.patch + COMMAND sed -i "s#RECORDING_DIR#${RECORDING_DIR}#g" holoviz_geometry_test.py + COMMAND sed -i "s#SOURCE_VIDEO_BASENAME#${SOURCE_VIDEO_BASENAME}#g" holoviz_geometry_test.py + ) + + add_custom_target(python_holoviz_geometry_test ALL + DEPENDS "holoviz_geometry_test.py" + ) + + add_test(NAME EXAMPLE_PYTHON_HOLOVIZ_GEOMETRY_TEST + COMMAND python3 holoviz_geometry_test.py --count 10 + WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR} + ) + + set_tests_properties(EXAMPLE_PYTHON_HOLOVIZ_GEOMETRY_TEST PROPERTIES + PASS_REGULAR_EXPRESSION "Received camera pose:" + PASS_REGULAR_EXPRESSION "Reach end of file or playback count reaches to the limit. Stop ticking." + ) + + # Add a test to check the validity of the frames + add_test(NAME EXAMPLE_PYTHON_HOLOVIZ_GEOMETRY_RENDER_TEST + COMMAND python3 ${CMAKE_SOURCE_DIR}/../bin/video_validation.py + --source_video_dir ${RECORDING_DIR} + --source_video_basename ${SOURCE_VIDEO_BASENAME} + --output_dir ${RECORDING_DIR} + --validation_frames_dir ${VALIDATION_FRAMES_DIR} + WORKING_DIRECTORY ${CMAKE_BINARY_DIR} + ) + + set_tests_properties(EXAMPLE_PYTHON_HOLOVIZ_GEOMETRY_RENDER_TEST PROPERTIES + DEPENDS EXAMPLE_PYTHON_HOLOVIZ_GEOMETRY_TEST + PASS_REGULAR_EXPRESSION "Valid video output!" 
+ ) + + # Testing holoviz_geometry_3d + set(SOURCE_VIDEO_BASENAME python_holoviz_geometry_3d_output) + set(VALIDATION_FRAMES_DIR ${CMAKE_SOURCE_DIR}/testing/validation_frames/holoviz_geometry_3d/) + + file(MAKE_DIRECTORY ${RECORDING_DIR}) + + # Patch the current example to enable recording the rendering window + add_custom_command(OUTPUT holoviz_geometry_3d_test.py + PRE_LINK + COMMAND patch -u -o holoviz_geometry_3d_test.py ${CMAKE_CURRENT_SOURCE_DIR}/holoviz_geometry_3d.py + ${CMAKE_SOURCE_DIR}/testing/validation_frames/holoviz_geometry_3d/python_holoviz_geometry_3d.patch + COMMAND sed -i "s#RECORDING_DIR#${RECORDING_DIR}#g" holoviz_geometry_3d_test.py + COMMAND sed -i "s#SOURCE_VIDEO_BASENAME#${SOURCE_VIDEO_BASENAME}#g" holoviz_geometry_3d_test.py + ) + + add_custom_target(python_holoviz_geometry_3d_test ALL + DEPENDS "holoviz_geometry_3d_test.py" + ) + + add_test(NAME EXAMPLE_PYTHON_HOLOVIZ_GEOMETRY_3D_TEST + COMMAND python3 holoviz_geometry_3d_test.py --count 10 + WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR} + ) + + set_tests_properties(EXAMPLE_PYTHON_HOLOVIZ_GEOMETRY_3D_TEST PROPERTIES + PASS_REGULAR_EXPRESSION "Scheduler finished." + ) + + # Add a test to check the validity of the frames + add_test(NAME EXAMPLE_PYTHON_HOLOVIZ_GEOMETRY_3D_RENDER_TEST + COMMAND python3 ${CMAKE_SOURCE_DIR}/../bin/video_validation.py + --source_video_dir ${RECORDING_DIR} + --source_video_basename ${SOURCE_VIDEO_BASENAME} + --output_dir ${RECORDING_DIR} + --validation_frames_dir ${VALIDATION_FRAMES_DIR} + WORKING_DIRECTORY ${CMAKE_BINARY_DIR} + ) + + set_tests_properties(EXAMPLE_PYTHON_HOLOVIZ_GEOMETRY_3D_RENDER_TEST PROPERTIES + DEPENDS EXAMPLE_PYTHON_HOLOVIZ_GEOMETRY_3D_TEST + PASS_REGULAR_EXPRESSION "Valid video output!" 
+ ) + + # Testing holoviz_views + set(SOURCE_VIDEO_BASENAME python_holoviz_views_output) + set(VALIDATION_FRAMES_DIR ${CMAKE_SOURCE_DIR}/testing/validation_frames/holoviz_views/) + + file(MAKE_DIRECTORY ${RECORDING_DIR}) + + # Patch the current example to enable recording the rendering window + add_custom_command(OUTPUT holoviz_views_test.py + PRE_LINK + COMMAND patch -u -o holoviz_views_test.py ${CMAKE_CURRENT_SOURCE_DIR}/holoviz_views.py + ${CMAKE_SOURCE_DIR}/testing/validation_frames/holoviz_views/python_holoviz_views.patch + COMMAND sed -i "s#RECORDING_DIR#${RECORDING_DIR}#g" holoviz_views_test.py + COMMAND sed -i "s#SOURCE_VIDEO_BASENAME#${SOURCE_VIDEO_BASENAME}#g" holoviz_views_test.py + ) + + add_custom_target(python_holoviz_views_test ALL + DEPENDS "holoviz_views_test.py" + ) + + add_test(NAME EXAMPLE_PYTHON_HOLOVIZ_VIEWS_TEST + COMMAND python3 holoviz_views_test.py --count 10 + WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR} + ) + + set_tests_properties(EXAMPLE_PYTHON_HOLOVIZ_VIEWS_TEST PROPERTIES + PASS_REGULAR_EXPRESSION "Reach end of file or playback count reaches to the limit. Stop ticking." + ) + + # Add a test to check the validity of the frames + add_test(NAME EXAMPLE_PYTHON_HOLOVIZ_VIEWS_RENDER_TEST + COMMAND python3 ${CMAKE_SOURCE_DIR}/../bin/video_validation.py + --source_video_dir ${RECORDING_DIR} + --source_video_basename ${SOURCE_VIDEO_BASENAME} + --output_dir ${RECORDING_DIR} + --validation_frames_dir ${VALIDATION_FRAMES_DIR} + WORKING_DIRECTORY ${CMAKE_BINARY_DIR} + ) + + set_tests_properties(EXAMPLE_PYTHON_HOLOVIZ_VIEWS_RENDER_TEST PROPERTIES + DEPENDS EXAMPLE_PYTHON_HOLOVIZ_VIEWS_TEST + PASS_REGULAR_EXPRESSION "Valid video output!" 
+ ) +endif() diff --git a/examples/holoviz/python/CMakeLists.txt b/examples/holoviz/python/CMakeLists.txt index 7597897b..ffb99c7f 100644 --- a/examples/holoviz/python/CMakeLists.txt +++ b/examples/holoviz/python/CMakeLists.txt @@ -95,6 +95,13 @@ install(FILES COMPONENT "holoscan-examples" ) +# Install the minimal CMakeLists.txt file +install(FILES CMakeLists.min.txt + RENAME "CMakeLists.txt" + DESTINATION "${app_relative_dest_path}" + COMPONENT holoscan-examples +) + # Holoviz Geometry 3D Testing if(HOLOSCAN_BUILD_TESTS) diff --git a/examples/multithread/README.md b/examples/multithread/README.md index 29c2c742..88bd252d 100644 --- a/examples/multithread/README.md +++ b/examples/multithread/README.md @@ -33,11 +33,11 @@ Then, run: ./examples/multithread/cpp/multithread ``` -To run with the default, greedy single-threaded scheduler, set `multithread: false` in `app_config.yaml`. +For the C++ application, the scheduler to be used can be set via the `scheduler` entry in `multithread.yaml`. It defaults to `event_based` (an event-based multithread scheduler), but can also be set to either `multi_thread` (polling-based) or `greedy` (single thread). ## Python API -- `multithread.py`: This example demonstrates how to configure and use a multi-threaded scheduler instead of the default single-threaded one. It involves three operators as described for the C++ API example described above. The primary difference is that instead of using a YAML file for the configuration variables, all values are set via the command line. +- `multithread.py`: This example demonstrates how to configure and use a multi-threaded scheduler instead of the default single-threaded one. It involves three operators as described for the C++ API example above. The primary difference is that instead of using a YAML file for the configuration variables, all values are set via the command line.
Call the script below with the `--help` option to get a full description of the command line parameters. By default a polling-based multithread scheduler will be used, but if `--event-based` is specified, the event-based multithread scheduler will be used instead. ### Build instructions @@ -50,5 +50,5 @@ First, go in your `build` or `install` directory (automatically done by `./run l Then, run the app with the options of your choice. For example, to use 8 worker threads to run 32 delay operators with delays ranging linearly from 0.2 to (0.2 + 0.05 * 31), one would set: ```bash -python3 ./examples/multithread/python/multithread.py --threads 8 --num_delay_ops 32 --delay 0.2 --delay_step 0.05 +python3 ./examples/multithread/python/multithread.py --threads 8 --num_delay_ops 32 --delay 0.2 --delay_step 0.05 --event-based ``` diff --git a/examples/multithread/cpp/multithread.cpp b/examples/multithread/cpp/multithread.cpp index 0c2d85fa..3abae8c9 100644 --- a/examples/multithread/cpp/multithread.cpp +++ b/examples/multithread/cpp/multithread.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -158,11 +158,23 @@ int main(int argc, char** argv) { app->set_delay(delay); app->set_delay_step(delay_step); - bool multithreaded = app->from_config("multithreaded").as(); - if (multithreaded) { + std::string scheduler = app->from_config("scheduler").as(); + if (scheduler == "multi_thread") { // use MultiThreadScheduler instead of the default GreedyScheduler app->scheduler(app->make_scheduler( - "multithread-scheduler", app->from_config("scheduler"))); + "multithread-scheduler", app->from_config("multi_thread_scheduler"))); + } else if (scheduler == "event_based") { + // use EventBasedScheduler instead of the default GreedyScheduler + app->scheduler(app->make_scheduler( + "event-based-scheduler", app->from_config("event_based_scheduler"))); + } else if (scheduler == "greedy") { + app->scheduler(app->make_scheduler( + "greedy-scheduler", app->from_config("greedy_scheduler"))); + } else if (scheduler != "default") { + throw std::runtime_error(fmt::format( + "unrecognized scheduler option '{}', should be one of {'multi_thread', 'event_based', " + "'greedy', 'default'}", + scheduler)); } app->run(); diff --git a/examples/multithread/cpp/multithread.yaml b/examples/multithread/cpp/multithread.yaml index b59c39af..76c6472c 100644 --- a/examples/multithread/cpp/multithread.yaml +++ b/examples/multithread/cpp/multithread.yaml @@ -1,5 +1,5 @@ %YAML 1.2 -# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
# SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,12 +17,21 @@ extensions: - libgxf_std.so -multithreaded: true +scheduler: event_based # event_based, multi_thread or greedy num_delay_ops: 32 delay: 0.1 delay_step: 0.01 -scheduler: +greedy_scheduler: + stop_on_deadlock: true + stop_on_deadlock_timeout: 500 + +multi_thread_scheduler: + worker_thread_number: 8 + stop_on_deadlock: true + stop_on_deadlock_timeout: 500 + +event_based_scheduler: worker_thread_number: 8 stop_on_deadlock: true stop_on_deadlock_timeout: 500 diff --git a/examples/multithread/python/CMakeLists.txt b/examples/multithread/python/CMakeLists.txt index 0a1d3917..1d18fc5d 100644 --- a/examples/multithread/python/CMakeLists.txt +++ b/examples/multithread/python/CMakeLists.txt @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
# SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -38,4 +38,11 @@ if(HOLOSCAN_BUILD_TESTS) ) set_tests_properties(EXAMPLE_PYTHON_MULTITHREAD_TEST PROPERTIES PASS_REGULAR_EXPRESSION "sum of received values: 496") + + add_test(NAME EXAMPLE_PYTHON_EVENT_BASED_TEST + COMMAND python3 multithread.py --event_based + WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR} + ) + set_tests_properties(EXAMPLE_PYTHON_MULTITHREAD_TEST PROPERTIES + PASS_REGULAR_EXPRESSION "sum of received values: 496") endif() diff --git a/examples/multithread/python/multithread.py b/examples/multithread/python/multithread.py index 787051ab..4f6aaaf8 100644 --- a/examples/multithread/python/multithread.py +++ b/examples/multithread/python/multithread.py @@ -21,7 +21,7 @@ from holoscan.conditions import CountCondition from holoscan.core import Application, Operator, OperatorSpec -from holoscan.schedulers import GreedyScheduler, MultiThreadScheduler +from holoscan.schedulers import EventBasedScheduler, GreedyScheduler, MultiThreadScheduler class PingTxOp(Operator): @@ -119,13 +119,14 @@ def compose(self): self.add_flow(d, rx, {("out_val", "values"), ("out_name", "names")}) -def main(threads, num_delays, delay, delay_step): +def main(threads, num_delays, delay, delay_step, event_based): app = ParallelPingApp(num_delays=num_delays, delay=delay, delay_step=delay_step) if threads == 0: # Explicitly setting GreedyScheduler is not strictly required as it is the default. scheduler = GreedyScheduler(app, name="greedy_scheduler") else: - scheduler = MultiThreadScheduler( + scheduler_class = EventBasedScheduler if event_based else MultiThreadScheduler + scheduler = scheduler_class( app, worker_thread_number=threads, stop_on_deadlock=True, @@ -148,9 +149,10 @@ def main(threads, num_delays, delay, delay_step): type=int, default=-1, help=( - "The number of threads to use for the multi-threaded scheduler. 
Set this to 0 to use " + "The number of threads to use for multi-threaded schedulers. Set this to 0 to use " "the default greedy scheduler instead. If set to -1, multiprocessing.cpu_count() " - "threads will be used." + "threads will be used. To use the event-based scheduler instead of the default " + "multi-thread scheduler, please specify --event_based." ), ) parser.add_argument( @@ -181,6 +183,15 @@ def main(threads, num_delays, delay, delay_step): "0 to (num_delay_ops - 1)." ), ) + parser.add_argument( + "--event_based", + action="store_true", + help=( + "Sets the application to use the event-based scheduler instead of the default " + "multi-thread scheduler when threads > 0." + ), + ) + args = parser.parse_args() if args.delay < 0: raise ValueError("delay must be non-negative") @@ -199,4 +210,5 @@ def main(threads, num_delays, delay, delay_step): num_delays=args.num_delay_ops, delay=args.delay, delay_step=args.delay_step, + event_based=args.event_based, ) diff --git a/examples/ping_distributed/README.md b/examples/ping_distributed/README.md index 84c3a429..5e1a62b4 100644 --- a/examples/ping_distributed/README.md +++ b/examples/ping_distributed/README.md @@ -3,7 +3,8 @@ This example demonstrates a distributed ping application with two operators connected using add_flow(). There are two operators involved in this example: - 1. a transmitter in Fragment 1 (`fragment1`), set to transmit a tensor map containing a single tensor named 'out' on its 'out' port. + + 1. a transmitter in Fragment 1 (`fragment1`), set to transmit a tensor map containing a single tensor named 'out' on its 'out' port. 2. a receiver in Fragment 2 (`fragment2`) that prints the received names and shapes of any received tensors to the terminal The `--gpu` command line argument can be provided to indicate that the tensor should be on the GPU instead of the host (CPU). The user can also override the default tensor shape and data type. 
Run the application with `-h` or `--help` to see full details of the additional supported arguments. @@ -16,25 +17,24 @@ Please refer to the [user guide](https://docs.nvidia.com/holoscan/sdk-user-guide ### Prerequisites -* **using deb package install**: - ```bash - # Set the application folder - APP_DIR=/opt/nvidia/holoscan/examples/ping_distributed/cpp - ``` +* **using deb package install or NGC container**: -* **from NGC container**: ```bash # Set the application folder APP_DIR=/opt/nvidia/holoscan/examples/ping_distributed/cpp ``` + * **source (dev container)**: + ```bash ./run launch # optional: append `install` for install tree (default: `build`) # Set the application folder APP_DIR=./examples/ping_distributed/cpp ``` + * **source (local env)**: + ```bash # Set the application folder APP_DIR=${BUILD_OR_INSTALL_DIR}/examples/ping_distributed/cpp @@ -65,41 +65,34 @@ Please refer to the [user guide](https://docs.nvidia.com/holoscan/sdk-user-guide ### Prerequisites * **using python wheel**: + ```bash - # [Prerequisite] Download NGC dataset above to `DATA_DIR` - export HOLOSCAN_INPUT_PATH= # [Prerequisite] Download example .py file below to `APP_DIR` # [Optional] Start the virtualenv where holoscan is installed # Set the application folder APP_DIR= ``` -* **using deb package install**: - ```bash - # [Prerequisite] Download NGC dataset above to `DATA_DIR` (e.g., `/opt/nvidia/data`) - export HOLOSCAN_INPUT_PATH= - export PYTHONPATH=/opt/nvidia/holoscan/python/lib - # Set the application folder - APP_DIR=/opt/nvidia/holoscan/examples/ping_distributed/python - ``` -* **from NGC container**: - ```bash - # HOLOSCAN_INPUT_PATH is set to /opt/nvidia/data by default +* **using deb package or NGC container**: + ```bash # Set the application folder APP_DIR=/opt/nvidia/holoscan/examples/ping_distributed/python ``` + * **source (dev container)**: + ```bash ./run launch # optional: append `install` for install tree (default: `build`) # Set the application folder 
APP_DIR=./examples/ping_distributed/python ``` + * **source (local env)**: + ```bash - export HOLOSCAN_INPUT_PATH=${SRC_DIR}/data export PYTHONPATH=${BUILD_OR_INSTALL_DIR}/python/lib # Set the application folder diff --git a/examples/ping_distributed/cpp/CMakeLists.txt b/examples/ping_distributed/cpp/CMakeLists.txt index d3c8ab62..3fd08853 100644 --- a/examples/ping_distributed/cpp/CMakeLists.txt +++ b/examples/ping_distributed/cpp/CMakeLists.txt @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -68,6 +68,7 @@ if(HOLOSCAN_BUILD_TESTS) ) set_tests_properties(EXAMPLE_CPP_PING_DISTRIBUTED_TEST PROPERTIES PASS_REGULAR_EXPRESSION "message 10: Tensor key: 'out', shape: \\(32, 64\\)" + FAIL_REGULAR_EXPRESSION "initialized independent of a parent entity" ) add_test(NAME EXAMPLE_CPP_PING_DISTRIBUTED_GPU_TEST @@ -76,6 +77,7 @@ if(HOLOSCAN_BUILD_TESTS) ) set_tests_properties(EXAMPLE_CPP_PING_DISTRIBUTED_GPU_TEST PROPERTIES PASS_REGULAR_EXPRESSION "message 10: Tensor key: 'out', shape: \\(32, 64\\)" + FAIL_REGULAR_EXPRESSION "initialized independent of a parent entity" ) add_test(NAME EXAMPLE_CPP_PING_DISTRIBUTED_HELP_STRING_TEST @@ -84,5 +86,6 @@ if(HOLOSCAN_BUILD_TESTS) ) set_tests_properties(EXAMPLE_CPP_PING_DISTRIBUTED_HELP_STRING_TEST PROPERTIES PASS_REGULAR_EXPRESSION "Usage: ping_distributed \\[OPTIONS\\]" + FAIL_REGULAR_EXPRESSION "initialized independent of a parent entity" ) endif() diff --git a/examples/ping_distributed/cpp/ping_distributed_ops.cpp b/examples/ping_distributed/cpp/ping_distributed_ops.cpp index b5534cb1..e4c92a06 100644 --- a/examples/ping_distributed/cpp/ping_distributed_ops.cpp +++ b/examples/ping_distributed/cpp/ping_distributed_ops.cpp @@ -1,5 +1,5 @@ /* - * 
SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -161,11 +161,13 @@ void PingTensorTxOp::compute(InputContext&, OutputContext& op_output, ExecutionC tensor_shape, dtype, bytes_per_element, strides, storage_type, allocator.value()); if (!result) { HOLOSCAN_LOG_ERROR("failed to generate tensor"); } - // Create Holoscan GXF tensor - auto holoscan_gxf_tensor = holoscan::gxf::GXFTensor(*gxf_tensor); - // Create Holoscan tensor - auto holoscan_tensor = holoscan_gxf_tensor.as_tensor(); + auto maybe_dl_ctx = (*gxf_tensor).toDLManagedTensorContext(); + if (!maybe_dl_ctx) { + HOLOSCAN_LOG_ERROR( + "failed to get std::shared_ptr<DLManagedTensorContext> from nvidia::gxf::Tensor"); + } + std::shared_ptr<holoscan::Tensor> holoscan_tensor = std::make_shared<holoscan::Tensor>(maybe_dl_ctx.value()); // insert tensor into the TensorMap out_message.insert({tensor_name_.get().c_str(), holoscan_tensor}); diff --git a/examples/ping_distributed/python/CMakeLists.txt b/examples/ping_distributed/python/CMakeLists.txt index f357f008..f9efcd91 100644 --- a/examples/ping_distributed/python/CMakeLists.txt +++ b/examples/ping_distributed/python/CMakeLists.txt @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -39,6 +39,7 @@ if(HOLOSCAN_BUILD_TESTS) set_tests_properties(EXAMPLE_PYTHON_PING_DISTRIBUTED_TEST PROPERTIES PASS_REGULAR_EXPRESSION "message 10: Tensor key: 'out', shape: \\(32, 64\\)" FAIL_REGULAR_EXPRESSION "AssertionError:" + FAIL_REGULAR_EXPRESSION "initialized independent of a parent entity" ) add_test(NAME EXAMPLE_PYTHON_PING_DISTRIBUTED_GPU_TEST @@ -48,6 +49,7 @@ if(HOLOSCAN_BUILD_TESTS) set_tests_properties(EXAMPLE_PYTHON_PING_DISTRIBUTED_GPU_TEST PROPERTIES PASS_REGULAR_EXPRESSION "message 10: Tensor key: 'out', shape: \\(32, 64\\)" FAIL_REGULAR_EXPRESSION "AssertionError:" + FAIL_REGULAR_EXPRESSION "initialized independent of a parent entity" ) add_test(NAME EXAMPLE_PYTHON_PING_DISTRIBUTED_HELP_STRING_TEST @@ -56,6 +58,7 @@ if(HOLOSCAN_BUILD_TESTS) ) set_tests_properties(EXAMPLE_PYTHON_PING_DISTRIBUTED_HELP_STRING_TEST PROPERTIES PASS_REGULAR_EXPRESSION "Usage: ping_distributed.py \\[OPTIONS\\]" + FAIL_REGULAR_EXPRESSION "initialized independent of a parent entity" ) add_test(NAME EXAMPLE_PYTHON_PING_DISTRIBUTED_INVALID_DEVICE_TEST @@ -67,5 +70,6 @@ if(HOLOSCAN_BUILD_TESTS) ENVIRONMENT "HOLOSCAN_UCX_DEVICE_ID=-5" PASS_REGULAR_EXPRESSION "GPUDevice value found and cached. dev_id: -5" PASS_REGULAR_EXPRESSION "cudaSetDevice Failed - 101, device id -5" + FAIL_REGULAR_EXPRESSION "initialized independent of a parent entity" ) endif() diff --git a/examples/ping_simple/cpp/ping_simple.cpp b/examples/ping_simple/cpp/ping_simple.cpp index 878f2415..beb7447c 100644 --- a/examples/ping_simple/cpp/ping_simple.cpp +++ b/examples/ping_simple/cpp/ping_simple.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -19,7 +19,6 @@ #include #include - class MyPingApp : public holoscan::Application { public: void compose() override { diff --git a/examples/ping_simple_run_async/cpp/ping_simple_run_async.cpp b/examples/ping_simple_run_async/cpp/ping_simple_run_async.cpp index 1d2b7272..d3a3fcb4 100644 --- a/examples/ping_simple_run_async/cpp/ping_simple_run_async.cpp +++ b/examples/ping_simple_run_async/cpp/ping_simple_run_async.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -42,7 +42,6 @@ int main(int argc, char** argv) { auto app = holoscan::make_application(); auto future = app->run_async(); HOLOSCAN_LOG_INFO("Application is running asynchronously."); - // Executing `future.wait();` here would block the main thread until the application finishes auto print_status = std::thread([&app, &future]() { // Wait for the application to finish @@ -65,7 +64,9 @@ int main(int argc, char** argv) { }); print_status.join(); // print status while application is running - future.wait(); + // Block until application is done and throw any exceptions + future.get(); + HOLOSCAN_LOG_INFO("Application has finished running."); return 0; } diff --git a/examples/ping_simple_run_async/python/ping_simple_run_async.py b/examples/ping_simple_run_async/python/ping_simple_run_async.py index 8b1d0cc6..c78fa512 100644 --- a/examples/ping_simple_run_async/python/ping_simple_run_async.py +++ b/examples/ping_simple_run_async/python/ping_simple_run_async.py @@ -62,5 +62,7 @@ def print_status(): print_status() # print status while application is running + # Block until application is done and raise any exceptions 
future.result() + print("# Application has finished running.") diff --git a/examples/tensor_interop/README.md b/examples/tensor_interop/README.md index 53dfa355..a6a707e7 100644 --- a/examples/tensor_interop/README.md +++ b/examples/tensor_interop/README.md @@ -55,8 +55,8 @@ The following dataset is used by this example: ``` * **using deb package install**: ```bash - # [Prerequisite] Download NGC dataset above to `DATA_DIR` - export HOLOSCAN_INPUT_PATH= + /opt/nvidia/holoscan/examples/download_example_data + export HOLOSCAN_INPUT_PATH=/opt/nvidia/holoscan/data python3 -m pip install cupy-cuda12x export PYTHONPATH=/opt/nvidia/holoscan/python/lib python3 /opt/nvidia/holoscan/examples/tensor_interop/python/tensor_interop.py diff --git a/examples/tensor_interop/cpp/receive_tensor_gxf.hpp b/examples/tensor_interop/cpp/receive_tensor_gxf.hpp index 991e760e..4a29b2fc 100644 --- a/examples/tensor_interop/cpp/receive_tensor_gxf.hpp +++ b/examples/tensor_interop/cpp/receive_tensor_gxf.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -26,7 +26,7 @@ #include "gxf/std/allocator.hpp" #include "gxf/std/codelet.hpp" -#include "gxf/std/parameter_parser_std.hpp" +#include "gxf/core/parameter_parser_std.hpp" #include "gxf/std/receiver.hpp" #include "gxf/std/tensor.hpp" diff --git a/examples/tensor_interop/cpp/send_tensor_gxf.hpp b/examples/tensor_interop/cpp/send_tensor_gxf.hpp index 05864b12..34238480 100644 --- a/examples/tensor_interop/cpp/send_tensor_gxf.hpp +++ b/examples/tensor_interop/cpp/send_tensor_gxf.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -25,7 +25,7 @@ #include "gxf/std/allocator.hpp" #include "gxf/std/codelet.hpp" -#include "gxf/std/parameter_parser_std.hpp" +#include "gxf/core/parameter_parser_std.hpp" #include "gxf/std/tensor.hpp" #include "gxf/std/transmitter.hpp" @@ -83,8 +83,8 @@ class SendTensor : public Codelet { } void* output_data_ptr = maybe_output_tensor.value()->pointer(); - CUDA_TRY(cudaMemset(output_data_ptr, value_, tensor_shape.size() * - gxf::PrimitiveTypeSize(element_type))); + CUDA_TRY(cudaMemset( + output_data_ptr, value_, tensor_shape.size() * gxf::PrimitiveTypeSize(element_type))); value_ = (value_ + 1) % 255; diff --git a/examples/tensor_interop/cpp/tensor_interop.cpp b/examples/tensor_interop/cpp/tensor_interop.cpp index c506016b..7bddc9af 100644 --- a/examples/tensor_interop/cpp/tensor_interop.cpp +++ b/examples/tensor_interop/cpp/tensor_interop.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -27,7 +27,6 @@ #include #include #include -#include #include #include "./receive_tensor_gxf.hpp" diff --git a/examples/tensor_interop/python/CMakeLists.min.txt b/examples/tensor_interop/python/CMakeLists.min.txt new file mode 100644 index 00000000..ca9410e8 --- /dev/null +++ b/examples/tensor_interop/python/CMakeLists.min.txt @@ -0,0 +1,65 @@ +# SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Testing +if(BUILD_TESTING) + + set(RECORDING_DIR ${CMAKE_CURRENT_BINARY_DIR}/recording_output) + set(SOURCE_VIDEO_BASENAME python_tensor_interop_output) + set(VALIDATION_FRAMES_DIR ${CMAKE_SOURCE_DIR}/testing/validation_frames/tensor_interop/) + + file(MAKE_DIRECTORY ${RECORDING_DIR}) + + file(READ ${CMAKE_CURRENT_SOURCE_DIR}/tensor_interop.yaml CONFIG_STRING) + string(REPLACE "count: 0" "count: 10" CONFIG_STRING ${CONFIG_STRING}) + string(APPEND CONFIG_STRING " enable_render_buffer_output: true\n\nrecorder:\n directory: \"${RECORDING_DIR}\"\n basename: \"${SOURCE_VIDEO_BASENAME}\"") + set(CONFIG_FILE ${CMAKE_CURRENT_BINARY_DIR}/python_tensor_interop_testing_config.yaml) + file(WRITE ${CONFIG_FILE} ${CONFIG_STRING}) + + # Patch the current example to enable recording the rendering window + add_custom_command(OUTPUT tensor_interop_test.py + PRE_LINK + COMMAND patch -u -o tensor_interop_test.py ${CMAKE_CURRENT_SOURCE_DIR}/tensor_interop.py + ${CMAKE_SOURCE_DIR}/testing/validation_frames/tensor_interop/python_tensor_interop.patch + ) + + add_custom_target(python_tensor_interop_test ALL + DEPENDS "tensor_interop_test.py" + ) + + add_test(NAME EXAMPLE_PYTHON_TENSOR_INTEROP_TEST + COMMAND python3 tensor_interop_test.py --config ${CONFIG_FILE} + WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR} + ) + set_tests_properties(EXAMPLE_PYTHON_TENSOR_INTEROP_TEST PROPERTIES + 
PASS_REGULAR_EXPRESSION "Reach end of file or playback count reaches to the limit. Stop ticking." + ) + + # Add a test to check the validity of the frames + add_test(NAME EXAMPLE_PYTHON_TENSOR_INTEROP_RENDER_TEST + COMMAND python3 ${CMAKE_SOURCE_DIR}/../bin/video_validation.py + --source_video_dir ${RECORDING_DIR} + --source_video_basename ${SOURCE_VIDEO_BASENAME} + --output_dir ${RECORDING_DIR} + --validation_frames_dir ${VALIDATION_FRAMES_DIR} + WORKING_DIRECTORY ${CMAKE_BINARY_DIR} + ) + + set_tests_properties(EXAMPLE_PYTHON_TENSOR_INTEROP_RENDER_TEST PROPERTIES + DEPENDS EXAMPLE_PYTHON_TENSOR_INTEROP_TEST + PASS_REGULAR_EXPRESSION "Valid video output!" + ) + +endif() diff --git a/examples/tensor_interop/python/CMakeLists.txt b/examples/tensor_interop/python/CMakeLists.txt index 36d07f8b..62fd082c 100644 --- a/examples/tensor_interop/python/CMakeLists.txt +++ b/examples/tensor_interop/python/CMakeLists.txt @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -39,6 +39,13 @@ install(FILES COMPONENT "holoscan-examples" ) +# Install the minimal CMakeLists.txt file +install(FILES CMakeLists.min.txt + RENAME "CMakeLists.txt" + DESTINATION "${app_relative_dest_path}" + COMPONENT holoscan-examples +) + # Testing if(HOLOSCAN_BUILD_TESTS) diff --git a/examples/testing/run_example_tests b/examples/testing/run_example_tests new file mode 100644 index 00000000..1a59a745 --- /dev/null +++ b/examples/testing/run_example_tests @@ -0,0 +1,31 @@ +#!/bin/bash +# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This script builds and runs the examples to make sure that the Holoscan SDK is correctly +# installed and examples can be built and run correctly +# NOTE: This is meant to be called from an installation of the SDK, not from the git repository + +# Get path to the examples. This assumes the directory is where the script is located +SCRIPT_DIR=$(dirname "$(readlink -f "$0")") +source_dir=$(realpath "$SCRIPT_DIR/..") + +# Compile the examples +build_dir=${source_dir}/examples-build +cmake -S ${source_dir} -B ${build_dir} +cmake --build ${build_dir} -j + +# Run CTest +ctest --test-dir ${build_dir} diff --git a/examples/v4l2_camera/CMakeLists.txt b/examples/v4l2_camera/CMakeLists.txt index 939fe2bd..35338b09 100644 --- a/examples/v4l2_camera/CMakeLists.txt +++ b/examples/v4l2_camera/CMakeLists.txt @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
# SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/examples/v4l2_camera/README.md b/examples/v4l2_camera/README.md index b056a82e..d5d04f54 100644 --- a/examples/v4l2_camera/README.md +++ b/examples/v4l2_camera/README.md @@ -17,43 +17,48 @@ If using a container outside the `run` script, add `--group-add video` and `--de ### Local Development Install the following dependency: + +```sh +sudo apt-get install libv4l-dev +``` + +To use `v4l2-ctl` for debugging, also install `v4l-utils`: + ```sh -sudo apt-get install libv4l-dev=1.18.0-2build1 +sudo apt-get install v4l-utils ``` If you do not have permissions to open the video device, run: + ```sh - sudo usermod -aG video $USER +sudo usermod -aG video $USER ``` ### Updating HDMI IN Firmware -Before using the HDMI IN device, please ensure that it has the latest firmware by following instructions from the [devkit guide](https://docs.nvidia.com/igx-orin/user-guide/latest/post-installation.html#updating-hdmi-in-input-firmware). +Before using the HDMI IN device on NVIDIA IGX or Clara AGX developer kits, please ensure that it has the latest firmware by following instructions from the [devkit guide](https://docs.nvidia.com/igx-orin/user-guide/latest/post-installation.html#updating-hdmi-in-input-firmware). ## Parameters There are a few parameters that can be specified: -* `device`: The mount point of the device (default=`"/dev/video0"`). -* `pixel_format`: The [V4L2 pixel format](https://docs.kernel.org/userspace-api/media/v4l/pixfmt-intro.html) of the device, as FourCC code (if not specified, app will auto select 'AR24' or 'YUYV' if supported by the device) -* `width`: The frame size width (if not specified, uses device default). Currently, only `V4L2_FRMSIZE_TYPE_DISCRETE` are supported. -* `height`: The frame size height (if not specified, uses device default). Currently, only `V4L2_FRMSIZE_TYPE_DISCRETE` are supported. 
- -**OBS:** Note that specifying both the `width` and `height` parameters will make the app use `BlockMemoryPool` rather than `UnboundedAllocator` which improves the latency (FPS), however -please ensure that your device supports that combination of `width` and `height` (see `v4l2-ctl --list-formats-ext` below) otherwise the application will fail to start. - -The parameters of the available V4L2-supported devices can be found with: -```sh -v4l2-ctl --list-devices -``` -followed by: -```sh -v4l2-ctl -d /dev/video0 --list-formats-ext -``` -If you do not have the `v4l2-ctl` app, it can be installed with (if running via Holoscan Docker image, already available): -```sh -sudo apt-get install v4l-utils -``` +* `device`: The mount point of the device + * Default: `"/dev/video0"` + * List available options with `v4l2-ctl --list-devices` +* `pixel_format`: The [V4L2 pixel format](https://docs.kernel.org/userspace-api/media/v4l/pixfmt-intro.html) of the device, as FourCC code + * Default: auto selects `AB24` or `YUYV` based on device support + * List available options with `v4l2-ctl -d /dev/ --list-formats` +* `width` and `height`: The frame dimensions + * Default: device default + * List available options with `v4l2-ctl -d /dev/ --list-formats-ext` +* `exposure_time`: The exposure time of the camera sensor in multiples of 100 μs (e.g. setting exposure_time to 100 is 10 ms) + * Default: auto exposure, or device default if auto is not supported + * List supported range with `v4l2-ctl -d /dev/ -L` +* `gain`: The gain of the camera sensor + * Default: auto gain, or device default if auto is not supported + * List supported range with `v4l2-ctl -d /dev/ -L` + +> Note that specifying both the `width` and `height` parameters to values supported by your device (see `v4l2-ctl --list-formats-ext`) will make the app use `BlockMemoryPool` rather than `UnboundedAllocator` which optimizes memory and should improve the latency (FPS). 
## Run Instructions diff --git a/examples/v4l2_camera/cpp/CMakeLists.min.txt b/examples/v4l2_camera/cpp/CMakeLists.min.txt index 7b759cd8..50cc80c9 100644 --- a/examples/v4l2_camera/cpp/CMakeLists.min.txt +++ b/examples/v4l2_camera/cpp/CMakeLists.min.txt @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the \"License\"); diff --git a/examples/v4l2_camera/cpp/CMakeLists.txt b/examples/v4l2_camera/cpp/CMakeLists.txt index dc56c002..b177f570 100644 --- a/examples/v4l2_camera/cpp/CMakeLists.txt +++ b/examples/v4l2_camera/cpp/CMakeLists.txt @@ -69,14 +69,8 @@ endif() # Testing option(HOLOSCAN_BUILD_V4L2_TESTS "Build tests for V4L2 loopback" OFF) if(HOLOSCAN_BUILD_TESTS AND HOLOSCAN_BUILD_V4L2_TESTS) - # Assumes that the v4l2 video loopback is mounted on /dev/video3. This allows us to create a - # a virtual video device and stream data from an mp4 file without the need for a physical - # video input device. To setup v4l2 video loopback, refer to the "Use with V4L2 Loopback Devices" - # section of the README file for this example - file(READ ${CMAKE_CURRENT_SOURCE_DIR}/v4l2_camera.yaml CONFIG_STRING) - string(REPLACE "device: \"/dev/video0\"" "device: \"/dev/video3\"" CONFIG_STRING "${CONFIG_STRING}") - set(CONFIG_FILE ${CMAKE_CURRENT_BINARY_DIR}/cpp_v4l2_camera_config.yaml) - file(WRITE ${CONFIG_FILE} "${CONFIG_STRING}") + # Assumes that the v4l2 video loopback has already been mounted and the yaml files have been + # updated to use the virtual loopback device. 
# Modify testcase to only run 10 frames add_custom_command(OUTPUT v4l2_camera_test.cpp @@ -100,9 +94,7 @@ if(HOLOSCAN_BUILD_TESTS AND HOLOSCAN_BUILD_V4L2_TESTS) add_dependencies(v4l2_camera_test racerx_data) add_test(NAME EXAMPLE_CPP_V4L2_CAMERA_TEST - COMMAND bash -c "ffmpeg -stream_loop -1 -re -i ${CMAKE_SOURCE_DIR}/data/racerx/racerx-small.mp4 \ - -pix_fmt yuyv422 -f v4l2 /dev/video3 & sleep 5; \ - ${CMAKE_CURRENT_BINARY_DIR}/v4l2_camera_test ${CONFIG_FILE}; echo 'Done'; kill %1" + COMMAND "${CMAKE_CURRENT_BINARY_DIR}/v4l2_camera_test" WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR} ) set_tests_properties(EXAMPLE_CPP_V4L2_CAMERA_TEST PROPERTIES diff --git a/examples/v4l2_camera/cpp/v4l2_camera.cpp b/examples/v4l2_camera/cpp/v4l2_camera.cpp index 7251ed94..e2773d6d 100644 --- a/examples/v4l2_camera/cpp/v4l2_camera.cpp +++ b/examples/v4l2_camera/cpp/v4l2_camera.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -50,9 +50,7 @@ class App : public holoscan::Application { auto allocator = make_resource("pool", 0, block_size, 1); source = make_operator( - "source", - from_config("source"), - Arg("allocator") = allocator); + "source", from_config("source"), Arg("allocator") = allocator); // Set Holoviz width and height from source resolution auto viz_args = from_config("visualizer"); @@ -61,8 +59,8 @@ class App : public holoscan::Application { else if (arg.name() == "height") viz_args.add(arg); } - visualizer = make_operator( - "visualizer", viz_args, Arg("allocator") = allocator); + visualizer = + make_operator("visualizer", viz_args, Arg("allocator") = allocator); } else { // width and height not given, use UnboundedAllocator (worse latency) source = make_operator( @@ -83,9 +81,7 @@ int main(int argc, char** argv) { // Get the configuration auto config_path = std::filesystem::canonical(argv[0]).parent_path(); config_path += "/v4l2_camera.yaml"; - if ( argc >= 2 ) { - config_path = argv[1]; - } + if (argc >= 2) { config_path = argv[1]; } app.config(config_path); app.run(); @@ -94,4 +90,3 @@ int main(int argc, char** argv) { return 0; } - diff --git a/examples/v4l2_camera/cpp/v4l2_camera.yaml b/examples/v4l2_camera/cpp/v4l2_camera.yaml index cbf9daec..9e2c2605 100644 --- a/examples/v4l2_camera/cpp/v4l2_camera.yaml +++ b/examples/v4l2_camera/cpp/v4l2_camera.yaml @@ -1,5 +1,5 @@ %YAML 1.2 -# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,7 +15,7 @@ # limitations under the License. --- source: # V4L2VideoCaptureOp - # | Input device. On devkit, /dev/video0 is generally HDMI in + # | Input device. 
On Clara AGX or NVIDIA IGX devkits, /dev/video0 is generally the HDMI IN device: "/dev/video0" # | App will auto-select default width and height if not provided @@ -26,6 +26,13 @@ source: # V4L2VideoCaptureOp # | App will auto-select the default "pixel_format" for your device if not provided. # | See this app's readme file for details. - # pixel_format: "AR24" + # pixel_format: "AB24" + + # | These properties might not be supported for all v4l2 nodes. + # | The app will attempt to do auto exposure and gain if not provided. If auto is not supported, + # | it will use the defaults defined by your device. + # | See this app's readme file for details. + # exposure_time: 500 + # gain: 100 visualizer: # Holoviz diff --git a/examples/v4l2_camera/python/CMakeLists.txt b/examples/v4l2_camera/python/CMakeLists.txt index fc7df49d..11016497 100644 --- a/examples/v4l2_camera/python/CMakeLists.txt +++ b/examples/v4l2_camera/python/CMakeLists.txt @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -43,14 +43,8 @@ install(FILES # Testing option(HOLOSCAN_BUILD_V4L2_TESTS "Build tests for V4L2 loopback" OFF) if(HOLOSCAN_BUILD_TESTS AND HOLOSCAN_BUILD_V4L2_TESTS) - # Assumes that the v4l2 video loopback is mounted on /dev/video3. This allows us to create a - # a virtual video device and stream data from an mp4 file without the need for a physical - # video input device. 
To setup v4l2 video loopback, refer to the "Use with V4L2 Loopback Devices" - # section of the README file for this example - file(READ ${CMAKE_CURRENT_SOURCE_DIR}/v4l2_camera.yaml CONFIG_STRING) - string(REPLACE "device: \"/dev/video0\"" "device: \"/dev/video3\"" CONFIG_STRING "${CONFIG_STRING}") - set(CONFIG_FILE ${CMAKE_CURRENT_BINARY_DIR}/python_v4l2_camera_config.yaml) - file(WRITE ${CONFIG_FILE} "${CONFIG_STRING}") + # Assumes that the v4l2 video loopback has already been mounted and the yaml files have been + # updated to use the virtual loopback device. # Modify testcase to only run 10 frames file(READ ${CMAKE_CURRENT_SOURCE_DIR}/v4l2_camera.py PYTHON_SOURCE_STRING) @@ -61,9 +55,7 @@ if(HOLOSCAN_BUILD_TESTS AND HOLOSCAN_BUILD_V4L2_TESTS) file(WRITE ${PYTHON_SOURCE_FILE} "${PYTHON_SOURCE_STRING}") add_test(NAME EXAMPLE_PYTHON_V4L2_CAMERA_TEST - COMMAND bash -c "ffmpeg -stream_loop -1 -re -i ${CMAKE_SOURCE_DIR}/data/racerx/racerx-small.mp4 \ - -pix_fmt yuyv422 -f v4l2 /dev/video3 & sleep 5; \ - python3 v4l2_camera_test.py --config ${CONFIG_FILE}; echo 'Done'; kill %1" + COMMAND python3 v4l2_camera_test.py WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR} ) diff --git a/examples/v4l2_camera/python/v4l2_camera.yaml b/examples/v4l2_camera/python/v4l2_camera.yaml index cbf9daec..9e2c2605 100644 --- a/examples/v4l2_camera/python/v4l2_camera.yaml +++ b/examples/v4l2_camera/python/v4l2_camera.yaml @@ -1,5 +1,5 @@ %YAML 1.2 -# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,7 +15,7 @@ # limitations under the License. --- source: # V4L2VideoCaptureOp - # | Input device. On devkit, /dev/video0 is generally HDMI in + # | Input device. 
On Clara AGX or NVIDIA IGX devkits, /dev/video0 is generally the HDMI IN device: "/dev/video0" # | App will auto-select default width and height if not provided @@ -26,6 +26,13 @@ source: # V4L2VideoCaptureOp # | App will auto-select the default "pixel_format" for your device if not provided. # | See this app's readme file for details. - # pixel_format: "AR24" + # pixel_format: "AB24" + + # | These properties might not be supported for all v4l2 nodes. + # | The app will attempt to do auto exposure and gain if not provided. If auto is not supported, + # | it will use the defaults defined by your device. + # | See this app's readme file for details. + # exposure_time: 500 + # gain: 100 visualizer: # Holoviz diff --git a/examples/video_replayer/README.md b/examples/video_replayer/README.md index 56a4afba..3becbb15 100644 --- a/examples/video_replayer/README.md +++ b/examples/video_replayer/README.md @@ -1,6 +1,8 @@ # Video Replayer -Minimal example to demonstrate the use of the video stream replayer operator to load video from disk. The video frames need to have been converted to a gxf entity format, as shown [here](../../scripts/README.md#convert_video_to_gxf_entitiespy). +Minimal example to demonstrate the use of the video stream replayer operator to load video from disk. + +The video frames need to have been converted to a gxf entity format to use as input. You can use the `convert_video_to_gxf_entities.py` script installed in `/opt/nvidia/holoscan/bin` or available [on GitHub](https://github.com/nvidia-holoscan/holoscan-sdk/tree/main/scripts#convert_video_to_gxf_entitiespy) (tensors will be loaded on the GPU). 
> Note: Support for H264 stream support is in progress and can be found on [HoloHub](https://nvidia-holoscan.github.io/holohub) @@ -15,13 +17,12 @@ The following dataset is used by this example: * **using deb package install**: ```bash - # [Prerequisite] Download NGC dataset above to `/opt/nvidia/data` - cd /opt/nvidia/holoscan # to find dataset + /opt/nvidia/holoscan/examples/download_example_data + export HOLOSCAN_INPUT_PATH=/opt/nvidia/holoscan/data ./examples/video_replayer/cpp/video_replayer ``` * **from NGC container**: ```bash - cd /opt/nvidia/holoscan # to find dataset ./examples/video_replayer/cpp/video_replayer ``` * **source (dev container)**: @@ -47,8 +48,8 @@ The following dataset is used by this example: ``` * **using deb package install**: ```bash - # [Prerequisite] Download NGC dataset above to `DATA_DIR` - export HOLOSCAN_INPUT_PATH= + /opt/nvidia/holoscan/examples/download_example_data + export HOLOSCAN_INPUT_PATH=/opt/nvidia/holoscan/data export PYTHONPATH=/opt/nvidia/holoscan/python/lib python3 /opt/nvidia/holoscan/examples/video_replayer/python/video_replayer.py ``` diff --git a/examples/video_replayer/cpp/CMakeLists.min.txt b/examples/video_replayer/cpp/CMakeLists.min.txt index 5064e855..75b7349c 100644 --- a/examples/video_replayer/cpp/CMakeLists.min.txt +++ b/examples/video_replayer/cpp/CMakeLists.min.txt @@ -42,16 +42,67 @@ add_dependencies(video_replayer video_replayer_yaml) # Testing if(BUILD_TESTING) + set(RECORDING_DIR ${CMAKE_CURRENT_BINARY_DIR}/recording_output) + set(SOURCE_VIDEO_BASENAME video_replayer_output) + set(VALIDATION_FRAMES_DIR ${CMAKE_SOURCE_DIR}/testing/validation_frames/video_replayer/) + + file(MAKE_DIRECTORY ${RECORDING_DIR}) + file(READ ${CMAKE_CURRENT_SOURCE_DIR}/video_replayer.yaml CONFIG_STRING) - string(REPLACE "count: 0" "count: 10" CONFIG_STRING ${CONFIG_STRING}) + string(REPLACE "count: 0" "count: 10" CONFIG_STRING "${CONFIG_STRING}") set(CONFIG_FILE ${CMAKE_CURRENT_BINARY_DIR}/cpp_video_replayer_config.yaml) 
- file(WRITE ${CONFIG_FILE} ${CONFIG_STRING}) + file(WRITE ${CONFIG_FILE} "${CONFIG_STRING}") + + # Patch the current example to enable recording the rendering window + add_custom_command(OUTPUT video_replayer_test.cpp + PRE_LINK + COMMAND patch -u -o video_replayer_test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/video_replayer.cpp + ${CMAKE_SOURCE_DIR}/testing/validation_frames/video_replayer/cpp_video_replayer.patch + ) + # Create the test executable + add_executable(video_replayer_test + video_replayer_test.cpp + ) + + target_include_directories(video_replayer_test + PRIVATE ${CMAKE_SOURCE_DIR}/testing) + + target_compile_definitions(video_replayer_test + PRIVATE RECORD_OUTPUT RECORDING_DIR="${RECORDING_DIR}" + PRIVATE SOURCE_VIDEO_BASENAME="${SOURCE_VIDEO_BASENAME}" + ) + + target_link_libraries(video_replayer_test + PRIVATE + holoscan::core + holoscan::ops::holoviz + holoscan::ops::video_stream_replayer + holoscan::ops::video_stream_recorder + holoscan::ops::format_converter + ) + + # Add the test and make sure it runs add_test(NAME EXAMPLE_CPP_VIDEO_REPLAYER_TEST - COMMAND ${CMAKE_CURRENT_BINARY_DIR}/video_replayer ${CONFIG_FILE} + COMMAND ${CMAKE_CURRENT_BINARY_DIR}/video_replayer_test ${CONFIG_FILE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR} ) set_tests_properties(EXAMPLE_CPP_VIDEO_REPLAYER_TEST PROPERTIES PASS_REGULAR_EXPRESSION "Reach end of file or playback count reaches to the limit. Stop ticking." ) + + # Add a test to check the validity of the frames + add_test(NAME EXAMPLE_CPP_VIDEO_REPLAYER_RENDER_TEST + COMMAND python3 ${CMAKE_SOURCE_DIR}/../bin/video_validation.py + --source_video_dir ${RECORDING_DIR} + --source_video_basename ${SOURCE_VIDEO_BASENAME} + --output_dir ${RECORDING_DIR} + --validation_frames_dir ${VALIDATION_FRAMES_DIR} + WORKING_DIRECTORY ${CMAKE_BINARY_DIR} + ) + + set_tests_properties(EXAMPLE_CPP_VIDEO_REPLAYER_RENDER_TEST PROPERTIES + DEPENDS EXAMPLE_CPP_VIDEO_REPLAYER_TEST + PASS_REGULAR_EXPRESSION "Valid video output!" 
+ ) endif() diff --git a/examples/video_replayer/cpp/CMakeLists.txt b/examples/video_replayer/cpp/CMakeLists.txt index 672f30f6..a5cd547f 100644 --- a/examples/video_replayer/cpp/CMakeLists.txt +++ b/examples/video_replayer/cpp/CMakeLists.txt @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -93,6 +93,9 @@ if(HOLOSCAN_BUILD_TESTS) video_replayer_test.cpp ) + target_include_directories(video_replayer_test + PRIVATE ${CMAKE_SOURCE_DIR}/tests) + target_compile_definitions(video_replayer_test PRIVATE RECORD_OUTPUT RECORDING_DIR="${RECORDING_DIR}" PRIVATE SOURCE_VIDEO_BASENAME="${SOURCE_VIDEO_BASENAME}" diff --git a/examples/video_replayer/cpp/video_replayer.cpp b/examples/video_replayer/cpp/video_replayer.cpp index 7f8ba5db..01eb52b7 100644 --- a/examples/video_replayer/cpp/video_replayer.cpp +++ b/examples/video_replayer/cpp/video_replayer.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -14,6 +14,7 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ +#include #include #include @@ -24,8 +25,18 @@ class VideoReplayerApp : public holoscan::Application { void compose() override { using namespace holoscan; + // Sets the data directory to use from the environment variable if it is set + ArgList args; + auto data_directory = std::getenv("HOLOSCAN_INPUT_PATH"); + if (data_directory != nullptr && data_directory[0] != '\0') { + auto video_directory = std::filesystem::path(data_directory); + video_directory /= "racerx"; + args.add(Arg("directory", video_directory.string())); + } + // Define the replayer and holoviz operators and configure using yaml configuration - auto replayer = make_operator("replayer", from_config("replayer")); + auto replayer = + make_operator("replayer", from_config("replayer"), args); auto visualizer = make_operator("holoviz", from_config("holoviz")); // Define the workflow: replayer -> holoviz @@ -37,9 +48,7 @@ int main(int argc, char** argv) { // Get the yaml configuration file auto config_path = std::filesystem::canonical(argv[0]).parent_path(); config_path /= std::filesystem::path("video_replayer.yaml"); - if ( argc >= 2 ) { - config_path = argv[1]; - } + if (argc >= 2) { config_path = argv[1]; } auto app = holoscan::make_application(); app->config(config_path); diff --git a/examples/video_replayer/python/CMakeLists.min.txt b/examples/video_replayer/python/CMakeLists.min.txt new file mode 100644 index 00000000..316b15a9 --- /dev/null +++ b/examples/video_replayer/python/CMakeLists.min.txt @@ -0,0 +1,67 @@ +# SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Testing +if(BUILD_TESTING) + + set(RECORDING_DIR ${CMAKE_CURRENT_BINARY_DIR}/recording_output) + set(SOURCE_VIDEO_BASENAME python_video_replayer_output) + set(VALIDATION_FRAMES_DIR ${CMAKE_SOURCE_DIR}/testing/validation_frames/video_replayer/) + + file(MAKE_DIRECTORY ${RECORDING_DIR}) + + file(READ ${CMAKE_CURRENT_SOURCE_DIR}/video_replayer.yaml CONFIG_STRING) + string(REPLACE "count: 0" "count: 10" CONFIG_STRING ${CONFIG_STRING}) + string(APPEND CONFIG_STRING " enable_render_buffer_output: true\n\nrecorder:\n directory: \"${RECORDING_DIR}\"\n basename: \"${SOURCE_VIDEO_BASENAME}\"") + set(CONFIG_FILE ${CMAKE_CURRENT_BINARY_DIR}/python_video_replayer_config.yaml) + file(WRITE ${CONFIG_FILE} ${CONFIG_STRING}) + + # Patch the current example to enable recording the rendering window + add_custom_command(OUTPUT video_replayer_test.py + PRE_LINK + COMMAND patch -u -o video_replayer_test.py ${CMAKE_CURRENT_SOURCE_DIR}/video_replayer.py + ${CMAKE_SOURCE_DIR}/testing/validation_frames/video_replayer/python_video_replayer.patch + ) + + add_custom_target(python_video_replayer_test ALL + DEPENDS "video_replayer_test.py" + ) + + add_test(NAME EXAMPLE_PYTHON_VIDEO_REPLAYER_TEST + COMMAND python3 video_replayer_test.py --config python_video_replayer_config.yaml + WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR} + ) + + set_tests_properties(EXAMPLE_PYTHON_VIDEO_REPLAYER_TEST PROPERTIES + DEPENDS "video_replayer_test.py" + PASS_REGULAR_EXPRESSION "Reach end of file or playback count reaches to the limit. Stop ticking." 
+ ) + + # Add a test to check the validity of the frames + add_test(NAME EXAMPLE_PYTHON_VIDEO_REPLAYER_RENDER_TEST + COMMAND python3 ${CMAKE_SOURCE_DIR}/../bin/video_validation.py + --source_video_dir ${RECORDING_DIR} + --source_video_basename ${SOURCE_VIDEO_BASENAME} + --output_dir ${RECORDING_DIR} + --validation_frames_dir ${VALIDATION_FRAMES_DIR} + WORKING_DIRECTORY ${CMAKE_BINARY_DIR} + ) + + set_tests_properties(EXAMPLE_PYTHON_VIDEO_REPLAYER_RENDER_TEST PROPERTIES + DEPENDS EXAMPLE_PYTHON_VIDEO_REPLAYER_TEST + PASS_REGULAR_EXPRESSION "Valid video output!" + ) + +endif() diff --git a/examples/video_replayer/python/CMakeLists.txt b/examples/video_replayer/python/CMakeLists.txt index 4115dfbd..cbc55e2c 100644 --- a/examples/video_replayer/python/CMakeLists.txt +++ b/examples/video_replayer/python/CMakeLists.txt @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -40,6 +40,13 @@ install(FILES COMPONENT "holoscan-examples" ) +# Install the minimal CMakeLists.txt file +install(FILES CMakeLists.min.txt + RENAME "CMakeLists.txt" + DESTINATION "${app_relative_dest_path}" + COMPONENT holoscan-examples +) + # Testing if(HOLOSCAN_BUILD_TESTS) diff --git a/examples/video_replayer_distributed/README.md b/examples/video_replayer_distributed/README.md index 0d88ce9b..58fe2830 100644 --- a/examples/video_replayer_distributed/README.md +++ b/examples/video_replayer_distributed/README.md @@ -1,7 +1,8 @@ # Distributed Video Replayer Minimal example to demonstrate the use of the video stream replayer operator to load video from disk in a distributed manner. -The video frames need to have been converted to a gxf entity format, as shown [here](../../scripts/README.md#convert_video_to_gxf_entitiespy). 
+ +The video frames need to have been converted to a gxf entity format to use as input. You can use the `convert_video_to_gxf_entities.py` script installed in `/opt/nvidia/holoscan/bin` or available [on GitHub](https://github.com/nvidia-holoscan/holoscan-sdk/tree/main/scripts#convert_video_to_gxf_entitiespy) (tensors will be loaded on the GPU). > Note: Support for H264 stream support is in progress and can be found on [HoloHub](https://nvidia-holoscan.github.io/holohub) @@ -21,8 +22,8 @@ Please refer to the [user guide](https://docs.nvidia.com/holoscan/sdk-user-guide * **using deb package install**: ```bash - # [Prerequisite] Download NGC dataset above to `DATA_DIR` (e.g., `/opt/nvidia/data`) - export HOLOSCAN_INPUT_PATH= + /opt/nvidia/holoscan/examples/download_example_data + export HOLOSCAN_INPUT_PATH=/opt/nvidia/holoscan/data # Set the application folder APP_DIR=/opt/nvidia/holoscan/examples/video_replayer_distributed/cpp @@ -82,8 +83,8 @@ Please refer to the [user guide](https://docs.nvidia.com/holoscan/sdk-user-guide ``` * **using deb package install**: ```bash - # [Prerequisite] Download NGC dataset above to `DATA_DIR` (e.g., `/opt/nvidia/data`) - export HOLOSCAN_INPUT_PATH= + /opt/nvidia/holoscan/examples/download_example_data + export HOLOSCAN_INPUT_PATH=/opt/nvidia/holoscan/data export PYTHONPATH=/opt/nvidia/holoscan/python/lib # Set the application folder @@ -156,9 +157,9 @@ Refer to the documentation in the [user guide](https://docs.nvidia.com/holoscan/ # in one machine (e.g. IP address `10.2.34.56`) using the port number `10000`, # and another worker (`fragment2` that renders video to display) in another machine. # If `--fragments` is not specified, any fragment in the application will be chosen to run. 
-# The `--nic ` argument is required when running a distributed application +# The `--nic ` argument is required when running a distributed application # across multiple nodes; it instructs the application to use the specified network -# interface for communicating with other application nodes. +# interface for communicating with other application nodes. # # note: use the following command to get a list of available network interface name and its assigned IP address. ip -o -4 addr show | awk '{print $2, $4}' diff --git a/examples/video_replayer_distributed/cpp/CMakeLists.min.txt b/examples/video_replayer_distributed/cpp/CMakeLists.min.txt index 9d8a57d1..f962f48c 100644 --- a/examples/video_replayer_distributed/cpp/CMakeLists.min.txt +++ b/examples/video_replayer_distributed/cpp/CMakeLists.min.txt @@ -42,22 +42,74 @@ add_dependencies(video_replayer_distributed video_replayer_distributed_yaml) # Testing if(BUILD_TESTING) + set(RECORDING_DIR ${CMAKE_CURRENT_BINARY_DIR}/recording_output) + set(SOURCE_VIDEO_BASENAME video_replayer_output) + set(VALIDATION_FRAMES_DIR ${CMAKE_SOURCE_DIR}/testing/validation_frames/video_replayer/) + + file(MAKE_DIRECTORY ${RECORDING_DIR}) + file(READ ${CMAKE_CURRENT_SOURCE_DIR}/video_replayer_distributed.yaml CONFIG_STRING) string(REPLACE "count: 0" "count: 10" CONFIG_STRING ${CONFIG_STRING}) set(CONFIG_FILE ${CMAKE_CURRENT_BINARY_DIR}/cpp_video_replayer_distributed_config.yaml) file(WRITE ${CONFIG_FILE} ${CONFIG_STRING}) + # Patch the current example to enable recording the rendering window + add_custom_command(OUTPUT video_replayer_distributed_test.cpp + PRE_LINK + COMMAND patch -u -o video_replayer_distributed_test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/video_replayer_distributed.cpp + ${CMAKE_SOURCE_DIR}/testing/validation_frames/video_replayer/cpp_video_replayer_distributed.patch + ) + + # Create the test executable + add_executable(video_replayer_distributed_test + video_replayer_distributed_test.cpp + ) + + 
target_include_directories(video_replayer_distributed_test + PRIVATE ${CMAKE_SOURCE_DIR}/testing) + + target_compile_definitions(video_replayer_distributed_test + PRIVATE RECORD_OUTPUT RECORDING_DIR="${RECORDING_DIR}" + PRIVATE SOURCE_VIDEO_BASENAME="${SOURCE_VIDEO_BASENAME}" + ) + + target_link_libraries(video_replayer_distributed_test + PRIVATE + holoscan::core + holoscan::ops::holoviz + holoscan::ops::video_stream_replayer + holoscan::ops::video_stream_recorder + holoscan::ops::format_converter + ) + add_test(NAME EXAMPLE_CPP_VIDEO_REPLAYER_DISTRIBUTED_TEST - COMMAND ${CMAKE_CURRENT_BINARY_DIR}/video_replayer_distributed --config ${CONFIG_FILE} + COMMAND ${CMAKE_CURRENT_BINARY_DIR}/video_replayer_distributed_test --config ${CONFIG_FILE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR} ) - + add_test(NAME EXAMPLE_CPP_VIDEO_REPLAYER_DISTRIBUTED_DRIVER_AND_WORKER_TEST - COMMAND ${CMAKE_CURRENT_BINARY_DIR}/video_replayer_distributed --config ${CONFIG_FILE} --driver --worker --fragments=all + COMMAND ${CMAKE_CURRENT_BINARY_DIR}/video_replayer_distributed_test --config ${CONFIG_FILE} --driver --worker --fragments=all WORKING_DIRECTORY ${CMAKE_BINARY_DIR} ) set_tests_properties(EXAMPLE_CPP_VIDEO_REPLAYER_DISTRIBUTED_TEST PROPERTIES PASS_REGULAR_EXPRESSION "Reach end of file or playback count reaches to the limit. Stop ticking." + FAIL_REGULAR_EXPRESSION "initialized independent of a parent entity" + ) + + # Add a test to check the validity of the frames + add_test(NAME EXAMPLE_CPP_VIDEO_REPLAYER_DISTRIBUTED_RENDER_TEST + COMMAND python3 ${CMAKE_SOURCE_DIR}/../bin/video_validation.py + --source_video_dir ${RECORDING_DIR} + --source_video_basename ${SOURCE_VIDEO_BASENAME} + --output_dir ${RECORDING_DIR} + --validation_frames_dir ${VALIDATION_FRAMES_DIR} + WORKING_DIRECTORY ${CMAKE_BINARY_DIR} + ) + + set_tests_properties(EXAMPLE_CPP_VIDEO_REPLAYER_DISTRIBUTED_RENDER_TEST PROPERTIES + DEPENDS EXAMPLE_CPP_VIDEO_REPLAYER_DISTRIBUTED_TEST + PASS_REGULAR_EXPRESSION "Valid video output!" 
+ FAIL_REGULAR_EXPRESSION "initialized independent of a parent entity" ) endif() diff --git a/examples/video_replayer_distributed/cpp/CMakeLists.txt b/examples/video_replayer_distributed/cpp/CMakeLists.txt index 19a76c24..47520466 100644 --- a/examples/video_replayer_distributed/cpp/CMakeLists.txt +++ b/examples/video_replayer_distributed/cpp/CMakeLists.txt @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -93,6 +93,9 @@ if(HOLOSCAN_BUILD_TESTS) video_replayer_distributed_test.cpp ) + target_include_directories(video_replayer_distributed_test + PRIVATE ${CMAKE_SOURCE_DIR}/tests) + target_compile_definitions(video_replayer_distributed_test PRIVATE RECORD_OUTPUT RECORDING_DIR="${RECORDING_DIR}" PRIVATE SOURCE_VIDEO_BASENAME="${SOURCE_VIDEO_BASENAME}" @@ -119,6 +122,7 @@ if(HOLOSCAN_BUILD_TESTS) set_tests_properties(EXAMPLE_CPP_VIDEO_REPLAYER_DISTRIBUTED_TEST PROPERTIES PASS_REGULAR_EXPRESSION "Reach end of file or playback count reaches to the limit. Stop ticking." + FAIL_REGULAR_EXPRESSION "initialized independent of a parent entity" ) # Add a test to check the validity of the frames @@ -134,6 +138,7 @@ if(HOLOSCAN_BUILD_TESTS) set_tests_properties(EXAMPLE_CPP_VIDEO_REPLAYER_DISTRIBUTED_RENDER_TEST PROPERTIES DEPENDS EXAMPLE_CPP_VIDEO_REPLAYER_DISTRIBUTED_TEST PASS_REGULAR_EXPRESSION "Valid video output!" 
+ FAIL_REGULAR_EXPRESSION "initialized independent of a parent entity" ) endif() diff --git a/examples/video_replayer_distributed/python/CMakeLists.min.txt b/examples/video_replayer_distributed/python/CMakeLists.min.txt new file mode 100644 index 00000000..178eefb9 --- /dev/null +++ b/examples/video_replayer_distributed/python/CMakeLists.min.txt @@ -0,0 +1,68 @@ +# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the \"License\"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an \"AS IS\" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +# Testing +if(BUILD_TESTING) + + set(RECORDING_DIR ${CMAKE_CURRENT_BINARY_DIR}/recording_output) + set(SOURCE_VIDEO_BASENAME python_video_replayer_distributed_output) + set(VALIDATION_FRAMES_DIR ${CMAKE_SOURCE_DIR}/testing/validation_frames/video_replayer/) + + file(MAKE_DIRECTORY ${RECORDING_DIR}) + + file(READ ${CMAKE_CURRENT_SOURCE_DIR}/video_replayer_distributed.yaml CONFIG_STRING) + string(REPLACE "count: 0" "count: 10" CONFIG_STRING ${CONFIG_STRING}) + string(APPEND CONFIG_STRING " enable_render_buffer_output: true\n\nrecorder:\n directory: \"${RECORDING_DIR}\"\n basename: \"${SOURCE_VIDEO_BASENAME}\"") + set(CONFIG_FILE ${CMAKE_CURRENT_BINARY_DIR}/python_video_replayer_distributed_config.yaml) + file(WRITE ${CONFIG_FILE} ${CONFIG_STRING}) + + # Patch the current example to enable recording the rendering window + add_custom_command(OUTPUT video_replayer_distributed_test.py + PRE_LINK + COMMAND patch -u -o video_replayer_distributed_test.py ${CMAKE_CURRENT_SOURCE_DIR}/video_replayer_distributed.py + ${CMAKE_SOURCE_DIR}/testing/validation_frames/video_replayer/python_video_replayer_distributed.patch + ) + + add_custom_target(python_video_replayer_distributed_test ALL + DEPENDS "video_replayer_distributed_test.py" + ) + + add_test(NAME EXAMPLE_PYTHON_VIDEO_REPLAYER_DISTRIBUTED_TEST + COMMAND python3 video_replayer_distributed_test.py --config python_video_replayer_distributed_config.yaml + WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR} + ) + + set_tests_properties(EXAMPLE_PYTHON_VIDEO_REPLAYER_DISTRIBUTED_TEST PROPERTIES + DEPENDS "video_replayer_distributed_test.py" + PASS_REGULAR_EXPRESSION "Reach end of file or playback count reaches to the limit. Stop ticking." 
+ ) + + # Add a test to check the validity of the frames + add_test(NAME EXAMPLE_PYTHON_VIDEO_REPLAYER_DISTRIBUTED_RENDER_TEST + COMMAND python3 ${CMAKE_SOURCE_DIR}/../bin/video_validation.py + --source_video_dir ${RECORDING_DIR} + --source_video_basename ${SOURCE_VIDEO_BASENAME} + --output_dir ${RECORDING_DIR} + --validation_frames_dir ${VALIDATION_FRAMES_DIR} + WORKING_DIRECTORY ${CMAKE_BINARY_DIR} + ) + + set_tests_properties(EXAMPLE_PYTHON_VIDEO_REPLAYER_DISTRIBUTED_RENDER_TEST PROPERTIES + DEPENDS EXAMPLE_PYTHON_VIDEO_REPLAYER_DISTRIBUTED_TEST + PASS_REGULAR_EXPRESSION "Valid video output!" + ) + +endif() diff --git a/examples/video_replayer_distributed/python/CMakeLists.txt b/examples/video_replayer_distributed/python/CMakeLists.txt index 2f719542..04ad5953 100644 --- a/examples/video_replayer_distributed/python/CMakeLists.txt +++ b/examples/video_replayer_distributed/python/CMakeLists.txt @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -41,6 +41,13 @@ install(FILES COMPONENT "holoscan-examples" ) +# Install the minimal CMakeLists.txt file +install(FILES CMakeLists.min.txt + RENAME "CMakeLists.txt" + DESTINATION "${app_relative_dest_path}" + COMPONENT holoscan-examples +) + # Testing if(HOLOSCAN_BUILD_TESTS) @@ -75,6 +82,7 @@ if(HOLOSCAN_BUILD_TESTS) set_tests_properties(EXAMPLE_PYTHON_VIDEO_REPLAYER_DISTRIBUTED_TEST PROPERTIES DEPENDS "video_replayer_distributed_test.py" PASS_REGULAR_EXPRESSION "Reach end of file or playback count reaches to the limit. Stop ticking." 
+ FAIL_REGULAR_EXPRESSION "initialized independent of a parent entity" ) # Add a test to check the validity of the frames @@ -90,6 +98,7 @@ if(HOLOSCAN_BUILD_TESTS) set_tests_properties(EXAMPLE_PYTHON_VIDEO_REPLAYER_DISTRIBUTED_RENDER_TEST PROPERTIES DEPENDS EXAMPLE_PYTHON_VIDEO_REPLAYER_DISTRIBUTED_TEST PASS_REGULAR_EXPRESSION "Valid video output!" + FAIL_REGULAR_EXPRESSION "initialized independent of a parent entity" ) endif() diff --git a/examples/wrap_operator_as_gxf_extension/ping_rx_native_op/ping_rx_native_op.cpp b/examples/wrap_operator_as_gxf_extension/ping_rx_native_op/ping_rx_native_op.cpp index f1d0e821..b0baee89 100644 --- a/examples/wrap_operator_as_gxf_extension/ping_rx_native_op/ping_rx_native_op.cpp +++ b/examples/wrap_operator_as_gxf_extension/ping_rx_native_op/ping_rx_native_op.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,8 +17,6 @@ #include "ping_rx_native_op.hpp" -#include - using namespace holoscan; namespace myops { diff --git a/examples/wrap_operator_as_gxf_extension/ping_tx_native_op/ping_tx_native_op.cpp b/examples/wrap_operator_as_gxf_extension/ping_tx_native_op/ping_tx_native_op.cpp index 559a062a..4dbdcfc3 100644 --- a/examples/wrap_operator_as_gxf_extension/ping_tx_native_op/ping_tx_native_op.cpp +++ b/examples/wrap_operator_as_gxf_extension/ping_tx_native_op/ping_tx_native_op.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,8 +17,6 @@ #include "ping_tx_native_op.hpp" -#include - using namespace holoscan; namespace myops { diff --git a/gxf_extensions/CMakeLists.txt b/gxf_extensions/CMakeLists.txt index 7a4a4e14..406d9cfc 100644 --- a/gxf_extensions/CMakeLists.txt +++ b/gxf_extensions/CMakeLists.txt @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -28,5 +28,4 @@ set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/lib/gxf_extensions) set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/lib/gxf_extensions) add_subdirectory(gxf_holoscan_wrapper) -add_subdirectory(stream_playback) add_subdirectory(ucx) diff --git a/gxf_extensions/README.md b/gxf_extensions/README.md index 4b3f20cb..90565970 100644 --- a/gxf_extensions/README.md +++ b/gxf_extensions/README.md @@ -4,5 +4,4 @@ See the User Guide for details regarding the extensions in GXF and Holoscan SDK, - `bayer_demosaic`: includes the `nvidia::holoscan::BayerDemosaic` codelet. It performs color filter array (CFA) interpolation for 1-channel inputs of 8 or 16-bit unsigned integer and outputs an RGB or RGBA image. This codelet is no longer used in the core SDK as there is now also a native `holoscan::Operator` version available (instead of wrapping this codelet as a `holoscan::gxf::GXFOperator`). This version is kept as a concrete example of a codelet and a `GXFOperator` wrapping this codelet can still be found in `tests/system/bayer_demosaic_gxf.hpp` where it is used for test cases. - `gxf_holoscan_wrapper`: includes the `holoscan::gxf::OperatorWrapper` codelet. 
It is used as a utility base class to wrap a holoscan operator to interface with the GXF framework. -- `stream_playback`: includes the `nvidia::holoscan::stream_playback::VideoStreamSerializer` entity serializer to/from a Tensor Object. - `ucx_holoscan`: includes `nvidia::holoscan::UcxHoloscanComponentSerializer` which is a `nvidia::gxf::ComponentSerializer` that handles serialization and deserialization of `holoscan::Message` and `holoscan::Tensor` types over a Unified Communication X (UCX) network connection. UCX is used by Holoscan SDK to send data between fragments of distributed applications. This extension must be used in combination with standard GXF UCX extension components. Specifically, this `UcxHoloscanComponentSerializer` is intended for use by the `UcxEntitySerializer` where it can operate alongside the `UcxComponentSerializer` that serializes GXF-specific types (`nvidia::gxf::Tensor`, `nvidia::gxf::VideoBuffer`, etc.). diff --git a/gxf_extensions/gxf_holoscan_wrapper/gxf_holoscan_wrapper_ext.cpp b/gxf_extensions/gxf_holoscan_wrapper/gxf_holoscan_wrapper_ext.cpp index 0d44e11c..e4cd7480 100644 --- a/gxf_extensions/gxf_holoscan_wrapper/gxf_holoscan_wrapper_ext.cpp +++ b/gxf_extensions/gxf_holoscan_wrapper/gxf_holoscan_wrapper_ext.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,7 +17,6 @@ #include "gxf/std/extension_factory_helper.hpp" #include "holoscan/core/domain/tensor.hpp" -#include "holoscan/core/gxf/gxf_tensor.hpp" #include "holoscan/core/message.hpp" #include "operator_wrapper.hpp" @@ -28,8 +27,6 @@ GXF_EXT_FACTORY_SET_INFO(0x12d01b4ee06f49ef, 0x93c4961834347385, "HoloscanWrappe // Register types/components that are used by Holoscan GXF_EXT_FACTORY_ADD_0(0x61510ca06aa9493b, 0x8a777d0bf87476b7, holoscan::Message, "Holoscan message type"); -GXF_EXT_FACTORY_ADD(0xa02945eaf20e418c, 0x8e6992b68672ce40, holoscan::gxf::GXFTensor, - nvidia::gxf::Tensor, "Holoscan's GXF Tensor type"); GXF_EXT_FACTORY_ADD_0(0xa5eb0ed57d7f4aa2, 0xb5865ccca0ef955c, holoscan::Tensor, "Holoscan's Tensor type"); diff --git a/gxf_extensions/gxf_holoscan_wrapper/operator_wrapper.hpp b/gxf_extensions/gxf_holoscan_wrapper/operator_wrapper.hpp index 20eb5896..5ddeeed5 100644 --- a/gxf_extensions/gxf_holoscan_wrapper/operator_wrapper.hpp +++ b/gxf_extensions/gxf_holoscan_wrapper/operator_wrapper.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -26,7 +26,7 @@ #include "operator_wrapper_fragment.hpp" #include "gxf/std/codelet.hpp" -#include "gxf/std/parameter_parser_std.hpp" +#include "gxf/core/parameter_parser_std.hpp" namespace holoscan::gxf { diff --git a/gxf_extensions/gxf_holoscan_wrapper/operator_wrapper_fragment.hpp b/gxf_extensions/gxf_holoscan_wrapper/operator_wrapper_fragment.hpp index e8104f0f..9eb9545c 100644 --- a/gxf_extensions/gxf_holoscan_wrapper/operator_wrapper_fragment.hpp +++ b/gxf_extensions/gxf_holoscan_wrapper/operator_wrapper_fragment.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -32,9 +32,7 @@ class OperatorWrapperFragment : public holoscan::Fragment { public: OperatorWrapperFragment(); - GXFExecutor& gxf_executor() { - return static_cast(executor()); - } + GXFExecutor& gxf_executor() { return static_cast(executor()); } }; } // namespace holoscan::gxf diff --git a/gxf_extensions/stream_playback/CMakeLists.txt b/gxf_extensions/stream_playback/CMakeLists.txt deleted file mode 100644 index 29dff8ed..00000000 --- a/gxf_extensions/stream_playback/CMakeLists.txt +++ /dev/null @@ -1,36 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-License-Identifier: Apache-2.0 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Create library -add_library(gxf_stream_playback_lib SHARED - video_stream_serializer.cpp - video_stream_serializer.hpp -) -target_link_libraries(gxf_stream_playback_lib - PUBLIC - GXF::serialization - yaml-cpp -) - -# Create extension -add_library(gxf_stream_playback SHARED - stream_playback_ext.cpp -) -target_link_libraries(gxf_stream_playback - PUBLIC gxf_stream_playback_lib - PRIVATE holoscan_security_flags -) -# Install GXF extension as a component 'holoscan-gxf_extensions' -install_gxf_extension(gxf_stream_playback) diff --git a/gxf_extensions/stream_playback/stream_playback_ext.cpp b/gxf_extensions/stream_playback/stream_playback_ext.cpp deleted file mode 100644 index ea11e832..00000000 --- a/gxf_extensions/stream_playback/stream_playback_ext.cpp +++ /dev/null @@ -1,27 +0,0 @@ -/* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. - * SPDX-License-Identifier: Apache-2.0 - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#include "gxf/std/extension_factory_helper.hpp" - -#include "video_stream_serializer.hpp" - -GXF_EXT_FACTORY_BEGIN() -GXF_EXT_FACTORY_SET_INFO(0xe6c168715f3f428d, 0x96cd24dce2f42f46, "StreamPlaybackExtension", - "Holoscan StreamPlayback extension", "NVIDIA", "0.2.0", "LICENSE"); -GXF_EXT_FACTORY_ADD(0x7ee08fcc84c94245, 0xa415022b42f4ef39, - nvidia::holoscan::stream_playback::VideoStreamSerializer, - nvidia::gxf::EntitySerializer, "VideoStreamSerializer component."); -GXF_EXT_FACTORY_END() diff --git a/gxf_extensions/stream_playback/video_stream_serializer.cpp b/gxf_extensions/stream_playback/video_stream_serializer.cpp deleted file mode 100644 index e0649ee8..00000000 --- a/gxf_extensions/stream_playback/video_stream_serializer.cpp +++ /dev/null @@ -1,279 +0,0 @@ -/* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. - * SPDX-License-Identifier: Apache-2.0 - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#include "video_stream_serializer.hpp" - -#include - -#include -#include -#include -#include - -namespace nvidia { -namespace holoscan { -namespace stream_playback { - -namespace { - -// Serializes EntityHeader -gxf::Expected SerializeEntityHeader(VideoStreamSerializer::EntityHeader header, - gxf::Endpoint* endpoint) { - if (!endpoint) { return gxf::Unexpected{GXF_ARGUMENT_NULL}; } - header.serialized_size = htole64(header.serialized_size); - header.checksum = htole32(header.checksum); - header.sequence_number = htole64(header.sequence_number); - header.flags = htole32(header.flags); - header.component_count = htole64(header.component_count); - header.reserved = htole64(header.reserved); - return endpoint->writeTrivialType(&header).substitute(sizeof(header)); -} - -// Deserializes EntityHeader -gxf::Expected DeserializeEntityHeader( - gxf::Endpoint* endpoint) { - if (!endpoint) { return gxf::Unexpected{GXF_ARGUMENT_NULL}; } - VideoStreamSerializer::EntityHeader header; - return endpoint->readTrivialType(&header).and_then([&]() { - header.serialized_size = le64toh(header.serialized_size); - header.checksum = le32toh(header.checksum); - header.sequence_number = le64toh(header.sequence_number); - header.flags = le32toh(header.flags); - header.component_count = le64toh(header.component_count); - header.reserved = le64toh(header.reserved); - return header; - }); -} - -// Serializes ComponentHeader -gxf::Expected SerializeComponentHeader(VideoStreamSerializer::ComponentHeader header, - gxf::Endpoint* endpoint) { - if (!endpoint) { return gxf::Unexpected{GXF_ARGUMENT_NULL}; } - header.serialized_size = htole64(header.serialized_size); - header.tid.hash1 = htole64(header.tid.hash1); - header.tid.hash2 = htole64(header.tid.hash2); - header.name_size = htole64(header.name_size); - return endpoint->writeTrivialType(&header).substitute(sizeof(header)); -} - -// Deserializes ComponentHeader -gxf::Expected DeserializeComponentHeader( - gxf::Endpoint* endpoint) { - if 
(!endpoint) { return gxf::Unexpected{GXF_ARGUMENT_NULL}; } - VideoStreamSerializer::ComponentHeader header; - return endpoint->readTrivialType(&header).and_then([&]() { - header.serialized_size = le64toh(header.serialized_size); - header.tid.hash1 = le64toh(header.tid.hash1); - header.tid.hash2 = le64toh(header.tid.hash2); - header.name_size = le64toh(header.name_size); - return header; - }); -} - -} // namespace - -struct VideoStreamSerializer::ComponentEntry { - ComponentHeader header = {0, GxfTidNull(), 0}; - gxf::UntypedHandle component = gxf::UntypedHandle::Null(); - gxf::Handle serializer = gxf::Handle::Null(); -}; - -gxf_result_t VideoStreamSerializer::registerInterface(gxf::Registrar* registrar) { - if (registrar == nullptr) { return GXF_ARGUMENT_NULL; } - gxf::Expected result; - result &= - registrar->parameter(component_serializers_, "component_serializers", "Component serializers", - "List of serializers for serializing and deserializing components"); - return gxf::ToResultCode(result); -} - -gxf_result_t VideoStreamSerializer::serialize_entity_abi(gxf_uid_t eid, gxf::Endpoint* endpoint, - uint64_t* size) { - if (endpoint == nullptr || size == nullptr) { return GXF_ARGUMENT_NULL; } - FixedVector components; - FixedVector entries; - return gxf::ToResultCode( - gxf::Entity::Shared(context(), eid) - .map([&](gxf::Entity entity) { return entity.findAll(components); }) - .and_then([&]() { return createComponentEntries(components); }) - .assign_to(entries) - .and_then([&]() { - EntityHeader entity_header; - entity_header.serialized_size = 0; // How can we compute this before serializing? 
- entity_header.checksum = 0x00000000; - entity_header.sequence_number = outgoing_sequence_number_++; - entity_header.flags = 0x00000000; - entity_header.component_count = entries.size(); - entity_header.reserved = 0; - return SerializeEntityHeader(entity_header, endpoint); - }) - .assign_to(*size) - .and_then([&]() { return serializeComponents(entries, endpoint); }) - .map([&](size_t serialized_size) { *size += serialized_size; })); -} - -gxf::Expected VideoStreamSerializer::deserialize_entity_header_abi( - gxf::Endpoint* endpoint) { - gxf::Entity entity; - - gxf_result_t result = gxf::ToResultCode( - gxf::Entity::New(context()) - .assign_to(entity) - .and_then([&]() { return DeserializeEntityHeader(endpoint); }) - .map([&](EntityHeader entity_header) { - if (entity_header.sequence_number != incoming_sequence_number_) { - incoming_sequence_number_ = entity_header.sequence_number; - } - incoming_sequence_number_++; - return deserializeComponents(entity_header.component_count, entity, endpoint); - }) - .substitute(entity)); - - if (result != GXF_SUCCESS) { GXF_LOG_ERROR("Deserialize entity header failed"); } - return entity; -} - -gxf_result_t VideoStreamSerializer::deserialize_entity_abi(gxf_uid_t eid, gxf::Endpoint* endpoint) { - if (endpoint == nullptr) { return GXF_ARGUMENT_NULL; } - gxf::Entity entity; - return gxf::ToResultCode(gxf::Entity::Shared(context(), eid) - .assign_to(entity) - .and_then([&]() { return DeserializeEntityHeader(endpoint); }) - .map([&](EntityHeader entity_header) { - if (entity_header.sequence_number != incoming_sequence_number_) { - // Note:: This is a workaround for the issue that the frame count - // is out of the maximum frame index. - // Modified to support 'repeat' feature in - // nvidia::holoscan::stream_playback::VideoStreamReplayer - // which reuses gxf::EntityReplayer. 
- // When 'repeat' parameter is 'true' and the frame count - // is out of the maximum frame index, this error message - // is printed with nvidia::gxf::StdEntitySerializer but it - // is actually not a warning so we provide - // nvidia::holoscan::stream_playback::VideoStreamSerializer - // to replace nvidia::gxf::StdEntitySerializer and not to - // print this warning message. - incoming_sequence_number_ = entity_header.sequence_number; - } - incoming_sequence_number_++; - return deserializeComponents(entity_header.component_count, entity, - endpoint); - })); -} - -gxf::Expected> -VideoStreamSerializer::createComponentEntries( - const FixedVectorBase& components) { - FixedVector entries; - for (size_t i = 0; i < components.size(); i++) { - const auto component = components[i]; - if (!component) { return gxf::Unexpected{GXF_ARGUMENT_OUT_OF_RANGE}; } - - // Check if component is serializable - auto component_serializer = findComponentSerializer(component->tid()); - if (!component_serializer) { - GXF_LOG_WARNING("No serializer found for component '%s' with type ID 0x%016zx%016zx", - component->name(), component->tid().hash1, component->tid().hash2); - continue; - } - - // Create component header - ComponentHeader component_header; - component_header.serialized_size = 0; // How can we compute this before serializing? 
- component_header.tid = component->tid(); - component_header.name_size = std::strlen(component->name()); - - // Update component list - const auto result = - entries.emplace_back(component_header, component.value(), component_serializer.value()); - if (!result) { return gxf::Unexpected{GXF_EXCEEDING_PREALLOCATED_SIZE}; } - } - - return entries; -} - -gxf::Expected VideoStreamSerializer::serializeComponents( - const FixedVectorBase& entries, gxf::Endpoint* endpoint) { - size_t size = 0; - for (size_t i = 0; i < entries.size(); i++) { - const auto& entry = entries[i]; - if (!entry) { return gxf::Unexpected{GXF_ARGUMENT_OUT_OF_RANGE}; } - const auto result = - SerializeComponentHeader(entry->header, endpoint) - .map([&](size_t component_header_size) { size += component_header_size; }) - .and_then( - [&]() { return endpoint->write(entry->component.name(), entry->header.name_size); }) - .and_then([&]() { size += entry->header.name_size; }) - .and_then( - [&]() { return entry->serializer->serializeComponent(entry->component, endpoint); }) - .map([&](size_t component_size) { size += component_size; }); - if (!result) { return gxf::ForwardError(result); } - } - return size; -} - -gxf::Expected VideoStreamSerializer::deserializeComponents(size_t component_count, - gxf::Entity entity, - gxf::Endpoint* endpoint) { - for (size_t i = 0; i < component_count; i++) { - ComponentEntry entry; - const auto result = - DeserializeComponentHeader(endpoint) - .assign_to(entry.header) - .and_then([&]() { return findComponentSerializer(entry.header.tid); }) - .assign_to(entry.serializer) - .and_then([&]() -> gxf::Expected { - try { - std::string name(entry.header.name_size, '\0'); - return gxf::ExpectedOrError( - endpoint->read(const_cast(name.data()), name.size()), name); - } catch (const std::exception& exception) { - GXF_LOG_ERROR("Failed to deserialize component name: %s", exception.what()); - return gxf::Unexpected{GXF_OUT_OF_MEMORY}; - } - }) - .map([&](std::string name) { return 
entity.add(entry.header.tid, name.c_str()); }) - .assign_to(entry.component) - .and_then([&]() { - return entry.serializer->deserializeComponent(entry.component, endpoint); - }); - if (!result) { return gxf::ForwardError(result); } - } - return gxf::Success; -} - -gxf::Expected> VideoStreamSerializer::findComponentSerializer( - gxf_tid_t tid) { - // Search cache for valid serializer - const auto search = serializer_cache_.find(tid); - if (search != serializer_cache_.end()) { return search->second; } - - // Search serializer list for valid serializer and cache result - for (size_t i = 0; i < component_serializers_.get().size(); i++) { - const auto component_serializer = component_serializers_.get()[i]; - if (!component_serializer) { return gxf::Unexpected{GXF_ARGUMENT_OUT_OF_RANGE}; } - if (component_serializer.value()->isSupported(tid)) { - serializer_cache_[tid] = component_serializer.value(); - return component_serializer.value(); - } - } - - return gxf::Unexpected{GXF_QUERY_NOT_FOUND}; -} - -} // namespace stream_playback -} // namespace holoscan -} // namespace nvidia diff --git a/gxf_extensions/stream_playback/video_stream_serializer.hpp b/gxf_extensions/stream_playback/video_stream_serializer.hpp deleted file mode 100644 index 96dafdfb..00000000 --- a/gxf_extensions/stream_playback/video_stream_serializer.hpp +++ /dev/null @@ -1,107 +0,0 @@ -/* - * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. - * SPDX-License-Identifier: Apache-2.0 - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -#ifndef NVIDIA_CLARA_HOLOSCAN_GXF_EXTENSIONS_STREAM_PLAYBACK_VIDEO_STREAM_SERIALIZER_HPP_ -#define NVIDIA_CLARA_HOLOSCAN_GXF_EXTENSIONS_STREAM_PLAYBACK_VIDEO_STREAM_SERIALIZER_HPP_ - -#include - -#include "common/fixed_vector.hpp" -#include "gxf/serialization/component_serializer.hpp" -#include "gxf/serialization/entity_serializer.hpp" -#include "gxf/serialization/tid_hash.hpp" - -namespace nvidia::holoscan::stream_playback { - -/// @brief Data marshalling codelet for video stream entities. -/// -/// Serializes and deserializes entities with the provided component serializers. -/// Little-endian is used over big-endian for better performance on x86 and arm platforms. -/// Entities are serialized in the following format: -/// -/// | Entity Header || Component Header | Component Name | Component | ... | ... | ... | -/// -/// Components will be serialized in the order they are added to the entity. -/// Components without serializers will be skipped. -/// Each component will be preceded by a component header and the name of the component. -/// The component itself will be serialized with a component serializer. -/// An entity header will be added at the beginning. 
-class VideoStreamSerializer : gxf::EntitySerializer { - public: -#pragma pack(push, 1) - // Header preceding entities - struct EntityHeader { - uint64_t serialized_size; // Size of the serialized entity in bytes - uint32_t checksum; // Checksum to verify the integrity of the message - uint64_t sequence_number; // Sequence number of the message - uint32_t flags; // Flags to specify delivery options - uint64_t component_count; // Number of components in the entity - uint64_t reserved; // Bytes reserved for future use - }; -#pragma pack(pop) - -#pragma pack(push, 1) - // Header preceding components - struct ComponentHeader { - uint64_t serialized_size; // Size of the serialized component in bytes - gxf_tid_t tid; // Type ID of the component - uint64_t name_size; // Size of the component name in bytes - }; -#pragma pack(pop) - - gxf_result_t registerInterface(gxf::Registrar* registrar) override; - gxf_result_t initialize() override { return GXF_SUCCESS; } - gxf_result_t deinitialize() override { return GXF_SUCCESS; } - - gxf_result_t serialize_entity_abi(gxf_uid_t eid, gxf::Endpoint* endpoint, - uint64_t* size) override; - gxf_result_t deserialize_entity_abi(gxf_uid_t eid, gxf::Endpoint* endpoint) override; - gxf::Expected deserialize_entity_header_abi(gxf::Endpoint* endpoint) override; - - private: - // Structure used to organize serializable components - struct ComponentEntry; - - // Populates a list of component entries using a list of component handles - gxf::Expected> createComponentEntries( - const FixedVectorBase& components); - // Serializes a list of components and writes them to an endpoint - // Returns the total number of bytes serialized - gxf::Expected serializeComponents(const FixedVectorBase& entries, - gxf::Endpoint* endpoint); - // Reads from an endpoint and deserializes a list of components - gxf::Expected deserializeComponents(size_t component_count, gxf::Entity entity, - gxf::Endpoint* endpoint); - // Searches for a component serializer that 
supports the given type ID - // Uses the first valid serializer found and caches it for subsequent lookups - // Returns an Unexpected if no valid serializer is found - gxf::Expected> findComponentSerializer(gxf_tid_t tid); - - gxf::Parameter, kMaxComponents>> - component_serializers_; - - // Table that caches type ID with a valid component serializer - std::unordered_map, gxf::TidHash> - serializer_cache_; - // Sequence number for outgoing messages - uint64_t outgoing_sequence_number_; - // Sequence number for incoming messages - uint64_t incoming_sequence_number_; -}; - -} // namespace nvidia::holoscan::stream_playback - -#endif // NVIDIA_CLARA_HOLOSCAN_GXF_EXTENSIONS_STREAM_PLAYBACK_VIDEO_STREAM_SERIALIZER_HPP_ diff --git a/gxf_extensions/ucx/ucx_holoscan_component_serializer.cpp b/gxf_extensions/ucx/ucx_holoscan_component_serializer.cpp index ba43a0e7..ce0e1834 100644 --- a/gxf_extensions/ucx/ucx_holoscan_component_serializer.cpp +++ b/gxf_extensions/ucx/ucx_holoscan_component_serializer.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -28,22 +28,6 @@ namespace nvidia { namespace gxf { -// copy of TensorHeader from UcxComponentSerializer needed for GXFTensor serialize/deserialize -namespace { - -#pragma pack(push, 1) -struct TensorHeader { - MemoryStorageType storage_type; // CPU or GPU tensor - PrimitiveType element_type; // Tensor element type - uint64_t bytes_per_element; // Bytes per tensor element - uint32_t rank; // Tensor rank - int32_t dims[Shape::kMaxRank]; // Tensor dimensions - uint64_t strides[Shape::kMaxRank]; // Tensor strides -}; -#pragma pack(pop) - -} // namespace - gxf_result_t UcxHoloscanComponentSerializer::registerInterface(Registrar* registrar) { Expected result; result &= registrar->parameter( @@ -63,87 +47,21 @@ gxf_result_t UcxHoloscanComponentSerializer::initialize() { Expected UcxHoloscanComponentSerializer::configureSerializers() { Expected result; - result &= setSerializer([this](void* component, Endpoint* endpoint) { - return serializeHoloscanGXFTensor(*static_cast(component), endpoint); - }); result &= setSerializer([this](void* component, Endpoint* endpoint) { return serializeHoloscanMessage(*static_cast(component), endpoint); }); - result &= setSerializer([this](void* component, Endpoint* endpoint) { - return serializeTensor(*static_cast(component), endpoint); - }); - return result; } Expected UcxHoloscanComponentSerializer::configureDeserializers() { Expected result; - result &= setDeserializer([this](void* component, Endpoint* endpoint) { - return deserializeHoloscanGXFTensor(endpoint).assign_to( - *static_cast(component)); - }); result &= setDeserializer([this](void* component, Endpoint* endpoint) { return deserializeHoloscanMessage(endpoint).assign_to( *static_cast(component)); }); - result &= setDeserializer([this](void* component, Endpoint* endpoint) { - return deserializeTensor(endpoint).assign_to(*static_cast(component)); - }); - return result; } 
-Expected UcxHoloscanComponentSerializer::serializeHoloscanGXFTensor( - const holoscan::gxf::GXFTensor& tensor, Endpoint* endpoint) { - GXF_LOG_DEBUG("UcxHoloscanComponentSerializer::serializeHoloscanGXFTensor"); - // Implementation matches UcxComponentSerializer::serializeTensor since holoscan::gxf::Tensor - // inherits from nvidia::gxf::Tensor. - TensorHeader header; - header.storage_type = tensor.storage_type(); - header.element_type = tensor.element_type(); - header.bytes_per_element = tensor.bytes_per_element(); - header.rank = tensor.rank(); - for (size_t i = 0; i < Shape::kMaxRank; i++) { - header.dims[i] = tensor.shape().dimension(i); - header.strides[i] = tensor.stride(i); - } - auto result = endpoint->write_ptr(tensor.pointer(), tensor.size(), tensor.storage_type()); - if (!result) { return ForwardError(result); } - auto size = endpoint->writeTrivialType(&header); - if (!size) { return ForwardError(size); } - return sizeof(header); -} - -Expected UcxHoloscanComponentSerializer::deserializeHoloscanGXFTensor( - Endpoint* endpoint) { - GXF_LOG_DEBUG("UcxHoloscanComponentSerializer::deserializeHoloscanGXFTensor"); - // Implementation is as in UcxComponentSerializer::deserializeTensor is private, but with an - // additional conversion to GXFTensor at the end. 
- if (!endpoint) { return Unexpected{GXF_ARGUMENT_NULL}; } - - TensorHeader header; - auto size = endpoint->readTrivialType(&header); - if (!size) { return ForwardError(size); } - - std::array dims; - std::memcpy(dims.data(), header.dims, sizeof(header.dims)); - Tensor::stride_array_t strides; - std::memcpy(strides.data(), header.strides, sizeof(header.strides)); - - Tensor tensor; - auto result = tensor.reshapeCustom(Shape(dims, header.rank), - header.element_type, - header.bytes_per_element, - strides, - header.storage_type, - allocator_); - if (!result) { return ForwardError(result); } - result = endpoint->write_ptr(tensor.pointer(), tensor.size(), tensor.storage_type()); - if (!result) { return ForwardError(result); } - // Convert to GXFTensor (doesn't need to protect with mutex since 'tensor' is local) - return holoscan::gxf::GXFTensor(tensor, -1); -} - Expected UcxHoloscanComponentSerializer::serializeHoloscanMessage( const holoscan::Message& message, Endpoint* endpoint) { GXF_LOG_DEBUG("UcxHoloscanComponentSerializer::serializeHoloscanMessage"); @@ -197,69 +115,5 @@ Expected UcxHoloscanComponentSerializer::deserializeHoloscanM return deserialize_func(endpoint); } -Expected UcxHoloscanComponentSerializer::serializeTensor(const Tensor& tensor, - Endpoint* endpoint) { - TensorHeader header; - header.storage_type = tensor.storage_type(); - header.element_type = tensor.element_type(); - header.bytes_per_element = tensor.bytes_per_element(); - header.rank = tensor.rank(); - for (size_t i = 0; i < Shape::kMaxRank; i++) { - header.dims[i] = tensor.shape().dimension(i); - header.strides[i] = tensor.stride(i); - } - - // Issue 4371324 - // Following the resolution of issue 4272363, the conversion of GXF Tensor to Holoscan - // GXFTensor now avoids thread contention by utilizing a mutex. However, this mutex is not - // employed when sending the GXF Tensor to a remote endpoint. Consequently, the tensor pointer - // may be null during transmission. 
To address this, the tensor pointer is checked before - // sending; if it is null, the thread yields and retries, continuing this process for up to 100ms. - // If the tensor pointer remains null after this duration, an error is returned. This logic - // ensures a balance between efficient error handling and avoiding excessive delays in tensor - // transmission. - holoscan::Timer timer("Waiting time: {:.8f} seconds\n", true, false); - auto pointer = tensor.pointer(); - while (pointer == nullptr && timer.stop() < 0.1) { - std::this_thread::yield(); - pointer = tensor.pointer(); - } - if (pointer == nullptr) { - GXF_LOG_ERROR("Tensor pointer is still null after 100ms"); - return Unexpected{GXF_NULL_POINTER}; - } - - auto result = endpoint->write_ptr(pointer, tensor.size(), tensor.storage_type()); - if (!result) { return ForwardError(result); } - auto size = endpoint->writeTrivialType(&header); - if (!size) { return ForwardError(size); } - return sizeof(header); -} - -Expected UcxHoloscanComponentSerializer::deserializeTensor(Endpoint* endpoint) { - if (!endpoint) { return Unexpected{GXF_ARGUMENT_NULL}; } - - TensorHeader header; - auto size = endpoint->readTrivialType(&header); - if (!size) { return ForwardError(size); } - - std::array dims; - std::memcpy(dims.data(), header.dims, sizeof(header.dims)); - Tensor::stride_array_t strides; - std::memcpy(strides.data(), header.strides, sizeof(header.strides)); - - Tensor tensor; - auto result = tensor.reshapeCustom(Shape(dims, header.rank), - header.element_type, - header.bytes_per_element, - strides, - header.storage_type, - allocator_); - if (!result) { return ForwardError(result); } - result = endpoint->write_ptr(tensor.pointer(), tensor.size(), tensor.storage_type()); - if (!result) { return ForwardError(result); } - return tensor; -} - } // namespace gxf } // namespace nvidia diff --git a/gxf_extensions/ucx/ucx_holoscan_component_serializer.hpp b/gxf_extensions/ucx/ucx_holoscan_component_serializer.hpp index 
a9902cfd..0a5deec0 100644 --- a/gxf_extensions/ucx/ucx_holoscan_component_serializer.hpp +++ b/gxf_extensions/ucx/ucx_holoscan_component_serializer.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -25,7 +25,6 @@ #include "gxf/std/allocator.hpp" #include "gxf/std/tensor.hpp" #include "holoscan/core/codec_registry.hpp" -#include "holoscan/core/gxf/gxf_tensor.hpp" #include "holoscan/core/message.hpp" namespace nvidia { @@ -45,19 +44,10 @@ class UcxHoloscanComponentSerializer : public ComponentSerializer { Expected configureSerializers(); // Configures all deserializer functions Expected configureDeserializers(); - // Serializes a holoscan::gxf::GXFTensor - Expected serializeHoloscanGXFTensor(const holoscan::gxf::GXFTensor& tensor, - Endpoint* endpoint); - // Deserializes a holoscan::gxf::GXFTensor - Expected deserializeHoloscanGXFTensor(Endpoint* endpoint); // Serializes a holoscan::Message Expected serializeHoloscanMessage(const holoscan::Message& message, Endpoint* endpoint); // Deserializes a holoscan::Message Expected deserializeHoloscanMessage(Endpoint* endpoint); - // Serializes a nvidia::gxf::Tensor - Expected serializeTensor(const Tensor& tensor, Endpoint* endpoint); - // Deserializes a nvidia::gxf::Tensor - Expected deserializeTensor(Endpoint* endpoint); Parameter> allocator_; }; diff --git a/include/common/logger/spdlog_logger.hpp b/include/common/logger/spdlog_logger.hpp new file mode 100644 index 00000000..b005b65d --- /dev/null +++ b/include/common/logger/spdlog_logger.hpp @@ -0,0 +1,58 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef COMMON_LOGGER_SPDLOG_LOGGER_HPP +#define COMMON_LOGGER_SPDLOG_LOGGER_HPP + +#include +#include +#include +#include + +#include + +namespace nvidia { + +/// Namespace for the NVIDIA logger functionality. +namespace logger { + +class SpdlogLogger : public Logger { + public: + /// Create a logger with the given name. + /// + /// This constructor creates a logger with the given name and optional logger and log function. + /// If no logger or log function is provided, a default spdlog logger will be created. + /// + /// @param name The name of the logger. + /// @param logger The logger to use (default: nullptr). + /// @param func The log function to use (default: nullptr). + explicit SpdlogLogger(const char* name, const std::shared_ptr& logger = nullptr, + const LogFunction& func = nullptr); + + /// Return the log pattern. + /// @return The reference to the log pattern string. 
+ std::string& pattern_string(); + + protected: + std::string name_; ///< logger name +}; + +} // namespace logger + +} // namespace nvidia + +#endif /* COMMON_LOGGER_SPDLOG_LOGGER_HPP */ diff --git a/include/holoscan/core/application.hpp b/include/holoscan/core/application.hpp index 5bab1114..4be1ace5 100644 --- a/include/holoscan/core/application.hpp +++ b/include/holoscan/core/application.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -119,7 +119,7 @@ class Application : public Fragment { */ template >> - std::shared_ptr make_fragment(const StringT& name, ArgsT&&... args) { + std::shared_ptr make_fragment(StringT name, ArgsT&&... args) { auto fragment = std::make_shared(std::forward(args)...); fragment->name(name); fragment->application(this); diff --git a/include/holoscan/core/argument_setter.hpp b/include/holoscan/core/argument_setter.hpp index 12483f5f..6be8cb10 100644 --- a/include/holoscan/core/argument_setter.hpp +++ b/include/holoscan/core/argument_setter.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -266,6 +266,7 @@ class ArgumentSetter { typename holoscan::type_info::derived_type>(arg_value); // Initialize the condition in case the condition created by // Fragment::make_condition() is added to the operator as an argument. 
+ // TODO: would like this to be assigned to the same entity as the operator if (converted_value) { converted_value->initialize(); } param = converted_value; @@ -281,6 +282,7 @@ class ArgumentSetter { typename holoscan::type_info::derived_type>(arg_value); // Initialize the resource in case the resource created by // Fragment::make_resource() is added to the operator as an argument. + // TODO: would like this to be assigned to the same entity as the operator if (converted_value) { converted_value->initialize(); } param = converted_value; @@ -389,6 +391,7 @@ class ArgumentSetter { // Initialize the condition in case the condition created by // Fragment::make_condition() is added to the operator as an argument. + // TODO: would like this to be assigned to the same entity as the operator if (condition) { condition->initialize(); } converted_value.push_back(condition); @@ -411,6 +414,7 @@ class ArgumentSetter { // Initialize the resource in case the resource created by // Fragment::make_resource() is added to the operator as an argument. + // TODO: would like this to be assigned to the same entity as the operator if (resource) { resource->initialize(); } converted_value.push_back(resource); diff --git a/include/holoscan/core/codecs.hpp b/include/holoscan/core/codecs.hpp index 77a260fd..8e26ac88 100644 --- a/include/holoscan/core/codecs.hpp +++ b/include/holoscan/core/codecs.hpp @@ -171,44 +171,45 @@ struct codec { ////////////////////////////////////////////////////////////////////////////////////////////////// // Codec type 4: serialization of std::vector only // -// Note: Have to serialize std::vector differently than the numeric types due to how it is -// packed. This is currently inefficient as 8x the data size is transferred due to bit->byte -// conversion. Could revisit packing the data more efficiently if needed, but likely not -// worth it if only a small length vector is being sent. 
+// Performs bit-packing/unpacking to/from uint8_t type for more efficient serialization. // codec of std::vector template <> struct codec> { static expected serialize(const std::vector& data, Endpoint* endpoint) { - ContiguousDataHeader header; - header.size = data.size(); - header.bytes_per_element = header.size > 0 ? sizeof(data[0]) : 1; - auto size = endpoint->write_trivial_type(&header); + size_t total_bytes = 0; + size_t num_bits = data.size(); + size_t num_bytes = (num_bits + 7) / 8; // the number of bytes needed to store the bits + auto size = endpoint->write_trivial_type(&num_bits); if (!size) { return forward_error(size); } - size_t total_bytes = size.value(); - expected size2; - for (const auto b : data) { - bool bool_b = b; - size2 = endpoint->write_trivial_type(&bool_b); - if (!size2) { return forward_error(size2); } + total_bytes += size.value(); + std::vector packed_data(num_bytes, 0); // Create a vector to store the packed data + for (size_t i = 0; i < num_bits; ++i) { + if (data[i]) { + packed_data[i / 8] |= (1 << (i % 8)); // Pack the bits into the bytes + } } - total_bytes += size2.value() * header.size; + auto result = endpoint->write(packed_data.data(), packed_data.size()); + if (!result) { return forward_error(result); } + total_bytes += result.value(); return total_bytes; } static expected, RuntimeError> deserialize(Endpoint* endpoint) { - ContiguousDataHeader header; - auto header_size = endpoint->read_trivial_type(&header); - if (!header_size) { return forward_error(header_size); } - std::vector data; - data.resize(header.size); - expected result; - for (auto&& b : data) { - bool bool_b; - result = endpoint->read_trivial_type(&bool_b); - if (!result) { return forward_error(result); } - b = bool_b; + size_t num_bits; + auto size = endpoint->read_trivial_type(&num_bits); + if (!size) { return forward_error(size); } + size_t num_bytes = + (num_bits + 7) / 8; // Calculate the number of bytes needed to store the bits + std::vector 
packed_data(num_bytes, 0); // Create a vector to store the packed data + auto result = endpoint->read(packed_data.data(), packed_data.size()); + if (!result) { return forward_error(result); } + std::vector data(num_bits, false); // Create a vector to store the unpacked data + for (size_t i = 0; i < num_bits; ++i) { + if (packed_data[i / 8] & (1 << (i % 8))) { // Unpack the bits from the bytes + data[i] = true; + } } return data; } diff --git a/include/holoscan/core/component.hpp b/include/holoscan/core/component.hpp index 9afe89f1..4e41e932 100644 --- a/include/holoscan/core/component.hpp +++ b/include/holoscan/core/component.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -23,6 +23,7 @@ #include #include #include +#include #include #include @@ -31,11 +32,11 @@ #include "./arg.hpp" #include "./forward_def.hpp" -#define HOLOSCAN_COMPONENT_FORWARD_TEMPLATE() \ - template > && \ - (std::is_same_v> || \ +#define HOLOSCAN_COMPONENT_FORWARD_TEMPLATE() \ + template > && \ + (std::is_same_v> || \ std::is_same_v>)>> #define HOLOSCAN_COMPONENT_FORWARD_ARGS(class_name) \ HOLOSCAN_COMPONENT_FORWARD_TEMPLATE() \ @@ -49,6 +50,10 @@ namespace holoscan { +namespace gxf { +class GXFExecutor; +} // namespace gxf + /** * @brief Base class for all components. * @@ -56,9 +61,9 @@ namespace holoscan { * `holoscan::Condition`, and `holoscan::Resource`. * It is used to define the common interface for all components. */ -class Component { +class ComponentBase { public: - Component() = default; + ComponentBase() = default; /** * @brief Construct a new Component object. @@ -66,12 +71,12 @@ class Component { * @param args The arguments to be passed to the component. 
*/ HOLOSCAN_COMPONENT_FORWARD_TEMPLATE() - explicit Component(ArgT&& arg, ArgsT&&... args) { + explicit ComponentBase(ArgT&& arg, ArgsT&&... args) { add_arg(std::forward(arg)); (add_arg(std::forward(args)), ...); } - virtual ~Component() = default; + virtual ~ComponentBase() = default; /** * @brief Get the identifier of the component. @@ -164,7 +169,18 @@ class Component { std::string description() const; protected: - friend class Executor; + friend class holoscan::Executor; + // Make GXFExecutor a friend class so it can call protected initialization methods + friend class holoscan::gxf::GXFExecutor; + + // Make Fragment a friend class so it can call reset_graph_entities + friend class holoscan::Fragment; + + /// Update parameters based on the specified arguments + void update_params_from_args(std::unordered_map& params); + + /// Reset the GXF GraphEntity of any arguments that have one + virtual void reset_graph_entities(); int64_t id_ = -1; ///< The ID of the component. std::string name_ = ""; ///< Name of the component @@ -172,6 +188,32 @@ class Component { std::vector args_; ///< List of arguments }; +/** + * @brief Common class for all non-Operator components + * + * This class is the base class for all non-Operator components including + * `holoscan::Condition`, `holoscan::Resource`, `holoscan::NetworkContext`, `holoscan::Scheduler` + * It is used to define the common interface for all components. + * + * `holoscan::Operator` does not inherit from this class as it uses `holosccan::OperatorSpec` + * instead of `holoscan::ComponentSpec`. 
+ */ +class Component : public ComponentBase { + protected: + // Make GXFExecutor a friend class so it can call protected initialization methods + friend class holoscan::gxf::GXFExecutor; + + using ComponentBase::update_params_from_args; + + /// Update parameters based on the specified arguments + void update_params_from_args(); + + /// Set the parameters based on defaults (sets GXF parameters for GXF operators) + virtual void set_parameters() {} + + std::shared_ptr spec_; ///< The component specification. +}; + } // namespace holoscan #endif /* HOLOSCAN_CORE_COMPONENT_HPP */ diff --git a/include/holoscan/core/condition.hpp b/include/holoscan/core/condition.hpp index dfcacd37..dd9f513d 100644 --- a/include/holoscan/core/condition.hpp +++ b/include/holoscan/core/condition.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -95,6 +95,9 @@ namespace holoscan { +// Forward declarations +class Operator; + enum class ConditionType { kNone, ///< No condition kMessageAvailable, ///< Default for input port (nvidia::gxf::MessageAvailableSchedulingTerm) @@ -206,8 +209,12 @@ class Condition : public Component { YAML::Node to_yaml_node() const override; protected: - std::shared_ptr spec_; ///< The component specification. - bool is_initialized_ = false; ///< Whether the condition is initialized. + // Add friend classes that can call reset_graph_entites + friend class holoscan::Operator; + + using Component::reset_graph_entities; + + bool is_initialized_ = false; ///< Whether the condition is initialized. 
}; } // namespace holoscan diff --git a/include/holoscan/core/conditions/gxf/asynchronous.hpp b/include/holoscan/core/conditions/gxf/asynchronous.hpp index 999e0c00..8f133168 100644 --- a/include/holoscan/core/conditions/gxf/asynchronous.hpp +++ b/include/holoscan/core/conditions/gxf/asynchronous.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -71,6 +71,8 @@ class AsynchronousCondition : public gxf::GXFCondition { */ AsynchronousEventState event_state() const; + nvidia::gxf::AsynchronousSchedulingTerm* get() const; + private: AsynchronousEventState event_state_{AsynchronousEventState::READY}; }; diff --git a/include/holoscan/core/conditions/gxf/boolean.hpp b/include/holoscan/core/conditions/gxf/boolean.hpp index 27782e50..16002b5d 100644 --- a/include/holoscan/core/conditions/gxf/boolean.hpp +++ b/include/holoscan/core/conditions/gxf/boolean.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -39,6 +39,8 @@ class BooleanCondition : public gxf::GXFCondition { void setup(ComponentSpec& spec) override; + nvidia::gxf::BooleanSchedulingTerm* get() const; + private: Parameter enable_tick_; }; diff --git a/include/holoscan/core/conditions/gxf/downstream_affordable.hpp b/include/holoscan/core/conditions/gxf/downstream_affordable.hpp index 6e5be644..ee2cf141 100644 --- a/include/holoscan/core/conditions/gxf/downstream_affordable.hpp +++ b/include/holoscan/core/conditions/gxf/downstream_affordable.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -44,7 +44,11 @@ class DownstreamMessageAffordableCondition : public gxf::GXFCondition { void initialize() override { GXFCondition::initialize(); } + // TODO(GXF4): Expected setTransmitter(Handle value) + // TODO(GXF4): Expected setMinSize(uint64_t value) + private: + // TODO(GXF4): this is now a std::set> transmitters_ Parameter> transmitter_; Parameter min_size_; }; diff --git a/include/holoscan/core/conditions/gxf/message_available.hpp b/include/holoscan/core/conditions/gxf/message_available.hpp index 95519cac..1ea924fb 100644 --- a/include/holoscan/core/conditions/gxf/message_available.hpp +++ b/include/holoscan/core/conditions/gxf/message_available.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -39,21 +39,24 @@ class MessageAvailableCondition : public gxf::GXFCondition { void receiver(std::shared_ptr receiver) { receiver_ = receiver; } std::shared_ptr receiver() { return receiver_.get(); } - void min_size(size_t min_size) { min_size_ = min_size; } + void min_size(uint64_t min_size); size_t min_size() { return min_size_; } - void front_stage_max_size(size_t front_stage_max_size) { - front_stage_max_size_ = front_stage_max_size; - } + void front_stage_max_size(size_t front_stage_max_size); size_t front_stage_max_size() { return front_stage_max_size_; } void setup(ComponentSpec& spec) override; void initialize() override { GXFCondition::initialize(); } + nvidia::gxf::MessageAvailableSchedulingTerm* get() const; + + // TODO(GXF4): Expected setReceiver(Handle value) + private: + // TODO(GXF4): this is now a std::set> receivers_ Parameter> receiver_; - Parameter min_size_; + Parameter min_size_; Parameter front_stage_max_size_; }; diff --git a/include/holoscan/core/conditions/gxf/periodic.hpp b/include/holoscan/core/conditions/gxf/periodic.hpp index 659fe22c..feb8a062 100644 --- a/include/holoscan/core/conditions/gxf/periodic.hpp +++ b/include/holoscan/core/conditions/gxf/periodic.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -115,6 +115,8 @@ class PeriodicCondition : public gxf::GXFCondition { */ int64_t last_run_timestamp(); + nvidia::gxf::PeriodicSchedulingTerm* get() const; + private: Parameter recess_period_; int64_t recess_period_ns_ = 0; diff --git a/include/holoscan/core/dataflow_tracker.hpp b/include/holoscan/core/dataflow_tracker.hpp index 0d22f397..75f19ce2 100644 --- a/include/holoscan/core/dataflow_tracker.hpp +++ b/include/holoscan/core/dataflow_tracker.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -252,7 +252,7 @@ class DataFlowTracker { uint64_t num_last_messages_to_discard_ = kDefaultNumLastMessagesToDiscard; ///< The number of messages to discard at the end of the - ///< execution of an application graph. + ///< execution of an application graph. bool is_file_logging_enabled_ = false; ///< The variable to indicate if file logging is enabled. std::string logger_filename_; ///< The name of the log file. diff --git a/include/holoscan/core/domain/tensor.hpp b/include/holoscan/core/domain/tensor.hpp index cc1e699c..81261612 100644 --- a/include/holoscan/core/domain/tensor.hpp +++ b/include/holoscan/core/domain/tensor.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -26,52 +26,22 @@ #include #include -namespace holoscan { - -/** - * @brief Class that wraps a DLManagedTensor with a memory data reference. - * - * This class is used to wrap a DLManagedTensor (`tensor`) with a shared pointer to the memory - * data (`memory_ref`). This is useful when the memory data is not owned by the DLManagedTensor, but - * its lifetime is tied to the DLManagedTensor. - * - * In Holoscan SDK, this class is used to wrap a DLManagedTensor that is created by other libraries, - * such as CuPy, with a shared pointer to the memory data so that the memory data is reference - * counted and can be safely used/destroyed by the Holoscan SDK. - */ -struct DLManagedTensorCtx { - DLManagedTensor tensor; ///< The DLManagedTensor to wrap. - std::shared_ptr memory_ref; ///< The memory data reference. -}; +#include -/** - * @brief Class to wrap the deleter of a DLManagedTensor. - * - * This class is used with DLManagedTensorCtx class to wrap the DLManagedTensor. - * - * A shared pointer to this class in DLManagedTensorCtx class is used as the deleter of the - * DLManagedTensorCtx::memory_ref - * - * When the last reference to the DLManagedTensorCtx object is released, - * DLManagedTensorCtx::memory_ref will also be destroyed, which will call the deleter function - * of the DLManagedTensor object. - * - */ -class DLManagedMemoryBuffer { - public: - explicit DLManagedMemoryBuffer(DLManagedTensor* self); - ~DLManagedMemoryBuffer(); +namespace holoscan { - private: - DLManagedTensor* self_ = nullptr; -}; +// TODO: keep old class name as an alias? 
+// also differs in that DLManagedTensorContext has additional members dl_shape and dl_strides +// using DLManagedTensorCtx = nvidia::gxf::DLManagedTensorContext; +using DLManagedTensorContext = nvidia::gxf::DLManagedTensorContext; +using DLManagedMemoryBuffer = nvidia::gxf::DLManagedMemoryBuffer; /** * @brief Tensor class. * * A Tensor is a multi-dimensional array of elements of a single data type. * - * The Tensor class is a wrapper around the DLManagedTensorCtx struct that holds the + * The Tensor class is a wrapper around the DLManagedTensorContext struct that holds the * DLManagedTensor object. * (https://dmlc.github.io/dlpack/latest/c_api.html#_CPPv415DLManagedTensor). * @@ -83,11 +53,11 @@ class Tensor { Tensor() = default; /** - * @brief Construct a new Tensor from an existing DLManagedTensorCtx. + * @brief Construct a new Tensor from an existing DLManagedTensorContext. * - * @param ctx A shared pointer to the DLManagedTensorCtx to be used in Tensor construction. + * @param ctx A shared pointer to the DLManagedTensorContext to be used in Tensor construction. */ - explicit Tensor(std::shared_ptr& ctx) : dl_ctx_(ctx) {} + explicit Tensor(std::shared_ptr& ctx) : dl_ctx_(ctx) {} /** * @brief Construct a new Tensor from an existing DLManagedTensor pointer. @@ -187,14 +157,14 @@ class Tensor { DLManagedTensor* to_dlpack(); /** - * @brief Get the internal DLManagedTensorCtx of the Tensor. + * @brief Get the internal DLManagedTensorContext of the Tensor. * - * @return A shared pointer to the Tensor's DLManagedTensorCtx. + * @return A shared pointer to the Tensor's DLManagedTensorContext. */ - std::shared_ptr& dl_ctx() { return dl_ctx_; } + std::shared_ptr& dl_ctx() { return dl_ctx_; } protected: - std::shared_ptr dl_ctx_; ///< The DLManagedTensorCtx object. + std::shared_ptr dl_ctx_; ///< The DLManagedTensorContext object. 
}; /** diff --git a/include/holoscan/core/execution_context.hpp b/include/holoscan/core/execution_context.hpp index d0ac3d6a..11221ec3 100644 --- a/include/holoscan/core/execution_context.hpp +++ b/include/holoscan/core/execution_context.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -40,14 +40,14 @@ class ExecutionContext { * * @return The pointer to the input context. */ - InputContext* input() const { return input_context_;} + InputContext* input() const { return input_context_; } /** * @brief Get the output context. * * @return The pointer to the output context. */ - OutputContext* output() const { return output_context_;} + OutputContext* output() const { return output_context_; } /** * @brief Get the context. diff --git a/include/holoscan/core/executor.hpp b/include/holoscan/core/executor.hpp index a8623f70..9fe75006 100644 --- a/include/holoscan/core/executor.hpp +++ b/include/holoscan/core/executor.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -111,6 +111,30 @@ class Executor { */ virtual std::shared_ptr extension_manager() { return extension_manager_; } + /** + * @brief Set the exception. + * + * This method is called by the framework to store the exception that occurred during the + * execution of the fragment. + * If the exception is set, this exception is rethrown by the framework after the execution of + * the fragment. + * + * @param e The exception to store. 
+ */ + void exception(const std::exception_ptr& e) { exception_ = e; } + + /** + * @brief Get the stored exception. + * + * This method is called by the framework to get the stored exception that occurred during the + * execution of the fragment. + * If the exception is set, this exception is rethrown by the framework after the execution of + * the fragment. + * + * @return The reference to the stored exception. + */ + const std::exception_ptr& exception() { return exception_; } + protected: friend class Fragment; // make Fragment a friend class to access protected members of // Executor (add_receivers()). @@ -212,6 +236,7 @@ class Executor { Fragment* fragment_ = nullptr; ///< The fragment of the executor. void* context_ = nullptr; ///< The context. std::shared_ptr extension_manager_; ///< The extension manager. + std::exception_ptr exception_; ///< The stored exception. }; } // namespace holoscan diff --git a/include/holoscan/core/executors/gxf/gxf_executor.hpp b/include/holoscan/core/executors/gxf/gxf_executor.hpp index 5fe5025f..803c4c7b 100644 --- a/include/holoscan/core/executors/gxf/gxf_executor.hpp +++ b/include/holoscan/core/executors/gxf/gxf_executor.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -27,12 +27,25 @@ #include #include #include +#include +#include +#include #include #include "../../app_driver.hpp" #include "../../executor.hpp" #include "../../graph.hpp" #include "../../gxf/gxf_extension_manager.hpp" +#include "gxf/app/graph_entity.hpp" + +namespace holoscan { + +// Forward declarations +class Arg; +class Condition; +class Resource; + +} // namespace holoscan namespace holoscan::gxf { @@ -109,9 +122,10 @@ class GXFExecutor : public holoscan::Executor { * * @param fragment The fragment that this operator belongs to. * @param gxf_context The GXF context. - * @param eid The GXF entity ID. + * @param eid The GXF entity ID. (Deprecated: now ignored. The eid is obtained from op instead) * @param io_spec The input port specification. * @param bind_port If true, bind the port to the existing GXF Receiver component. Otherwise, + * @param op The operator to which this port is being added. * create a new GXF Receiver component. */ static void create_input_port(Fragment* fragment, gxf_context_t gxf_context, gxf_uid_t eid, @@ -133,9 +147,10 @@ class GXFExecutor : public holoscan::Executor { * * @param fragment The fragment that this operator belongs to. * @param gxf_context The GXF context. - * @param eid The GXF entity ID. + * @param eid The GXF entity ID. (Deprecated: now ignored. The eid is obtained from op instead) * @param io_spec The output port specification. * @param bind_port If true, bind the port to the existing GXF Transmitter component. Otherwise, + * @param op The operator to which this port is being added. * create a new GXF Transmitter component. 
*/ static void create_output_port(Fragment* fragment, gxf_context_t gxf_context, gxf_uid_t eid, @@ -193,7 +208,7 @@ class GXFExecutor : public holoscan::Executor { bool initialize_gxf_graph(OperatorGraph& graph); void activate_gxf_graph(); - bool run_gxf_graph(); + void run_gxf_graph(); bool connection_items(std::vector>& connection_items); void add_operator_to_entity_group(gxf_context_t context, gxf_uid_t entity_group_gid, @@ -222,7 +237,122 @@ class GXFExecutor : public holoscan::Executor { std::vector> connection_items_; /// The list of implicit broadcast entities to be added to the network entity group. - std::list implicit_broadcast_entities_; + std::list> implicit_broadcast_entities_; + + std::shared_ptr util_entity_; + std::shared_ptr gpu_device_entity_; + std::shared_ptr scheduler_entity_; + std::shared_ptr network_context_entity_; + std::shared_ptr connections_entity_; + + private: + // Map of connections indexed by source port uid and stores a pair of the target operator name + // and target port name + using TargetPort = std::pair; + using TargetsInfo = std::tuple>; + using TargetConnectionsMapType = std::unordered_map; + + using BroadcastEntityMapType = std::unordered_map< + holoscan::OperatorGraph::NodeType, + std::unordered_map>>; + + /** @brief Initialize all GXF Resources in the map and assign them to graph_entity. + * + * Utility function grouping common code across `initialize_network_context` and + * `intialize_scheduler`. + * + * @param resources Unordered map of GXF resources. + * @param gxf_uid_t The entity to which the resources will be assigned. + * @param graph_entity nvidia::gxf::GraphEntity pointer for the resources. + * + */ + void initialize_gxf_resources( + std::unordered_map>& resources, gxf_uid_t eid, + std::shared_ptr graph_entity); + + /** @brief Create a GXF Connection component between a transmitter and receiver. + * + * The Connection object created will belong to connections_entity_. 
+ * + * @param source_cid The GXF Transmitter component ID. + * @param target_cid The GXF Receiver component ID. + * @return The GXF status code. + */ + gxf_result_t add_connection(gxf_uid_t source_cid, gxf_uid_t target_cid); + + /** @brief Create Broadcast components and add their nvidia::gxf::GraphEntity to + * broadcast_entites. + * + * This is a helper method that gets called by initialize_fragment. + * + * Creates broadcast components for any output ports of `op` that connect to more than one + * input port. + * + * Does not add any transmitter to the Broadcast entity. The transmitters will be added later + * when the incoming edges to the respective operators are processed. + * + * Any connected ports of the operator are removed from port_map_val. + * + * @param op The operator to create broadcast components for. + * @param broadcast_entities The mapping of broadcast graph entities. + * @param connections TODO + */ + void create_broadcast_components(holoscan::OperatorGraph::NodeType op, + BroadcastEntityMapType& broadcast_entities, + const TargetConnectionsMapType& connections); + + /** @brief Add connection between the prior Broadcast component and the current operator's input + * port(s). + * + * Creates a transmitter on the broadcast component and connects it to the input port of `op`. + * + * Any connected ports of the operator are removed from port_map_val. + * + * @param broadcast_entities The mapping of broadcast graph entities. + * @param op The broadcast entity's output will connect to the input port of this operator. + * @param prev_op The operator connected to the input of the broadcast entity. The capacity + * and policy of the transmitter added to the broadcast entity will be copied from the transmitter + * on the broadcasted output port of this operator. + * @param port_map_val The port mapping between prev_op and op. 
+ */ + void connect_broadcast_to_previous_op(const BroadcastEntityMapType& broadcast_entities, + holoscan::OperatorGraph::NodeType op, + holoscan::OperatorGraph::NodeType prev_op, + holoscan::OperatorGraph::EdgeDataType port_map_val); + + /// Indicate whether this executor was created by a Holoscan Application. + bool is_holoscan() const; + + /// Helper function that adds a GXF Condition to the specified graph entity + bool add_condition_to_graph_entity(std::shared_ptr condition, + std::shared_ptr graph_entity); + + /// Helper function that adds a GXF Resource to the specified graph entity. + bool add_resource_to_graph_entity(std::shared_ptr resource, + std::shared_ptr graph_entity); + + /* @brief Add an IOspec connector resource and any conditions to the graph entity. + * + * Helper function for add_component_arg_to_graph_entity. + * + * @param io_spec Pointer to the IOSpec object to update. + * @param graph_entity The graph entity this IOSpec will be associated with. + * @return true if the IOSpec's components were all successfully added to the graph entity. + */ + bool add_iospec_to_graph_entity(IOSpec* io_spec, + std::shared_ptr graph_entity); + + /* @brief Add any GXF resources and conditions present in the arguments to the provided graph + * entity. + * + * Handles Component, Resource and IOSpec arguments and vectors of each of these. + * + * @param io_spec Pointer to the IOSpec object to update. + * @param graph_entity The graph entity this IOSpec will be associated with. + * @return true if the IOSpec's components were all successfully added to the graph entity. 
+ */ + void add_component_args_to_graph_entity(std::vector& args, + std::shared_ptr graph_entity); }; } // namespace holoscan::gxf diff --git a/include/holoscan/core/executors/gxf/gxf_parameter_adaptor.hpp b/include/holoscan/core/executors/gxf/gxf_parameter_adaptor.hpp index f6440050..cedab034 100644 --- a/include/holoscan/core/executors/gxf/gxf_parameter_adaptor.hpp +++ b/include/holoscan/core/executors/gxf/gxf_parameter_adaptor.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -135,30 +135,20 @@ class GXFParameterAdaptor { break; } case ArgElementType::kInt8: { - HOLOSCAN_LOG_ERROR("GXF does not support int8_t parameter for key '{}'", key); - return GXF_FAILURE; + if constexpr (std::is_same_v) { + return GxfParameterSetInt8(context, uid, key, value); + } + break; } case ArgElementType::kUnsigned8: { - // GXF Doesn't support uint8_t parameter so use a workaround with - // GxfParameterSetFromYamlNode. if constexpr (std::is_same_v) { - YAML::Node yaml_node; - // uint8_t is not supported natively by yaml-cpp so push it as a uint32_t - // so that GXF can handle it. - yaml_node.push_back(static_cast(value)); - YAML::Node value_node = yaml_node[0]; - return GxfParameterSetFromYamlNode(context, uid, key, &value_node, ""); + return GxfParameterSetUInt8(context, uid, key, value); } break; } case ArgElementType::kInt16: { - // GXF Doesn't support int16_t parameter so use a workaround with - // GxfParameterSetFromYamlNode. 
if constexpr (std::is_same_v) { - YAML::Node yaml_node; - yaml_node.push_back(value); - YAML::Node value_node = yaml_node[0]; - return GxfParameterSetFromYamlNode(context, uid, key, &value_node, ""); + return GxfParameterSetInt16(context, uid, key, value); } break; } @@ -310,35 +300,9 @@ class GXFParameterAdaptor { } case ArgContainerType::kVector: { switch (arg_type.element_type()) { - case ArgElementType::kInt8: { - HOLOSCAN_LOG_ERROR( - "GXF does not support std::vector parameter " - "for key '{}'", - key); - return GXF_FAILURE; - } - case ArgElementType::kUnsigned8: { - // GXF Doesn't support std::vector parameter so use a workaround with - // GxfParameterSetFromYamlNode. - if constexpr (std::is_same_v>) { - // Create vector of Handles - YAML::Node yaml_node; - for (auto& item : value) { - // uint8_t is not supported natively by yaml-cpp so push it as a uint32_t - // so that GXF can handle it. - yaml_node.push_back(static_cast(item)); - } - return GxfParameterSetFromYamlNode(context, uid, key, &yaml_node, ""); - } else if constexpr (std::is_same_v>>) { - YAML::Node yaml_node; - for (const std::vector& vec : value) { - for (uint32_t item : vec) { yaml_node.push_back(item); } - } - return GxfParameterSetFromYamlNode(context, uid, key, &yaml_node, ""); - } - break; - } case ArgElementType::kBoolean: + case ArgElementType::kInt8: + case ArgElementType::kUnsigned8: case ArgElementType::kInt16: case ArgElementType::kUnsigned16: case ArgElementType::kInt32: @@ -350,11 +314,13 @@ class GXFParameterAdaptor { case ArgElementType::kComplex64: case ArgElementType::kComplex128: case ArgElementType::kString: { - // GXF Doesn't support std::vector parameter so use a workaround with - // GxfParameterSetFromYamlNode. + // GXF Doesn't support std::vector or std::vector> parameter + // types so use a workaround with GxfParameterSetFromYamlNode. 
if constexpr (holoscan::is_one_of_v< typename holoscan::type_info::element_type, bool, + int8_t, + uint8_t, int16_t, uint16_t, int32_t, diff --git a/include/holoscan/core/forward_def.hpp b/include/holoscan/core/forward_def.hpp index 2d3ced54..d1b803fb 100644 --- a/include/holoscan/core/forward_def.hpp +++ b/include/holoscan/core/forward_def.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -36,6 +36,7 @@ class CodecRegistry; class Condition; enum class ConditionType; class Config; +class ComponentBase; class Component; class ComponentSpec; class Endpoint; @@ -98,6 +99,7 @@ class UcxContext; // Schedulers enum class SchedulerType; +class EventBasedScheduler; class GreedyScheduler; class MultiThreadScheduler; @@ -128,6 +130,7 @@ class Receiver; class RealtimeClock; class SerializationBuffer; class StdComponentSerializer; +class StdEntitySerializer; class Transmitter; class UcxComponentSerializer; class UcxEntitySerializer; @@ -136,7 +139,6 @@ class UcxReceiver; class UcxSerializationBuffer; class UcxTransmitter; class UnboundedAllocator; -class VideoStreamSerializer; // Domain objects class Tensor; diff --git a/include/holoscan/core/fragment.hpp b/include/holoscan/core/fragment.hpp index 4a9706c0..abd11ae7 100644 --- a/include/holoscan/core/fragment.hpp +++ b/include/holoscan/core/fragment.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -39,6 +39,11 @@ namespace holoscan { +namespace gxf { +// Forward declarations +class GXFExecutor; +} // namespace gxf + // key = operator name, value = (input port names, output port names, multi-receiver names) using FragmentPortMap = std::unordered_map>> - std::shared_ptr make_operator(const StringT& name, ArgsT&&... args) { + std::shared_ptr make_operator(StringT name, ArgsT&&... args) { HOLOSCAN_LOG_DEBUG("Creating operator '{}'", name); auto op = std::make_shared(std::forward(args)...); op->name(name); @@ -319,7 +324,7 @@ class Fragment { */ template >> - std::shared_ptr make_resource(const StringT& name, ArgsT&&... args) { + std::shared_ptr make_resource(StringT name, ArgsT&&... args) { HOLOSCAN_LOG_DEBUG("Creating resource '{}'", name); auto resource = std::make_shared(std::forward(args)...); resource->name(name); @@ -356,7 +361,7 @@ class Fragment { */ template >> - std::shared_ptr make_condition(const StringT& name, ArgsT&&... args) { + std::shared_ptr make_condition(StringT name, ArgsT&&... args) { HOLOSCAN_LOG_DEBUG("Creating condition '{}'", name); auto condition = std::make_shared(std::forward(args)...); condition->name(name); @@ -394,7 +399,7 @@ class Fragment { */ template >> - std::shared_ptr make_scheduler(const StringT& name, ArgsT&&... args) { + std::shared_ptr make_scheduler(StringT name, ArgsT&&... args) { HOLOSCAN_LOG_DEBUG("Creating scheduler '{}'", name); auto scheduler = std::make_shared(std::forward(args)...); scheduler->name(name); @@ -432,7 +437,7 @@ class Fragment { */ template >> - std::shared_ptr make_network_context(const StringT& name, ArgsT&&... args) { + std::shared_ptr make_network_context(StringT name, ArgsT&&... 
args) { HOLOSCAN_LOG_DEBUG("Creating network context '{}'", name); auto network_context = std::make_shared(std::forward(args)...); network_context->name(name); @@ -643,6 +648,7 @@ class Fragment { protected: friend class Application; // to access 'scheduler_' in Application friend class AppDriver; + friend class gxf::GXFExecutor; template std::shared_ptr make_config(ArgsT&&... args) { @@ -664,11 +670,16 @@ class Fragment { return std::make_unique(std::forward(args)...); } + /// Cleanup helper that will by called by GXFExecutor prior to GxfContextDestroy. + void reset_graph_entities(); + + // Note: Maintain the order of declarations (executor_ and graph_) to ensure proper destruction + // of the executor's context. std::string name_; ///< The name of the fragment. Application* app_ = nullptr; ///< The application that this fragment belongs to. std::shared_ptr config_; ///< The configuration of the fragment. - std::unique_ptr graph_; ///< The graph of the fragment. std::shared_ptr executor_; ///< The executor for the fragment. + std::unique_ptr graph_; ///< The graph of the fragment. std::shared_ptr scheduler_; ///< The scheduler used by the executor std::shared_ptr network_context_; ///< The network_context used by the executor std::shared_ptr data_flow_tracker_; ///< The DataFlowTracker for the fragment diff --git a/include/holoscan/core/gxf/entity.hpp b/include/holoscan/core/gxf/entity.hpp index 2bd1af38..7bb0e9db 100644 --- a/include/holoscan/core/gxf/entity.hpp +++ b/include/holoscan/core/gxf/entity.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -28,7 +28,7 @@ #pragma GCC diagnostic pop #include "gxf/multimedia/video.hpp" -#include "holoscan/core/gxf/gxf_tensor.hpp" +#include "gxf/std/tensor.hpp" #include "holoscan/core/gxf/gxf_utils.hpp" #include "holoscan/core/type_traits.hpp" @@ -67,38 +67,24 @@ class Entity : public nvidia::gxf::Entity { typename = std::enable_if_t && holoscan::is_one_of_v>> std::shared_ptr get(const char* name = nullptr, bool log_errors = true) const { - bool is_holoscan_gxf_tensor = true; // We should use nullptr as a default name because In GXF, 'nullptr' should be used with // GxfComponentFind() if we want to get the first component of the given type. - // We first try to get holoscan::gxf::GXFTensor from GXF Entity. + // Try to get nvidia::gxf::Tensor from GXF Entity. gxf_tid_t tid; auto tid_result = - GxfComponentTypeId(context(), nvidia::TypenameAsString(), &tid); + GxfComponentTypeId(context(), nvidia::TypenameAsString(), &tid); if (tid_result != GXF_SUCCESS) { - if (log_errors) { HOLOSCAN_LOG_ERROR("Unable to get component type id: {}", tid_result); } + if (log_errors) { + HOLOSCAN_LOG_ERROR( + "Unable to get component type id from 'nvidia::gxf::Tensor' (error code: {})", + tid_result); + } return nullptr; } gxf_uid_t cid; auto cid_result = GxfComponentFind(context(), eid(), tid, name, nullptr, &cid); - if (cid_result != GXF_SUCCESS) { - // Then, we try to get nvidia::gxf::Tensor from GXF Entity. 
- tid_result = - GxfComponentTypeId(context(), nvidia::TypenameAsString(), &tid); - if (tid_result != GXF_SUCCESS) { - if (log_errors) { - HOLOSCAN_LOG_ERROR( - "Unable to get component type id from 'nvidia::gxf::Tensor' (error code: {})", - tid_result); - } - return nullptr; - } - - cid_result = GxfComponentFind(context(), eid(), tid, name, nullptr, &cid); - is_holoscan_gxf_tensor = false; - } - if (cid_result != GXF_SUCCESS) { if (log_errors) { HOLOSCAN_LOG_ERROR("Unable to find component from the name '{}' (error code: {})", @@ -108,21 +94,19 @@ class Entity : public nvidia::gxf::Entity { return nullptr; } - if (is_holoscan_gxf_tensor) { - // We don't need to create DLManagedTensorCtx struct again because it is already created in - // GXFTensor. (~150ns) - auto handle = nvidia::gxf::Handle::Create(context(), cid); - auto tensor = handle->get()->as_tensor(); - return tensor; - } else { - // Create a holoscan::Tensor object from the newly constructed GXF Tensor object. (~680 ns) - auto handle = nvidia::gxf::Handle::Create(context(), cid); - // Mutex-protected conversion (Issue 4272363) - auto gxf_tensor = holoscan::gxf::GXFTensor(*handle->get(), cid); - auto tensor = gxf_tensor.as_tensor(); - return tensor; + // Create a holoscan::Tensor object from the newly constructed GXF Tensor object. 
(~680 ns) + auto handle = nvidia::gxf::Handle::Create(context(), cid); + + auto maybe_dl_ctx = (*handle->get()).toDLManagedTensorContext(); + if (!maybe_dl_ctx) { + HOLOSCAN_LOG_ERROR( + "Failed to get std::shared_ptr from nvidia::gxf::Tensor"); + return nullptr; } + std::shared_ptr tensor = std::make_shared(maybe_dl_ctx.value()); + return tensor; } + // Adds a component with given type template && @@ -130,16 +114,17 @@ class Entity : public nvidia::gxf::Entity { void add(std::shared_ptr& data, const char* name = nullptr) { gxf_tid_t tid; HOLOSCAN_GXF_CALL_FATAL( - GxfComponentTypeId(context(), nvidia::TypenameAsString(), &tid)); + GxfComponentTypeId(context(), nvidia::TypenameAsString(), &tid)); gxf_uid_t cid; HOLOSCAN_GXF_CALL_FATAL(GxfComponentAdd(context(), eid(), tid, name, &cid)); - auto handle = nvidia::gxf::Handle::Create(context(), cid); - holoscan::gxf::GXFTensor* tensor_ptr = handle->get(); + auto handle = nvidia::gxf::Handle::Create(context(), cid); + nvidia::gxf::Tensor* tensor_ptr = handle->get(); - // Copy the member data (std::shared_ptr) from the Tensor to GXFTensor - *tensor_ptr = GXFTensor(data->dl_ctx()); + // Copy the member data (std::shared_ptr) from the Tensor to the + // nvidia::gxf::Tensor + *tensor_ptr = nvidia::gxf::Tensor(data->dl_ctx()); } }; diff --git a/include/holoscan/core/gxf/gxf_component.hpp b/include/holoscan/core/gxf/gxf_component.hpp index 1f4a1d9f..2763c247 100644 --- a/include/holoscan/core/gxf/gxf_component.hpp +++ b/include/holoscan/core/gxf/gxf_component.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,8 +21,11 @@ #include #include +#include #include +#include +#include "../parameter.hpp" #include "./gxf_utils.hpp" namespace holoscan::gxf { @@ -49,31 +52,33 @@ class GXFComponent { std::string& gxf_cname() { return gxf_cname_; } void gxf_cname(const std::string& name) { gxf_cname_ = name; } - void* gxf_cptr() { return gxf_cptr_; } + std::shared_ptr gxf_graph_entity() { return gxf_graph_entity_; } - void gxf_initialize() { - if (gxf_context_ == nullptr) { - HOLOSCAN_LOG_ERROR("Initializing with null GXF context"); - return; - } - if (gxf_eid_ == 0) { - HOLOSCAN_LOG_ERROR("Initializing with null GXF eid"); - return; - } - - HOLOSCAN_GXF_CALL(GxfComponentTypeId(gxf_context_, gxf_typename(), &gxf_tid_)); - HOLOSCAN_GXF_CALL( - GxfComponentAdd(gxf_context_, gxf_eid_, gxf_tid_, gxf_cname().c_str(), &gxf_cid_)); - HOLOSCAN_GXF_CALL(GxfComponentPointer( - gxf_context_, gxf_cid_, gxf_tid_, reinterpret_cast(&gxf_cptr_))); + void gxf_graph_entity(std::shared_ptr graph_entity) { + gxf_graph_entity_ = graph_entity; } + void* gxf_cptr() { return gxf_cptr_; } + + nvidia::gxf::Handle gxf_component() { return gxf_component_; } + + void gxf_initialize(); + + /// Set a given parameter on the underlying GXF component + void set_gxf_parameter(const std::string& component_name, const std::string& key, + ParameterWrapper& param_wrap); + + void reset_gxf_graph_entity() { gxf_graph_entity_.reset(); } + protected: gxf_context_t gxf_context_ = nullptr; gxf_uid_t gxf_eid_ = 0; gxf_tid_t gxf_tid_ = {}; gxf_uid_t gxf_cid_ = 0; + std::shared_ptr gxf_graph_entity_; std::string gxf_cname_; + // TODO: remove gxf_cptr_ and use the Component Handle everywhere instead? 
+ nvidia::gxf::Handle gxf_component_; void* gxf_cptr_ = nullptr; }; diff --git a/include/holoscan/core/gxf/gxf_condition.hpp b/include/holoscan/core/gxf/gxf_condition.hpp index a3f1ec74..2a4d930e 100644 --- a/include/holoscan/core/gxf/gxf_condition.hpp +++ b/include/holoscan/core/gxf/gxf_condition.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -34,6 +34,8 @@ class GXFCondition : public holoscan::Condition, public gxf::GXFComponent { GXFCondition(const std::string& name, nvidia::gxf::SchedulingTerm* term); void initialize() override; + + void add_to_graph_entity(Operator* op); }; } // namespace holoscan::gxf diff --git a/include/holoscan/core/gxf/gxf_execution_context.hpp b/include/holoscan/core/gxf/gxf_execution_context.hpp index 1feba04e..99cc919c 100644 --- a/include/holoscan/core/gxf/gxf_execution_context.hpp +++ b/include/holoscan/core/gxf/gxf_execution_context.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -49,8 +49,7 @@ class GXFExecutionContext : public holoscan::ExecutionContext { * @param gxf_input_context The shared pointer to the GXFInputContext object. * @param gxf_output_context The shared pointer to the GXFOutputContext object. 
*/ - GXFExecutionContext(gxf_context_t context, - std::shared_ptr gxf_input_context, + GXFExecutionContext(gxf_context_t context, std::shared_ptr gxf_input_context, std::shared_ptr gxf_output_context); /** diff --git a/include/holoscan/core/gxf/gxf_extension_registrar.hpp b/include/holoscan/core/gxf/gxf_extension_registrar.hpp index a149d793..ee62f1e1 100644 --- a/include/holoscan/core/gxf/gxf_extension_registrar.hpp +++ b/include/holoscan/core/gxf/gxf_extension_registrar.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -217,7 +217,7 @@ class GXFExtensionRegistrar { * @param tid The type ID of the extension to use. */ void reset(gxf_context_t context, const char* extension_name, - const char* extension_description = "", gxf_tid_t tid = {0, 0}) { + const char* extension_description = "", gxf_tid_t tid = {0, 0}) { context_ = context; factory_ = std::make_unique(); allocated_tids_.clear(); @@ -235,12 +235,8 @@ class GXFExtensionRegistrar { } // Set the extension information. 
- const nvidia::gxf::Expected result = factory_->setInfo(extension_tid_, - extension_name, - extension_description, - "NVIDIA", - "1.0.0", - "Apache 2.0"); + const nvidia::gxf::Expected result = factory_->setInfo( + extension_tid_, extension_name, extension_description, "NVIDIA", "1.0.0", "Apache 2.0"); if (!result) { HOLOSCAN_LOG_ERROR("Unable to set the GXF extension information: {}", result.error()); return; diff --git a/include/holoscan/core/gxf/gxf_network_context.hpp b/include/holoscan/core/gxf/gxf_network_context.hpp index 302258e6..d12a6c2a 100644 --- a/include/holoscan/core/gxf/gxf_network_context.hpp +++ b/include/holoscan/core/gxf/gxf_network_context.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -45,6 +45,16 @@ class GXFNetworkContext : public holoscan::NetworkContext, public GXFComponent { * @return The type name of the GXF network context. */ virtual const char* gxf_typename() const = 0; + + protected: + // Make Fragment a friend class so it can call reset_graph_entities + friend class holoscan::Fragment; + + /// Set the parameters based on defaults (sets GXF parameters for GXF operators) + void set_parameters() override; + + /// Reset the GXF GraphEntity of all components associated with the network context + void reset_graph_entities() override; }; } // namespace holoscan::gxf diff --git a/include/holoscan/core/gxf/gxf_operator.hpp b/include/holoscan/core/gxf/gxf_operator.hpp index 7204d70c..320a0224 100644 --- a/include/holoscan/core/gxf/gxf_operator.hpp +++ b/include/holoscan/core/gxf/gxf_operator.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -68,6 +68,8 @@ class GXFOperator : public holoscan::Operator { */ gxf_context_t gxf_context() const { return gxf_context_; } + // Note: now can get eid() from graph_entity_.eid() + /** * @brief Set GXF entity ID. * @@ -170,6 +172,10 @@ class GXFOperator : public holoscan::Operator { } protected: + gxf_uid_t add_codelet_to_graph_entity() override; + + void set_parameters() override; + /** * @brief Register the GXF parameter adaptor for the given type. * @@ -219,6 +225,7 @@ class GXFOperator : public holoscan::Operator { return GXF_FAILURE; }); } + gxf_context_t gxf_context_ = nullptr; ///< The GXF context. gxf_uid_t gxf_eid_ = 0; ///< GXF entity ID gxf_uid_t gxf_cid_ = 0; ///< The GXF component ID. diff --git a/include/holoscan/core/gxf/gxf_resource.hpp b/include/holoscan/core/gxf/gxf_resource.hpp index 93a66aa1..34261067 100644 --- a/include/holoscan/core/gxf/gxf_resource.hpp +++ b/include/holoscan/core/gxf/gxf_resource.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -36,6 +36,8 @@ class GXFResource : public holoscan::Resource, public gxf::GXFComponent { GXFResource(const std::string& name, nvidia::gxf::Component* component); void initialize() override; + + void add_to_graph_entity(Operator* op); }; } // namespace holoscan::gxf diff --git a/include/holoscan/core/gxf/gxf_scheduler.hpp b/include/holoscan/core/gxf/gxf_scheduler.hpp index bdf42028..b3ca5c61 100644 --- a/include/holoscan/core/gxf/gxf_scheduler.hpp +++ b/include/holoscan/core/gxf/gxf_scheduler.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -61,6 +61,16 @@ class GXFScheduler : public holoscan::Scheduler, public GXFComponent { * @return The GXF clock pointer used by the scheduler. */ virtual nvidia::gxf::Clock* gxf_clock(); + + protected: + // Make Fragment a friend class so it can call reset_graph_entities + friend class holoscan::Fragment; + + /// Set the parameters based on defaults (sets GXF parameters for GXF operators) + void set_parameters() override; + + /// Reset the GXF GraphEntity of all components associated with the scheduler + void reset_graph_entities() override; }; } // namespace holoscan::gxf diff --git a/include/holoscan/core/gxf/gxf_tensor.hpp b/include/holoscan/core/gxf/gxf_tensor.hpp deleted file mode 100644 index 05784b66..00000000 --- a/include/holoscan/core/gxf/gxf_tensor.hpp +++ /dev/null @@ -1,145 +0,0 @@ -/* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
- * SPDX-License-Identifier: Apache-2.0 - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef HOLOSCAN_CORE_GXF_GXF_TENSOR_HPP -#define HOLOSCAN_CORE_GXF_GXF_TENSOR_HPP - -#include -#include -#include - -#include "gxf/std/tensor.hpp" - -#include "holoscan/core/common.hpp" -#include "holoscan/core/domain/tensor.hpp" - -namespace holoscan::gxf { - -class GXFMemoryBuffer; // forward declaration - -/** - * @brief Class to wrap GXF Tensor holding DLPack tensor structure. - * - * This class inherits nvidia::gxf::Tensor and is used with DLManagedTensorCtx class to wrap - * the GXF Tensor. - */ -class GXFTensor : public nvidia::gxf::Tensor { - public: - GXFTensor() = default; - - /** - * @brief Construct a new GXFTensor object. - * - * @param dl_ctx DLManagedTensorCtx object to wrap. - */ - explicit GXFTensor(std::shared_ptr& dl_ctx); - - /** - * @brief Construct a new GXFTensor object from a GXF Tensor. - * - * This constructor wraps a GXF Tensor object. When the GXF Tensor object is modified - * to point to a shared memory buffer, updates should be protected using a mutex. - * To mitigate thread contention when different threads access different tensors, - * this method uses hash-selected mutexes. - * - * A set of mutexes is allocated in a static array. To select which mutex to lock, - * this method uses a simple hash function based on the provided ID. The selected - * mutex ensures safe access to the Tensor's data pointer across multiple threads. 
- * - * For the ID (`id`), the GXF component's ID can be used to indicate a specific - * GXF Tensor object. If no ID is provided, the mutex associated with ID 0 is used. - * If the ID is -1, no mutex is utilized. - * - * @param tensor The GXF Tensor object to be converted. - * @param id The ID associated with the GXF Tensor, representing the GXF component's ID. - * Defaults to 0. - * @return The GXFTensor object created from the provided GXF Tensor. - */ - explicit GXFTensor(nvidia::gxf::Tensor& tensor, int64_t id = 0); - - /** - * @brief Get DLDevice object from the GXF Tensor. - * - * @return DLDevice object. - */ - DLDevice device() const; - - /** - * @brief Get DLDataType object from the GXF Tensor. - * - * @return DLDataType object. - */ - DLDataType dtype() const; - - /** - * @brief Convert GXF Tensor to Holoscan Tensor. - * - * @return holoscan::Tensor object converted from GXF Tensor. - */ - std::shared_ptr as_tensor(); - - /** - * @brief Create GXF Tensor object from Holoscan Tensor. - * - * @param tensor Holoscan Tensor object to convert. - * @return The shared pointer object to the GXFTensor object that is created from the given - * Holoscan Tensor object. - */ - static std::shared_ptr from_tensor(std::shared_ptr tensor); - - /** - * @brief Get the internal DLManagedTensorCtx of the GXFTensor. - * - * @return A shared pointer to the Tensor's DLManagedTensorCtx. - */ - std::shared_ptr& dl_ctx() { return dl_ctx_; } - - protected: - std::shared_ptr dl_ctx_; -}; - -/** - * @brief Class to wrap the nvidia::gxf::MemoryBuffer object. - * - * This class inherits nvidia::gxf::MemoryBuffer and is used with DLManagedTensorCtx class to wrap - * the GXF Tensor. 
- * - * A shared pointer to this class in DLManagedTensorCtx class is used as the deleter of the - * DLManagedTensorCtx::memory_ref - * - * When the last reference to the DLManagedTensorCtx object is released, - * DLManagedTensorCtx::memory_ref will also be destroyed, which will call the deleter function - * of the DLManagedTensor object. - * - * This class holds shape and strides data of DLTensor object so that the data is released together - * with the DLManagedTensor object. - */ -class GXFMemoryBuffer : public nvidia::gxf::MemoryBuffer { - public: - using nvidia::gxf::MemoryBuffer::MemoryBuffer; - - explicit GXFMemoryBuffer(nvidia::gxf::MemoryBuffer&& other) - : nvidia::gxf::MemoryBuffer(std::forward(other)) {} - - std::vector dl_shape; ///< Shape of the GXF Tensor. - std::vector dl_strides; ///< Strides of the GXF Tensor. This is used to calculate the - ///< strides of the DLTensor. -}; - -} // namespace holoscan::gxf - -#endif /* HOLOSCAN_CORE_GXF_GXF_TENSOR_HPP */ diff --git a/include/holoscan/core/gxf/gxf_utils.hpp b/include/holoscan/core/gxf/gxf_utils.hpp index 89f121ee..5076f046 100644 --- a/include/holoscan/core/gxf/gxf_utils.hpp +++ b/include/holoscan/core/gxf/gxf_utils.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -92,48 +92,6 @@ namespace holoscan::gxf { -/** - * @brief Add a connection between two components. - * - * @param context The GXF context. - * @param source_cid The source component ID. - * @param target_cid The target component ID. - * @return The result code. 
- */ -inline gxf_result_t add_connection(gxf_context_t context, gxf_uid_t source_cid, - gxf_uid_t target_cid) { - gxf_result_t code; - gxf_uid_t connect_eid; - const GxfEntityCreateInfo connect_entity_create_info = {nullptr, GXF_ENTITY_CREATE_PROGRAM_BIT}; - HOLOSCAN_GXF_CALL(GxfCreateEntity(context, &connect_entity_create_info, &connect_eid)); - - gxf_tid_t connect_tid; - HOLOSCAN_GXF_CALL(GxfComponentTypeId(context, "nvidia::gxf::Connection", &connect_tid)); - gxf_uid_t connect_cid; - HOLOSCAN_GXF_CALL(GxfComponentAdd(context, connect_eid, connect_tid, "", &connect_cid)); - - HOLOSCAN_GXF_CALL(GxfParameterSetHandle(context, connect_cid, "source", source_cid)); - code = GxfParameterSetHandle(context, connect_cid, "target", target_cid); - return code; -} - -/** - * @brief Create a GXF Component. - * - * @param context The GXF context. - * @param component_type_name The GXF Component type. - * @param component_name The name of the component. - * @param eid The entity ID to which the component will be added. - * @param cid The newly created component's ID will be returned here. - */ -inline void create_gxf_component(gxf_context_t context, const char* component_type_name, - const char* component_name, gxf_uid_t eid, gxf_uid_t* cid) { - gxf_tid_t tid; - HOLOSCAN_GXF_CALL(GxfComponentTypeId(context, component_type_name, &tid)); - - HOLOSCAN_GXF_CALL_FATAL(GxfComponentAdd(context, eid, tid, component_name, cid)); -} - /** * @brief Get the entity ID of the component. * @@ -141,11 +99,7 @@ inline void create_gxf_component(gxf_context_t context, const char* component_ty * @param cid The component ID. * @return The result code. */ -inline gxf_uid_t get_component_eid(gxf_context_t context, gxf_uid_t cid) { - gxf_uid_t eid; - HOLOSCAN_GXF_CALL_FATAL(GxfComponentEntity(context, cid, &eid)); - return eid; -} +gxf_uid_t get_component_eid(gxf_context_t context, gxf_uid_t cid); /** * @brief Get the full component name of the component. 
@@ -154,18 +108,7 @@ inline gxf_uid_t get_component_eid(gxf_context_t context, gxf_uid_t cid) { * @param cid The component ID. * @return The full component name. */ -inline std::string get_full_component_name(gxf_context_t context, gxf_uid_t cid) { - const char* cname; - HOLOSCAN_GXF_CALL_FATAL(GxfComponentName(context, cid, &cname)); - gxf_uid_t eid; - HOLOSCAN_GXF_CALL_FATAL(GxfComponentEntity(context, cid, &eid)); - const char* ename; - HOLOSCAN_GXF_CALL_FATAL(GxfComponentName(context, eid, &ename)); - - std::stringstream sstream; - sstream << ename << "/" << cname; - return sstream.str(); -} +std::string get_full_component_name(gxf_context_t context, gxf_uid_t cid); /** * @brief Create a name from the prefix and the index. @@ -174,11 +117,7 @@ inline std::string get_full_component_name(gxf_context_t context, gxf_uid_t cid) * @param index The index. * @return The created name (`_`). */ -inline std::string create_name(const char* prefix, int index) { - std::stringstream sstream; - sstream << prefix << "_" << index; - return sstream.str(); -} +std::string create_name(const char* prefix, int index); /** * @brief Create a name from the prefix and the name. @@ -187,11 +126,7 @@ inline std::string create_name(const char* prefix, int index) { * @param name The name. * @return The created name (`_`). */ -inline std::string create_name(const char* prefix, const std::string& name) { - std::stringstream sstream; - sstream << prefix << "_" << name; - return sstream.str(); -} +std::string create_name(const char* prefix, const std::string& name); /** * @brief Return a component ID from the handle name. @@ -316,51 +251,11 @@ inline gxf_uid_t find_component_handle(gxf_context_t context, gxf_uid_t componen * @return `true` if a component matching the criteria is found, `false` otherwise. 
* @see GxfComponentFind */ -inline bool has_component(gxf_context_t context, gxf_uid_t eid, gxf_tid_t tid = GxfTidNull(), - const char* name = nullptr, int32_t* offset = nullptr, - gxf_uid_t* cid = nullptr) { - gxf_uid_t temp_cid = 0; - auto result = GxfComponentFind(context, eid, tid, name, offset, cid ? cid : &temp_cid); - if (result == GXF_SUCCESS) { - return true; - } else { - return false; - } -} - -inline gxf_uid_t add_entity_group(void* context, std::string name) { - gxf_uid_t entity_group_gid = kNullUid; - HOLOSCAN_GXF_CALL_FATAL(GxfCreateEntityGroup(context, name.c_str(), &entity_group_gid)); - return entity_group_gid; -} - -inline std::pair create_gpu_device_entity(void* context, - std::string entity_name) { - // Get GPU device type id - gxf_tid_t device_tid = GxfTidNull(); - HOLOSCAN_GXF_CALL_FATAL(GxfComponentTypeId(context, "nvidia::gxf::GPUDevice", &device_tid)); +bool has_component(gxf_context_t context, gxf_uid_t eid, gxf_tid_t tid = GxfTidNull(), + const char* name = nullptr, int32_t* offset = nullptr, gxf_uid_t* cid = nullptr); - // Create a GPUDevice entity - gxf_uid_t device_eid = kNullUid; - GxfEntityCreateInfo entity_create_info = {entity_name.c_str(), GXF_ENTITY_CREATE_PROGRAM_BIT}; - HOLOSCAN_GXF_CALL_FATAL(GxfCreateEntity(context, &entity_create_info, &device_eid)); - GXF_ASSERT_NE(device_eid, kNullUid); - return std::make_pair(device_tid, device_eid); -} - -inline gxf_uid_t create_gpu_device_component(void* context, gxf_tid_t device_tid, - gxf_uid_t device_eid, std::string component_name, - int32_t dev_id = 0) { - // Create the GPU device component - gxf_uid_t device_cid = kNullUid; - HOLOSCAN_GXF_CALL_FATAL( - GxfComponentAdd(context, device_eid, device_tid, component_name.c_str(), &device_cid)); - GXF_ASSERT_NE(device_cid, kNullUid); - - // set the device ID parameter - HOLOSCAN_GXF_CALL_FATAL(GxfParameterSetInt32(context, device_cid, "dev_id", dev_id)); - return device_cid; -} +/// Create a GXF entity group with the specified name 
+gxf_uid_t add_entity_group(void* context, std::string name); } // namespace holoscan::gxf diff --git a/include/holoscan/core/gxf/gxf_wrapper.hpp b/include/holoscan/core/gxf/gxf_wrapper.hpp index 1fdd8adf..76e43bf9 100644 --- a/include/holoscan/core/gxf/gxf_wrapper.hpp +++ b/include/holoscan/core/gxf/gxf_wrapper.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,7 +21,7 @@ #include "holoscan/core/gxf/gxf_operator.hpp" #include "gxf/std/codelet.hpp" -#include "gxf/std/parameter_parser_std.hpp" +#include "gxf/core/parameter_parser_std.hpp" namespace holoscan::gxf { @@ -49,6 +49,8 @@ class GXFWrapper : public nvidia::gxf::Codelet { void set_operator(Operator* op) { op_ = op; } private: + void store_exception(); + Operator* op_ = nullptr; }; diff --git a/include/holoscan/core/io_context.hpp b/include/holoscan/core/io_context.hpp index 9f7f3e6b..4d7226d1 100644 --- a/include/holoscan/core/io_context.hpp +++ b/include/holoscan/core/io_context.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/include/holoscan/core/messagelabel.hpp b/include/holoscan/core/messagelabel.hpp index 212a5726..96e9ece3 100644 --- a/include/holoscan/core/messagelabel.hpp +++ b/include/holoscan/core/messagelabel.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -242,8 +242,8 @@ class MessageLabel { /** * @brief Convert the MessageLabel to a string. * - * @return std::string The formatted string representing the MessageLabel with all the paths and the - * Operators with their publish and receive timestamps. + * @return std::string The formatted string representing the MessageLabel with all the paths and + * the Operators with their publish and receive timestamps. */ std::string to_string() const; diff --git a/include/holoscan/core/network_context.hpp b/include/holoscan/core/network_context.hpp index bdff735c..28aac9c5 100644 --- a/include/holoscan/core/network_context.hpp +++ b/include/holoscan/core/network_context.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -238,7 +238,8 @@ class NetworkContext : public Component { YAML::Node to_yaml_node() const override; protected: - std::shared_ptr spec_; ///< The component specification. + /// Reset the GXF GraphEntity of any components associated with the scheduler + void reset_graph_entities() override; std::unordered_map> resources_; ///< The resources used by the network context. diff --git a/include/holoscan/core/network_contexts/gxf/ucx_context.hpp b/include/holoscan/core/network_contexts/gxf/ucx_context.hpp index beb2dcab..9e9c98aa 100644 --- a/include/holoscan/core/network_contexts/gxf/ucx_context.hpp +++ b/include/holoscan/core/network_contexts/gxf/ucx_context.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. 
All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -22,6 +22,8 @@ #include #include +#include + #include "../../gxf/gxf_network_context.hpp" #include "../../resources/gxf/ucx_entity_serializer.hpp" @@ -46,6 +48,8 @@ class UcxContext : public gxf::GXFNetworkContext { void setup(ComponentSpec& spec) override; void initialize() override; + nvidia::gxf::UcxContext* get() const; + private: Parameter> entity_serializer_; // TODO: support GPUDevice nvidia::gxf::Resource diff --git a/include/holoscan/core/operator.hpp b/include/holoscan/core/operator.hpp index b5dc0f14..6daf524d 100644 --- a/include/holoscan/core/operator.hpp +++ b/include/holoscan/core/operator.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -40,6 +40,9 @@ #include "./operator_spec.hpp" #include "./resource.hpp" +#include "gxf/core/gxf.h" +#include "gxf/app/graph_entity.hpp" + #define HOLOSCAN_OPERATOR_FORWARD_TEMPLATE() \ template >& resources() { return resources_; } - using Component::add_arg; + using ComponentBase::add_arg; /** * @brief Add a condition to the operator. 
@@ -550,6 +558,13 @@ class Operator : public Component { */ YAML::Node to_yaml_node() const override; + /** + * @brief Get the GXF GraphEntity object corresponding to this operator + * + * @return graph entity corresponding to the operator + */ + std::shared_ptr graph_entity() { return graph_entity_; } + protected: // Making the following classes as friend classes to allow them to access // get_consolidated_input_label, num_published_messages_map, update_input_message_label, @@ -559,6 +574,40 @@ class Operator : public Component { friend class AnnotatedDoubleBufferTransmitter; friend class DFFTCollector; + // Make GXFExecutor a friend class so it can call protected initialization methods + friend class holoscan::gxf::GXFExecutor; + // Fragment should be able to call reset_graph_entities + friend class Fragment; + + /** + * @brief This function creates a GraphEntity corresponding to the operator + * @param context The GXF context. + * @param name The name of the entity to create. + * @return The GXF entity eid corresponding to the graph entity. + */ + gxf_uid_t initialize_graph_entity(void* context, const std::string& entity_prefix = ""); + + /** + * @brief Add this operator as the codelet in the GXF GraphEntity + * + * @return The codelet component id corresponding to GXF codelet. + */ + virtual gxf_uid_t add_codelet_to_graph_entity(); + + /// Initialize conditions and add GXF conditions to graph_entity_; + void initialize_conditions(); + + /// Initialize resources and add GXF resources to graph_entity_; + void initialize_resources(); + + using ComponentBase::update_params_from_args; + + /// Update parameters based on the specified arguments + void update_params_from_args(); + + /// Set the parameters based on defaults (sets GXF parameters for GXF operators) + virtual void set_parameters(); + /** * @brief This function returns a consolidated MessageLabel for all the input ports of an * Operator. 
If there is no input port (root Operator), then a new MessageLabel with the current @@ -673,19 +722,24 @@ class Operator : public Component { }); } + /// Reset the GXF GraphEntity of any components associated with this operator + virtual void reset_graph_entities(); + OperatorType operator_type_ = OperatorType::kNative; ///< The type of the operator. std::shared_ptr spec_; ///< The operator spec of the operator. std::unordered_map> conditions_; ///< The conditions of the operator. std::unordered_map> - resources_; ///< The resources used by the operator. + resources_; ///< The resources used by the operator. + std::shared_ptr graph_entity_; ///< GXF graph entity corresponding to + ///< the Operator private: - /** - * @brief Set the operator codelet or any other backend codebase. - */ + /// Set the operator codelet or any other backend codebase. void set_op_backend(); + bool has_ucx_connector(); ///< Check if the operator has any UCX connectors. + /// The MessageLabel objects corresponding to the input ports indexed by the input port. std::unordered_map input_message_labels; diff --git a/include/holoscan/core/parameter.hpp b/include/holoscan/core/parameter.hpp index 5672fb89..c1ca7289 100644 --- a/include/holoscan/core/parameter.hpp +++ b/include/holoscan/core/parameter.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -210,9 +210,7 @@ class MetaParameter { * * @return The reference to the optional value of the parameter. 
*/ - std::optional& try_get() { - return value_; - } + std::optional& try_get() { return value_; } /** * @brief Provides a pointer to the object managed by the shared pointer pointed to by the diff --git a/include/holoscan/core/resource.hpp b/include/holoscan/core/resource.hpp index 63b3b4bd..5c121ad3 100644 --- a/include/holoscan/core/resource.hpp +++ b/include/holoscan/core/resource.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -82,6 +82,11 @@ namespace holoscan { +// Forward declarations +class NetworkContext; +class Scheduler; +class Operator; + /** * @brief Base class for all resources. * @@ -202,8 +207,14 @@ class Resource : public Component { YAML::Node to_yaml_node() const override; protected: + // Add friend classes that can call reset_graph_entites + friend class holoscan::NetworkContext; + friend class holoscan::Scheduler; + friend class holoscan::Operator; + + using Component::reset_graph_entities; + ResourceType resource_type_ = ResourceType::kNative; ///< The type of the resource. - std::shared_ptr spec_; ///< The component specification. bool is_initialized_ = false; ///< Whether the resource is initialized. }; diff --git a/include/holoscan/core/resources/gxf/allocator.hpp b/include/holoscan/core/resources/gxf/allocator.hpp index d6ce00be..91b1ec6f 100644 --- a/include/holoscan/core/resources/gxf/allocator.hpp +++ b/include/holoscan/core/resources/gxf/allocator.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -47,6 +47,11 @@ class Allocator : public gxf::GXFResource { virtual nvidia::byte* allocate(uint64_t size, MemoryStorageType type); virtual void free(nvidia::byte* pointer); + + // Get the block size of this allocator, defaults to 1 for byte-based allocators + uint64_t block_size(); + + nvidia::gxf::Allocator* get() const; }; } // namespace holoscan diff --git a/include/holoscan/core/resources/gxf/block_memory_pool.hpp b/include/holoscan/core/resources/gxf/block_memory_pool.hpp index 63500f97..42d208af 100644 --- a/include/holoscan/core/resources/gxf/block_memory_pool.hpp +++ b/include/holoscan/core/resources/gxf/block_memory_pool.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -49,6 +49,14 @@ class BlockMemoryPool : public Allocator { void setup(ComponentSpec& spec) override; + // Returns the storage type of the memory blocks + nvidia::gxf::MemoryStorageType storage_type() const; + + // Returns the total number of blocks + uint64_t num_blocks() const; + + nvidia::gxf::BlockMemoryPool* get() const; + private: Parameter storage_type_; Parameter block_size_; diff --git a/include/holoscan/core/resources/gxf/clock.hpp b/include/holoscan/core/resources/gxf/clock.hpp index 0a9f3ced..ee1e8b4c 100644 --- a/include/holoscan/core/resources/gxf/clock.hpp +++ b/include/holoscan/core/resources/gxf/clock.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -55,13 +55,14 @@ class Clock : public gxf::GXFResource { */ template void sleep_for(std::chrono::duration duration) { - int64_t duration_ns = - std::chrono::duration_cast(duration).count(); + int64_t duration_ns = std::chrono::duration_cast(duration).count(); sleep_for(duration_ns); } /// @brief Waits until the given target time virtual void sleep_until(int64_t target_time_ns) = 0; + + nvidia::gxf::Clock* get() const; }; } // namespace holoscan diff --git a/include/holoscan/core/resources/gxf/cuda_stream_pool.hpp b/include/holoscan/core/resources/gxf/cuda_stream_pool.hpp index 4b26b8e3..50ee770f 100644 --- a/include/holoscan/core/resources/gxf/cuda_stream_pool.hpp +++ b/include/holoscan/core/resources/gxf/cuda_stream_pool.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -48,6 +48,8 @@ class CudaStreamPool : public Allocator { void setup(ComponentSpec& spec) override; + nvidia::gxf::CudaStreamPool* get() const; + private: Parameter dev_id_; Parameter stream_flags_; diff --git a/include/holoscan/core/resources/gxf/double_buffer_receiver.hpp b/include/holoscan/core/resources/gxf/double_buffer_receiver.hpp index 60d6b6ca..0ef1cfd7 100644 --- a/include/holoscan/core/resources/gxf/double_buffer_receiver.hpp +++ b/include/holoscan/core/resources/gxf/double_buffer_receiver.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -53,6 +53,8 @@ class DoubleBufferReceiver : public Receiver { */ void track(); + nvidia::gxf::DoubleBufferReceiver* get() const; + Parameter capacity_; Parameter policy_; diff --git a/include/holoscan/core/resources/gxf/double_buffer_transmitter.hpp b/include/holoscan/core/resources/gxf/double_buffer_transmitter.hpp index 0bf6af43..1ebbb908 100644 --- a/include/holoscan/core/resources/gxf/double_buffer_transmitter.hpp +++ b/include/holoscan/core/resources/gxf/double_buffer_transmitter.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -53,6 +53,8 @@ class DoubleBufferTransmitter : public Transmitter { */ void track(); + nvidia::gxf::DoubleBufferTransmitter* get() const; + Parameter capacity_; Parameter policy_; diff --git a/include/holoscan/core/resources/gxf/manual_clock.hpp b/include/holoscan/core/resources/gxf/manual_clock.hpp index 393d27a6..17aa4d40 100644 --- a/include/holoscan/core/resources/gxf/manual_clock.hpp +++ b/include/holoscan/core/resources/gxf/manual_clock.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -54,6 +54,8 @@ class ManualClock : public Clock { /// @brief Waits until the given target time void sleep_until(int64_t target_time_ns) override; + nvidia::gxf::ManualClock* get() const; + private: Parameter initial_timestamp_; }; diff --git a/include/holoscan/core/resources/gxf/realtime_clock.hpp b/include/holoscan/core/resources/gxf/realtime_clock.hpp index 496a9320..3339f1df 100644 --- a/include/holoscan/core/resources/gxf/realtime_clock.hpp +++ b/include/holoscan/core/resources/gxf/realtime_clock.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -60,6 +60,8 @@ class RealtimeClock : public Clock { */ void set_time_scale(double time_scale); + nvidia::gxf::RealtimeClock* get() const; + private: Parameter initial_time_offset_; Parameter initial_time_scale_; diff --git a/include/holoscan/core/resources/gxf/receiver.hpp b/include/holoscan/core/resources/gxf/receiver.hpp index d15a1a0d..a4590764 100644 --- a/include/holoscan/core/resources/gxf/receiver.hpp +++ b/include/holoscan/core/resources/gxf/receiver.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -38,6 +38,8 @@ class Receiver : public gxf::GXFResource { Receiver(const std::string& name, nvidia::gxf::Receiver* component); const char* gxf_typename() const override { return "nvidia::gxf::Receiver"; } + + nvidia::gxf::Receiver* get() const; }; } // namespace holoscan diff --git a/include/holoscan/core/resources/gxf/serialization_buffer.hpp b/include/holoscan/core/resources/gxf/serialization_buffer.hpp index c2ec1674..f137cfa3 100644 --- a/include/holoscan/core/resources/gxf/serialization_buffer.hpp +++ b/include/holoscan/core/resources/gxf/serialization_buffer.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -22,11 +22,11 @@ #include #include +#include + #include "../../gxf/gxf_resource.hpp" #include "./unbounded_allocator.hpp" -#include - namespace holoscan { constexpr size_t kDefaultSerializationBufferSize = 1 << 12; // 4 kB @@ -49,6 +49,8 @@ class SerializationBuffer : public gxf::GXFResource { void initialize() override; + nvidia::gxf::SerializationBuffer* get() const; + private: Parameter> allocator_; Parameter buffer_size_; diff --git a/include/holoscan/core/resources/gxf/std_component_serializer.hpp b/include/holoscan/core/resources/gxf/std_component_serializer.hpp index 3460ccc4..72a3cf5a 100644 --- a/include/holoscan/core/resources/gxf/std_component_serializer.hpp +++ b/include/holoscan/core/resources/gxf/std_component_serializer.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,6 +21,9 @@ #include #include +// TODO: provide get() method once upstream issue with missing GXF header is resolved +// #include + #include "../../gxf/gxf_resource.hpp" #include "./unbounded_allocator.hpp" @@ -42,6 +45,8 @@ class StdComponentSerializer : public gxf::GXFResource { void initialize() override; + // nvidia::gxf::StdComponentSerializer* get() const; + private: Parameter> allocator_; }; diff --git a/include/holoscan/core/resources/gxf/video_stream_serializer.hpp b/include/holoscan/core/resources/gxf/std_entity_serializer.hpp similarity index 55% rename from include/holoscan/core/resources/gxf/video_stream_serializer.hpp rename to include/holoscan/core/resources/gxf/std_entity_serializer.hpp index d4dfe17f..79212c31 100644 --- a/include/holoscan/core/resources/gxf/video_stream_serializer.hpp +++ b/include/holoscan/core/resources/gxf/std_entity_serializer.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,39 +15,41 @@ * limitations under the License. */ -#ifndef HOLOSCAN_CORE_RESOURCES_GXF_VIDEO_STREAM_SERIALIZER_HPP -#define HOLOSCAN_CORE_RESOURCES_GXF_VIDEO_STREAM_SERIALIZER_HPP +#ifndef HOLOSCAN_CORE_RESOURCES_GXF_STD_ENTITY_SERIALIZER_HPP +#define HOLOSCAN_CORE_RESOURCES_GXF_STD_ENTITY_SERIALIZER_HPP #include #include +#include + #include "../../gxf/gxf_resource.hpp" namespace holoscan { /** - * @brief Video stream entity serializer. + * @brief Standard GXF entity serializer. * - * Used by VideoStreamReplayerOp to deserialize video streams and by VideoStreamRecorderOp to - * serialize video streams. 
+ * The VideoStreamSerializer entity serializer uses this as its entity serializer. */ -class VideoStreamSerializer : public gxf::GXFResource { +class StdEntitySerializer : public gxf::GXFResource { public: - HOLOSCAN_RESOURCE_FORWARD_ARGS_SUPER(VideoStreamSerializer, GXFResource) - VideoStreamSerializer() = default; + HOLOSCAN_RESOURCE_FORWARD_ARGS_SUPER(StdEntitySerializer, GXFResource) + StdEntitySerializer() = default; - const char* gxf_typename() const override { - return "nvidia::holoscan::stream_playback::VideoStreamSerializer"; - } + const char* gxf_typename() const override { return "nvidia::gxf::StdEntitySerializer"; } void setup(ComponentSpec& spec) override; void initialize() override; + nvidia::gxf::StdEntitySerializer* get() const; + private: Parameter>> component_serializers_; + Parameter verbose_warning_; }; } // namespace holoscan -#endif /* HOLOSCAN_CORE_RESOURCES_GXF_VIDEO_STREAM_SERIALIZER_HPP */ +#endif /* HOLOSCAN_CORE_RESOURCES_GXF_STD_ENTITY_SERIALIZER_HPP */ diff --git a/include/holoscan/core/resources/gxf/transmitter.hpp b/include/holoscan/core/resources/gxf/transmitter.hpp index 3b4b749b..4ec0bbd5 100644 --- a/include/holoscan/core/resources/gxf/transmitter.hpp +++ b/include/holoscan/core/resources/gxf/transmitter.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -38,6 +38,8 @@ class Transmitter : public gxf::GXFResource { Transmitter(const std::string& name, nvidia::gxf::Transmitter* component); const char* gxf_typename() const override { return "nvidia::gxf::Transmitter"; } + + nvidia::gxf::Transmitter* get() const; }; } // namespace holoscan diff --git a/include/holoscan/core/resources/gxf/ucx_component_serializer.hpp b/include/holoscan/core/resources/gxf/ucx_component_serializer.hpp index b951f072..0b8fb205 100644 --- a/include/holoscan/core/resources/gxf/ucx_component_serializer.hpp +++ b/include/holoscan/core/resources/gxf/ucx_component_serializer.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,6 +21,8 @@ #include #include +#include + #include "../../gxf/gxf_resource.hpp" #include "./allocator.hpp" @@ -44,6 +46,8 @@ class UcxComponentSerializer : public gxf::GXFResource { void initialize() override; + nvidia::gxf::UcxComponentSerializer* get() const; + private: Parameter> allocator_; }; diff --git a/include/holoscan/core/resources/gxf/ucx_entity_serializer.hpp b/include/holoscan/core/resources/gxf/ucx_entity_serializer.hpp index 14c2fd6f..efed2807 100644 --- a/include/holoscan/core/resources/gxf/ucx_entity_serializer.hpp +++ b/include/holoscan/core/resources/gxf/ucx_entity_serializer.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,6 +21,8 @@ #include #include +#include + #include "../../gxf/gxf_resource.hpp" namespace holoscan { @@ -41,6 +43,8 @@ class UcxEntitySerializer : public gxf::GXFResource { void initialize() override; + nvidia::gxf::UcxEntitySerializer* get() const; + private: Parameter>> component_serializers_; Parameter verbose_warning_; diff --git a/include/holoscan/core/resources/gxf/ucx_holoscan_component_serializer.hpp b/include/holoscan/core/resources/gxf/ucx_holoscan_component_serializer.hpp index 0b789547..27cf301d 100644 --- a/include/holoscan/core/resources/gxf/ucx_holoscan_component_serializer.hpp +++ b/include/holoscan/core/resources/gxf/ucx_holoscan_component_serializer.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -29,9 +29,9 @@ namespace holoscan { /** * @brief UCX-based Holoscan component serializer. * - * Used by UcxEntitySerializer to serialize and deserialize Holoscan SDK classes - * holoscan::Message and holoscan::Tensor (via holoscan::gxf::GXFTensor). See the - * CodecRegistry class for adding serialization codecs for additional holoscan::Message types. + * Used by UcxEntitySerializer to serialize and deserialize Holoscan SDK class holoscan::Message. + * See the CodecRegistry class for adding serialization codecs for additional holoscan::Message + * types. 
*/ class UcxHoloscanComponentSerializer : public gxf::GXFResource { public: diff --git a/include/holoscan/core/resources/gxf/ucx_receiver.hpp b/include/holoscan/core/resources/gxf/ucx_receiver.hpp index f7f38730..c0672239 100644 --- a/include/holoscan/core/resources/gxf/ucx_receiver.hpp +++ b/include/holoscan/core/resources/gxf/ucx_receiver.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,6 +21,8 @@ #include #include +#include + #include "./receiver.hpp" #include "holoscan/core/resources/gxf/ucx_serialization_buffer.hpp" @@ -54,10 +56,12 @@ class UcxReceiver : public Receiver { Parameter capacity_; Parameter policy_; + nvidia::gxf::UcxReceiver* get() const; + private: Parameter address_; Parameter port_; - Parameter> buffer_; + Parameter> buffer_; // TODO: support GPUDevice nvidia::gxf::Resource // nvidia::gxf::Resource> gpu_device_; }; diff --git a/include/holoscan/core/resources/gxf/ucx_serialization_buffer.hpp b/include/holoscan/core/resources/gxf/ucx_serialization_buffer.hpp index 713fa497..1e5174d6 100644 --- a/include/holoscan/core/resources/gxf/ucx_serialization_buffer.hpp +++ b/include/holoscan/core/resources/gxf/ucx_serialization_buffer.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -22,6 +22,8 @@ #include #include +#include + #include "../../gxf/gxf_resource.hpp" #include "./serialization_buffer.hpp" #include "./unbounded_allocator.hpp" @@ -45,11 +47,11 @@ constexpr size_t kDefaultUcxSerializationBufferSize = 7168; // 7 kB * All non-tensor entities get serialized to this buffer, which will be transmitted in an * active message header by UcxTransmitter. */ -class UcxSerializationBuffer : public SerializationBuffer { +class UcxSerializationBuffer : public gxf::GXFResource { public: - HOLOSCAN_RESOURCE_FORWARD_ARGS_SUPER(UcxSerializationBuffer, SerializationBuffer) + HOLOSCAN_RESOURCE_FORWARD_ARGS_SUPER(UcxSerializationBuffer, GXFResource) UcxSerializationBuffer() = default; - UcxSerializationBuffer(const std::string& name, nvidia::gxf::SerializationBuffer* component); + UcxSerializationBuffer(const std::string& name, nvidia::gxf::UcxSerializationBuffer* component); const char* gxf_typename() const override { return "nvidia::gxf::UcxSerializationBuffer"; } @@ -57,6 +59,8 @@ class UcxSerializationBuffer : public SerializationBuffer { void initialize() override; + nvidia::gxf::UcxSerializationBuffer* get() const; + private: Parameter> allocator_; Parameter buffer_size_; diff --git a/include/holoscan/core/resources/gxf/ucx_transmitter.hpp b/include/holoscan/core/resources/gxf/ucx_transmitter.hpp index 141268a4..f3b62c60 100644 --- a/include/holoscan/core/resources/gxf/ucx_transmitter.hpp +++ b/include/holoscan/core/resources/gxf/ucx_transmitter.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,6 +21,8 @@ #include #include +#include + #include "./transmitter.hpp" #include "holoscan/core/resources/gxf/ucx_serialization_buffer.hpp" @@ -66,13 +68,15 @@ class UcxTransmitter : public Transmitter { Parameter capacity_; Parameter policy_; + nvidia::gxf::UcxTransmitter* get() const; + private: Parameter receiver_address_; Parameter local_address_; Parameter port_; Parameter local_port_; Parameter maximum_connection_retries_; - Parameter> buffer_; + Parameter> buffer_; // TODO: support GPUDevice nvidia::gxf::Resource // nvidia::gxf::Resource> gpu_device_; }; diff --git a/include/holoscan/core/scheduler.hpp b/include/holoscan/core/scheduler.hpp index 8d68bb4f..f4c5db1e 100644 --- a/include/holoscan/core/scheduler.hpp +++ b/include/holoscan/core/scheduler.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -75,13 +75,13 @@ namespace holoscan { -enum class SchedulerType { kDefault, kGreedy, kMultiThread }; +enum class SchedulerType { kDefault, kGreedy, kMultiThread, kEventBased }; /** * @brief Base class for all schedulers. * - * This class is the base class for all schedulers including `holoscan::MultiThreadScheduler` and - * `holoscan::GreedyScheduler`. + * This class is the base class for all schedulers including `holoscan::MultiThreadScheduler`, + * `holoscan::GreedyScheduler` and `holoscan::EventBasedScheduler`. * It is used to define the common interface for all schedulers. 
*/ class Scheduler : public Component { @@ -239,7 +239,8 @@ class Scheduler : public Component { YAML::Node to_yaml_node() const override; protected: - std::shared_ptr spec_; ///< The component specification. + /// Reset the GXF GraphEntity of any components associated with the scheduler + void reset_graph_entities() override; std::unordered_map> resources_; ///< The resources used by the scheduler. diff --git a/include/holoscan/core/schedulers/gxf/event_based_scheduler.hpp b/include/holoscan/core/schedulers/gxf/event_based_scheduler.hpp new file mode 100644 index 00000000..63bd1b2d --- /dev/null +++ b/include/holoscan/core/schedulers/gxf/event_based_scheduler.hpp @@ -0,0 +1,65 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef HOLOSCAN_CORE_SCHEDULERS_GXF_EVENT_BASED_SCHEDULER_HPP +#define HOLOSCAN_CORE_SCHEDULERS_GXF_EVENT_BASED_SCHEDULER_HPP + +#include +#include +#include + +#include +#include "../../gxf/gxf_scheduler.hpp" +#include "../../resources/gxf/clock.hpp" +#include "../../resources/gxf/realtime_clock.hpp" + +namespace holoscan { + +class EventBasedScheduler : public gxf::GXFScheduler { + public: + HOLOSCAN_SCHEDULER_FORWARD_ARGS_SUPER(EventBasedScheduler, gxf::GXFScheduler) + EventBasedScheduler() = default; + + const char* gxf_typename() const override { return "nvidia::gxf::EventBasedScheduler"; } + + std::shared_ptr clock() override { return clock_.get(); } + + void setup(ComponentSpec& spec) override; + void initialize() override; + + // Parameter getters used for printing scheduler description (e.g. for Python __repr__) + int64_t worker_thread_number() { return worker_thread_number_; } + bool stop_on_deadlock() { return stop_on_deadlock_; } + int64_t stop_on_deadlock_timeout() { return stop_on_deadlock_timeout_; } + // could return std::optional, but just using int64_t simplifies the Python bindings + int64_t max_duration_ms() { return max_duration_ms_.has_value() ? 
max_duration_ms_.get() : -1; } + + nvidia::gxf::EventBasedScheduler* get() const; + + private: + Parameter> clock_; + Parameter worker_thread_number_; + Parameter stop_on_deadlock_; + Parameter max_duration_ms_; + Parameter stop_on_deadlock_timeout_; // in ms + // The following parameter needs to wait on ThreadPool support + // Parameter thread_pool_allocation_auto_; +}; + +} // namespace holoscan + +#endif /* HOLOSCAN_CORE_SCHEDULERS_GXF_EVENT_BASED_SCHEDULER_HPP */ diff --git a/include/holoscan/core/schedulers/gxf/greedy_scheduler.hpp b/include/holoscan/core/schedulers/gxf/greedy_scheduler.hpp index 64fa39ae..b547dcf2 100644 --- a/include/holoscan/core/schedulers/gxf/greedy_scheduler.hpp +++ b/include/holoscan/core/schedulers/gxf/greedy_scheduler.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -22,6 +22,7 @@ #include #include +#include #include "../../gxf/gxf_scheduler.hpp" #include "../../resources/gxf/clock.hpp" #include "../../resources/gxf/realtime_clock.hpp" @@ -47,6 +48,8 @@ class GreedyScheduler : public gxf::GXFScheduler { // could return std::optional, but just using int64_t simplifies the Python bindings int64_t max_duration_ms() { return max_duration_ms_.has_value() ? 
max_duration_ms_.get() : -1; } + nvidia::gxf::GreedyScheduler* get() const; + private: Parameter> clock_; Parameter stop_on_deadlock_; diff --git a/include/holoscan/core/schedulers/gxf/multithread_scheduler.hpp b/include/holoscan/core/schedulers/gxf/multithread_scheduler.hpp index d0696626..f299ac80 100644 --- a/include/holoscan/core/schedulers/gxf/multithread_scheduler.hpp +++ b/include/holoscan/core/schedulers/gxf/multithread_scheduler.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -22,6 +22,7 @@ #include #include +#include #include "../../gxf/gxf_scheduler.hpp" #include "../../resources/gxf/clock.hpp" #include "../../resources/gxf/realtime_clock.hpp" @@ -48,6 +49,8 @@ class MultiThreadScheduler : public gxf::GXFScheduler { // could return std::optional, but just using int64_t simplifies the Python bindings int64_t max_duration_ms() { return max_duration_ms_.has_value() ? max_duration_ms_.get() : -1; } + nvidia::gxf::MultiThreadScheduler* get() const; + private: Parameter> clock_; Parameter worker_thread_number_; diff --git a/include/holoscan/core/services/common/forward_op.hpp b/include/holoscan/core/services/common/forward_op.hpp index f0742627..af162a0f 100644 --- a/include/holoscan/core/services/common/forward_op.hpp +++ b/include/holoscan/core/services/common/forward_op.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -34,6 +34,16 @@ namespace holoscan::ops { * * The ForwardOp receives data (from the 'in' input port) and forwards it to the next * operator (through the 'out' output port). + * + * ==Named Inputs== + * + * - **in** : gxf::Entity + * - The input data to forward. + * + * ==Named Outputs== + * + * - **out** : gxf::Entity + * - The forwarded data. */ class ForwardOp : public holoscan::Operator { public: diff --git a/include/holoscan/holoscan.hpp b/include/holoscan/holoscan.hpp index 1a51850d..2042e645 100644 --- a/include/holoscan/holoscan.hpp +++ b/include/holoscan/holoscan.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -60,6 +60,7 @@ #include "./core/resources/gxf/cuda_stream_pool.hpp" #include "./core/resources/gxf/serialization_buffer.hpp" #include "./core/resources/gxf/std_component_serializer.hpp" +#include "./core/resources/gxf/std_entity_serializer.hpp" #include "./core/resources/gxf/unbounded_allocator.hpp" #include "./core/resources/gxf/ucx_component_serializer.hpp" #include "./core/resources/gxf/ucx_entity_serializer.hpp" @@ -67,9 +68,9 @@ #include "./core/resources/gxf/ucx_receiver.hpp" #include "./core/resources/gxf/ucx_serialization_buffer.hpp" #include "./core/resources/gxf/ucx_transmitter.hpp" -#include "./core/resources/gxf/video_stream_serializer.hpp" // Schedulers +#include "./core/schedulers/gxf/event_based_scheduler.hpp" #include "./core/schedulers/gxf/greedy_scheduler.hpp" #include "./core/schedulers/gxf/multithread_scheduler.hpp" diff --git a/include/holoscan/logger/logger.hpp b/include/holoscan/logger/logger.hpp index 12e37a8b..aad1f9a2 100644 --- 
a/include/holoscan/logger/logger.hpp +++ b/include/holoscan/logger/logger.hpp @@ -159,15 +159,6 @@ class Logger { static void set_pattern(std::string pattern = "", bool* is_overridden_by_env = nullptr); static std::string& pattern(); - static bool should_backtrace(); - static void disable_backtrace(); - static void enable_backtrace(size_t n_messages); - static void dump_backtrace(); - - static void flush(); - static LogLevel flush_level(); - static void flush_on(LogLevel level); - template static void log(const char* file, int line, const char* function_name, LogLevel level, const FormatT& format, ArgsT&&... args) { diff --git a/include/holoscan/operators/aja_source/aja_source.hpp b/include/holoscan/operators/aja_source/aja_source.hpp index 1f01f3d4..bb0ee972 100644 --- a/include/holoscan/operators/aja_source/aja_source.hpp +++ b/include/holoscan/operators/aja_source/aja_source.hpp @@ -37,21 +37,40 @@ namespace holoscan::ops { /** * @brief Operator class to get the video stream from AJA capture card. * - * **Named inputs:** - * - *overlay_buffer_input*: `nvidia::gxf::VideoBuffer` (optional) - * - The operator does not require a message on this input port in order for ``compute`` to - * be called. If a message is found, and `enable_overlay` is true, the image will be - * mixed with the image captured by the AJA card. If `enable_overlay` is false, any message - * on this port will be ignored. + * ==Named Inputs== * - * **Named outputs:** - * - *video_buffer_output*: `nvidia::gxf::VideoBuffer` - * - The output video frame from the AJA capture card. If ``overlay_rdma`` is true, this - * video buffer will be on the device, otherwise it will be in pinned host memory. - * - *overlay_buffer_output*: `nvidia::gxf::VideoBuffer` (optional) - * - This output port will only emit a video buffer when ``enable_overlay`` is true. If - * ``overlay_rdma`` is true, this video buffer will be on the device, otherwise it will be - * in pinned host memory. 
+ * - **overlay_buffer_input** : `nvidia::gxf::VideoBuffer` (optional) + * - The operator does not require a message on this input port in order for `compute` to + * be called. If a message is found, and `enable_overlay` is true, the image will be + * mixed with the image captured by the AJA card. If `enable_overlay` is false, any message + * on this port will be ignored. + * + * ==Named Outputs== + * + * - **video_buffer_output** : `nvidia::gxf::VideoBuffer` + * - The output video frame from the AJA capture card. If `overlay_rdma` is true, this + * video buffer will be on the device, otherwise it will be in pinned host memory. + * - **overlay_buffer_output** : `nvidia::gxf::VideoBuffer` (optional) + * - This output port will only emit a video buffer when `enable_overlay` is true. If + * `overlay_rdma` is true, this video buffer will be on the device, otherwise it will be + * in pinned host memory. + * + * ==Parameters== + * + * - **device**: The device to target (e.g., "0" for device 0). Optional (default: "0"). + * - **channel**: The camera `NTV2Channel` to use for output (e.g., `NTV2Channel::NTV2_CHANNEL1` + * (`0`) or "NTV2_CHANNEL1" (in YAML) for the first channel). Optional (default: + * `NTV2Channel::NTV2_CHANNEL1` in C++ or `"NTV2_CHANNEL1"` in YAML). + * - **width**: Width of the video stream. Optional (default: `1920`). + * - **height**: Height of the video stream. Optional (default: `1080`). + * - **framerate**: Frame rate of the video stream. Optional (default: `60`). + * - **rdma**: Boolean indicating whether RDMA is enabled. Optional (default: `false`). + * - **enable_overlay**: Boolean indicating whether a separate overlay channel is enabled. Optional + * (default: `false`). + * - **overlay_channel**: The camera `NTV2Channel` to use for overlay output. Optional (default: + * `NTV2Channel::NTV2_CHANNEL2` in C++ or `"NTV2_CHANNEL2"` in YAML). + * - **overlay_rdma**: Boolean indicating whether RDMA is enabled for the overlay. 
Optional + * (default: `true`). */ class AJASourceOp : public holoscan::Operator { public: diff --git a/include/holoscan/operators/async_ping_rx/async_ping_rx.hpp b/include/holoscan/operators/async_ping_rx/async_ping_rx.hpp index f52194b8..44cd78fd 100644 --- a/include/holoscan/operators/async_ping_rx/async_ping_rx.hpp +++ b/include/holoscan/operators/async_ping_rx/async_ping_rx.hpp @@ -28,11 +28,18 @@ namespace holoscan::ops { /** - * @brief Simple asynchronous receiver operator + * @brief Simple asynchronous receiver operator. * - * **Named inputs:** - * - *in*: any - * - A received value. + * ==Named Inputs== + * + * - **in** : any + * - A received value. + * + * ==Parameters== + * + * - **delay**: Ping delay in ms. Optional (default: `10L`) + * - **async_condition**: AsynchronousCondition adding async support to the operator. + * Optional (default: `nullptr`) */ class AsyncPingRxOp : public Operator { public: diff --git a/include/holoscan/operators/async_ping_tx/async_ping_tx.hpp b/include/holoscan/operators/async_ping_tx/async_ping_tx.hpp index 32e95c32..b4e70b0c 100644 --- a/include/holoscan/operators/async_ping_tx/async_ping_tx.hpp +++ b/include/holoscan/operators/async_ping_tx/async_ping_tx.hpp @@ -30,10 +30,18 @@ namespace holoscan::ops { /** * @brief Simple asynchronous transmitter operator. * - * **Named outputs:** - * - *out*: int - * - An index value that increments by one on each call to `compute`. The starting value - * is 1. + * ==Named Outputs== + * + * - **out** : int + * - An index value that increments by one on each call to `compute`. The starting value + * is 1. + * + * ==Parameters== + * + * - **delay**: Ping delay in ms. Optional (default: `10L`) + * - **count**: Ping count. Optional (default: `0UL`) + * - **async_condition**: AsynchronousCondition adding async support to the operator. 
+ * Optional (default: `nullptr`) */ class AsyncPingTxOp : public Operator { public: diff --git a/include/holoscan/operators/bayer_demosaic/bayer_demosaic.hpp b/include/holoscan/operators/bayer_demosaic/bayer_demosaic.hpp index dcc3a69f..6746b2f0 100644 --- a/include/holoscan/operators/bayer_demosaic/bayer_demosaic.hpp +++ b/include/holoscan/operators/bayer_demosaic/bayer_demosaic.hpp @@ -34,21 +34,58 @@ namespace holoscan::ops { /** * @brief Operator class to demosaic the input video stream. * - * **Named inputs:** - * - *receiver*: `nvidia::gxf::Tensor` or `nvidia::gxf::VideoBuffer` - * - The input video frame to process. If the input is a VideoBuffer it must be an 8-bit - * unsigned grayscale video (nvidia::gxf::VideoFormat::GXF_VIDEO_FORMAT_GRAY). The video - * buffer may be in either host or device memory (a host->device copy is performed if - * needed). If a video buffer is not found, the input port message is searched for a tensor - * with the name specified by `in_tensor_name`. This must be a device tensor in either - * 8-bit or 16-bit unsigned integer format. + * ==Named Inputs== * - * **Named outputs:** - * - *transmitter*: `nvidia::gxf::Tensor` - * - The output video frame after demosaicing. This will be a 3-channel RGB image if - * `alpha_value` is true, otherwise it will be a 4-channel RGBA image. The data type - * will be either 8-bit or 16-bit unsigned integer (matching the bit depth of the input). - * The name of the tensor that is output is controlled by `out_tensor_name`. + * - **receiver** : `nvidia::gxf::Tensor` or `nvidia::gxf::VideoBuffer` + * - The input video frame to process. If the input is a VideoBuffer it must be an 8-bit + * unsigned grayscale video (`nvidia::gxf::VideoFormat::GXF_VIDEO_FORMAT_GRAY`). The video + * buffer may be in either host or device memory (a host->device copy is performed if + * needed). If a video buffer is not found, the input port message is searched for a tensor + * with the name specified by `in_tensor_name`. 
This must be a device tensor in either + * 8-bit or 16-bit unsigned integer format. + * + * ==Named Outputs== + * + * - **transmitter** : `nvidia::gxf::Tensor` + * - The output video frame after demosaicing. This will be a 3-channel RGB image if + * `alpha_value` is true, otherwise it will be a 4-channel RGBA image. The data type + * will be either 8-bit or 16-bit unsigned integer (matching the bit depth of the input). + * The name of the tensor that is output is controlled by `out_tensor_name`. + * + * ==Parameters== + * + * - **pool**: Memory pool allocator (holoscan::Allocator) used by the operator. + * - **cuda_stream_pool**: `holoscan::CudaStreamPool` instance (`std::shared_ptr`) + * to allocate CUDA streams. Optional (default: `nullptr`). + * - **in_tensor_name**: The name of the input tensor. Optional (default: `""`). + * - **out_tensor_name**: The name of the output tensor. Optional (default: `""`). + * - **interpolation_mode**: The interpolation model to be used for demosaicing. Values available + * at: + * https://docs.nvidia.com/cuda/npp/nppdefs.html?highlight=Two%20parameter%20cubic%20filter#c.NppiInterpolationMode + * - NPPI_INTER_UNDEFINED (`0`): Undefined filtering interpolation mode. + * - NPPI_INTER_NN (`1`): Nearest neighbor filtering. + * - NPPI_INTER_LINEAR (`2`): Linear interpolation. + * - NPPI_INTER_CUBIC (`4`): Cubic interpolation. + * - NPPI_INTER_CUBIC2P_BSPLINE (`5`): Two-parameter cubic filter (B=1, C=0) + * - NPPI_INTER_CUBIC2P_CATMULLROM (`6`): Two-parameter cubic filter (B=0, C=1/2) + * - NPPI_INTER_CUBIC2P_B05C03 (`7`): Two-parameter cubic filter (B=1/2, C=3/10) + * - NPPI_INTER_SUPER (`8`): Super sampling. + * - NPPI_INTER_LANCZOS (`16`): Lanczos filtering. + * - NPPI_INTER_LANCZOS3_ADVANCED (`17`): Generic Lanczos filtering with order 3. + * - NPPI_SMOOTH_EDGE (`0x8000000`): Smooth edge filtering. + * + * Optional (default: `0`). + * - **bayer_grid_pos**: The Bayer grid position. 
Values available at: + * https://docs.nvidia.com/cuda/npp/nppdefs.html?highlight=Two%20parameter%20cubic%20filter#c.NppiBayerGridPosition + * - NPPI_BAYER_BGGR (`0`): Default registration position BGGR. + * - NPPI_BAYER_RGGB (`1`): Registration position RGGB. + * - NPPI_BAYER_GBRG (`2`): Registration position GBRG. + * - NPPI_BAYER_GRBG (`3`): Registration position GRBG. + * + * Optional (default: `2`). + * - **generate_alpha**: Generate alpha channel. Optional (default: `false`). + * - **alpha_value**: Alpha value to be generated if `generate_alpha` is set to `true`. Optional + * (default: `255`). */ class BayerDemosaicOp : public holoscan::Operator { public: diff --git a/include/holoscan/operators/format_converter/format_converter.hpp b/include/holoscan/operators/format_converter/format_converter.hpp index 91f686e1..d23f4458 100644 --- a/include/holoscan/operators/format_converter/format_converter.hpp +++ b/include/holoscan/operators/format_converter/format_converter.hpp @@ -51,23 +51,74 @@ enum class FormatConversionType { /** * @brief Operator class to convert the data format of the input data. * - * **Named inputs:** - * - *source_video*: `nvidia::gxf::Tensor` or `nvidia::gxf::VideoBuffer` - * - The input video frame to process. If the input is a VideoBuffer it must be in format - * GXF_VIDEO_FORMAT_RGBA, GXF_VIDEO_FORMAT_RGB or GXF_VIDEO_FORMAT_NV12. This video - * buffer may be in either host or device memory (a host->device copy is performed if - * needed). If a video buffer is not found, the input port message is searched for a tensor - * with the name specified by `in_tensor_name`. This must be a device tensor in one of - * several supported formats (unsigned 8-bit int or float32 graycale, unsigned 8-bit int - * RGB or RGBA YUV420 or NV12). + * ==Named Inputs== * - * **Named outputs:** - * - *tensor*: `nvidia::gxf::Tensor` - * - The output video frame after processing. 
The shape, data type and number of channels - * of this output tensor will depend on the specific parameters that were set for this - * operator. The name of the Tensor transmitted on this port is determined by - * `out_tensor_name`. + * - **source_video** : `nvidia::gxf::Tensor` or `nvidia::gxf::VideoBuffer` + * - The input video frame to process. If the input is a VideoBuffer it must be in format + * GXF_VIDEO_FORMAT_RGBA, GXF_VIDEO_FORMAT_RGB or GXF_VIDEO_FORMAT_NV12. This video + * buffer may be in either host or device memory (a host->device copy is performed if + * needed). If a video buffer is not found, the input port message is searched for a tensor + * with the name specified by `in_tensor_name`. This must be a device tensor in one of + * several supported formats (unsigned 8-bit int or float32 grayscale, unsigned 8-bit int + * RGB or RGBA YUV420 or NV12). * + * ==Named Outputs== + * + * - **tensor** : `nvidia::gxf::Tensor` + * - The output video frame after processing. The shape, data type and number of channels + * of this output tensor will depend on the specific parameters that were set for this + * operator. The name of the Tensor transmitted on this port is determined by + * `out_tensor_name`. + * + * ==Parameters== + * + * - **pool**: Memory pool allocator (holoscan::Allocator) used by the operator. + * - **out_dtype**: Destination data type. The available options are: + * - `"rgb888"` + * - `"uint8"` + * - `"float32"` + * - `"rgba8888"` + * - `"yuv420"` + * - `"nv12"` + * - **in_dtype**: Source data type. The available options are: + * - `"rgb888"` + * - `"uint8"` + * - `"float32"` + * - `"rgba8888"` + * - `"yuv420"` + * - `"nv12"` + * Optional (default: `"rgb888"`). + * - **in_tensor_name**: The name of the input tensor. Optional (default: `""`). + * - **out_tensor_name**: The name of the output tensor. Optional (default: `""`). + * - **scale_min**: Output will be clipped to this minimum value. Optional (default: `0.0`). 
+ * - **scale_max**: Output will be clipped to this maximum value. Optional (default: `1.0`). + * - **alpha_value**: Unsigned integer in range [0, 255], indicating the alpha channel value to use + * when converting from RGB to RGBA. Optional (default: `255`). + * - **resize_height**: Desired height for the (resized) output. Height will be unchanged if + * `resize_height` is `0`. Optional (default: `0`). + * - **resize_width**: Desired width for the (resized) output. Width will be unchanged if + * `resize_width` is `0`. Optional (default: `0`). + * - **resize_mode**: Resize mode enum value corresponding to NPP's NppiInterpolationMode. + * Values available at: + * https://docs.nvidia.com/cuda/npp/nppdefs.html?highlight=Two%20parameter%20cubic%20filter#c.NppiInterpolationMode + * - NPPI_INTER_UNDEFINED (`0`): Undefined filtering interpolation mode. + * - NPPI_INTER_NN (`1`): Nearest neighbor filtering. + * - NPPI_INTER_LINEAR (`2`): Linear interpolation. + * - NPPI_INTER_CUBIC (`4`): Cubic interpolation. + * - NPPI_INTER_CUBIC2P_BSPLINE (`5`): Two-parameter cubic filter (B=1, C=0) + * - NPPI_INTER_CUBIC2P_CATMULLROM (`6`): Two-parameter cubic filter (B=0, C=1/2) + * - NPPI_INTER_CUBIC2P_B05C03 (`7`): Two-parameter cubic filter (B=1/2, C=3/10) + * - NPPI_INTER_SUPER (`8`): Super sampling. + * - NPPI_INTER_LANCZOS (`16`): Lanczos filtering. + * - NPPI_INTER_LANCZOS3_ADVANCED (`17`): Generic Lanczos filtering with order 3. + * - NPPI_SMOOTH_EDGE (`0x8000000`): Smooth edge filtering. + * + * Optional (default: `0`). The default value `0` (NPPI_INTER_UNDEFINED) is + * equivalent to `4` (NPPI_INTER_CUBIC). + * - **channel_order**: Sequence of integers describing how channel values are permuted. + * Optional (default: `[0, 1, 2]` for 3-channel images and `[0, 1, 2, 3]` for 4-channel images). + * - **cuda_stream_pool**: `holoscan::CudaStreamPool` instance to allocate CUDA streams. + * Optional (default: `nullptr`). 
*/ class FormatConverterOp : public holoscan::Operator { public: diff --git a/include/holoscan/operators/holoviz/holoviz.hpp b/include/holoscan/operators/holoviz/holoviz.hpp index 77fd7445..4543b1c7 100644 --- a/include/holoscan/operators/holoviz/holoviz.hpp +++ b/include/holoscan/operators/holoviz/holoviz.hpp @@ -52,147 +52,151 @@ struct BufferInfo; * can be set at creation time using the `tensors` parameter or at runtime when passing input * specifications to the `input_specs` port. * - * **Named inputs:** - * - *receivers*: multi-receiver accepting `nvidia::gxf::Tensor` and/or - * `nvidia::gxf::VideoBuffer` - * - Any number of upstream ports may be connected to this `receivers` port. This port can - * accept either VideoBuffers or Tensors. These inputs can be in either host or device - * memory. Each tensor or video buffer will result in a layer. The operator autodetects the - * layer type for certain input types (e.g. a video buffer will result in an image layer). For - * other input types or more complex use cases, input specifications can be provided either at - * initialization time as a parameter or dynamically at run time (via `input_specs`). On each - * call to `compute`, tensors corresponding to all names specified in the `tensors` parameter - * must be found or an exception will be raised. Any extra, named tensors not present in the - * `tensors` parameter specification (or optional, dynamic `input_specs` input) will be - * ignored. - * - *input_specs*: `std::vector` (optional) - * - A list of `InputSpec` objects. This port can be used to dynamically update the overlay - * specification at run time. No inputs are required on this port in order for the operator - * to `compute`. - * - *render_buffer_input*: `nvidia::gxf::VideoBuffer` (optional) - * - An empty render buffer can optionally be provided. The video buffer must have format - * GXF_VIDEO_FORMAT_RGBA and be in device memory. 
This input port only exists if - * `enable_render_buffer_input` was set to true, in which case `compute` will only be - * called when a message arrives on this input. + * ==Named Inputs== * - * **Named outputs:** - * - *render_buffer_output*: `nvidia::gxf::VideoBuffer` (optional) - * - Output for a filled render buffer. If an input render buffer is specified, it is using - * that one, else it allocates a new buffer. The video buffer will have format - * GXF_VIDEO_FORMAT_RGBA and will be in device memory. This output is useful for offline - * rendering or headless mode. This output port only exists if `enable_render_buffer_output` - * was set to true. - * - *camera_pose_output*: `std::array` (optional) - * - The camera pose. The parameters returned represent the values of a 4x4 row major - * projection matrix. This output port only exists if `enable_camera_pose_output` was set to - * true. + * - **receivers** : multi-receiver accepting `nvidia::gxf::Tensor` and/or + * `nvidia::gxf::VideoBuffer` + * - Any number of upstream ports may be connected to this `receivers` port. This port can + * accept either VideoBuffers or Tensors. These inputs can be in either host or device + * memory. Each tensor or video buffer will result in a layer. The operator autodetects the + * layer type for certain input types (e.g. a video buffer will result in an image layer). + * For other input types or more complex use cases, input specifications can be provided + * either at initialization time as a parameter or dynamically at run time (via + * `input_specs`). On each call to `compute`, tensors corresponding to all names specified + * in the `tensors` parameter must be found or an exception will be raised. Any extra, + * named tensors not present in the `tensors` parameter specification (or optional, dynamic + * `input_specs` input) will be ignored. + * - **input_specs** : `std::vector` (optional) + * - A list of `InputSpec` objects. 
This port can be used to dynamically update the overlay + * specification at run time. No inputs are required on this port in order for the operator + * to `compute`. + * - **render_buffer_input** : `nvidia::gxf::VideoBuffer` (optional) + * - An empty render buffer can optionally be provided. The video buffer must have format + * GXF_VIDEO_FORMAT_RGBA and be in device memory. This input port only exists if + * `enable_render_buffer_input` was set to true, in which case `compute` will only be + * called when a message arrives on this input. * - * 1. Parameters + * ==Named Outputs== * - * - **`receivers`**: List of input queues to component accepting `gxf::Tensor` or - * `gxf::VideoBuffer` - * - type: `std::vector>` - * - **`enable_render_buffer_input`**: Enable `render_buffer_input`, (default: `false`) - * - type: `bool` - * - **`render_buffer_input`**: Input for an empty render buffer, type `gxf::VideoBuffer` - * - type: `gxf::Handle` - * - **`enable_render_buffer_output`**: Enable `render_buffer_output`, (default: `false`) - * - type: `bool` - * - **`render_buffer_output`**: Output for a filled render buffer. If an input render buffer is - * specified at `render_buffer_input` it uses that one, otherwise it allocates a new buffer. - * - type: `gxf::Handle` - * - **`enable_camera_pose_output`**: Enable `camera_pose_output`, (default: `false`) - * - type: `bool` - * - **`camera_pose_output`**: Output the camera pose. The camera parameters are returned in a - * 4x4 row major projection matrix. 
- * - type: `std::array` - * - **`tensors`**: List of input tensor specifications (default: `[]`) - * - type: `std::vector` - * - **`name`**: name of the tensor containing the input data to display - * - type: `std::string` - * - **`type`**: input type (default `"unknown"`) - * - type: `std::string` - * - possible values: - * **`unknown`**: unknown type, the operator tries to guess the type by inspecting the - * tensor - * **`color`**: RGB or RGBA color 2d image - * **`color_lut`**: single channel 2d image, color is looked up - * **`points`**: point primitives, one coordinate (x, y) per primitive - * **`lines`**: line primitives, two coordinates (x0, y0) and (x1, y1) per primitive - * **`line_strip`**: line strip primitive, a line primitive i is defined by each - * coordinate (xi, yi) and the following (xi+1, yi+1) - * **`triangles`**: triangle primitive, three coordinates (x0, y0), (x1, y1) and (x2, y2) - * per primitive - * **`crosses`**: cross primitive, a cross is defined by the center coordinate and the - * size (xi, yi, si) - * **`rectangles`**: axis aligned rectangle primitive, each rectangle is defined by two - * coordinates (xi, yi) and (xi+1, yi+1) - * **`ovals`**: oval primitive, an oval primitive is defined by the center coordinate and - * the axis sizes (xi, yi, sxi, syi) - * **`text`**: text is defined by the top left coordinate and the size (x, y, s) per - * string, text strings are defined by InputSpec member **`text`** - * **`depth_map`**: single channel 2d array where each element represents a depth value. - * The data is rendered as a 3d object using points, lines or triangles. The color for - * the elements can be specified through `depth_map_color`. Supported format: 8-bit - * unsigned normalized format that has a single 8-bit depth component - * **`depth_map_color`**: RGBA 2d image, same size as the depth map. One color value for - * each element of the depth map grid. 
Supported format: 32-bit unsigned normalized - * format that has an 8-bit R component in byte 0, an 8-bit G component in byte 1, an - * 8-bit B component in byte 2, and an 8-bit A component in byte 3. - * - **`opacity`**: layer opacity, 1.0 is fully opaque, 0.0 is fully transparent (default: - * `1.0`) - * - type: `float` - * - **`priority`**: layer priority, determines the render order, layers with higher priority - * values are rendered on top of layers with lower priority values (default: `0`) - * - type: `int32_t` - * - **`color`**: RGBA color of rendered geometry (default: `[1.f, 1.f, 1.f, 1.f]`) - * - type: `std::vector` - * - **`line_width`**: line width for geometry made of lines (default: `1.0`) - * - type: `float` - * - **`point_size`**: point size for geometry made of points (default: `1.0`) - * - type: `float` - * - **`text`**: array of text strings, used when `type` is text. (default: `[]`) - * - type: `std::vector` - * - **`depth_map_render_mode`**: depth map render mode (default: `points`) - * -type `std::string` - * - possible values: - * **`points`**: render as points - * **`lines`**: render as lines - * **`triangles`**: render as triangles - * - **`color_lut`**: Color lookup table for tensors of type 'color_lut', vector of four float - * RGBA values - * - type: `std::vector>` - * - **`window_title`**: Title on window canvas (default: `Holoviz`) - * - type: `std::string` - * - **`display_name`**: In exclusive mode, name of display to use as shown with xrandr (default: - * `DP-0`) - * - type: `std::string` - * - **`width`**: Window width or display resolution width if in exclusive or fullscreen mode - * (default: `1920`) - * - type: `uint32_t` - * - **`height`**: Window height or display resolution height if in exclusive or fullscreen mode - * (default: `1080`) - * - type: `uint32_t` - * - **`framerate`**: Display framerate if in exclusive mode (default: `60`) - * - type: `uint32_t` - * - **`use_exclusive_display`**: Enable exclusive display 
(default: `false`) - * - type: `bool` - * - **`fullscreen`**: Enable fullscreen window (default: `false`) - * - type: `bool` - * - **`headless`**: Enable headless mode. No window is opened, the render buffer is output to - * `render_buffer_output`. (default: `false`) - * - type: `bool` - * - **`window_close_scheduling_term`**: BooleanSchedulingTerm to stop the codelet from ticking - * when the window is closed - * - type: `gxf::Handle` - * - **`allocator`**: Allocator used to allocate memory for `render_buffer_output` - * - type: `gxf::Handle` - * - **`font_path`**: File path for the font used for rendering text. - * - type: `std::string` - * - **`cuda_stream_pool`**: Instance of gxf::CudaStreamPool - * - type: `gxf::Handle` + * - **render_buffer_output** : `nvidia::gxf::VideoBuffer` (optional) + * - Output for a filled render buffer. If an input render buffer is specified, it is using + * that one, else it allocates a new buffer. The video buffer will have format + * GXF_VIDEO_FORMAT_RGBA and will be in device memory. This output is useful for offline + * rendering or headless mode. This output port only exists if `enable_render_buffer_output` + * was set to true. + * - **camera_pose_output** : `std::array` (optional) + * - The camera pose. The parameters returned represent the values of a 4x4 row major + * projection matrix. This output port only exists if `enable_camera_pose_output` was set to + * true. * - * 2. Displaying Color Images + * ==Parameters== + * + * - **receivers**: List of input queues to component accepting `gxf::Tensor` or + * `gxf::VideoBuffer`. + * - type: `std::vector>` + * - **enable_render_buffer_input**: Enable `render_buffer_input` (default: `false`) + * - type: `bool` + * - **render_buffer_input**: Input for an empty render buffer, type `gxf::VideoBuffer`. 
+ * - type: `gxf::Handle` + * - **enable_render_buffer_output**: Enable `render_buffer_output` (default: `false`) + * - type: `bool` + * - **render_buffer_output**: Output for a filled render buffer. If an input render buffer is + * specified at `render_buffer_input` it uses that one, otherwise it allocates a new buffer. + * - type: `gxf::Handle` + * - **enable_camera_pose_output**: Enable `camera_pose_output` (default: `false`) + * - type: `bool` + * - **camera_pose_output**: Output the camera pose. The camera parameters are returned in a + * 4x4 row major projection matrix. + * - type: `std::array` + * - **tensors**: List of input tensor specifications (default: `[]`) + * - type: `std::vector` + * - **name**: name of the tensor containing the input data to display + * - type: `std::string` + * - **type**: input type (default `"unknown"`) + * - type: `std::string` + * - possible values: + * - **unknown**: unknown type, the operator tries to guess the type by inspecting the + * tensor. + * - **color**: RGB or RGBA color 2d image. + * - **color_lut**: single channel 2d image, color is looked up. + * - **points**: point primitives, one coordinate (x, y) per primitive. + * - **lines**: line primitives, two coordinates (x0, y0) and (x1, y1) per primitive. + * - **line_strip**: line strip primitive, a line primitive i is defined by each + * coordinate (xi, yi) and the following (xi+1, yi+1). + * - **triangles**: triangle primitive, three coordinates (x0, y0), (x1, y1) and (x2, y2) + * per primitive. + * - **crosses**: cross primitive, a cross is defined by the center coordinate and the + * size (xi, yi, si). + * - **rectangles**: axis aligned rectangle primitive, each rectangle is defined by two + * coordinates (xi, yi) and (xi+1, yi+1). + * - **ovals**: oval primitive, an oval primitive is defined by the center coordinate and + * the axis sizes (xi, yi, sxi, syi). 
+ * - **text**: text is defined by the top left coordinate and the size (x, y, s) per + * string, text strings are defined by InputSpec member **text**. + * - **depth_map**: single channel 2d array where each element represents a depth value. + * The data is rendered as a 3d object using points, lines or triangles. The color for + * the elements can be specified through `depth_map_color`. Supported format: 8-bit + * unsigned normalized format that has a single 8-bit depth component. + * - **depth_map_color**: RGBA 2d image, same size as the depth map. One color value for + * each element of the depth map grid. Supported format: 32-bit unsigned normalized + * format that has an 8-bit R component in byte 0, an 8-bit G component in byte 1, an + * 8-bit B component in byte 2, and an 8-bit A component in byte 3. + * - **opacity**: layer opacity, 1.0 is fully opaque, 0.0 is fully transparent (default: + * `1.0`) + * - type: `float` + * - **priority**: layer priority, determines the render order, layers with higher priority + * values are rendered on top of layers with lower priority values (default: `0`) + * - type: `int32_t` + * - **color**: RGBA color of rendered geometry (default: `[1.f, 1.f, 1.f, 1.f]`) + * - type: `std::vector` + * - **line_width**: line width for geometry made of lines (default: `1.0`) + * - type: `float` + * - **point_size**: point size for geometry made of points (default: `1.0`) + * - type: `float` + * - **text**: array of text strings, used when `type` is text. 
(default: `[]`) + * - type: `std::vector` + * - **depth_map_render_mode**: depth map render mode (default: `points`) + * - type: `std::string` + * - possible values: + * - **points**: render as points + * - **lines**: render as lines + * - **triangles**: render as triangles + * - **color_lut**: Color lookup table for tensors of type 'color_lut', vector of four float + * RGBA values + * - type: `std::vector>` + * - **window_title**: Title on window canvas (default: `"Holoviz"`) + * - type: `std::string` + * - **display_name**: In exclusive mode, name of display to use as shown with xrandr (default: + * `DP-0`) + * - type: `std::string` + * - **width**: Window width or display resolution width if in exclusive or fullscreen mode + * (default: `1920`) + * - type: `uint32_t` + * - **height**: Window height or display resolution height if in exclusive or fullscreen mode + * (default: `1080`) + * - type: `uint32_t` + * - **framerate**: Display framerate if in exclusive mode (default: `60`) + * - type: `uint32_t` + * - **use_exclusive_display**: Enable exclusive display (default: `false`) + * - type: `bool` + * - **fullscreen**: Enable fullscreen window (default: `false`) + * - type: `bool` + * - **headless**: Enable headless mode. No window is opened, the render buffer is output to + * `render_buffer_output`. (default: `false`) + * - type: `bool` + * - **window_close_scheduling_term**: BooleanSchedulingTerm to stop the codelet from ticking + * when the window is closed + * - type: `gxf::Handle` + * - **allocator**: Allocator used to allocate memory for `render_buffer_output` + * - type: `gxf::Handle` + * - **font_path**: File path for the font used for rendering text (default: `""`) + * - type: `std::string` + * - **cuda_stream_pool**: Instance of gxf::CudaStreamPool + * - type: `gxf::Handle` + * + * ==Notes== + * + * 1. Displaying Color Images * * Image data can either be on host or device (GPU). 
Multiple image formats are supported * - R 8 bit unsigned @@ -214,7 +218,7 @@ struct BufferInfo; * - R 16 bit unsigned * - R 32 bit unsigned * - * 3. Drawing Geometry + * 2. Drawing Geometry * * In all cases, `x` and `y` are normalized coordinates in the range `[0, 1]`. The `x` and `y` * correspond to the horizontal and vertical axes of the display, respectively. The origin `(0, @@ -241,8 +245,8 @@ struct BufferInfo; * calculated using the aspect ratio of the window. The index of each coordinate references a * text string from the `text` parameter and the index is clamped to the size of the text * array. For example, if there is one item set for the `text` parameter, e.g. - * `text=['my_text']` and three coordinates, then `my_text` is rendered three times. If - * `text=['first text', 'second text']` and three coordinates are specified, then `first text` + * `text=["my_text"]` and three coordinates, then `my_text` is rendered three times. If + * `text=["first text", "second text"]` and three coordinates are specified, then `first text` * is rendered at the first coordinate, `second text` at the second coordinate and then `second * text` again at the third coordinate. The `text` string array is fixed and can't be changed * after initialization. To hide text which should not be displayed, specify coordinates @@ -254,7 +258,7 @@ struct BufferInfo; * previous coordinate. * - 3D Triangles are defined by a set of three `(x, y, z)` coordinate tuples. * - * 4. Displaying Depth Maps + * 3. Displaying Depth Maps * * When `type` is `depth_map` the provided data is interpreted as a rectangular array of depth * values. Additionally a 2d array with a color value for each point in the grid can be specified @@ -270,7 +274,7 @@ struct BufferInfo; * - Look Around (LMB + ALT | LMB + CTRL + SHIFT) * - Zoom (Mouse wheel + SHIFT) * - * 5. Output + * 4. 
Output * * By default a window is opened to display the rendering, but the extension can also be run in * headless mode with the `headless` parameter. @@ -279,7 +283,6 @@ struct BufferInfo; * parameter. This reduces the latency by avoiding the desktop compositor. * * The rendered framebuffer can be output to `render_buffer_output`. - * */ class HolovizOp : public Operator { public: diff --git a/include/holoscan/operators/inference/inference.hpp b/include/holoscan/operators/inference/inference.hpp index 2c4d2b52..65ad4258 100644 --- a/include/holoscan/operators/inference/inference.hpp +++ b/include/holoscan/operators/inference/inference.hpp @@ -39,18 +39,50 @@ namespace holoscan::ops { /** * @brief Inference Operator class to perform single/multi model inference. * - * **Named inputs:** - * - *receivers*: multi-receiver accepting `nvidia::gxf::Tensor`(s) - * - Any number of upstream ports may be connected to this `receivers` port. The operator - * will search across all messages for tensors matching those specified in - * `in_tensor_names`. These are the set of input tensors used by the models in - * `inference_map`. + * ==Named Inputs== * - * **Named outputs:** - * - *transmitter*: `nvidia::gxf::Tensor`(s) - * - A message containing tensors corresponding to the inference results from all models - * will be emitted. The names of the tensors transmitted correspond to those in - * `out_tensor_names`. + * - **receivers** : multi-receiver accepting `nvidia::gxf::Tensor`(s) + * - Any number of upstream ports may be connected to this `receivers` port. The operator + * will search across all messages for tensors matching those specified in + * `in_tensor_names`. These are the set of input tensors used by the models in + * `inference_map`. + * + * ==Named Outputs== + * + * - **transmitter** : `nvidia::gxf::Tensor`(s) + * - A message containing tensors corresponding to the inference results from all models + * will be emitted. 
The names of the tensors transmitted correspond to those in + * `out_tensor_names`. + * + * ==Parameters== + * + * For more details on `InferenceOp` parameters, see + * [Customizing the Inference + * Operator](https://docs.nvidia.com/holoscan/sdk-user-guide/examples/byom.html#customizing-the-inference-operator) + * or refer to [Inference](https://docs.nvidia.com/holoscan/sdk-user-guide/inference.html). + * + * - **backend**: Backend to use for inference. Set `"trt"` for TensorRT, `"torch"` for LibTorch + * and `"onnxrt"` for the ONNX runtime. + * - **allocator**: Memory allocator to use for the output. + * - **inference_map**: Tensor to model map. + * - **model_path_map**: Path to the ONNX model to be loaded. + * - **pre_processor_map**: Pre processed data to model map. + * - **device_map**: Mapping of model (`DataMap`) to GPU ID for inference. Optional. + * - **backend_map**: Mapping of model (`DataMap`) to backend type for inference. + * Backend options: `"trt"` or `"torch"`. Optional. + * - **in_tensor_names**: Input tensors (`std::vector`). Optional. + * - **out_tensor_names**: Output tensors (`std::vector`). Optional. + * - **infer_on_cpu**: Whether to run the computation on the CPU instead of GPU. Optional + * (default: `false`). + * - **parallel_inference**: Whether to enable parallel execution. Optional (default: `true`). + * - **input_on_cuda**: Whether the input buffer is on the GPU. Optional (default: `true`). + * - **output_on_cuda**: Whether the output buffer is on the GPU. Optional (default: `true`). + * - **transmit_on_cuda**: Whether to transmit the message on the GPU. Optional (default: `true`). + * - **enable_fp16**: Use 16-bit floating point computations. Optional (default: `false`). + * - **is_engine_path**: Whether the input model path mapping is for trt engine files. Optional + * (default: `false`). + * - **cuda_stream_pool**: `holoscan::CudaStreamPool` instance to allocate CUDA streams. Optional + * (default: `nullptr`). 
*/ class InferenceOp : public holoscan::Operator { public: diff --git a/include/holoscan/operators/inference_processor/inference_processor.hpp b/include/holoscan/operators/inference_processor/inference_processor.hpp index a9e7985f..cce1fa6c 100644 --- a/include/holoscan/operators/inference_processor/inference_processor.hpp +++ b/include/holoscan/operators/inference_processor/inference_processor.hpp @@ -15,8 +15,8 @@ * limitations under the License. */ -#ifndef HOLOSCAN_OPERATORS_HOLOINFER_PROCESSOR_INFERENCE_PROCESSOR_HPP -#define HOLOSCAN_OPERATORS_HOLOINFER_PROCESSOR_INFERENCE_PROCESSOR_HPP +#ifndef HOLOSCAN_OPERATORS_INFERENCE_PROCESSOR_INFERENCE_PROCESSOR_HPP +#define HOLOSCAN_OPERATORS_INFERENCE_PROCESSOR_INFERENCE_PROCESSOR_HPP #include #include @@ -39,18 +39,38 @@ namespace holoscan::ops { /** * @brief Inference Processor Operator class to perform operations per input tensor. * - * **Named inputs:** - * - *receivers*: multi-receiver accepting `nvidia::gxf::Tensor`(s) - * - Any number of upstream ports may be connected to this `receivers` port. The operator - * will search across all messages for tensors matching those specified in - * `in_tensor_names`. These are the set of input tensors used by the processing operations - * specified in `process_map`. + * ==Named Inputs== * - * **Named outputs:** - * - *transmitter*: `nvidia::gxf::Tensor`(s) - * - A message containing tensors corresponding to the processed results from operations - * will be emitted. The names of the tensors transmitted correspond to those in - * `out_tensor_names`. + * - **receivers** : multi-receiver accepting `nvidia::gxf::Tensor`(s) + * - Any number of upstream ports may be connected to this `receivers` port. The operator + * will search across all messages for tensors matching those specified in + * `in_tensor_names`. These are the set of input tensors used by the processing operations + * specified in `process_map`. 
+ * + * ==Named Outputs== + * + * - **transmitter** : `nvidia::gxf::Tensor`(s) + * - A message containing tensors corresponding to the processed results from operations + * will be emitted. The names of the tensors transmitted correspond to those in + * `out_tensor_names`. + * + * ==Parameters== + * + * - **allocator**: Memory allocator to use for the output. + * - **process_operations**: Operations (`DataVecMap`) in sequence on tensors. + * - **processed_map**: Input-output tensor mapping (`DataVecMap`) + * - **in_tensor_names**: Names of input tensors (`std::vector`) in the order to be fed + * into the operator. Optional. + * - **out_tensor_names**: Names of output tensors (`std::vector`) in the order to be + * fed into the operator. Optional. + * - **input_on_cuda**: Whether the input buffer is on the GPU. Optional (default: `false`). + * - **output_on_cuda**: Whether the output buffer is on the GPU. Optional (default: `false`). + * - **transmit_on_cuda**: Whether to transmit the message on the GPU. Optional (default: `false`). + * - **cuda_stream_pool**: `holoscan::CudaStreamPool` instance to allocate CUDA streams. + * Optional (default: `nullptr`). + * - **config_path**: File path to the config file. Optional (default: `""`). + * - **disable_transmitter**: If `true`, disable the transmitter output port of the operator. + * Optional (default: `false`). 
*/ class InferenceProcessorOp : public holoscan::Operator { public: @@ -151,4 +171,4 @@ class InferenceProcessorOp : public holoscan::Operator { }; } // namespace holoscan::ops -#endif /* HOLOSCAN_OPERATORS_HOLOINFER_PROCESSOR_INFERENCE_PROCESSOR_HPP */ +#endif /* HOLOSCAN_OPERATORS_INFERENCE_PROCESSOR_INFERENCE_PROCESSOR_HPP */ diff --git a/include/holoscan/operators/ping_rx/ping_rx.hpp b/include/holoscan/operators/ping_rx/ping_rx.hpp index dc0aa050..68ad8aea 100644 --- a/include/holoscan/operators/ping_rx/ping_rx.hpp +++ b/include/holoscan/operators/ping_rx/ping_rx.hpp @@ -22,13 +22,16 @@ namespace holoscan::ops { - /** - * @brief Simple receiver operator + * @brief Simple receiver operator. + * + * This is an example of a native operator with one input port. + * On each tick, it receives an integer from the "in" port. * - * **Named inputs:** - * - *in*: any - * - A received value. + * ==Named Inputs== + * + * - **in** : any + * - A received value. */ class PingRxOp : public Operator { public: @@ -41,7 +44,6 @@ class PingRxOp : public Operator { void compute(InputContext& op_input, OutputContext&, ExecutionContext&) override; }; - } // namespace holoscan::ops #endif /* HOLOSCAN_OPERATORS_PING_RX_HPP */ diff --git a/include/holoscan/operators/ping_tx/ping_tx.hpp b/include/holoscan/operators/ping_tx/ping_tx.hpp index 02b841d2..1a5e652e 100644 --- a/include/holoscan/operators/ping_tx/ping_tx.hpp +++ b/include/holoscan/operators/ping_tx/ping_tx.hpp @@ -25,10 +25,12 @@ namespace holoscan::ops { /** * @brief Simple transmitter operator. * - * **Named outputs:** - * - *out*: int - * - An index value that increments by one on each call to `compute`. The starting value - * is 1. + * On each tick, it transmits an integer to the "out" port. + * + * ==Named Outputs== + * + * - **out** : int + * - An index value that increments by one on each call to `compute`. The starting value is 1. 
*/ class PingTxOp : public Operator { public: diff --git a/include/holoscan/operators/segmentation_postprocessor/segmentation_postprocessor.hpp b/include/holoscan/operators/segmentation_postprocessor/segmentation_postprocessor.hpp index 176c0ff6..af133744 100644 --- a/include/holoscan/operators/segmentation_postprocessor/segmentation_postprocessor.hpp +++ b/include/holoscan/operators/segmentation_postprocessor/segmentation_postprocessor.hpp @@ -37,16 +37,27 @@ namespace holoscan::ops { /** * @brief Operator carrying out post-processing operations on segmentation outputs. * - * **Named inputs:** - * - *in_tensor*: `nvidia::gxf::Tensor` - * - Expects a message containing a 32-bit floating point tensor with name - * `in_tensor_name`. The expected data layout of this tensor is HWC, NCHW or NHWC format as - * specified via `data_format`. + * ==Named Inputs== * - * **Named outputs:** - * - *out_tensor*: `nvidia::gxf::Tensor` - * - Emits a message containing a tensor named "out_tensor" that contains the segmentation - * labels. This tensor will have unsigned 8-bit integer data type and shape (H, W, 1). + * - **in_tensor** : `nvidia::gxf::Tensor` + * - Expects a message containing a 32-bit floating point tensor with name + * `in_tensor_name`. The expected data layout of this tensor is HWC, NCHW or NHWC format as + * specified via `data_format`. + * + * ==Named Outputs== + * + * - **out_tensor** : `nvidia::gxf::Tensor` + * - Emits a message containing a tensor named "out_tensor" that contains the segmentation + * labels. This tensor will have unsigned 8-bit integer data type and shape (H, W, 1). + * + * ==Parameters== + * + * - **allocator**: Memory allocator to use for the output. + * - **in_tensor_name**: Name of the input tensor. Optional (default: `""`). + * - **network_output_type**: Network output type (e.g. 'softmax'). Optional (default: `"softmax"`). + * - **data_format**: Data format of network output. Optional (default: `"hwc"`). 
+ * - **cuda_stream_pool**: `holoscan::CudaStreamPool` instance to allocate CUDA streams. + * Optional (default: `nullptr`). */ class SegmentationPostprocessorOp : public Operator { public: diff --git a/include/holoscan/operators/v4l2_video_capture/v4l2_video_capture.hpp b/include/holoscan/operators/v4l2_video_capture/v4l2_video_capture.hpp index 03552d31..d07c7a79 100644 --- a/include/holoscan/operators/v4l2_video_capture/v4l2_video_capture.hpp +++ b/include/holoscan/operators/v4l2_video_capture/v4l2_video_capture.hpp @@ -33,18 +33,44 @@ namespace holoscan::ops { * * Inputs a video stream from a V4L2 node, including USB cameras and HDMI IN. * - Input stream is on host. If no pixel format is specified in the yaml configuration file, the - * pixel format will be automatically selected. However, only `AB24` and `YUYV` are then supported. - * If a pixel format is specified in the yaml file, then this format will be used. However, note - * that the operator then expects that this format can be encoded as RGBA32. If not, the behaviour - * is undefined. + * pixel format will be automatically selected. However, only `AB24` and `YUYV` are then + * supported. + * If a pixel format is specified in the yaml file, then this format will be used. However, note + * that the operator then expects that this format can be encoded as RGBA32. If not, the behavior + * is undefined. * - Output stream is on host. Always RGBA32 at this time. * * Use `holoscan::ops::FormatConverterOp` to move data from the host to a GPU device. * - * **Named outputs:** - * - *signal*: `nvidia::gxf::VideoBuffer` - * - Emits a message containing a video buffer on the host with format - * GXF_VIDEO_FORMAT_RGBA. + * ==Named Outputs== + * + * - **signal** : `nvidia::gxf::VideoBuffer` + * - A message containing a video buffer on the host with format + * GXF_VIDEO_FORMAT_RGBA. + * + * ==Parameters== + * + * - **allocator**: Memory allocator to use for the output. + * - **device**: The device to target (e.g. 
"/dev/video0" for device 0). + * Default value is `"/dev/video0"`. + * - **width**: Width of the video stream. Optional (default: `0`). + * - **height**: Height of the video stream. Optional (default: `0`). + * - **num_buffers**: Number of V4L2 buffers to use. Optional (default: `4`). + * - **pixel_format**: Video stream pixel format (little endian four character code (fourcc)). + * Default value is `"auto"`. + * - **exposure_time**: Exposure time of the camera sensor in multiples of 100 μs (e.g. setting + * exposure_time to 100 is 10 ms). Optional (default: auto exposure, or camera sensor default). + * Use `v4l2-ctl -d /dev/ -L` for a range of values supported by your device. + * - When not set by the user, V4L2_CID_EXPOSURE_AUTO is set to V4L2_EXPOSURE_AUTO, or to + * V4L2_EXPOSURE_APERTURE_PRIORITY if the former is not supported. + * - When set by the user, V4L2_CID_EXPOSURE_AUTO is set to V4L2_EXPOSURE_SHUTTER_PRIORITY, or to + * V4L2_EXPOSURE_MANUAL if the former is not supported. The provided value is then used to set + * V4L2_CID_EXPOSURE_ABSOLUTE. + * - **gain**: Gain of the camera sensor. Optional (default: auto gain, or camera sensor default). + * Use `v4l2-ctl -d /dev/ -L` for a range of values supported by your device. + * - When not set by the user, V4L2_CID_AUTOGAIN is set to false (if supported). + * - When set by the user, V4L2_CID_AUTOGAIN is set to true (if supported). The provided value is + * then used to set V4L2_CID_GAIN. 
*/ class V4L2VideoCaptureOp : public Operator { public: @@ -68,12 +94,17 @@ class V4L2VideoCaptureOp : public Operator { Parameter height_; Parameter num_buffers_; Parameter pixel_format_; + Parameter exposure_time_; + Parameter gain_; void v4l2_initialize(); void v4l2_requestbuffers(); void v4l2_check_formats(); void v4l2_set_mode(); void v4l2_set_formats(); + bool v4l2_camera_supports_control(int cid, const char* control_name); + void v4l2_set_camera_control(v4l2_control control, const char* control_name, bool warn); + void v4l2_set_camera_settings(); void v4l2_start(); void v4l2_read_buffer(v4l2_buffer& buf); diff --git a/include/holoscan/operators/video_stream_recorder/video_stream_recorder.hpp b/include/holoscan/operators/video_stream_recorder/video_stream_recorder.hpp index b91be040..2287d3b5 100644 --- a/include/holoscan/operators/video_stream_recorder/video_stream_recorder.hpp +++ b/include/holoscan/operators/video_stream_recorder/video_stream_recorder.hpp @@ -30,11 +30,22 @@ namespace holoscan::ops { /** - * @brief Operator class to record the video stream to a file. + * @brief Operator class to record a video stream to a file. * - * **Named input:** - * - *input*: `nvidia::gxf::Tensor` - * - A message containing a video frame to serialize to disk. + * ==Named Inputs== + * + * - **input** : `nvidia::gxf::Tensor` + * - A message containing a video frame to serialize to disk. The input tensor can be on either + * the CPU or GPU. This data location will be recorded as part of the metadata serialized to + * disk and if the data is later read back in via `VideoStreamReplayerOp`, the tensor output of + * that operator will be on the same device (CPU or GPU). + * + * ==Parameters== + * + * - **directory**: Directory path for storing files. + * - **basename**: User specified file name without extension. + * - **flush_on_tick**: Flushes output buffer on every tick when `true`. + * Optional (default: `false`). 
*/ class VideoStreamRecorderOp : public holoscan::Operator { public: diff --git a/include/holoscan/operators/video_stream_replayer/video_stream_replayer.hpp b/include/holoscan/operators/video_stream_replayer/video_stream_replayer.hpp index a62e6b3f..b5fa5a29 100644 --- a/include/holoscan/operators/video_stream_replayer/video_stream_replayer.hpp +++ b/include/holoscan/operators/video_stream_replayer/video_stream_replayer.hpp @@ -31,10 +31,28 @@ namespace holoscan::ops { /** * @brief Operator class to replay a video stream from a file. * - * **Named outputs:** - * - *output*: `nvidia::gxf::Tensor` - * - A message containing a video frame deserialized from disk. + * ==Named Outputs== * + * - **output** : `nvidia::gxf::Tensor` + * - A message containing a video frame deserialized from disk. Depending on the metadata in the + * file being read, this tensor could be on either CPU or GPU. For the data used in examples + * distributed with the SDK, the tensor will be an unnamed GPU tensor (name == ""). + * + * ==Parameters== + * + * - **directory**: Directory path for reading files from. + * - **basename**: User specified file name without extension. + * - **batch_size**: Number of entities to read and publish for one tick. Optional (default: `1`). + * - **ignore_corrupted_entities**: If an entity could not be deserialized, it is ignored by + * default; otherwise a failure is generated. Optional (default: `true`). + * - **frame_rate**: Frame rate to replay. If zero value is specified, it follows timings in + * timestamps. Optional (default: `0.0`). + * - **realtime**: Playback video in realtime, based on frame_rate or timestamps. + * Optional (default: `true`). + * - **repeat**: Repeat video stream in a loop. Optional (default: `false`). + * - **count**: Number of frame counts to playback. If zero value is specified, it is ignored. + * If the count is less than the number of frames in the video, it would finish early. + * Optional (default: `0`). 
*/ class VideoStreamReplayerOp : public holoscan::Operator { public: diff --git a/include/holoscan/utils/cuda_stream_handler.hpp b/include/holoscan/utils/cuda_stream_handler.hpp index 2f20729f..fbb30a59 100644 --- a/include/holoscan/utils/cuda_stream_handler.hpp +++ b/include/holoscan/utils/cuda_stream_handler.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -19,13 +19,13 @@ #define INCLUDE_HOLOSCAN_UTILS_CUDA_STREAM_HANDLER_HPP #include -#include #include #include "../core/operator_spec.hpp" #include "../core/parameter.hpp" #include "../core/resources/gxf/cuda_stream_pool.hpp" #include "gxf/cuda/cuda_stream.hpp" +// keep the following two gxf/cuda imports in the header for backwards compatibility with 1.0 #include "gxf/cuda/cuda_stream_id.hpp" #include "gxf/cuda/cuda_stream_pool.hpp" @@ -48,8 +48,8 @@ namespace holoscan { * call CudaStreamHandler::from_messages(). This will synchronize with multiple streams. * - when executing CUDA functions CudaStreamHandler::get() to get the CUDA stream which should * be used by your CUDA function - * - before publishing the output message(s) of your operator call CudaStreamHandler::to_message() on - * each message. This will add the CUDA stream used by the CUDA functions in your operator to + * - before publishing the output message(s) of your operator call CudaStreamHandler::to_message() + * on each message. This will add the CUDA stream used by the CUDA functions in your operator to * the output message. 
*/ class CudaStreamHandler { @@ -57,15 +57,7 @@ class CudaStreamHandler { /** * @brief Destroy the CudaStreamHandler object */ - ~CudaStreamHandler() { - for (auto&& event : cuda_events_) { - const cudaError_t result = cudaEventDestroy(event); - if (cudaSuccess != result) { - HOLOSCAN_LOG_ERROR("Failed to destroy CUDA event: %s", cudaGetErrorString(result)); - } - } - cuda_events_.clear(); - } + ~CudaStreamHandler(); /** * Define the parameters used by this class. @@ -73,13 +65,7 @@ class CudaStreamHandler { * @param spec OperatorSpec to define the cuda_stream_pool parameter * @param required if set then it's required that the CUDA stream pool is specified */ - void define_params(OperatorSpec& spec, bool required = false) { - spec.param(cuda_stream_pool_, - "cuda_stream_pool", - "CUDA Stream Pool", - "Instance of gxf::CudaStreamPool."); - cuda_stream_pool_required_ = required; - } + void define_params(OperatorSpec& spec, bool required = false); /** * Define the parameters used by this class. @@ -90,16 +76,7 @@ class CudaStreamHandler { * @param spec OperatorSpec to define the cuda_stream_pool parameter * @param required if set then it's required that the CUDA stream pool is specified */ - void defineParams(OperatorSpec& spec, bool required = false) { - static bool warned = false; - if (!warned) { - warned = true; - HOLOSCAN_LOG_WARN( - "CudaStreamHandler's `defineParams` method has been renamed to `define_params`. 
" - "The old name is deprecated and may be removed in a future release."); - } - return define_params(spec, required); - } + void defineParams(OperatorSpec& spec, bool required = false); /** * Get the CUDA stream for the operation from the incoming message @@ -109,23 +86,7 @@ class CudaStreamHandler { * @return gxf_result_t */ gxf_result_t from_message(gxf_context_t context, - const nvidia::gxf::Expected& message) { - // if the message contains a stream use this - const auto maybe_cuda_stream_id = message.value().get(); - if (maybe_cuda_stream_id) { - const auto maybe_cuda_stream_handle = nvidia::gxf::Handle::Create( - context, maybe_cuda_stream_id.value()->stream_cid); - if (maybe_cuda_stream_handle) { - message_cuda_stream_handle_ = maybe_cuda_stream_handle.value(); - } - } else { - // if no stream had been found, allocate a stream and use that - gxf_result_t result = allocate_internal_stream(context); - if (result != GXF_SUCCESS) { return result; } - message_cuda_stream_handle_ = cuda_stream_handle_; - } - return GXF_SUCCESS; - } + const nvidia::gxf::Expected& message); /** * Get the CUDA stream for the operation from the incoming message @@ -138,16 +99,7 @@ class CudaStreamHandler { * @return gxf_result_t */ gxf_result_t fromMessage(gxf_context_t context, - const nvidia::gxf::Expected& message) { - static bool warned = false; - if (!warned) { - warned = true; - HOLOSCAN_LOG_WARN( - "CudaStreamHandler's `fromMessage` method has been renamed to `from_message`. 
" - "The old name is deprecated and may be removed in a future release."); - } - return from_message(context, message); - } + const nvidia::gxf::Expected& message); /** * Get the CUDA stream for the operation from the incoming messages @@ -157,64 +109,7 @@ class CudaStreamHandler { * @return gxf_result_t */ gxf_result_t from_messages(gxf_context_t context, - const std::vector& messages) { - const gxf_result_t result = allocate_internal_stream(context); - if (result != GXF_SUCCESS) { return result; } - - if (!cuda_stream_handle_) { - // if no CUDA stream can be allocated because no stream pool is set, then don't sync - // with incoming streams. CUDA operations of this operator will use the default stream - // which sync with all other streams by default. - return GXF_SUCCESS; - } - - // iterate through all messages and use events to chain incoming streams with the internal - // stream - auto event_it = cuda_events_.begin(); - for (auto& msg : messages) { - const auto maybe_cuda_stream_id = msg.get(); - if (maybe_cuda_stream_id) { - const auto maybe_cuda_stream_handle = nvidia::gxf::Handle::Create( - context, maybe_cuda_stream_id.value()->stream_cid); - if (maybe_cuda_stream_handle) { - const cudaStream_t cuda_stream = maybe_cuda_stream_handle.value()->stream().value(); - cudaError_t result; - - // allocate a new event if needed - if (event_it == cuda_events_.end()) { - cudaEvent_t cuda_event; - result = cudaEventCreateWithFlags(&cuda_event, cudaEventDisableTiming); - if (cudaSuccess != result) { - HOLOSCAN_LOG_ERROR("Failed to create input CUDA event: %s", - cudaGetErrorString(result)); - return GXF_FAILURE; - } - cuda_events_.push_back(cuda_event); - event_it = cuda_events_.end(); - --event_it; - } - - result = cudaEventRecord(*event_it, cuda_stream); - if (cudaSuccess != result) { - HOLOSCAN_LOG_ERROR("Failed to record event for message stream: %s", - cudaGetErrorString(result)); - return GXF_FAILURE; - } - result = 
cudaStreamWaitEvent(cuda_stream_handle_->stream().value(), *event_it); - if (cudaSuccess != result) { - HOLOSCAN_LOG_ERROR("Failed to record wait on message event: %s", - cudaGetErrorString(result)); - return GXF_FAILURE; - } - ++event_it; - } - } - } - message_cuda_stream_handle_ = cuda_stream_handle_; - return GXF_SUCCESS; - } - - + const std::vector& messages); /** * Get the CUDA stream for the operation from the incoming messages * @@ -226,16 +121,7 @@ class CudaStreamHandler { * @return gxf_result_t */ gxf_result_t fromMessages(gxf_context_t context, - const std::vector& messages) { - static bool warned = false; - if (!warned) { - warned = true; - HOLOSCAN_LOG_WARN( - "CudaStreamHandler's `fromMessages` method has been renamed to `from_messages`. " - "The old name is deprecated and may be removed in a future release."); - } - return from_messages(context, messages); - } + const std::vector& messages); /** * Add the used CUDA stream to the outgoing message @@ -243,18 +129,7 @@ class CudaStreamHandler { * @param message * @return gxf_result_t */ - gxf_result_t to_message(nvidia::gxf::Expected& message) { - if (message_cuda_stream_handle_) { - const auto maybe_stream_id = - message.value().add("cuda_stream_id_"); - if (!maybe_stream_id) { - HOLOSCAN_LOG_ERROR("Failed to add CUDA stream id to output message."); - return nvidia::gxf::ToResultCode(maybe_stream_id); - } - maybe_stream_id.value()->stream_cid = message_cuda_stream_handle_.cid(); - } - return GXF_SUCCESS; - } + gxf_result_t to_message(nvidia::gxf::Expected& message); /** * Add the used CUDA stream to the outgoing message @@ -265,16 +140,7 @@ class CudaStreamHandler { * @param message * @return gxf_result_t */ - gxf_result_t toMessage(nvidia::gxf::Expected& message) { - static bool warned = false; - if (!warned) { - warned = true; - HOLOSCAN_LOG_WARN( - "CudaStreamHandler's `toMessage` method has been renamed to `to_message`. 
" - "The old name is deprecated and may be removed in a future release."); - } - return to_message(message); - } + gxf_result_t toMessage(nvidia::gxf::Expected& message); /** * Get the CUDA stream handle which should be used for CUDA commands @@ -282,14 +148,7 @@ class CudaStreamHandler { * @param context * @return nvidia::gxf::Handle */ - nvidia::gxf::Handle get_stream_handle(gxf_context_t context) { - // If there is a message stream handle, return this - if (message_cuda_stream_handle_) { return message_cuda_stream_handle_; } - - // else allocate an internal CUDA stream and return it - allocate_internal_stream(context); - return cuda_stream_handle_; - } + nvidia::gxf::Handle get_stream_handle(gxf_context_t context); /** * Get the CUDA stream handle which should be used for CUDA commands @@ -300,16 +159,7 @@ class CudaStreamHandler { * @param context * @return nvidia::gxf::Handle */ - nvidia::gxf::Handle getStreamHandle(gxf_context_t context) { - static bool warned = false; - if (!warned) { - warned = true; - HOLOSCAN_LOG_WARN( - "CudaStreamHandler's `getStreamHandle` method has been renamed to `get_stream_handle`. " - "The old name is deprecated and may be removed in a future release."); - } - return get_stream_handle(context); - } + nvidia::gxf::Handle getStreamHandle(gxf_context_t context); /** * Get the CUDA stream which should be used for CUDA commands. 
@@ -319,18 +169,7 @@ class CudaStreamHandler { * @param context * @return cudaStream_t */ - cudaStream_t get_cuda_stream(gxf_context_t context) { - const nvidia::gxf::Handle cuda_stream_handle = - get_stream_handle(context); - if (cuda_stream_handle) { return cuda_stream_handle->stream().value(); } - if (!default_stream_warning_) { - default_stream_warning_ = true; - HOLOSCAN_LOG_WARN( - "Parameter `cuda_stream_pool` is not set, using the default CUDA stream for CUDA " - "operations."); - } - return cudaStreamDefault; - } + cudaStream_t get_cuda_stream(gxf_context_t context); /** * Get the CUDA stream which should be used for CUDA commands. @@ -343,16 +182,7 @@ class CudaStreamHandler { * @param context * @return cudaStream_t */ - cudaStream_t getCudaStream(gxf_context_t context) { - static bool warned = false; - if (!warned) { - warned = true; - HOLOSCAN_LOG_WARN( - "CudaStreamHandler's `getCudaStream` method has been renamed to `get_cuda_stream`. " - "The old name is deprecated and may be removed in a future release."); - } - return get_cuda_stream(context); - } + cudaStream_t getCudaStream(gxf_context_t context); private: /** @@ -361,36 +191,7 @@ class CudaStreamHandler { * @param context * @return gxf_result_t */ - gxf_result_t allocate_internal_stream(gxf_context_t context) { - // Create the CUDA stream if it does not yet exist. - if (!cuda_stream_handle_) { - // Check if a cuda stream pool is given. 
- const bool has_cuda_stream_pool_ = cuda_stream_pool_.has_value() && cuda_stream_pool_.get(); - if (!has_cuda_stream_pool_) { - // If the cuda stream pool is required return an error - if (cuda_stream_pool_required_) { - HOLOSCAN_LOG_ERROR("'cuda_stream_pool' is required but not set."); - return GXF_FAILURE; - } - return GXF_SUCCESS; - } - - // get Handle to underlying nvidia::gxf::CudaStreamPool from - // std::shared_ptr - const auto cuda_stream_pool = nvidia::gxf::Handle::Create( - context, cuda_stream_pool_.get()->gxf_cid()); - if (cuda_stream_pool) { - // allocate a stream - auto maybe_stream = cuda_stream_pool.value()->allocateStream(); - if (!maybe_stream) { - HOLOSCAN_LOG_ERROR("Failed to allocate CUDA stream"); - return nvidia::gxf::ToResultCode(maybe_stream); - } - cuda_stream_handle_ = std::move(maybe_stream.value()); - } - } - return GXF_SUCCESS; - } + gxf_result_t allocate_internal_stream(gxf_context_t context); /// if set then it's required that the CUDA stream pool is specified, if this is not the case /// an error is generated diff --git a/include/holoscan/utils/yaml_parser.hpp b/include/holoscan/utils/yaml_parser.hpp index 63997867..c4f3a769 100644 --- a/include/holoscan/utils/yaml_parser.hpp +++ b/include/holoscan/utils/yaml_parser.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -28,68 +28,18 @@ #include #include +#include "common/yaml_parser.hpp" // YAML parser for std::complex types #include "../core/common.hpp" -namespace { -bool is_space(unsigned char c) { - return c == ' '; -} -} // namespace - -/** - * Custom YAML parser for std::complex types - * - * Handles parsing of strings containing a complex floating point value. 
- * - * Examples of valid strings are: - * "1.0 + 2.5j" - * "-1.0 - 3i" - * "1+3.3j" - * - * There may be 0 or 1 space between a + or - sign and the digits. - * Either "i" or "j" must appear immediately after the second number. - */ -template -struct YAML::convert> { - static Node encode(const std::complex& data) { - Node node; - node = - std::string{fmt::format("{}{}{}j", data.real(), (data.imag() < 0) ? "" : "+", data.imag())}; - return node; - } - - static bool decode(const Node& node, std::complex& data) { - if (!node.IsScalar()) { - HOLOSCAN_LOG_ERROR("complex decode: expected a scalar"); - return false; - } - std::string value = node.as(); - - std::regex complex_reg("\\s*([+-]?\\s?\\d*\\.?\\d+)\\s?([+-]{1}\\s?\\d*\\.?\\d+)[ij]{1}\\s*$"); - std::smatch m; - if (std::regex_search(value, m, complex_reg)) { - if (m.size() != 3) { - HOLOSCAN_LOG_ERROR("unexpected match size: {}, matched: {}", m.size(), m.str(0)); - } - // extract the real and imaginary components of the number - std::string real_str = m.str(1); - std::string imag_str = m.str(2); - - // remove any white space around + or - (necessary for std::stod to work) - real_str.erase(std::remove_if(real_str.begin(), real_str.end(), is_space), real_str.end()); - imag_str.erase(std::remove_if(imag_str.begin(), imag_str.end(), is_space), imag_str.end()); - - // format real and imaginary strings as floating point - double real = std::stod(real_str); - double imag = std::stod(imag_str); - data = std::complex(real, imag); - } else { - HOLOSCAN_LOG_ERROR("failed to match expected regex for complex"); - return false; - } - return true; - } -}; +// Note: GXF provides a custom YAML parser for std::complex types. +// +// Examples of valid strings are: +// "1.0 + 2.5j" +// "-1.0 - 3i" +// "1+3.3j" +// +// There may be 0 or 1 space between a + or - sign and the digits. +// Either "i" or "j" must appear immediately after the second number. 
namespace holoscan { diff --git a/modules/holoinfer/src/CMakeLists.txt b/modules/holoinfer/src/CMakeLists.txt index 6ee34d9c..d1ec21d2 100644 --- a/modules/holoinfer/src/CMakeLists.txt +++ b/modules/holoinfer/src/CMakeLists.txt @@ -1,5 +1,5 @@ # -# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -45,6 +45,8 @@ if(HOLOSCAN_BUILD_LIBTORCH) $ PUBLIC $ + $ + $ $ ) endif() @@ -98,6 +100,8 @@ target_include_directories(${PROJECT_NAME} $ PUBLIC $ + $ + $ $ ) diff --git a/modules/holoinfer/src/include/holoinfer.hpp b/modules/holoinfer/src/include/holoinfer.hpp index 616fc7a5..ac1e3835 100644 --- a/modules/holoinfer/src/include/holoinfer.hpp +++ b/modules/holoinfer/src/include/holoinfer.hpp @@ -61,6 +61,9 @@ class _HOLOSCAN_EXTERNAL_API_ InferContext { * @returns Map of model as key mapped to the output dimension (of inferred data) */ DimType get_output_dimensions() const; + + private: + std::string unique_id_; }; /** diff --git a/modules/holoinfer/src/include/holoinfer_utils.hpp b/modules/holoinfer/src/include/holoinfer_utils.hpp index 2db7a4b4..c8419081 100644 --- a/modules/holoinfer/src/include/holoinfer_utils.hpp +++ b/modules/holoinfer/src/include/holoinfer_utils.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -34,7 +34,7 @@ #include "gxf/std/allocator.hpp" #include "gxf/std/clock.hpp" #include "gxf/std/codelet.hpp" -#include "gxf/std/parameter_parser_std.hpp" +#include "gxf/core/parameter_parser_std.hpp" #include "gxf/std/receiver.hpp" #include "gxf/std/tensor.hpp" #include "gxf/std/timestamp.hpp" diff --git a/modules/holoinfer/src/infer/trt/utils.cpp b/modules/holoinfer/src/infer/trt/utils.cpp index e74c2197..899d268d 100644 --- a/modules/holoinfer/src/infer/trt/utils.cpp +++ b/modules/holoinfer/src/infer/trt/utils.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -33,7 +33,7 @@ bool valid_file_path(const std::string& filepath) { } bool generate_engine_path(const NetworkOptions& options, const std::string& onnx_model_path, - std::string& engine_path) { + std::string& engine_path) { cudaDeviceProp device_prop; auto status = cudaGetDeviceProperties(&device_prop, options.device_index); if (status != cudaSuccess) { @@ -45,17 +45,12 @@ bool generate_engine_path(const NetworkOptions& options, const std::string& onnx gpu_name.erase(remove(gpu_name.begin(), gpu_name.end(), ' '), gpu_name.end()); engine_path.reserve(1024); - engine_path = std::filesystem::path(onnx_model_path).replace_extension("").string() + "." - + gpu_name + "." - + std::to_string(device_prop.major) + "." - + std::to_string(device_prop.minor) + "." - + std::to_string(device_prop.multiProcessorCount) - + ".trt." - + std::to_string(NV_TENSORRT_MAJOR) + "." - + std::to_string(NV_TENSORRT_MINOR) + "." - + std::to_string(NV_TENSORRT_PATCH) + "." 
- + std::to_string(NV_TENSORRT_BUILD) - + ".engine"; + engine_path = + std::filesystem::path(onnx_model_path).replace_extension("").string() + "." + gpu_name + "." + + std::to_string(device_prop.major) + "." + std::to_string(device_prop.minor) + "." + + std::to_string(device_prop.multiProcessorCount) + ".trt." + + std::to_string(NV_TENSORRT_MAJOR) + "." + std::to_string(NV_TENSORRT_MINOR) + "." + + std::to_string(NV_TENSORRT_PATCH) + "." + std::to_string(NV_TENSORRT_BUILD) + ".engine"; if (options.use_fp16) { engine_path += ".fp16"; diff --git a/modules/holoinfer/src/infer/trt/utils.hpp b/modules/holoinfer/src/infer/trt/utils.hpp index 39b263f1..046dd8cd 100644 --- a/modules/holoinfer/src/infer/trt/utils.hpp +++ b/modules/holoinfer/src/infer/trt/utils.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -75,7 +75,7 @@ bool valid_file_path(const std::string& filepath); * @brief Build the (trt engine) network */ bool generate_engine_path(const NetworkOptions& options, const std::string& model_path, - std::string& engine_name); + std::string& engine_name); /** * @brief Build the (trt engine) network diff --git a/modules/holoinfer/src/manager/infer_manager.cpp b/modules/holoinfer/src/manager/infer_manager.cpp index 58c6d7f4..1b65013a 100644 --- a/modules/holoinfer/src/manager/infer_manager.cpp +++ b/modules/holoinfer/src/manager/infer_manager.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -728,37 +728,116 @@ DimType ManagerInfer::get_output_dimensions() const { InferContext::InferContext() { try { - manager = std::make_unique(); + if (g_managers.find("current_manager") != g_managers.end()) { + HOLOSCAN_LOG_WARN("Inference context exists, cleaning up"); + g_managers.at("current_manager").reset(); + g_managers.erase("current_manager"); + } + g_managers.insert({"current_manager", std::make_shared()}); } catch (const std::bad_alloc&) { throw; } } InferStatus InferContext::execute_inference(DataMap& data_map, DataMap& output_data_map) { InferStatus status = InferStatus(); + + if (g_managers.find(unique_id_) == g_managers.end()) { + status.set_code(holoinfer_code::H_ERROR); + status.set_message("Inference manager, Error: Inference manager not created or is not set up."); + return status; + } + try { + g_manager = g_managers.at(unique_id_); + if (data_map.size() == 0) { status.set_code(holoinfer_code::H_ERROR); status.set_message("Inference manager, Error: Data map empty for inferencing"); return status; } - status = manager->execute_inference(data_map, output_data_map); - } catch (...) 
{ + status = g_manager->execute_inference(data_map, output_data_map); + } catch (const std::exception& e) { status.set_code(holoinfer_code::H_ERROR); - status.set_message("Inference manager, Error in inference"); + status.set_message(std::string("Inference manager, Error in inference setup: ") + e.what()); return status; } + return status; } InferStatus InferContext::set_inference_params(std::shared_ptr& inference_specs) { - return manager->set_inference_params(inference_specs); + InferStatus status = InferStatus(); + if (g_managers.size() == 0) { + status.set_code(holoinfer_code::H_ERROR); + status.set_message("Inference manager, Error: Inference Manager not initiated"); + return status; + } + + try { + auto multi_model_map = inference_specs->get_path_map(); + + if (multi_model_map.size() == 0) { + if (g_managers.find("current_manager") != g_managers.end()) { + g_managers.at("current_manager").reset(); + g_managers.erase("current_manager"); + } + + status.set_code(holoinfer_code::H_ERROR); + status.set_message("Inference manager, Error: Multi modal map cannot be empty in setup."); + return status; + } + + std::string unique_id_name(""); + for (auto& [model_name, _] : multi_model_map) { unique_id_name += model_name + "_[]_"; } + + unique_id_ = unique_id_name; + HOLOSCAN_LOG_INFO("Inference context ID: {}", unique_id_); + + if (g_managers.find(unique_id_name) != g_managers.end()) { + if (g_managers.find("current_manager") != g_managers.end()) { + g_managers.erase("current_manager"); + } + + status.set_code(holoinfer_code::H_ERROR); + status.set_message( + "Inference manager, Error: A manager with the same unique ID already exists."); + HOLOSCAN_LOG_ERROR( + "Inference manager setup error: model keywords are repeated in multiple instances of " + "inference. 
All model instances must have unique keyword in the configuration file."); + return status; + } + + if (g_managers.find("current_manager") == g_managers.end()) { + status.set_code(holoinfer_code::H_ERROR); + status.set_message("Inference manager, Error: Current Manager not initialized."); + HOLOSCAN_LOG_ERROR("Inference manager setup error: Inference context not initialized."); + return status; + } + + g_managers.insert({unique_id_name, std::move(g_managers.at("current_manager"))}); + g_managers.erase("current_manager"); + + g_manager = g_managers.at(unique_id_); + status = g_manager->set_inference_params(inference_specs); + } catch (const std::exception& e) { + status.set_code(holoinfer_code::H_ERROR); + status.set_message(std::string("Inference manager, Error in inference setup: ") + e.what()); + return status; + } + + return status; } InferContext::~InferContext() { - manager.reset(); + if (g_managers.find(unique_id_) != g_managers.end()) { + g_manager = g_managers.at(unique_id_); + g_manager.reset(); + g_managers.erase(unique_id_); + } } DimType InferContext::get_output_dimensions() const { - return manager->get_output_dimensions(); + g_manager = g_managers.at(unique_id_); + return g_manager->get_output_dimensions(); } } // namespace inference diff --git a/modules/holoinfer/src/manager/infer_manager.hpp b/modules/holoinfer/src/manager/infer_manager.hpp index d1eeca4e..e4e7adb3 100644 --- a/modules/holoinfer/src/manager/infer_manager.hpp +++ b/modules/holoinfer/src/manager/infer_manager.hpp @@ -167,7 +167,10 @@ class ManagerInfer { }; /// Pointer to manager class for inference -std::unique_ptr manager; +std::shared_ptr g_manager; + +/// Map to store multi-instance managers +std::map> g_managers; } // namespace inference } // namespace holoscan diff --git a/modules/holoinfer/src/utils/infer_utils.cpp b/modules/holoinfer/src/utils/infer_utils.cpp index 468c862f..f2408233 100644 --- a/modules/holoinfer/src/utils/infer_utils.cpp +++ 
b/modules/holoinfer/src/utils/infer_utils.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -38,7 +38,7 @@ cudaError_t check_cuda(cudaError_t result) { gxf_result_t report_error(const std::string& module, const std::string& submodule) { std::string error_string{"Error in " + module + ", Sub-module->" + submodule}; - HOLOSCAN_LOG_ERROR("%s\n", error_string.c_str()); + HOLOSCAN_LOG_ERROR("{}", error_string); return GXF_FAILURE; } diff --git a/modules/holoviz/examples/depth_map/Main.cpp b/modules/holoviz/examples/depth_map/Main.cpp index 9beb4cb1..5a2d8929 100644 --- a/modules/holoviz/examples/depth_map/Main.cpp +++ b/modules/holoviz/examples/depth_map/Main.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -186,8 +186,8 @@ void generateSourceData(uint32_t frame_index) { for (uint32_t x = 0; x < width; ++x) { const uint8_t depth = (std::sin((float(x) / float(width)) * 3.14f * 4.f) * std::cos((float(y) / float(height)) * 3.14f * 3.f) + - 1.f) * offset * - 63.f; + 1.f) * + offset * 63.f; depth_data[y * width + x] = depth; color_data[y * width + x] = depth | ((depth << (8 + (x & 1))) & 0xFF00) | diff --git a/modules/holoviz/src/exclusive_window.cpp b/modules/holoviz/src/exclusive_window.cpp index 1aef5f74..45e0b974 100644 --- a/modules/holoviz/src/exclusive_window.cpp +++ b/modules/holoviz/src/exclusive_window.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -127,8 +127,7 @@ vk::SurfaceKHR ExclusiveWindow::create_surface(vk::PhysicalDevice physical_devic vk::DisplayPropertiesKHR selected_display = display_properties[0]; for (auto&& displayProperty : display_properties) { HOLOSCAN_LOG_INFO("{}", displayProperty.displayName); - if (std::string(displayProperty.displayName).find(impl_->display_name_) != - std::string::npos) { + if (std::string(displayProperty.displayName).find(impl_->display_name_) != std::string::npos) { selected_display = displayProperty; } } diff --git a/modules/holoviz/src/layers/layer.cpp b/modules/holoviz/src/layers/layer.cpp index 914a7cfa..7f8b5b5d 100644 --- a/modules/holoviz/src/layers/layer.cpp +++ b/modules/holoviz/src/layers/layer.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. 
All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -68,9 +68,7 @@ void Layer::set_views(const std::vector& views) { } void Layer::add_view(const View& view) { - if (view.height == 0) { - throw std::invalid_argument("Layer view height should not be zero"); - } + if (view.height == 0) { throw std::invalid_argument("Layer view height should not be zero"); } if (view.width <= 0) { throw std::invalid_argument("Layer view width should not be less than or equal to zero"); } diff --git a/modules/holoviz/src/layers/layer.hpp b/modules/holoviz/src/layers/layer.hpp index d6cbcb5d..3ad69b68 100644 --- a/modules/holoviz/src/layers/layer.hpp +++ b/modules/holoviz/src/layers/layer.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -108,7 +108,7 @@ class Layer { * * @param views layer views to add */ - void set_views(const std::vector &views); + void set_views(const std::vector& views); /** * Add a layer view. 
diff --git a/modules/holoviz/src/vulkan/vulkan_app.cpp b/modules/holoviz/src/vulkan/vulkan_app.cpp index 177a9115..34201e6f 100644 --- a/modules/holoviz/src/vulkan/vulkan_app.cpp +++ b/modules/holoviz/src/vulkan/vulkan_app.cpp @@ -1245,9 +1245,7 @@ void Vulkan::Impl::begin_transfer_pass() { } void Vulkan::Impl::end_transfer_pass() { - if (transfer_jobs_.empty()) { - throw std::runtime_error("Not in transfer pass."); - } + if (transfer_jobs_.empty()) { throw std::runtime_error("Not in transfer pass."); } TransferJob& transfer_job = transfer_jobs_.back(); diff --git a/modules/holoviz/tests/functional/im_gui_layer_test.cpp b/modules/holoviz/tests/functional/im_gui_layer_test.cpp index b67255e1..4eb23f56 100644 --- a/modules/holoviz/tests/functional/im_gui_layer_test.cpp +++ b/modules/holoviz/tests/functional/im_gui_layer_test.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -57,7 +57,7 @@ TEST_F(ImGuiLayer, Errors) { EXPECT_NO_THROW(viz::Begin()); // it's an error to call BeginImGuiLayer if no valid ImGui context is set - ImGuiContext *prev_context = ImGui::GetCurrentContext(); + ImGuiContext* prev_context = ImGui::GetCurrentContext(); ImGui::SetCurrentContext(nullptr); EXPECT_THROW(viz::BeginImGuiLayer(), std::runtime_error); ImGui::SetCurrentContext(prev_context); diff --git a/python/CMakeLists.txt b/python/CMakeLists.txt index fa520e78..a8d7edfd 100644 --- a/python/CMakeLists.txt +++ b/python/CMakeLists.txt @@ -48,18 +48,18 @@ if(HOLOSCAN_BUILD_TESTS) ) add_test(NAME python-api-system-distributed-tests - COMMAND ${PYTHON_EXECUTABLE} -m pytest ${CMAKE_PYTHON_WORKING_DIR}/tests/system/distributed -v --durations=0 -k "not test_exception_handling_distributed" + COMMAND ${PYTHON_EXECUTABLE} -m pytest ${CMAKE_PYTHON_WORKING_DIR}/tests/system/distributed -v --durations=0 # either have to run from this working directory or set PYTHONPATH WORKING_DIRECTORY ${CMAKE_PYTHON_WORKING_DIR} ) - # case with additional logging/verbosity for debugging failures on CI - add_test(NAME python-api-system-distributed-verbose-tests - COMMAND ${PYTHON_EXECUTABLE} -m pytest ${CMAKE_PYTHON_WORKING_DIR}/tests/system/distributed -v --durations=0 -s -k test_exception_handling_distributed + # rerun distributed tests but with the event-based scheduler + # (omit serialization tests on this second run to keep overall time down) + add_test(NAME python-api-system-distributed-ebs-tests + COMMAND ${PYTHON_EXECUTABLE} -m pytest ${CMAKE_PYTHON_WORKING_DIR}/tests/system/distributed -v --durations=0 -k "not test_ucx_object_serialization" # either have to run from this working directory or set PYTHONPATH WORKING_DIRECTORY ${CMAKE_PYTHON_WORKING_DIR} ) - # Tracking any fatal error reported by the application tests set_tests_properties(python-api-system-tests PROPERTIES 
FAIL_REGULAR_EXPRESSION "Fatal Python error") @@ -67,7 +67,7 @@ if(HOLOSCAN_BUILD_TESTS) set_tests_properties(python-api-system-distributed-tests PROPERTIES FAIL_REGULAR_EXPRESSION "Fatal Python error") - set_tests_properties(python-api-system-distributed-verbose-tests PROPERTIES + set_tests_properties(python-api-system-distributed-ebs-tests PROPERTIES FAIL_REGULAR_EXPRESSION "Fatal Python error") # set environment variables used by distributed applications in the tests @@ -84,10 +84,10 @@ HOLOSCAN_MAX_DURATION_MS=2500\ " ) set_tests_properties(python-api-system-distributed-tests PROPERTIES ENVIRONMENT - "${CMAKE_DISTRIBUTED_TEST_FLAGS}" + "${CMAKE_DISTRIBUTED_TEST_FLAGS} HOLOSCAN_DISTRIBUTED_APP_SCHEDULER=multi_thread" ) - set_tests_properties(python-api-system-distributed-verbose-tests PROPERTIES ENVIRONMENT - "${CMAKE_DISTRIBUTED_TEST_FLAGS}" + set_tests_properties(python-api-system-distributed-ebs-tests PROPERTIES ENVIRONMENT + "${CMAKE_DISTRIBUTED_TEST_FLAGS} HOLOSCAN_DISTRIBUTED_APP_SCHEDULER=event_based" ) # tracing tests diff --git a/python/holoscan/CMakeLists.txt b/python/holoscan/CMakeLists.txt index 19e46da4..d0db6904 100644 --- a/python/holoscan/CMakeLists.txt +++ b/python/holoscan/CMakeLists.txt @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
# SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -99,6 +99,10 @@ add_dependencies(holoscan-python holoscan-python-cli) add_subdirectory(cli) add_subdirectory(conditions) add_subdirectory(core) +target_link_libraries(core_python + PRIVATE holoscan::core + PRIVATE AJA::ajantv2 # need this to parse NTV2Channel enum from kwargs +) add_subdirectory(executors) add_subdirectory(graphs) add_subdirectory(gxf) diff --git a/python/holoscan/cli/common/artifact_sources.py b/python/holoscan/cli/common/artifact_sources.py index 78ca6fbc..0646568d 100644 --- a/python/holoscan/cli/common/artifact_sources.py +++ b/python/holoscan/cli/common/artifact_sources.py @@ -16,174 +16,129 @@ """ # noqa: E501 import json -from pathlib import Path +import logging from typing import Any, List, Optional -from .enum_types import Arch, Platform, PlatformConfiguration, SdkType -from .exceptions import InvalidSourceFileError +import requests + +from .enum_types import Arch, PlatformConfiguration, SdkType +from .exceptions import InvalidSourceFileError, ManifestDownloadError class ArtifactSources: - """Provides default artifact source URLs with ability to override.""" + """Provides default artifact source URLs with the ability to override.""" - SectionVersion = "versions" - SectionDebianPackages = "debian-packges" + SectionDebianPackages = "debian-packages" SectionBaseImages = "base-images" SectionBuildImages = "build-images" SectionHealthProbe = "health-probes" + ManifestFileUrl = ( + "https://edge.urm.nvidia.com/artifactory/sw-holoscan-cli-generic/artifacts.json" + ) + EdgeROToken = 
"eyJ2ZXIiOiIyIiwidHlwIjoiSldUIiwiYWxnIjoiUlMyNTYiLCJraWQiOiJLcXV1ZVdTTlRjSkhqTFhGLTJCSnctX0lkRnY0eVhqREJyNEdWMU5Gc2NJIn0.eyJzdWIiOiJqZnJ0QDAxZHRqNnF0ZWNmcnB6MXJrNmg2cjAwd2FkXC91c2Vyc1wvc3ZjLWhvbG9zY2FuLWNsaS1wdWJsaWMtcm8iLCJzY3AiOiJtZW1iZXItb2YtZ3JvdXBzOnN2Yy1ob2xvc2Nhbi1jbGktcHVibGljLWdyb3VwIiwiYXVkIjoiamZydEAwMWR0ajZxdGVjZnJwejFyazZoNnIwMHdhZCIsImlzcyI6ImpmcnRAMDFkdGo2cXRlY2ZycHoxcms2aDZyMDB3YWRcL3VzZXJzXC9ycGFsYW5pc3dhbXkiLCJpYXQiOjE3MDY1NzA1NjUsImp0aSI6IjlmNmEyMmM1LTk5ZTItNGRlMi1hMDhiLTQxZjg2NzIyYmJjNyJ9.Y0gfyW2F0kxiKnMhGzNCyRRE2DNrDW6CUj5ozrQiIvAbSbhohskFcFmP836PU4p3ZQTzbYk9-bBwrqoPDUaZf8p9AW9GZ3mvlU2BxK0EQ-F4oKxA1_Z7agZ0KKcmcrfWnE4Ffy53qAD8PTk5vdcznpYOBpJtF4i16j2QcXvhVGGEqUyGa7_sONdK0sevb3ZztiEoupi4gD2wPTRn30rjpGIiFSDKiswAQwoyF_SqMCQWOBEeXMISp8hkEggUpvPrESv2lbpjgaKuEJ1CikbivYTJCcoqpgH7E72FXr1sB9jfwrFD8pkjtRpGGDxN43waXy4f3Ctr8_rpbmCvwSa9iw" # noqa: E501 def __init__(self) -> None: - self._data = { - SdkType.Holoscan.value: { - ArtifactSources.SectionVersion: ["1.0.3"], - ArtifactSources.SectionDebianPackages: { - "1.0.3": { - Arch.amd64.value: "https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/x86_64/holoscan_1.0.3.2-1_amd64.deb", # noqa: E501 - Arch.arm64.value: { - PlatformConfiguration.iGPU.value: "https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/arm64/holoscan_1.0.3.2-1_arm64.deb", # noqa: E501 - PlatformConfiguration.dGPU.value: "https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/sbsa/holoscan_1.0.3.2-1_arm64.deb", # noqa: E501 - }, - }, - }, - ArtifactSources.SectionBaseImages: { - PlatformConfiguration.iGPU.value: { - Platform.JetsonAgxOrinDevKit.value: { - "1.0.3": "nvcr.io/nvidia/clara-holoscan/holoscan:v1.0.3-igpu", - }, - Platform.IGXOrinDevIt.value: { - "1.0.3": "nvcr.io/nvidia/clara-holoscan/holoscan:v1.0.3-igpu", - }, - }, - PlatformConfiguration.dGPU.value: { - Platform.X64Workstation.value: { - "1.0.3": "nvcr.io/nvidia/clara-holoscan/holoscan:v1.0.3-dgpu", - }, - Platform.IGXOrinDevIt.value: 
{ - "1.0.3": "nvcr.io/nvidia/clara-holoscan/holoscan:v1.0.3-dgpu", - }, - }, - PlatformConfiguration.CPU.value: { - Platform.X64Workstation.value: { - "1.0.3": "nvcr.io/nvidia/clara-holoscan/holoscan:v1.0.3-dgpu", - } - }, - }, - ArtifactSources.SectionBuildImages: { - PlatformConfiguration.iGPU.value: { - Platform.JetsonAgxOrinDevKit.value: { - "1.0.3": "nvcr.io/nvidia/clara-holoscan/holoscan:v1.0.3-igpu", - }, - Platform.IGXOrinDevIt.value: { - "1.0.3": "nvcr.io/nvidia/clara-holoscan/holoscan:v1.0.3-igpu", - }, - }, - PlatformConfiguration.dGPU.value: { - Platform.X64Workstation.value: { - "1.0.3": "nvcr.io/nvidia/clara-holoscan/holoscan:v1.0.3-dgpu", - }, - Platform.IGXOrinDevIt.value: { - "1.0.3": "nvcr.io/nvidia/clara-holoscan/holoscan:v1.0.3-dgpu", - }, - }, - PlatformConfiguration.CPU.value: { - Platform.X64Workstation.value: { - "1.0.3": "nvcr.io/nvidia/clara-holoscan/holoscan:v1.0.3-dgpu", - } - }, - }, - ArtifactSources.SectionHealthProbe: { - Arch.amd64.value: { - "1.0.3": "https://github.com/grpc-ecosystem/grpc-health-probe/releases/download/v0.4.19/grpc_health_probe-linux-amd64", # noqa: E501 - }, - Arch.arm64.value: { - "1.0.3": "https://github.com/grpc-ecosystem/grpc-health-probe/releases/download/v0.4.19/grpc_health_probe-linux-arm64", # noqa: E501 - }, - }, - }, - } - self.validate(self._data) + self._logger = logging.getLogger("common") + self._supported_holoscan_versions = ["2.0.0"] @property def holoscan_versions(self) -> List[str]: - return self._data[SdkType.Holoscan.value]["versions"] + return self._supported_holoscan_versions - @property - def base_images(self) -> List[Any]: - return self._data[SdkType.Holoscan.value][ArtifactSources.SectionBaseImages] + def base_images(self, version) -> List[Any]: + return self._data[version][SdkType.Holoscan.value][ArtifactSources.SectionBaseImages] - @property - def build_images(self) -> List[Any]: - return self._data[SdkType.Holoscan.value][ArtifactSources.SectionBuildImages] + def build_images(self, 
version) -> List[Any]: + return self._data[version][SdkType.Holoscan.value][ArtifactSources.SectionBuildImages] - @property - def health_prob(self) -> List[Any]: - return self._data[SdkType.Holoscan.value][ArtifactSources.SectionHealthProbe] + def health_probe(self, version) -> List[Any]: + return self._data[version][ArtifactSources.SectionHealthProbe] - def load(self, file: Path): - """Overrides the default values from a given JOSN file. - Validates top-level attributes to ensure file is valid + def load(self, uri: str): + """Overrides the default values from a given JSON file. + Validates top-level attributes to ensure the file is valid Args: file (Path): Path to JSON file """ - temp = json.loads(file.read_text()) - - try: - self.validate(temp) - except Exception as ex: - raise InvalidSourceFileError(f"{file} is missing required data: {ex}") from ex + if uri.startswith("https"): + self._download_manifest_internal(uri) + elif uri.startswith("http"): + raise ManifestDownloadError( + "Downloading manifest files from non-HTTPS servers is not supported." 
+ ) + else: + self._logger.info(f"Using CLI manifest file from {uri}...") + with open(uri) as file: + temp = json.load(file) - self._data = temp + try: + self.validate(temp) + self._data = temp + except Exception as ex: + raise InvalidSourceFileError(f"{uri} is missing required data: {ex}") from ex def validate(self, data: Any): - assert SdkType.Holoscan.value in data - - assert ArtifactSources.SectionVersion in data[SdkType.Holoscan.value] - assert ArtifactSources.SectionDebianPackages in data[SdkType.Holoscan.value] - assert ArtifactSources.SectionBaseImages in data[SdkType.Holoscan.value] - assert ArtifactSources.SectionBuildImages in data[SdkType.Holoscan.value] - - for version in data[SdkType.Holoscan.value][ArtifactSources.SectionVersion]: - assert version in data[SdkType.Holoscan.value][ArtifactSources.SectionDebianPackages] - assert ( - Arch.amd64.value - in data[SdkType.Holoscan.value][ArtifactSources.SectionDebianPackages][version] - ) - assert version in data[SdkType.Holoscan.value][ArtifactSources.SectionDebianPackages] - assert ( - Arch.arm64.value - in data[SdkType.Holoscan.value][ArtifactSources.SectionDebianPackages][version] - ) + self._logger.debug("Validating CLI manifest file...") + + for key in data: + item = data[key] + assert SdkType.Holoscan.value in item + holoscan = item[SdkType.Holoscan.value] + + assert ArtifactSources.SectionDebianPackages in holoscan + assert ArtifactSources.SectionBaseImages in holoscan + assert ArtifactSources.SectionBuildImages in holoscan + + for config in PlatformConfiguration: + assert config.value in holoscan[ArtifactSources.SectionBaseImages] + assert config.value in holoscan[ArtifactSources.SectionBuildImages] - for config in PlatformConfiguration: - assert config.value in data[SdkType.Holoscan.value][ArtifactSources.SectionBaseImages] - assert config.value in data[SdkType.Holoscan.value][ArtifactSources.SectionBuildImages] + def download_manifest(self): + self._download_manifest_internal( + 
ArtifactSources.ManifestFileUrl, + {"Authorization": f"Bearer {ArtifactSources.EdgeROToken}"}, + ) + + def _download_manifest_internal(self, url, headers=None): + self._logger.info("Downloading CLI manifest file...") + manifest = requests.get(url, headers=headers) + + try: + manifest.raise_for_status() + except Exception as ex: + raise ManifestDownloadError( + f"Error downloading manifest file from {url}: {manifest.reason}" + ) from ex + else: + self._data = manifest.json() + self.validate(self._data) def debian_packages( self, version: str, architecture: Arch, platform_configuration: PlatformConfiguration ) -> Optional[str]: - """Gets the URI of a debian package based on the version, + """Gets the URI of a Debian package based on the version, the architecture and the platform configuration. Args: version (str): version of package - architecture (Arch): architecture oif the package + architecture (Arch): architecture of the package platform_configuration (PlatformConfiguration): platform configuration of the package Returns: Optional[str]: _description_ """ - debian_sources = self._data[SdkType.Holoscan.value][ArtifactSources.SectionDebianPackages] - if version not in debian_sources: - return None + debian_sources = self._data[version][SdkType.Holoscan.value][ + ArtifactSources.SectionDebianPackages + ] - if architecture == Arch.amd64 and architecture.value in debian_sources[version]: - return debian_sources[version][architecture.value] + if architecture == Arch.amd64 and architecture.value in debian_sources: + return debian_sources[architecture.value] elif ( architecture == Arch.arm64 - and architecture.value in debian_sources[version] - and platform_configuration.value in debian_sources[version][architecture.value] + and architecture.value in debian_sources + and platform_configuration.value in debian_sources[architecture.value] ): - return debian_sources[version][architecture.value][platform_configuration.value] + return 
debian_sources[architecture.value][platform_configuration.value] return None diff --git a/python/holoscan/cli/common/constants.py b/python/holoscan/cli/common/constants.py index f55ee1d3..05b27275 100644 --- a/python/holoscan/cli/common/constants.py +++ b/python/holoscan/cli/common/constants.py @@ -1,18 +1,18 @@ """ - SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. - SPDX-License-Identifier: Apache-2.0 +SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +SPDX-License-Identifier: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 +http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
""" # noqa: E501 from pathlib import Path @@ -81,6 +81,7 @@ class SDK: PLATFORM_MAPPINGS = { Platform.IGXOrinDevIt: Arch.arm64, Platform.JetsonAgxOrinDevKit: Arch.arm64, + Platform.SBSA: Arch.arm64, Platform.X64Workstation: Arch.amd64, } @@ -88,6 +89,7 @@ class SDK: PLATFORMS = [ Platform.IGXOrinDevIt.value, Platform.JetsonAgxOrinDevKit.value, + Platform.SBSA.value, Platform.X64Workstation.value, ] diff --git a/python/holoscan/cli/common/dockerutils.py b/python/holoscan/cli/common/dockerutils.py index 15cb03c9..cc2e4009 100644 --- a/python/holoscan/cli/common/dockerutils.py +++ b/python/holoscan/cli/common/dockerutils.py @@ -29,8 +29,8 @@ from ..common.utils import run_cmd_output from .constants import DefaultValues, EnvironmentVariables from .enum_types import PlatformConfiguration, SdkType -from .exceptions import InvalidManifestError, RunContainerError -from .utils import get_requested_gpus +from .exceptions import GpuResourceError, InvalidManifestError, RunContainerError +from .utils import get_gpu_count, get_requested_gpus logger = logging.getLogger("common") @@ -161,6 +161,7 @@ def docker_run( network: str, network_interface: Optional[str], use_all_nics: bool, + gpu_enum: Optional[str], config: Optional[Path], render: bool, user: str, @@ -216,11 +217,12 @@ def docker_run( if display is not None: environment_variables["DISPLAY"] = display - gpu = None - + # Use user-specified --gpu values + if gpu_enum is not None: + environment_variables["NVIDIA_VISIBLE_DEVICES"] = gpu_enum # If the image was built for iGPU but the system is configured for dGPU, attempt # targeting the system's iGPU using the CDI spec - if platform_config == PlatformConfiguration.iGPU.value and not _host_is_native_igpu(): + elif platform_config == PlatformConfiguration.iGPU.value and not _host_is_native_igpu(): environment_variables["NVIDIA_VISIBLE_DEVICES"] = "nvidia.com/igpu=0" logger.info( "Attempting to run an image for iGPU (integrated GPU) on a system configured " @@ -229,10 +231,22 @@ 
def docker_run( "user guide. If not, either rebuild the image for dGPU or run this image on a " "system configured for iGPU only (ex: Jetson AGX, Nano...)." ) + # Otherwise, read specs from package manifest else: requested_gpus = get_requested_gpus(pkg_info) - if requested_gpus > 0: - gpu = "all" + available_gpus = get_gpu_count() + + if available_gpus < requested_gpus: + raise GpuResourceError( + f"Available GPUs ({available_gpus}) are less than required ({requested_gpus}). " + ) + + if requested_gpus == 0: + environment_variables["NVIDIA_VISIBLE_DEVICES"] = "all" + else: + environment_variables["NVIDIA_VISIBLE_DEVICES"] = ",".join( + map(str, range(0, requested_gpus)) + ) if "path" in app_info["input"]: mapped_input = Path(app_info["input"]["path"]).as_posix() @@ -312,7 +326,6 @@ def docker_run( user, volumes, environment_variables, - gpu, shared_memory_size, ipc_mode, ulimits, @@ -330,7 +343,6 @@ def docker_run( user, volumes, environment_variables, - gpu, shared_memory_size, ipc_mode, ulimits, @@ -349,7 +361,6 @@ def _start_container( user, volumes, environment_variables, - gpu, shared_memory_size, ipc_mode, ulimits, @@ -360,7 +371,6 @@ def _start_container( image_name, command=commands, envs=environment_variables, - gpus=gpu, hostname=name, name=name, networks=[network], @@ -418,7 +428,6 @@ def _enter_terminal( user, volumes, environment_variables, - gpu, shared_memory_size, ipc_mode, ulimits, @@ -438,7 +447,6 @@ def _enter_terminal( detach=False, entrypoint="/bin/bash", envs=environment_variables, - gpus=gpu, hostname=name, interactive=True, name=name, diff --git a/python/holoscan/cli/common/enum_types.py b/python/holoscan/cli/common/enum_types.py index 8919f165..a9d5660d 100644 --- a/python/holoscan/cli/common/enum_types.py +++ b/python/holoscan/cli/common/enum_types.py @@ -1,18 +1,18 @@ """ - SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
- SPDX-License-Identifier: Apache-2.0 +SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +SPDX-License-Identifier: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 +http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
""" # noqa: E501 from enum import Enum @@ -43,6 +43,7 @@ class Platform(Enum): IGXOrinDevIt = "igx-orin-devkit" JetsonAgxOrinDevKit = "jetson-agx-orin-devkit" X64Workstation = "x64-workstation" + SBSA = "sbsa" class PlatformConfiguration(Enum): diff --git a/python/holoscan/cli/common/exceptions.py b/python/holoscan/cli/common/exceptions.py index 650cd343..cac44623 100644 --- a/python/holoscan/cli/common/exceptions.py +++ b/python/holoscan/cli/common/exceptions.py @@ -108,6 +108,13 @@ class InvalidSharedMemoryValueError(HoloscanSdkError): pass +class ManifestDownloadError(HoloscanSdkError): + """ + Raise when the failed to download manifest file.""" + + pass + + class UnmatchedDeviceError(HoloscanSdkError): """ Raise when the shared memory value is invalid.""" @@ -116,3 +123,10 @@ def __init__(self, unmatched_devices: List[str], *args: object) -> None: super().__init__( f"The following devices cannot be found in /dev/: {str.join(',', unmatched_devices)}" ) + + +class GpuResourceError(HoloscanSdkError): + """ + Raise when the available GPUs are less than requetsed.""" + + pass diff --git a/python/holoscan/cli/common/sdk_utils.py b/python/holoscan/cli/common/sdk_utils.py index 75a63498..1edac79b 100644 --- a/python/holoscan/cli/common/sdk_utils.py +++ b/python/holoscan/cli/common/sdk_utils.py @@ -104,7 +104,6 @@ def detect_holoscan_version( Returns: str: SDK version """ - if sdk_version is not None: if sdk_version.base_version not in artifact_sources.holoscan_versions: raise InvalidSdkError( diff --git a/python/holoscan/cli/common/utils.py b/python/holoscan/cli/common/utils.py index f063e648..90255299 100644 --- a/python/holoscan/cli/common/utils.py +++ b/python/holoscan/cli/common/utils.py @@ -50,6 +50,10 @@ def get_requested_gpus(pkg_info: dict) -> int: return num_gpu +def get_gpu_count(): + return len(run_cmd_output("nvidia-smi -L").splitlines()) + + def run_cmd(cmd: str) -> int: """ Executes command and return the returncode of the executed command. 
@@ -66,7 +70,7 @@ def run_cmd(cmd: str) -> int: return proc.wait() -def run_cmd_output(cmd: str) -> int: +def run_cmd_output(cmd: str) -> str: """ Executes command and returns the output. diff --git a/python/holoscan/cli/package-source.json b/python/holoscan/cli/package-source.json index 7b72c9a6..3a50848e 100644 --- a/python/holoscan/cli/package-source.json +++ b/python/holoscan/cli/package-source.json @@ -1,94 +1,135 @@ { - "holoscan": { - "versions": [ - "0.6.0" - ], - "debian-packges": { - "0.6.0": { + "0.6.0": { + "holoscan": { + "debian-packages": { "linux/amd64": "https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2004/x86_64/holoscan_0.6.0.3-1_amd64.deb", "linux/arm64": { - "0.6.0": { - "igpu": "https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2004/arm64/holoscan_0.6.0.3-1_arm64.deb", - "dgpu": "https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2004/sbsa/holoscan_0.6.0.3-1_arm64.deb" - } + "igpu": "https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2004/arm64/holoscan_0.6.0.3-1_arm64.deb", + "dgpu": "https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2004/sbsa/holoscan_0.6.0.3-1_arm64.deb" } - } - }, - "base-images": { - "igpu": { - "jetson-agx-orin-devkit": { - "0.6.0": "nvcr.io/nvidia/clara-holoscan/holoscan:v0.6.0-igpu" + }, + "base-images": { + "igpu": { + "jetson-agx-orin-devkit": "nvcr.io/nvidia/clara-holoscan/holoscan:v0.6.0-igpu", + "igx-orin-devkit": "nvcr.io/nvidia/clara-holoscan/holoscan:v0.6.0-igpu" }, - "igx-orin-devkit": { - "0.6.0": "nvcr.io/nvidia/clara-holoscan/holoscan:v0.6.0-igpu" + "dgpu": { + "x64-workstation": "nvcr.io/nvidia/clara-holoscan/holoscan:v0.6.0-dgpu", + "igx-orin-devkit": "nvcr.io/nvidia/clara-holoscan/holoscan:v0.6.0-dgpu", + "clara-agx-devkit": "nvcr.io/nvidia/clara-holoscan/holoscan:v0.6.0-dgpu" + }, + "igpu-assist": { + "clara-agx-devkit": "nvcr.io/nvidia/clara-holoscan/l4t-compute-assist:r34.1.0-r8.4.0-runtime", + "igx-orin-devkit": 
"nvcr.io/nvidia/clara-holoscan/l4t-compute-assist:r35.3.0-r8.5.2-runtime" + }, + "cpu": { + "x64-workstation": "nvcr.io/nvidia/clara-holoscan/holoscan:v0.6.0-dgpu" } }, - "dgpu": { - "x64-workstation": { - "0.6.0": "nvcr.io/nvidia/clara-holoscan/holoscan:v0.6.0-dgpu" + "build-images": { + "igpu": { + "jetson-agx-orin-devkit": "nvcr.io/nvidia/clara-holoscan/holoscan:v0.6.0-igpu", + "igx-orin-devkit": "nvcr.io/nvidia/clara-holoscan/holoscan:v0.6.0-igpu" + }, + "dgpu": { + "x64-workstation": "nvcr.io/nvidia/clara-holoscan/holoscan:v0.6.0-dgpu", + "igx-orin-devkit": "nvcr.io/nvidia/clara-holoscan/holoscan:v0.6.0-dgpu", + "clara-agx-devkit": "nvcr.io/nvidia/clara-holoscan/holoscan:v0.6.0-dgpu" }, - "clara-agx-devkit": { - "0.6.0": "nvcr.io/nvidia/clara-holoscan/holoscan:v0.6.0-dgpu" + "igpu-assist": { + "clara-agx-devkit": "nvcr.io/nvidia/clara-holoscan/l4t-compute-assist:r34.1.0-r8.4.0-runtime", + "igx-orin-devkit": "nvcr.io/nvidia/clara-holoscan/l4t-compute-assist:r35.3.0-r8.5.2-runtime" }, - "igx-orin-devkit": { - "0.6.0": "nvcr.io/nvidia/clara-holoscan/holoscan:v0.6.0-dgpu" + "cpu": { + "x64-workstation": "nvcr.io/nvidia/clara-holoscan/holoscan:v0.6.0-dgpu" + } + } + }, + "health-probes": { + "linux/amd64": "https://github.com/grpc-ecosystem/grpc-health-probe/releases/download/v0.4.19/grpc_health_probe-linux-amd64", + "linux/arm64": "https://github.com/grpc-ecosystem/grpc-health-probe/releases/download/v0.4.19/grpc_health_probe-linux-arm64" + } + }, + "1.0.3": { + "holoscan": { + "debian-packages": { + "linux/amd64": "https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/x86_64/holoscan_1.0.3.0-1_amd64.deb", + "linux/arm64": { + "igpu": "https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/arm64/holoscan_1.0.3.0-1_arm64.deb", + "dgpu": "https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/sbsa/holoscan_1.0.3.0-1_arm64.deb" } }, - "igpu-assist": { - "clara-agx-devkit": { - "0.6.0": 
"nvcr.io/nvidia/clara-holoscan/l4t-compute-assist:r34.1.0-r8.4.0-runtime" + "base-images": { + "igpu": { + "jetson-agx-orin-devkit": "nvcr.io/nvidia/clara-holoscan/holoscan:v1.0.3-igpu", + "igx-orin-devkit": "nvcr.io/nvidia/clara-holoscan/holoscan:v1.0.3-igpu" }, - "igx-orin-devkit": { - "0.6.0": "nvcr.io/nvidia/clara-holoscan/l4t-compute-assist:r35.3.0-r8.5.2-runtime" + "dgpu": { + "x64-workstation": "nvcr.io/nvidia/clara-holoscan/holoscan:v0.6.0-dgpu", + "igx-orin-devkit": "nvcr.io/nvidia/clara-holoscan/holoscan:v0.6.0-dgpu" + }, + "cpu": { + "x64-workstation": "nvcr.io/nvidia/clara-holoscan/holoscan:v0.6.0-dgpu" } }, - "cpu": { - "x64-workstation": { - "0.6.0": "nvcr.io/nvidia/clara-holoscan/holoscan:v0.6.0-dgpu" + "build-images": { + "igpu": { + "jetson-agx-orin-devkit": "nvcr.io/nvidia/clara-holoscan/holoscan:v1.0.3-igpu", + "igx-orin-devkit": "nvcr.io/nvidia/clara-holoscan/holoscan:v1.0.3-igpu" + }, + "dgpu": { + "x64-workstation": "nvcr.io/nvidia/clara-holoscan/holoscan:v0.6.0-dgpu", + "igx-orin-devkit": "nvcr.io/nvidia/clara-holoscan/holoscan:v0.6.0-dgpu" + }, + "cpu": { + "x64-workstation": "nvcr.io/nvidia/clara-holoscan/holoscan:v0.6.0-dgpu" } } }, - "build-images": { - "igpu": { - "jetson-agx-orin-devkit": { - "0.6.0": "nvcr.io/nvidia/clara-holoscan/holoscan:v0.6.0-igpu" - }, - "igx-orin-devkit": { - "0.6.0": "nvcr.io/nvidia/clara-holoscan/holoscan:v0.6.0-igpu" + "health-probes": { + "linux/amd64": "https://github.com/grpc-ecosystem/grpc-health-probe/releases/download/v0.4.24/grpc_health_probe-linux-amd64", + "linux/arm64": "https://github.com/grpc-ecosystem/grpc-health-probe/releases/download/v0.4.24/grpc_health_probe-linux-arm64" + } + }, + "2.0.0": { + "holoscan": { + "debian-packages": { + "linux/amd64": "https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/x86_64/holoscan_1.0.3.0-1_amd64.deb", + "linux/arm64": { + "igpu": "https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/arm64/holoscan_1.0.3.0-1_arm64.deb", + 
"dgpu": "https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/sbsa/holoscan_1.0.3.0-1_arm64.deb" } }, - "dgpu": { - "x64-workstation": { - "0.6.0": "nvcr.io/nvidia/clara-holoscan/holoscan:v0.6.0-dgpu" + "base-images": { + "igpu": { + "jetson-agx-orin-devkit": "nvcr.io/nvidia/clara-holoscan/holoscan:v1.0.3-igpu", + "igx-orin-devkit": "nvcr.io/nvidia/clara-holoscan/holoscan:v1.0.3-igpu" }, - "clara-agx-devkit": { - "0.6.0": "nvcr.io/nvidia/clara-holoscan/holoscan:v0.6.0-dgpu" + "dgpu": { + "x64-workstation": "nvcr.io/nvidia/clara-holoscan/holoscan:v0.6.0-dgpu", + "igx-orin-devkit": "nvcr.io/nvidia/clara-holoscan/holoscan:v0.6.0-dgpu" }, - "igx-orin-devkit": { - "0.6.0": "nvcr.io/nvidia/clara-holoscan/holoscan:v0.6.0-dgpu" + "cpu": { + "x64-workstation": "nvcr.io/nvidia/clara-holoscan/holoscan:v0.6.0-dgpu" } }, - "igpu-assist": { - "clara-agx-devkit": { - "0.6.0": "nvcr.io/nvidia/clara-holoscan/holoscan:v0.6.0-igpu" + "build-images": { + "igpu": { + "jetson-agx-orin-devkit": "nvcr.io/nvidia/clara-holoscan/holoscan:v1.0.3-igpu", + "igx-orin-devkit": "nvcr.io/nvidia/clara-holoscan/holoscan:v1.0.3-igpu" }, - "igx-orin-devkit": { - "0.6.0": "nvcr.io/nvidia/clara-holoscan/holoscan:v0.6.0-igpu" - } - }, - "cpu": { - "x64-workstation": { - "0.6.0": "nvcr.io/nvidia/clara-holoscan/holoscan:v0.6.0-dgpu" + "dgpu": { + "x64-workstation": "nvcr.io/nvidia/clara-holoscan/holoscan:v0.6.0-dgpu", + "igx-orin-devkit": "nvcr.io/nvidia/clara-holoscan/holoscan:v0.6.0-dgpu" + }, + "cpu": { + "x64-workstation": "nvcr.io/nvidia/clara-holoscan/holoscan:v0.6.0-dgpu" } } }, "health-probes": { - "linux/amd64": { - "0.6.0": "https://github.com/grpc-ecosystem/grpc-health-probe/releases/download/v0.4.19/grpc_health_probe-linux-amd64" - }, - "linux/arm64": { - "0.6.0": "https://github.com/grpc-ecosystem/grpc-health-probe/releases/download/v0.4.19/grpc_health_probe-linux-arm64" - } + "linux/amd64": 
"https://github.com/grpc-ecosystem/grpc-health-probe/releases/download/v0.4.24/grpc_health_probe-linux-amd64", + "linux/arm64": "https://github.com/grpc-ecosystem/grpc-health-probe/releases/download/v0.4.24/grpc_health_probe-linux-arm64" } } } \ No newline at end of file diff --git a/python/holoscan/cli/packager/arguments.py b/python/holoscan/cli/packager/arguments.py index d4370794..dc2efa93 100644 --- a/python/holoscan/cli/packager/arguments.py +++ b/python/holoscan/cli/packager/arguments.py @@ -62,6 +62,8 @@ def __init__(self, args: Namespace, temp_dir: str) -> None: if args.source is not None: self._artifact_sources.load(args.source) + else: + self._artifact_sources.download_manifest() self.build_parameters.username = args.username self.build_parameters.uid = args.uid diff --git a/python/holoscan/cli/packager/container_builder.py b/python/holoscan/cli/packager/container_builder.py index d6d87563..366e3dde 100644 --- a/python/holoscan/cli/packager/container_builder.py +++ b/python/holoscan/cli/packager/container_builder.py @@ -1,18 +1,18 @@ """ - SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. - SPDX-License-Identifier: Apache-2.0 +SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +SPDX-License-Identifier: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 +http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- See the License for the specific language governing permissions and - limitations under the License. +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. """ # noqa: E501 import logging @@ -25,7 +25,6 @@ from ..common.constants import Constants, DefaultValues from ..common.dockerutils import build_docker_image, create_and_get_builder, docker_export_tarball -from ..common.enum_types import SdkType from ..common.exceptions import WrongApplicationPathError from .parameters import PackageBuildParameters, PlatformBuildResults, PlatformParameters @@ -70,7 +69,6 @@ def build(self, platform_parameters: PlatformParameters) -> PlatformBuildResults PlatformBuildResults: build results """ self._copy_supporting_files(platform_parameters) - self._copy_health_probe(platform_parameters) docker_file_path = self._write_dockerfile(platform_parameters) return self._build_internal(docker_file_path, platform_parameters) @@ -125,17 +123,10 @@ def _build_internal( build_result.docker_tag = platform_parameters.tag export_to_tar_ball = self._build_parameters.tarball_output is not None else: - if shutil.which("update-binfmts") is None: - build_result.succeeded = False - build_result.error = ( - "Skipped due to missing QEMU and its dependencies. 
" - "Please follow the link to install QEMU " - "https://docs.nvidia.com/datacenter/cloud-native/playground/x-arch.html#installing-qemu" # noqa: E501 - ) - return build_result if self._build_parameters.tarball_output is not None: builds["output"] = { - "type": "oci", + # type=oci cannot be loaded by docker: https://github.com/docker/buildx/issues/59 + "type": "docker", "dest": build_result.tarball_filenaem, } else: @@ -262,11 +253,6 @@ def _copy_model_files(self): ) shutil.copy(self._build_parameters.models[model], target_model_path) - def _copy_health_probe(self, platform_parameters: PlatformParameters): - if self._build_parameters.sdk is SdkType.Holoscan: - target_path = os.path.join(self._temp_dir, "grpc_health_probe") - shutil.copy2(platform_parameters.health_probe, target_path) - def _copy_docs(self): """Copy user documentations to temporary location""" if self._build_parameters.docs is not None: diff --git a/python/holoscan/cli/packager/package_command.py b/python/holoscan/cli/packager/package_command.py index 62347998..86b8390f 100644 --- a/python/holoscan/cli/packager/package_command.py +++ b/python/holoscan/cli/packager/package_command.py @@ -134,9 +134,9 @@ def create_package_parser( ) advanced_group.add_argument( "--source", - type=valid_existing_path, + type=str, help="override Debian package, build container image and run container image from a " - "JSON formatted file.", + "JSON formatted file or a secured web server (HTTPS).", ) advanced_group.add_argument( "--sdk-version", diff --git a/python/holoscan/cli/packager/parameters.py b/python/holoscan/cli/packager/parameters.py index c1e61489..16023e4b 100644 --- a/python/holoscan/cli/packager/parameters.py +++ b/python/holoscan/cli/packager/parameters.py @@ -52,6 +52,9 @@ def __init__( self._data["holoscan_sdk_filename"] = None self._data["monai_deploy_sdk_file"] = None self._data["monai_deploy_sdk_filename"] = None + self._data["custom_base_image"] = False + self._data["custom_holoscan_sdk"] = False + 
self._data["custom_monai_deploy_sdk"] = False @property def tag(self) -> str: @@ -67,6 +70,30 @@ def tag(self) -> str: def tag_prefix(self) -> str: return self._tag_prefix + @property + def custom_base_image(self) -> Optional[str]: + return self._data["custom_base_image"] + + @custom_base_image.setter + def custom_base_image(self, value: str): + self._data["custom_base_image"] = value + + @property + def custom_holoscan_sdk(self) -> Optional[str]: + return self._data["custom_holoscan_sdk"] + + @custom_holoscan_sdk.setter + def custom_holoscan_sdk(self, value: str): + self._data["custom_holoscan_sdk"] = value + + @property + def custom_monai_deploy_sdk(self) -> Optional[str]: + return self._data["custom_monai_deploy_sdk"] + + @custom_monai_deploy_sdk.setter + def custom_monai_deploy_sdk(self, value: str): + self._data["custom_monai_deploy_sdk"] = value + @property def base_image(self) -> Optional[str]: return self._data["base_image"] diff --git a/python/holoscan/cli/packager/platforms.py b/python/holoscan/cli/packager/platforms.py index 24394f35..2b5329b9 100644 --- a/python/holoscan/cli/packager/platforms.py +++ b/python/holoscan/cli/packager/platforms.py @@ -27,7 +27,7 @@ from ..common.exceptions import IncompatiblePlatformConfigurationError, InvalidSdkError from ..common.sdk_utils import detect_sdk, detect_sdk_version from .parameters import PlatformParameters -from .sdk_downloader import download_health_probe_file, download_sdk_debian_file +from .sdk_downloader import download_sdk_debian_file class Platform: @@ -79,8 +79,23 @@ def configure_platforms( platform_parameters = PlatformParameters(platform, platform_config, args.tag, version) ( - platform_parameters.holoscan_sdk_file, - platform_parameters.monai_deploy_sdk_file, + platform_parameters.custom_base_image, + platform_parameters.base_image, + ) = self._find_base_image(platform_parameters, holoscan_sdk_version, args.base_image) + + platform_parameters.build_image = self._find_build_image( + 
platform_parameters, holoscan_sdk_version, application_type, args.build_image + ) + + ( + ( + platform_parameters.custom_holoscan_sdk, + platform_parameters.holoscan_sdk_file, + ), + ( + platform_parameters.custom_monai_deploy_sdk, + platform_parameters.monai_deploy_sdk_file, + ), ) = self._select_sdk_file( platform_parameters, temp_dir, @@ -91,21 +106,11 @@ def configure_platforms( args.holoscan_sdk_file, args.monai_deploy_sdk_file, ) - platform_parameters.base_image = self._find_base_image( - platform_parameters, holoscan_sdk_version, args.base_image - ) - platform_parameters.build_image = self._find_build_image( - platform_parameters, holoscan_sdk_version, application_type, args.build_image - ) if sdk is SdkType.Holoscan: - platform_parameters.health_probe = download_health_probe_file( - holoscan_sdk_version, - platform_parameters.platform_arch, - temp_dir, - self._logger, - self._artifact_sources, - ) + platform_parameters.health_probe = self._artifact_sources.health_probe( + holoscan_sdk_version + )[platform_parameters.platform_arch.value] platforms.append(platform_parameters) @@ -142,7 +147,7 @@ def _find_base_image( platform_parameters: PlatformParameters, sdk_version: str, base_image: Optional[str] = None, - ) -> str: + ) -> Tuple[bool, str]: """ Ensure user provided base image exists in Docker or locate the base image to use based on request platform. @@ -153,18 +158,23 @@ def _find_base_image( base_image (Optional[str]): user provided base image Returns: - (str): base image for building the image based on the given platform and SDK version. + (Tuple(bool, str)): bool: True if using user provided image. + str: base image for building the image based on the given + platform and SDK version. 
""" if base_image is not None: if image_exists(base_image): - return base_image + return (True, base_image) else: raise InvalidSdkError(f"Specified base image cannot be found: {base_image}") try: - return self._artifact_sources.base_images[platform_parameters.platform_config.value][ - platform_parameters.platform.value - ][sdk_version] + return ( + False, + self._artifact_sources.base_images(sdk_version)[ + platform_parameters.platform_config.value + ][platform_parameters.platform.value], + ) except Exception as ex: raise IncompatiblePlatformConfigurationError( f"""No base image found for the selected configuration: @@ -201,9 +211,9 @@ def _find_build_image( if application_type == ApplicationType.CppCMake: try: - return self._artifact_sources.build_images[ + return self._artifact_sources.build_images(sdk_version)[ platform_parameters.platform_config.value - ][platform_parameters.platform.value][sdk_version] + ][platform_parameters.platform.value] except Exception as ex: raise IncompatiblePlatformConfigurationError( f"No build image found for the selected configuration:" @@ -224,7 +234,7 @@ def _select_sdk_file( application_type: ApplicationType, holoscan_sdk_file: Optional[Path] = None, monai_deploy_sdk_file: Optional[Path] = None, - ) -> Tuple[Union[Path, str], Union[Path, str, None]]: + ) -> Tuple[Tuple[bool, Union[Path, str]], Tuple[Union[Path, str, None]]]: """ Detects the SDK distributable to use based on internal mapping or user input. @@ -259,7 +269,7 @@ def _select_sdk_file( application_type, holoscan_sdk_file, ), - None, + (None, None), ) elif sdk == SdkType.MonaiDeploy: if monai_deploy_app_sdk_version is None: @@ -285,7 +295,7 @@ def _get_holoscan_sdk( sdk_version: str, application_type: ApplicationType, sdk_file: Optional[Path] = None, - ) -> Union[Path, str]: + ) -> Tuple[bool, Union[Path, str]]: """ Validates Holoscan SDK redistributable file if specified. Otherwise, attempt to download the SDK file from internet. 
@@ -303,7 +313,9 @@ def _get_holoscan_sdk( file. Returns: - Union[Path, str]: Path to the SDK redistributable file. + Tuple[bool, Union[Path, str]]: + bool: True when user provides SDK file. Otherwise, False. + Union[Path, str]: Path to the SDK redistributable file. """ assert sdk is SdkType.Holoscan @@ -317,7 +329,7 @@ def _get_holoscan_sdk( "Invalid SDK file format, must be a PyPI wheel file with .whl file " "extension." ) - return sdk_file + return (True, sdk_file) elif application_type in [ ApplicationType.CppCMake, ApplicationType.Binary, @@ -327,7 +339,7 @@ def _get_holoscan_sdk( "Invalid SDK file format, must be a Debian package file with .deb " "file extension." ) - return sdk_file + return (True, sdk_file) raise InvalidSdkError(f"Unknown application type: {application_type.value}") else: @@ -335,7 +347,7 @@ def _get_holoscan_sdk( ApplicationType.PythonModule, ApplicationType.PythonFile, ]: - return Constants.PYPI_INSTALL_SOURCE + return (False, Constants.PYPI_INSTALL_SOURCE) elif application_type in [ ApplicationType.CppCMake, ApplicationType.Binary, @@ -346,13 +358,16 @@ def _get_holoscan_sdk( platform_parameters.platform_config, ) if debian_package_source is not None: - return download_sdk_debian_file( - debian_package_source, - sdk_version, - platform_parameters.platform_arch, - temp_dir, - self._logger, - self._artifact_sources, + return ( + False, + download_sdk_debian_file( + debian_package_source, + sdk_version, + platform_parameters.platform_arch, + temp_dir, + self._logger, + self._artifact_sources, + ), ) else: raise InvalidSdkError( @@ -364,7 +379,7 @@ def _get_holoscan_sdk( def _get_monai_deploy_sdk( self, monai_deploy_app_sdk_version: Optional[str], sdk_file: Optional[Path] = None - ) -> Union[Path, str]: + ) -> Tuple[Union[Path, str]]: """ Validates MONAI Deploy SDK redistributable file if specified. Otherwise, Docker build stage will install the SDK from PyPI. 
@@ -385,6 +400,6 @@ def _get_monai_deploy_sdk( raise InvalidSdkError( "Invalid SDK file format, must be a PyPI wheel file with .whl file extension." ) - return sdk_file + return (True, sdk_file) - return Constants.PYPI_INSTALL_SOURCE + return (False, None) diff --git a/python/holoscan/cli/packager/sdk_downloader.py b/python/holoscan/cli/packager/sdk_downloader.py index 92e4f126..c5f663c3 100644 --- a/python/holoscan/cli/packager/sdk_downloader.py +++ b/python/holoscan/cli/packager/sdk_downloader.py @@ -59,7 +59,7 @@ def download_health_probe_file( os.mkdir(target_dir) try: - download_url = artifact_sources.health_prob[arch.value][sdk_version] + download_url = artifact_sources.health_probe(sdk_version)[arch.value] logger.info(f"Downloading gRPC health probe from {download_url}...") response = requests.get(download_url) if not response.ok: @@ -114,7 +114,9 @@ def download_sdk_debian_file( "HTTP status {response.status_code}." ) except Exception as ex: - raise InvalidSdkError(f"failed to download SDK from {debian_package_source}", ex) from ex + raise InvalidSdkError( + f"failed to download SDK from {debian_package_source}: {response.reason}." + ) from ex if debian_package_source.endswith(".deb"): filename = Path(debian_package_source).name @@ -132,7 +134,9 @@ def download_sdk_debian_file( logger.info(f"Extracting Debian Package to {unzip_dir}...") z.extractall(unzip_dir) except Exception as ex: - raise InvalidSdkError(f"failed to unzip SDK from {debian_package_source}", ex) from ex + raise InvalidSdkError( + f"failed to unzip SDK from {debian_package_source}: {response.reason}." 
+ ) from ex for file in os.listdir(unzip_dir): if file.endswith(".deb"): diff --git a/python/holoscan/cli/packager/templates/Dockerfile.jinja2 b/python/holoscan/cli/packager/templates/Dockerfile.jinja2 index 7bd1237a..9616dd9f 100644 --- a/python/holoscan/cli/packager/templates/Dockerfile.jinja2 +++ b/python/holoscan/cli/packager/templates/Dockerfile.jinja2 @@ -87,21 +87,23 @@ RUN apt-get update \ ENV PYTHONPATH="{{ app_dir }}:$PYTHONPATH" {% endif %} - {% if application_type == 'CppCMake' or application_type == 'Binary' %} - + {% if custom_base_image == True or custom_holoscan_sdk == True %} # Update NV GPG repo key # https://developer.nvidia.com/blog/updating-the-cuda-linux-gpg-repository-key/ - RUN if [ $(uname -m) = "aarch64" ]; then ARCH=sbsa; else ARCH=x86_64; fi \ && curl -OL https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2004/$ARCH/cuda-keyring_1.0-1_all.deb \ && dpkg -i cuda-keyring_1.0-1_all.deb +# Use user-specified Holoscan SDK Debian Package COPY ./{{ holoscan_sdk_filename }} /tmp/{{ holoscan_sdk_filename }} RUN apt-get update \ && apt-get install -y /tmp/{{ holoscan_sdk_filename }} \ && rm -rf /var/lib/apt/lists/* - + {% else %} +# Use embedded Holoscan SDK +{# no op here #} + {% endif %} {% endif %} RUN groupadd -f -g $GID $UNAME @@ -120,8 +122,9 @@ RUN chmod +x {{ working_dir }}/tools # Copy gRPC health probe {% if health_probe is defined %} -COPY ./grpc_health_probe /bin/grpc_health_probe -RUN chmod +x /bin/grpc_health_probe + +RUN curl -L -o /bin/grpc_health_probe {{ health_probe | pprint }} \ + && chmod +x /bin/grpc_health_probe && ls -l /bin/grpc_health_probe HEALTHCHECK --interval=10s --timeout=1s \ CMD /bin/grpc_health_probe -addr=:8777 || exit 1 @@ -137,29 +140,35 @@ COPY ./pip/requirements.txt /tmp/requirements.txt RUN pip install --upgrade pip RUN pip install --no-cache-dir --user -r /tmp/requirements.txt -# Install Holoscan from PyPI only when sdk_type is Holoscan. 
-# For MONAI Deploy, the APP SDK will install it unless user specifies the Holoscan SDK file. -{% if holoscan_sdk_filename == 'pypi.org' and sdk_type == 'holoscan' %} -# Install Holoscan from PyPI org -RUN pip install holoscan=={{ holoscan_sdk_version }} -{% elif sdk_type == 'holoscan' %} -# Copy user-specified Holoscan SDK file +{# Use Holoscan SDK in the image unless specified by the user. +For MONAI Deploy, the APP SDK will install it unless user specifies the Holoscan SDK file. #} +{% if sdk_type == 'holoscan' %} + + {% if custom_holoscan_sdk == True %} +# Copy user-specified Holoscan SDK wheel file COPY ./{{ holoscan_sdk_filename }} /tmp/{{ holoscan_sdk_filename }} RUN pip install /tmp/{{ holoscan_sdk_filename }} -{% endif %} - -{% if monai_deploy_sdk_filename == 'pypi.org' %} -# Install MONAI Deploy from PyPI org -RUN pip install monai-deploy-app-sdk=={{ monai_deploy_app_sdk_version }} -{% elif sdk_type == 'monai-deploy' %} + {% elif custom_base_image == True %} +# Install Holoscan SDK wheel from PyPI +RUN pip install holoscan=={{ holoscan_sdk_version }} + {% else %} +# Use embedded Holoscan SDK +{# no op here #} + {% endif %} +{% else %} +# MONAI Deploy + + {% if custom_monai_deploy_sdk %} # Copy user-specified MONAI Deploy SDK file COPY ./{{ monai_deploy_sdk_filename }} /tmp/{{ monai_deploy_sdk_filename }} RUN pip install /tmp/{{ monai_deploy_sdk_filename }} -{% endif %} + {% else %} +# Install MONAI Deploy from PyPI org +RUN pip install monai-deploy-app-sdk=={{ monai_deploy_app_sdk_version }} + {% endif %} {% endif %} - - +{% endif %} {% if models is defined %} COPY ./models {{ models_dir }} diff --git a/python/holoscan/cli/runner/run_command.py b/python/holoscan/cli/runner/run_command.py index 3d8c0d57..1e6eed88 100644 --- a/python/holoscan/cli/runner/run_command.py +++ b/python/holoscan/cli/runner/run_command.py @@ -153,7 +153,7 @@ def create_run_parser( advanced_group.add_argument( "--shm-size", dest="shm_size", - help="sets the size of /dev/shm. 
The format is " + help="set the size of /dev/shm. The format is " "[MB|m|GB|g|Mi|MiB|Gi|GiB]. " "Use 'config' to read the shared memory value defined in the app.json manifest. " "If not specified, the container is launched using '--ipc=host' with host system's " @@ -164,7 +164,7 @@ def create_run_parser( dest="terminal", action="store_true", default=False, - help="enters terminal with all configured volume mappings and environment variables. " + help="enter terminal with all configured volume mappings and environment variables. " "(default: False)", ) advanced_group.add_argument( @@ -178,19 +178,27 @@ def create_run_parser( --device ajantv0 ajantv1 to mount AJA capture card 0 and 1. --device video1 to mount V4L2 video device 1. """, ) + advanced_group.add_argument( + "--gpus", + dest="gpus", + help="""Override the value of NVIDIA_VISIBLE_DEVICES environment variable. + default: the value specified in the package manifest file or 'all' if not specified. + Refer to https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/docker-specialized.html#gpu-enumeration + for all available options.""", + ) user_group = parser.add_argument_group(title="security options") user_group.add_argument( "--uid", type=str, default=os.getuid(), - help=f"runs the container with the UID. (default:{os.getuid()})", + help=f"run the container with the UID. (default:{os.getuid()})", ) user_group.add_argument( "--gid", type=str, default=os.getgid(), - help=f"runs the container with the GID. (default:{os.getgid()})", + help=f"run the container with the GID. 
(default:{os.getgid()})", ) return parser diff --git a/python/holoscan/cli/runner/runner.py b/python/holoscan/cli/runner/runner.py index 852074fb..bebc3ee5 100644 --- a/python/holoscan/cli/runner/runner.py +++ b/python/holoscan/cli/runner/runner.py @@ -100,6 +100,7 @@ def _run_app(args: Namespace, app_info: dict, pkg_info: dict): network: str = create_or_use_network(args.network, map_name) nic: str = args.nic if args.nic else None use_all_nics: bool = args.use_all_nics + gpus: str = args.gpus if args.gpus else None config: Path = args.config if args.config else None address: str = args.address if args.address else None worker_address: str = args.worker_address if args.worker_address else None @@ -151,6 +152,7 @@ def _run_app(args: Namespace, app_info: dict, pkg_info: dict): network, nic, use_all_nics, + gpus, config, render, user, @@ -243,14 +245,18 @@ def _pkg_specific_dependency_verification(pkg_info: dict) -> bool: """Checks for any package specific dependencies. Currently it verifies the following dependencies: - * If gpu has been requested by the application, verify that nvidia-docker is installed. - + * If gpu has been requested by the application, verify that nvidia-ctk is installed. + Note: when running inside a Docker container, always assume nvidia-ctk is installed. Args: pkg_info: package manifest as a python dict Returns: True if all dependencies are satisfied, otherwise False. """ + if os.path.exists("/.dockerenv"): + logger.info("--> Skipping nvidia-ctk check inside Docker...\n") + return True + requested_gpus = get_requested_gpus(pkg_info) if requested_gpus > 0: # check for NVIDIA Container TOolkit diff --git a/python/holoscan/conditions/conditions.cpp b/python/holoscan/conditions/conditions.cpp index 03e15d5d..b2a74ef9 100644 --- a/python/holoscan/conditions/conditions.cpp +++ b/python/holoscan/conditions/conditions.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -47,10 +47,10 @@ PYBIND11_MODULE(_conditions, m) { m.attr("__version__") = "dev"; #endif -init_boolean(m); -init_count(m); -init_periodic(m); -init_downstream_message_affordable(m); -init_message_available(m); + init_boolean(m); + init_count(m); + init_periodic(m); + init_downstream_message_affordable(m); + init_message_available(m); } // PYBIND11_MODULE } // namespace holoscan diff --git a/python/holoscan/conditions/message_available.cpp b/python/holoscan/conditions/message_available.cpp index 7e3fcc09..fa65979b 100644 --- a/python/holoscan/conditions/message_available.cpp +++ b/python/holoscan/conditions/message_available.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -55,7 +55,7 @@ class PyMessageAvailableCondition : public MessageAvailableCondition { // Define a constructor that fully initializes the object. 
PyMessageAvailableCondition(Fragment* fragment, // std::shared_ptr receiver, - size_t min_size = 1UL, size_t front_stage_max_size = 1UL, + uint64_t min_size = 1UL, size_t front_stage_max_size = 1UL, const std::string& name = "noname_message_available_condition") : MessageAvailableCondition( ArgList{Arg{"min_size", min_size}, Arg{"front_stage_max_size", front_stage_max_size}}) { @@ -73,7 +73,7 @@ void init_message_available(py::module_& m) { gxf::GXFCondition, std::shared_ptr>( m, "MessageAvailableCondition", doc::MessageAvailableCondition::doc_MessageAvailableCondition) - .def(py::init(), + .def(py::init(), "fragment"_a, "min_size"_a = 1UL, "front_stage_max_size"_a = 1UL, diff --git a/python/holoscan/core/application.cpp b/python/holoscan/core/application.cpp index 5291bd1c..47b75b26 100644 --- a/python/holoscan/core/application.cpp +++ b/python/holoscan/core/application.cpp @@ -116,4 +116,88 @@ void init_application(py::module_& m) { R"doc(Return repr(self).)doc"); } +py::list PyApplication::py_argv() { + py::list argv; + // In Python, `sys.argv` returns `['']` if there are no arguments (i.e., when just `python` is + // called). We'll do the same here. 
+ if (argv_.empty()) { + argv.append(py::cast("", py::return_value_policy::reference)); + return argv; + } + + for (auto iter = std::next(argv_.begin()); iter != argv_.end(); ++iter) { + argv.append(py::cast(*iter, py::return_value_policy::reference)); + } + + if (argv.empty()) { argv.append(py::cast("", py::return_value_policy::reference)); } + return argv; +} + +void PyApplication::add_operator(const std::shared_ptr& op) { + /* , , , */ + PYBIND11_OVERRIDE(void, Application, add_operator, op); +} + +void PyApplication::add_flow(const std::shared_ptr& upstream_op, + const std::shared_ptr& downstream_op) { + /* , , , */ + PYBIND11_OVERRIDE(void, Application, add_flow, upstream_op, downstream_op); +} + +void PyApplication::add_flow(const std::shared_ptr& upstream_op, + const std::shared_ptr& downstream_op, + std::set> io_map) { + /* , , , */ + PYBIND11_OVERRIDE(void, Application, add_flow, upstream_op, downstream_op, io_map); +} + +void PyApplication::add_flow(const std::shared_ptr& upstream_frag, + const std::shared_ptr& downstream_frag, + std::set> port_pairs) { + /* , , , */ + PYBIND11_OVERRIDE(void, Application, add_flow, upstream_frag, downstream_frag, port_pairs); +} + +void PyApplication::compose() { + /* , , , */ + PYBIND11_OVERRIDE(void, Application, compose); +} + +void PyApplication::run() { + // Create a deleter for DLManagedTensor objects so that they can be deleted in a separate thread + // to avoid blocking the GXF runtime mutex. + LazyDLManagedTensorDeleter deleter; + + // Get the trace and profile functions from sys + { + pybind11::gil_scoped_acquire gil; + + auto sys_module = py::module::import("sys"); + + // Note that when cProfile is used, the profile_func_ is a cProfile.Profile object, not a + // function. If the return value of getprofile() is not a function, we need to use the + // existing c_profilefunc_ and c_profileobj_ instead of calling sys.setprofile() with + // profile_func_. 
+ py_profile_func_ = sys_module.attr("getprofile")(); + py_trace_func_ = sys_module.attr("gettrace")(); + + auto py_thread_state = _PyThreadState_UncheckedGet(); + c_profilefunc_ = py_thread_state->c_profilefunc; + c_profileobj_ = py_thread_state->c_profileobj; + c_tracefunc_ = py_thread_state->c_tracefunc; + c_traceobj_ = py_thread_state->c_traceobj; + +#if PY_VERSION_HEX >= 0x030b0000 // >= Python 3.11.0 + // _PyInterpreterFrame* + py_last_frame_ = py_thread_state->cframe->current_frame; +#else + // PyFrameObject* + py_last_frame_ = py_thread_state->frame; // = PyEval_GetFrame(); +#endif + } + + /* , , , */ + PYBIND11_OVERRIDE(void, Application, run); +} + } // namespace holoscan diff --git a/python/holoscan/core/application.hpp b/python/holoscan/core/application.hpp index ea19a193..4f2c50c9 100644 --- a/python/holoscan/core/application.hpp +++ b/python/holoscan/core/application.hpp @@ -67,93 +67,27 @@ class PyApplication : public Application { * @param obj PyApplication object. * @return The argv_ as a Python list, discarding the first element. */ - py::list py_argv() { - py::list argv; - // In Python, `sys.argv` returns `['']` if there are no arguments (i.e., when just `python` is - // called). We'll do the same here. 
- if (argv_.empty()) { - argv.append(py::cast("", py::return_value_policy::reference)); - return argv; - } - - for (auto iter = std::next(argv_.begin()); iter != argv_.end(); ++iter) { - argv.append(py::cast(*iter, py::return_value_policy::reference)); - } - - if (argv.empty()) { argv.append(py::cast("", py::return_value_policy::reference)); } - return argv; - } + py::list py_argv(); /* Trampolines (need one for each virtual function) */ - void add_operator(const std::shared_ptr& op) override { - /* , , , */ - PYBIND11_OVERRIDE(void, Application, add_operator, op); - } + void add_operator(const std::shared_ptr& op) override; void add_flow(const std::shared_ptr& upstream_op, - const std::shared_ptr& downstream_op) override { - /* , , , */ - PYBIND11_OVERRIDE(void, Application, add_flow, upstream_op, downstream_op); - } + const std::shared_ptr& downstream_op) override; void add_flow(const std::shared_ptr& upstream_op, const std::shared_ptr& downstream_op, - std::set> io_map) override { - /* , , , */ - PYBIND11_OVERRIDE(void, Application, add_flow, upstream_op, downstream_op, io_map); - } + std::set> io_map) override; void add_flow(const std::shared_ptr& upstream_frag, const std::shared_ptr& downstream_frag, - std::set> port_pairs) override { - /* , , , */ - PYBIND11_OVERRIDE(void, Application, add_flow, upstream_frag, downstream_frag, port_pairs); - } - void compose() override { - /* , , , */ - PYBIND11_OVERRIDE(void, Application, compose); - } - void run() override { - // Create a deleter for DLManagedTensor objects so that they can be deleted in a separate thread - // to avoid blocking the GXF runtime mutex. - LazyDLManagedTensorDeleter deleter; - - // Get the trace and profile functions from sys - { - pybind11::gil_scoped_acquire gil; - - auto sys_module = py::module::import("sys"); - - // Note that when cProfile is used, the profile_func_ is a cProfile.Profile object, not a - // function. 
If the return value of getprofile() is not a function, we need to use the - // existing c_profilefunc_ and c_profileobj_ instead of calling sys.setprofile() with - // profile_func_. - py_profile_func_ = sys_module.attr("getprofile")(); - py_trace_func_ = sys_module.attr("gettrace")(); - - auto py_thread_state = _PyThreadState_UncheckedGet(); - c_profilefunc_ = py_thread_state->c_profilefunc; - c_profileobj_ = py_thread_state->c_profileobj; - c_tracefunc_ = py_thread_state->c_tracefunc; - c_traceobj_ = py_thread_state->c_traceobj; - -#if PY_VERSION_HEX >= 0x030b0000 // >= Python 3.11.0 - py_last_frame_ = py_thread_state->cframe->current_frame; -#else - py_last_frame_ = py_thread_state->frame; // = PyEval_GetFrame(); -#endif - } - - /* , , , */ - PYBIND11_OVERRIDE(void, Application, run); - } + std::set> port_pairs) override; + void compose() override; + void run() override; private: friend class PyOperator; // Fake frame object for the last python frame (where Application.run() was called). -#if PY_VERSION_HEX >= 0x030b0000 // >= Python 3.11.0 - _PyInterpreterFrame* py_last_frame_ = nullptr; -#else - PyFrameObject* py_last_frame_ = nullptr; -#endif + // Actual type is either _PyInterpreterFrame* (PY_VERSION_HEX >= 0x030b0000) or PyFrameObject*. + void* py_last_frame_ = nullptr; // Trace/profile functions // - Retain a reference to the Python trace/profile function if available via diff --git a/python/holoscan/core/application_pydoc.hpp b/python/holoscan/core/application_pydoc.hpp index c21eb0ef..42f4be1d 100644 --- a/python/holoscan/core/application_pydoc.hpp +++ b/python/holoscan/core/application_pydoc.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/python/holoscan/core/arg.cpp b/python/holoscan/core/arg.cpp index 5dc324ed..8e51c859 100644 --- a/python/holoscan/core/arg.cpp +++ b/python/holoscan/core/arg.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -148,7 +148,7 @@ void init_arg(py::module_& m) { .def_property_readonly("description", &Arg::description, doc::Arg::doc_description) .def( "__repr__", - // use py::object and obj.cast to avoid a segfault if object has not been initialized + // use py::object and obj.cast to avoid a segfault if object has not been initialized [](const Arg& arg) { return arg.description(); }, R"doc(Return repr(self).)doc"); @@ -166,7 +166,7 @@ void init_arg(py::module_& m) { .def_property_readonly("description", &ArgList::description, doc::ArgList::doc_description) .def( "__repr__", - // use py::object and obj.cast to avoid a segfault if object has not been initialized + // use py::object and obj.cast to avoid a segfault if object has not been initialized [](const ArgList& list) { return list.description(); }, R"doc(Return repr(self).)doc"); @@ -183,7 +183,7 @@ void init_arg(py::module_& m) { .def_property_readonly("to_string", &ArgType::to_string, doc::ArgType::doc_to_string) .def( "__repr__", - // use py::object and obj.cast to avoid a segfault if object has not been initialized + // use py::object and obj.cast to avoid a segfault if object has not been initialized [](const ArgType& t) { return t.to_string(); }, R"doc(Return repr(self).)doc"); // Register argument setter and gxf parameter adaptor for py::object diff --git a/python/holoscan/core/arg.hpp 
b/python/holoscan/core/arg.hpp index 77734867..bb2654d8 100644 --- a/python/holoscan/core/arg.hpp +++ b/python/holoscan/core/arg.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -30,33 +30,6 @@ namespace holoscan { void init_arg(py::module_&); -static const std::unordered_map element_type_namemap{ - {ArgElementType::kCustom, "CUSTOM"}, - {ArgElementType::kBoolean, "BOOLEAN"}, - {ArgElementType::kInt8, "INT8"}, - {ArgElementType::kUnsigned8, "UNSIGNED8"}, - {ArgElementType::kInt16, "INT16"}, - {ArgElementType::kUnsigned16, "UNSIGNED16"}, - {ArgElementType::kInt32, "INT32"}, - {ArgElementType::kUnsigned32, "UNSIGNED32"}, - {ArgElementType::kInt64, "INT64"}, - {ArgElementType::kUnsigned64, "UNSIGNED64"}, - {ArgElementType::kFloat32, "FLOAT32"}, - {ArgElementType::kFloat64, "FLOAT64"}, - {ArgElementType::kString, "STRING"}, - {ArgElementType::kHandle, "HANDLE"}, - {ArgElementType::kYAMLNode, "YAML_NODE"}, - {ArgElementType::kIOSpec, "IO_SPEC"}, - {ArgElementType::kCondition, "CONDITION"}, - {ArgElementType::kResource, "RESOURCE"}, -}; - -static const std::unordered_map container_type_namemap{ - {ArgContainerType::kNative, "NATIVE"}, - {ArgContainerType::kVector, "VECTOR"}, - {ArgContainerType::kArray, "ARRAY"}, -}; - } // namespace holoscan #endif /* PYBIND11_CORE_ARG_HPP */ diff --git a/python/holoscan/core/component.cpp b/python/holoscan/core/component.cpp index d8e968ad..6aa59a21 100644 --- a/python/holoscan/core/component.cpp +++ b/python/holoscan/core/component.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. 
All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -32,6 +32,18 @@ namespace py = pybind11; namespace holoscan { +class PyComponentBase : public ComponentBase { + public: + /* Inherit the constructors */ + using ComponentBase::ComponentBase; + + /* Trampolines (need one for each virtual function) */ + void initialize() override { + /* , , , */ + PYBIND11_OVERRIDE(void, ComponentBase, initialize); + } +}; + class PyComponent : public Component { public: /* Inherit the constructors */ @@ -56,39 +68,43 @@ void init_component(py::module_& m) { "description", &ComponentSpec::description, doc::ComponentSpec::doc_description) .def( "__repr__", - // use py::object and obj.cast to avoid a segfault if object has not been initialized + // use py::object and obj.cast to avoid a segfault if object has not been initialized [](const ComponentSpec& spec) { return spec.description(); }, R"doc(Return repr(self).)doc"); - py::class_>( - m, "Component", doc::Component::doc_Component) + py::class_>( + m, "ComponentBase", doc::Component::doc_Component) .def(py::init<>(), doc::Component::doc_Component) - .def_property_readonly("id", &Component::id, doc::Component::doc_id) - .def_property_readonly("name", &Component::name, doc::Component::doc_name) - .def_property_readonly("fragment", &Component::fragment, doc::Component::doc_fragment) + .def_property_readonly("id", &ComponentBase::id, doc::Component::doc_id) + .def_property_readonly("name", &ComponentBase::name, doc::Component::doc_name) + .def_property_readonly("fragment", &ComponentBase::fragment, doc::Component::doc_fragment) .def("add_arg", - py::overload_cast(&Component::add_arg), + py::overload_cast(&ComponentBase::add_arg), "arg"_a, doc::Component::doc_add_arg_Arg) .def("add_arg", - py::overload_cast(&Component::add_arg), + py::overload_cast(&ComponentBase::add_arg), "arg"_a, doc::Component::doc_add_arg_ArgList) - .def_property_readonly("args", 
&Component::args, doc::Component::doc_args) + .def_property_readonly("args", &ComponentBase::args, doc::Component::doc_args) .def("initialize", - &Component::initialize, + &ComponentBase::initialize, doc::Component::doc_initialize) // note: virtual function .def_property_readonly( - "description", &Component::description, doc::Component::doc_description) + "description", &ComponentBase::description, doc::Component::doc_description) .def( "__repr__", [](const py::object& obj) { // use py::object and obj.cast to avoid a segfault if object has not been initialized - auto component = obj.cast>(); + auto component = obj.cast>(); if (component) { return component->description(); } return std::string(""); }, R"doc(Return repr(self).)doc"); + + py::class_>( + m, "Component", doc::Component::doc_Component) + .def(py::init<>(), doc::Component::doc_Component); } } // namespace holoscan diff --git a/python/holoscan/core/component_pydoc.hpp b/python/holoscan/core/component_pydoc.hpp index 10d4c1d0..551210d3 100644 --- a/python/holoscan/core/component_pydoc.hpp +++ b/python/holoscan/core/component_pydoc.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -80,7 +80,7 @@ name : holoscan.core.Fragment PYDOC(id, R"doc( The identifier of the component. -The identifier is initially set to -1, and will become a valid value when the +The identifier is initially set to ``-1``, and will become a valid value when the component is initialized. 
With the default executor (`holoscan.gxf.GXFExecutor`), the identifier is set to the GXF diff --git a/python/holoscan/core/core.hpp b/python/holoscan/core/core.hpp index 0aa1411e..28e15691 100644 --- a/python/holoscan/core/core.hpp +++ b/python/holoscan/core/core.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -45,15 +45,6 @@ void init_application(py::module_&); void init_data_flow_tracker(py::module_&); void init_cli(py::module_&); -// TODO: remove this unused function -template -std::vector get_names_from_map(ObjT& map_obj) { - std::vector names; - names.reserve(map_obj.size()); - for (auto& i : map_obj) { names.push_back(i.first); } - return names; -} - } // namespace holoscan #endif /* PYBIND11_CORE_CORE_HPP */ diff --git a/python/holoscan/core/dl_converter.cpp b/python/holoscan/core/dl_converter.cpp index 8c4dc21d..ccae260c 100644 --- a/python/holoscan/core/dl_converter.cpp +++ b/python/holoscan/core/dl_converter.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -27,17 +27,42 @@ #include "holoscan/core/common.hpp" #include "holoscan/core/domain/tensor.hpp" +#include "gxf/std/dlpack_utils.hpp" // nvidia::gxf::numpyTypestr + +namespace { + +// A macro like CHECK_CUDA_ERROR from gxf/cuda/cuda_common.h, but it uses Holoscan-style +// logging and throws an exception instead of returning an nvidia::gxf::Unexpected. +#define CHECK_CUDA_THROW_ERROR(cu_result, stmt, ...) 
\ + do { \ + cudaError_t err = (cu_result); \ + if (err != cudaSuccess) { \ + HOLOSCAN_LOG_ERROR("Runtime call {} in line {} of file {} failed with '{}' ({})", \ + #stmt, \ + __LINE__, \ + __FILE__, \ + cudaGetErrorString(err), \ + err); \ + throw std::runtime_error("Error occurred in CUDA runtime API call"); \ + } \ + } while (0) + +} // namespace namespace holoscan { -void set_array_interface(const py::object& obj, std::shared_ptr ctx) { +void set_array_interface(const py::object& obj, std::shared_ptr ctx) { DLTensor& dl_tensor = ctx->tensor.dl_tensor; if (dl_tensor.data) { // Prepare the array interface items // Main items - const char* type_str = numpy_dtype(dl_tensor.dtype); + auto maybe_type_str = nvidia::gxf::numpyTypestr(dl_tensor.dtype); + if (!maybe_type_str) { + throw std::runtime_error("Unable to determine NumPy dtype from DLPack tensor"); + } + const char* type_str = maybe_type_str.value(); py::tuple shape = array2pytuple(dl_tensor.shape, dl_tensor.ndim); py::str typestr = py::str(type_str); py::tuple data = pybind11::make_tuple(py::int_(reinterpret_cast(dl_tensor.data)), @@ -139,10 +164,19 @@ py::capsule py_dlpack(Tensor* tensor, py::object stream) { // Wait for the current stream to finish before the provided stream starts consuming the memory. 
if (stream_id >= 0 && curr_stream_ptr != stream_ptr) { cudaEvent_t curr_stream_event; - cudaEventCreateWithFlags(&curr_stream_event, cudaEventDisableTiming); - cudaEventRecord(curr_stream_event, curr_stream_ptr); - cudaStreamWaitEvent(stream_ptr, curr_stream_event, 0); - cudaEventDestroy(curr_stream_event); + cudaError_t cuda_status; + + cuda_status = cudaEventCreateWithFlags(&curr_stream_event, cudaEventDisableTiming); + CHECK_CUDA_THROW_ERROR(cuda_status, "Failure during call to cudaEventCreateWithFlags"); + + cuda_status = cudaEventRecord(curr_stream_event, curr_stream_ptr); + CHECK_CUDA_THROW_ERROR(cuda_status, "Failure during call to cudaEventRecord"); + + cuda_status = cudaStreamWaitEvent(stream_ptr, curr_stream_event, 0); + CHECK_CUDA_THROW_ERROR(cuda_status, "Failure during call to cudaStreamWaitEvent"); + + cuda_status = cudaEventDestroy(curr_stream_event); + CHECK_CUDA_THROW_ERROR(cuda_status, "Failure during call to cudaEventDestroy"); } DLManagedTensor* dl_managed_tensor = tensor->to_dlpack(); diff --git a/python/holoscan/core/dl_converter.hpp b/python/holoscan/core/dl_converter.hpp index a51b70bf..d8c756d9 100644 --- a/python/holoscan/core/dl_converter.hpp +++ b/python/holoscan/core/dl_converter.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -38,9 +38,6 @@ namespace py = pybind11; namespace holoscan { -// Forward declaration -class DLManagedTensorCtx; - /** * @brief Structure to hold the context of a DLManagedTensor. * @@ -61,7 +58,7 @@ struct ArrayInterfaceMemoryBuffer { * @param obj The Python object to set the array interface object. * @param ctx The context of the DLManagedTensor. 
*/ -void set_array_interface(const py::object& obj, std::shared_ptr ctx); +void set_array_interface(const py::object& obj, std::shared_ptr ctx); /** * @brief Provide `__dlpack__` method diff --git a/python/holoscan/core/execution_context.cpp b/python/holoscan/core/execution_context.cpp index 1e950c0f..7c5770cd 100644 --- a/python/holoscan/core/execution_context.cpp +++ b/python/holoscan/core/execution_context.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -39,4 +39,22 @@ void init_execution_context(py::module_& m) { .def_property_readonly("input", &PyExecutionContext::py_input) .def_property_readonly("output", &PyExecutionContext::py_output); } + +PyExecutionContext::PyExecutionContext(gxf_context_t context, + std::shared_ptr& py_input_context, + std::shared_ptr& py_output_context, + py::object op) + : gxf::GXFExecutionContext(context, py_input_context, py_output_context), + py_op_(op), + py_input_context_(py_input_context), + py_output_context_(py_output_context) {} + +std::shared_ptr PyExecutionContext::py_input() const { + return py_input_context_; +} + +std::shared_ptr PyExecutionContext::py_output() const { + return py_output_context_; +} + } // namespace holoscan diff --git a/python/holoscan/core/execution_context.hpp b/python/holoscan/core/execution_context.hpp index e2cce8d1..974c8cca 100644 --- a/python/holoscan/core/execution_context.hpp +++ b/python/holoscan/core/execution_context.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -38,15 +38,11 @@ class PyExecutionContext : public gxf::GXFExecutionContext { PyExecutionContext(gxf_context_t context, std::shared_ptr& py_input_context, std::shared_ptr& py_output_context, - py::object op = py::none()) - : gxf::GXFExecutionContext(context, py_input_context, py_output_context), - py_op_(op), - py_input_context_(py_input_context), - py_output_context_(py_output_context) {} + py::object op = py::none()); - std::shared_ptr py_input() const { return py_input_context_; } + std::shared_ptr py_input() const; - std::shared_ptr py_output() const { return py_output_context_; } + std::shared_ptr py_output() const; private: py::object py_op_ = py::none(); diff --git a/python/holoscan/core/fragment.cpp b/python/holoscan/core/fragment.cpp index 43a70540..40668149 100644 --- a/python/holoscan/core/fragment.cpp +++ b/python/holoscan/core/fragment.cpp @@ -154,4 +154,42 @@ void init_fragment(py::module_& m) { R"doc(Return repr(self).)doc"); } +PyFragment::PyFragment(py::object op) : Fragment() { + py::gil_scoped_acquire scope_guard; + py_compose_ = py::getattr(op, "compose"); +} + +void PyFragment::add_operator(const std::shared_ptr& op) { + /* , , , */ + PYBIND11_OVERRIDE(void, Fragment, add_operator, op); +} + +void PyFragment::add_flow(const std::shared_ptr& upstream_op, + const std::shared_ptr& downstream_op) { + /* , , , */ + PYBIND11_OVERRIDE(void, Fragment, add_flow, upstream_op, downstream_op); +} + +void PyFragment::add_flow(const std::shared_ptr& upstream_op, + const std::shared_ptr& downstream_op, + std::set> io_map) { + /* , , , */ + PYBIND11_OVERRIDE(void, Fragment, add_flow, upstream_op, downstream_op, io_map); +} + +void PyFragment::compose() { + /* , , , */ + // PYBIND11_OVERRIDE(void, Fragment, compose); + + // PYBIND11_doesn't work when Fragment object is created during Application::compose(). 
+ // So we take the py::object from the constructor and call it here. + py::gil_scoped_acquire scope_guard; + py_compose_.operator()(); +} + +void PyFragment::run() { + /* , , , */ + PYBIND11_OVERRIDE(void, Fragment, run); +} + } // namespace holoscan diff --git a/python/holoscan/core/fragment.hpp b/python/holoscan/core/fragment.hpp index fbad5b4d..90f89b44 100644 --- a/python/holoscan/core/fragment.hpp +++ b/python/holoscan/core/fragment.hpp @@ -58,41 +58,18 @@ class PyFragment : public Fragment { /* Inherit the constructors */ using Fragment::Fragment; - explicit PyFragment(py::object op) : Fragment() { - py::gil_scoped_acquire scope_guard; - py_compose_ = py::getattr(op, "compose"); - } + explicit PyFragment(py::object op); /* Trampolines (need one for each virtual function) */ - void add_operator(const std::shared_ptr& op) override { - /* , , , */ - PYBIND11_OVERRIDE(void, Fragment, add_operator, op); - } + void add_operator(const std::shared_ptr& op) override; void add_flow(const std::shared_ptr& upstream_op, - const std::shared_ptr& downstream_op) override { - /* , , , */ - PYBIND11_OVERRIDE(void, Fragment, add_flow, upstream_op, downstream_op); - } + const std::shared_ptr& downstream_op) override; void add_flow(const std::shared_ptr& upstream_op, const std::shared_ptr& downstream_op, - std::set> io_map) override { - /* , , , */ - PYBIND11_OVERRIDE(void, Fragment, add_flow, upstream_op, downstream_op, io_map); - } + std::set> io_map) override; - void compose() override { - /* , , , */ - // PYBIND11_OVERRIDE(void, Fragment, compose); - - // PYBIND11_OVERRIDE doesn't work when Fragment object is created during Application::compose(). - // So we take the py::object from the constructor and call it here. 
- py::gil_scoped_acquire scope_guard; - py_compose_.operator()(); - } - void run() override { - /* , , , */ - PYBIND11_OVERRIDE(void, Fragment, run); - } + void compose() override; + void run() override; private: py::object py_compose_ = py::none(); diff --git a/python/holoscan/core/io_context.cpp b/python/holoscan/core/io_context.cpp index d4d050d8..7edc51e1 100644 --- a/python/holoscan/core/io_context.cpp +++ b/python/holoscan/core/io_context.cpp @@ -23,6 +23,7 @@ #include #include #include +#include #include #include "../gxf/entity.hpp" @@ -32,7 +33,6 @@ #include "holoscan/core/expected.hpp" #include "holoscan/core/io_context.hpp" #include "holoscan/core/domain/tensor.hpp" -#include "holoscan/core/gxf/gxf_tensor.hpp" #include "holoscan/operators/holoviz/holoviz.hpp" #include "io_context_pydoc.hpp" #include "tensor.hpp" // for PyTensor @@ -582,4 +582,15 @@ void init_io_context(py::module_& m) { // register a cloudpickle-based serializer for Python objects register_py_object_codec(); } + +PyInputContext::PyInputContext(ExecutionContext* execution_context, Operator* op, + std::unordered_map>& inputs, + py::object py_op) + : gxf::GXFInputContext::GXFInputContext(execution_context, op, inputs), py_op_(py_op) {} + +PyOutputContext::PyOutputContext(ExecutionContext* execution_context, Operator* op, + std::unordered_map>& outputs, + py::object py_op) + : gxf::GXFOutputContext::GXFOutputContext(execution_context, op, outputs), py_op_(py_op) {} + } // namespace holoscan diff --git a/python/holoscan/core/io_context.hpp b/python/holoscan/core/io_context.hpp index 96cb0415..f018b898 100644 --- a/python/holoscan/core/io_context.hpp +++ b/python/holoscan/core/io_context.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -41,8 +41,8 @@ class PyInputContext : public gxf::GXFInputContext { /* Inherit the constructors */ using gxf::GXFInputContext::GXFInputContext; PyInputContext(ExecutionContext* execution_context, Operator* op, - std::unordered_map>& inputs, py::object py_op) - : gxf::GXFInputContext::GXFInputContext(execution_context, op, inputs), py_op_(py_op) {} + std::unordered_map>& inputs, + py::object py_op); py::object py_receive(const std::string& name); @@ -57,8 +57,7 @@ class PyOutputContext : public gxf::GXFOutputContext { PyOutputContext(ExecutionContext* execution_context, Operator* op, std::unordered_map>& outputs, - py::object py_op) - : gxf::GXFOutputContext::GXFOutputContext(execution_context, op, outputs), py_op_(py_op) {} + py::object py_op); void py_emit(py::object& data, const std::string& name); diff --git a/python/holoscan/core/io_context_pydoc.hpp b/python/holoscan/core/io_context_pydoc.hpp index 8164752a..2337f43f 100644 --- a/python/holoscan/core/io_context_pydoc.hpp +++ b/python/holoscan/core/io_context_pydoc.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -39,7 +39,6 @@ Holoscan native operator. } // namespace Message - namespace InputContext { PYDOC(InputContext, R"doc( diff --git a/python/holoscan/core/io_spec.cpp b/python/holoscan/core/io_spec.cpp index dfb61748..f19f1aca 100644 --- a/python/holoscan/core/io_spec.cpp +++ b/python/holoscan/core/io_spec.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. 
All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -81,9 +81,8 @@ void init_io_spec(py::module_& m) { [](IOSpec& io_spec, std::shared_ptr connector) { return io_spec.connector(connector); }) - .def( - "__repr__", - // use py::object and obj.cast to avoid a segfault if object has not been initialized - [](const IOSpec& iospec) { return iospec.description(); }); + .def("__repr__", + // use py::object and obj.cast to avoid a segfault if object has not been initialized + [](const IOSpec& iospec) { return iospec.description(); }); } } // namespace holoscan diff --git a/python/holoscan/core/kwarg_handling.cpp b/python/holoscan/core/kwarg_handling.cpp index c9a8bcd3..e33a0b1e 100644 --- a/python/holoscan/core/kwarg_handling.cpp +++ b/python/holoscan/core/kwarg_handling.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,9 +15,9 @@ * limitations under the License. */ -#include // py::dtype, py::array +#include // py::dtype, py::array #include -#include // needed for py::cast to work with std::vector types +#include // needed for py::cast to work with std::vector types #include #include @@ -29,6 +29,7 @@ #include "holoscan/core/condition.hpp" #include "holoscan/core/io_spec.hpp" #include "holoscan/core/resource.hpp" +#include "holoscan/operators/aja_source/ntv2channel.hpp" #include "kwarg_handling.hpp" #include "kwarg_handling_pydoc.hpp" @@ -249,6 +250,10 @@ py::object yaml_node_to_py_object(YAML::Node node) { } // Check if it is a string. 
{ + // special case for string -> AJASourceOp NTV2Channel enum + NTV2Channel aja_t; + if (YAML::convert::decode(node, aja_t)) { return py::cast(aja_t); } + std::string t; if (YAML::convert::decode(node, t)) { return py::str(t); } } diff --git a/python/holoscan/core/kwarg_handling.hpp b/python/holoscan/core/kwarg_handling.hpp index 9f34c26a..c23dfc24 100644 --- a/python/holoscan/core/kwarg_handling.hpp +++ b/python/holoscan/core/kwarg_handling.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -18,7 +18,7 @@ #ifndef HOLOSCAN_PYTHON_PYBIND11_CORE_KWARG_HANDLING_HPP #define HOLOSCAN_PYTHON_PYBIND11_CORE_KWARG_HANDLING_HPP -#include // py::array, py::dtype +#include // py::array, py::dtype #include #include diff --git a/python/holoscan/core/operator.cpp b/python/holoscan/core/operator.cpp index 49ea328b..e4b53cc5 100644 --- a/python/holoscan/core/operator.cpp +++ b/python/holoscan/core/operator.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -20,12 +20,11 @@ #include #include // for unordered_map -> dict, etc. 
+#include #include #include #include -#include "kwarg_handling.hpp" -#include "operator_pydoc.hpp" #include "gil_guarded_pyobject.hpp" #include "holoscan/core/component.hpp" #include "holoscan/core/component_spec.hpp" @@ -36,6 +35,8 @@ #include "holoscan/core/operator.hpp" #include "holoscan/core/operator_spec.hpp" #include "holoscan/core/resource.hpp" +#include "kwarg_handling.hpp" +#include "operator_pydoc.hpp" using std::string_literals::operator""s; using pybind11::literals::operator""_a; @@ -100,7 +101,7 @@ void init_operator(py::module_& m) { doc::OperatorSpec::doc_param); // note: added py::dynamic_attr() to allow dynamically adding attributes in a Python subclass - py::class_> operator_class( + py::class_> operator_class( m, "Operator", py::dynamic_attr(), doc::Operator::doc_Operator_args_kwargs); operator_class @@ -114,7 +115,8 @@ void init_operator(py::module_& m) { "fragment", py::overload_cast<>(&Operator::fragment), doc::Operator::doc_fragment) .def_property("spec", &Operator::spec_shared, - py::overload_cast&>(&Operator::spec)) + py::overload_cast&>(&Operator::spec), + doc::Operator::doc_spec) .def_property_readonly("conditions", &Operator::conditions, doc::Operator::doc_conditions) .def_property_readonly("resources", &Operator::resources, doc::Operator::doc_resources) .def_property_readonly( @@ -165,12 +167,418 @@ void init_operator(py::module_& m) { // use py::object and obj.cast to avoid a segfault if object has not been initialized auto op = obj.cast>(); if (op) { return op->description(); } - return std::string(""); + return std::string(""); }, R"doc(Return repr(self).)doc"); - py::enum_(operator_class, "OperatorType") + py::enum_( + operator_class, "OperatorType", doc::OperatorType::doc_OperatorType) .value("NATIVE", Operator::OperatorType::kNative) - .value("GXF", Operator::OperatorType::kGXF); + .value("GXF", Operator::OperatorType::kGXF) + .value("VIRTUAL", Operator::OperatorType::kVirtual); +} + +PyOperatorSpec::PyOperatorSpec(Fragment* 
fragment, py::object op) + : OperatorSpec(fragment), py_op_(op) {} + +void PyOperatorSpec::py_param(const std::string& name, const py::object& default_value, + const ParameterFlag& flag, const py::kwargs& kwargs) { + using std::string_literals::operator""s; + + bool is_receivers = false; + std::string headline{""s}; + std::string description{""s}; + for (const auto& [name, value] : kwargs) { + std::string param_name = name.cast(); + if (param_name == "headline") { + headline = value.cast(); + } else if (param_name == "description") { + description = value.cast(); + } else if (param_name == "kind") { + auto kind_val = value.cast(); + if (kind_val == "receivers") { + is_receivers = true; + } else { + throw std::runtime_error("unknown kind: '"s + kind_val + + "'. Only `kind='receivers'` is currently supported."s); + } + } else { + throw std::runtime_error("unsupported kwarg: "s + param_name); + } + } + + if (is_receivers) { + // Create receivers object + py_receivers_params_.emplace_back(); + + // Register parameter + auto& parameter = py_receivers_params_.back(); + param(parameter, name.c_str(), headline.c_str(), description.c_str(), {}, flag); + } else { + // Create parameter object + py_params_.emplace_back(py_op()); + + // Register parameter + auto& parameter = py_params_.back(); + param(parameter, name.c_str(), headline.c_str(), description.c_str(), default_value, flag); + } +} + +py::object PyOperatorSpec::py_op() const { + return py_op_; +} + +std::list>& PyOperatorSpec::py_params() { + return py_params_; +} + +std::list>>& PyOperatorSpec::py_receivers() { + return py_receivers_params_; +} + +// PyOperator + +PyOperator::PyOperator(py::object op, Fragment* fragment, const py::args& args, + const py::kwargs& kwargs) + : Operator() { + using std::string_literals::operator""s; + + HOLOSCAN_LOG_TRACE("PyOperator::PyOperator()"); + py_op_ = op; + py_compute_ = py::getattr(op, "compute"); // cache the compute method + py_initialize_ = py::getattr(op, "initialize"); 
// cache the initialize method + py_start_ = py::getattr(op, "start"); // cache the start method + py_stop_ = py::getattr(op, "stop"); // cache the stop method + fragment_ = fragment; + + // Store the application object to access the trace/profile functions + auto app = fragment_->application(); + py_app_ = static_cast(app); + + // Parse args + for (auto& item : args) { + py::object arg_value = item.cast(); + if (py::isinstance(arg_value)) { + this->add_arg(arg_value.cast>()); + } else if (py::isinstance(arg_value)) { + this->add_arg(arg_value.cast>()); + } else if (py::isinstance(arg_value)) { + throw std::runtime_error("multiple Fragment objects provided"); + } else if (py::isinstance(arg_value)) { + this->add_arg(arg_value.cast()); + } else if (py::isinstance(arg_value)) { + this->add_arg(arg_value.cast()); + } else { + this->add_arg(py_object_to_arg(arg_value, ""s)); + } + } + + // Pars kwargs + for (const auto& [name, value] : kwargs) { + std::string kwarg_name = name.cast(); + py::object kwarg_value = value.cast(); + if (kwarg_name == "name"s) { + if (py::isinstance(kwarg_value)) { + this->name(kwarg_value.cast()); + } else { + throw std::runtime_error("name kwarg must be a string"); + } + } else if (kwarg_name == "fragment"s) { + if (py::isinstance(kwarg_value)) { + throw std::runtime_error( + "Cannot add kwarg fragment. 
Fragment can only be provided positionally"); + } else { + throw std::runtime_error("fragment kwarg must be a Fragment"); + } + } else if (py::isinstance(kwarg_value)) { + // Set the condition's name to the kwarg name + auto cond = kwarg_value.cast>(); + cond.get()->name(kwarg_name); + this->add_arg(cond); + } else if (py::isinstance(kwarg_value)) { + // Set the resource's name to the kwarg name + auto resource = kwarg_value.cast>(); + resource.get()->name(kwarg_name); + this->add_arg(resource); + } else { + this->add_arg(py_object_to_arg(kwarg_value, kwarg_name)); + } + } + + // Set name if needed + if (name_ == "") { + static size_t op_number; + op_number++; + this->name("unnamed_operator_" + std::to_string(op_number)); + } +} + +std::shared_ptr PyOperator::py_shared_spec() { + auto spec_ptr = spec_shared(); + return std::static_pointer_cast(spec_ptr); +} + +PyOperator::GILGuardedThreadLocal::GILGuardedThreadLocal() { + py::gil_scoped_acquire scope_guard; + data.pydevd_trace_func = py::none(); + data.pydevd_set_trace_to_threads_func = py::none(); + data.pydevd_thread_idents = py::none(); + data.dummy_thread = py::none(); +} + +PyOperator::GILGuardedThreadLocal::~GILGuardedThreadLocal() { + // Since this destructor is called when exiting the thread, acquiring GIL and + // decreasing reference count of Python objects in the thread-local data + // may cause the interpreter to crash. So, we don't acquire GIL or handle + // reference count of Python objects here. + + // Just release the Python objects. + data.pydevd_trace_func.release(); + data.pydevd_set_trace_to_threads_func.release(); + data.pydevd_thread_idents.release(); + data.dummy_thread.release(); + + data.py_last_frame = nullptr; + + data.c_profilefunc = nullptr; + data.c_tracefunc = nullptr; + data.c_profileobj = nullptr; + data.c_traceobj = nullptr; +} + +PyOperator::TracingThreadLocal& PyOperator::get_tracing_data() { + // Define a thread-local object for storing tracing data. 
+ // Important: The type of a thread_local variable should be a pointer due to issues + // with Thread-Local Storage (TLS) when dynamically loading libraries using dlopen(). + // The TLS space is limited to 2048 bytes. + // For more details, refer to: https://fasterthanli.me/articles/a-dynamic-linker-murder-mystery. + static thread_local std::unique_ptr gil_guarded_thread_local = + std::make_unique(); + + py::gil_scoped_acquire scope_guard; + + auto& data = gil_guarded_thread_local->data; + + // Return the cached thread-local data if it is already initialized + if (data.is_initialized) { return data; } + + try { + if (data.dummy_thread.is_none()) { + // Create a dummy thread object for this thread by calling threading.current_thread() + // so that debugger can recognize this thread as a Python thread. + auto threading_module = py::module::import("threading"); + auto current_thread_func = py::getattr(threading_module, "current_thread"); + // Create py::object object having the result of current_thread_func() + data.dummy_thread = current_thread_func(); + } + + // Check if the module name starts with '_pydevd_bundle' which means that it is using + // PyDevd debugger. If so, then we need to store pydevd-specific data. + + // Get py_trace_func_'s class object using "__class__" attr + auto trace_module = py_app_->py_trace_func_.attr("__class__").attr("__module__"); + // Check if the module name starts with '_pydevd_bundle' which means that it is using + // PyDevd debugger. If so, then we need to set the trace function to the current frame. 
+ auto module_name = trace_module.cast(); + if (module_name.find("_pydevd_bundle") != std::string::npos) { + if (data.pydevd_trace_func.is_none()) { + // Get the trace function from the debugger + auto pydevd_module = py::module::import("pydevd"); + auto debugger = py::getattr(pydevd_module, "GetGlobalDebugger")(); + // Get the trace function from the debugger + data.pydevd_trace_func = py::getattr(debugger, "get_thread_local_trace_func")(); + } + + if (data.pydevd_set_trace_to_threads_func.is_none()) { + auto pydevd_module = py::module::import("pydevd"); + + data.pydevd_set_trace_to_threads_func = + pydevd_module.attr("pydevd_tracing").attr("set_trace_to_threads"); + } + + if (data.pydevd_thread_idents.is_none()) { + auto thread_module = py::module::import("_thread"); + auto get_ident_func = py::getattr(thread_module, "get_ident"); + // Create py::list object having the result of get_ident_func() + auto thread_idents = py::list(); + thread_idents.append(get_ident_func()); + + data.pydevd_thread_idents = thread_idents; + } + } + + data.is_initialized = true; + data.in_tracing = (py_app_->c_tracefunc_ != nullptr) || (py_app_->c_profilefunc_ != nullptr); + data.is_pydevd = (!data.pydevd_trace_func.is_none()) && + (!data.pydevd_set_trace_to_threads_func.is_none()) && + (!data.pydevd_thread_idents.is_none()); + } catch (const std::exception& e) { + HOLOSCAN_LOG_WARN("Exception occurredPyOperator:: while initializing tracing data: {}", + e.what()); + data.is_initialized = true; + data.in_tracing = false; // pretend that tracing is not enabled for this thread + data.is_pydevd = false; + } + + return data; } + +void PyOperator::set_py_tracing() { + auto& tracing_data = get_tracing_data(); + + try { + // If tracing is not enabled, do nothing and return + if (!tracing_data.in_tracing) { return; } + + auto py_thread_state = _PyThreadState_UncheckedGet(); + + // If tracing_data.is_func_set is false, cache the current trace/profile functions for + // the current thread. 
+ if (!tracing_data.is_func_set) { + auto& py_last_frame = py_app_->py_last_frame_; + auto& py_profile_func = py_app_->py_profile_func_; + auto& py_trace_func = py_app_->py_trace_func_; + auto& c_profilefunc = py_app_->c_profilefunc_; + auto& c_profileobj = py_app_->c_profileobj_; + auto& c_tracefunc = py_app_->c_tracefunc_; + auto& c_traceobj = py_app_->c_traceobj_; + + tracing_data.py_last_frame = py_last_frame; + + // If pydevd is used, call pydevd.pydevd_tracing.set_trace_to_threads() to set + // the trace function to the current thread. + if (tracing_data.is_pydevd) { + tracing_data.pydevd_set_trace_to_threads_func( + tracing_data.pydevd_trace_func, + py::arg("thread_idents") = tracing_data.pydevd_thread_idents, + py::arg("create_dummy_thread") = py::bool_(false)); + + tracing_data.c_profilefunc = py_thread_state->c_profilefunc; + tracing_data.c_profileobj = py_thread_state->c_profileobj; + + tracing_data.c_tracefunc = py_thread_state->c_tracefunc; + tracing_data.c_traceobj = py_thread_state->c_traceobj; + } else { + // If pydevd is not used, call sys.settrace/setprofile() to set + // the trace/profile function to the current thread. + auto sys_module = py::module::import("sys"); + + // Check if py_profile_func is callable and call it. + // In case of cProfile.Profile object, it is not callable so should not be called. + if (!py_profile_func.is_none() && py::isinstance(py_profile_func)) { + sys_module.attr("setprofile")(py_profile_func); + tracing_data.c_profilefunc = py_thread_state->c_profilefunc; + tracing_data.c_profileobj = py_thread_state->c_profileobj; + } else { + HOLOSCAN_LOG_DEBUG("py_profile_func_ is not callable"); + tracing_data.c_profilefunc = c_profilefunc; + tracing_data.c_profileobj = c_profileobj; + } + + // Check if py_trace_func is callable and call it. 
+ if (!py_trace_func.is_none() && py::isinstance(py_trace_func)) { + sys_module.attr("settrace")(py_trace_func); + tracing_data.c_tracefunc = py_thread_state->c_tracefunc; + tracing_data.c_traceobj = py_thread_state->c_traceobj; + } else { + HOLOSCAN_LOG_DEBUG("py_trace_func_ is not callable"); + tracing_data.c_tracefunc = c_tracefunc; + tracing_data.c_traceobj = c_traceobj; + } + } + tracing_data.is_func_set = true; + } + + // Set the trace/profile functions to the current thread. + // Depending on the Python version, the way to set the trace/profile functions is different. + + // Set current frame to the last valid Python frame +#if PY_VERSION_HEX >= 0x030B0000 // >= Python 3.11.0 + // https://github.com/python/cpython/blob/c184c6750e40ca4ffa4f62a5d145b892cbd066bc + // /Doc/whatsnew/3.11.rst#L2301 + // - tstate->frame is removed. + py_thread_state->cframe->current_frame = + reinterpret_cast<_PyInterpreterFrame*>(tracing_data.py_last_frame); +#else // < Python 3.11.0 + py_thread_state->frame = reinterpret_cast(tracing_data.py_last_frame); +#endif + +#if PY_VERSION_HEX >= 0x030B0000 // >= Python 3.11.0 + // Recommended way to set the trace/profile functions in Python 3.11 + // (see + // https://discuss.python.org/t/python-3-11-frame-structure-and-various-changes/17895/19) + _PyEval_SetProfile( + py_thread_state, tracing_data.c_profilefunc, tracing_data.c_profileobj.ptr()); + _PyEval_SetTrace(py_thread_state, tracing_data.c_tracefunc, tracing_data.c_traceobj.ptr()); +#else // < Python 3.11.0 + py_thread_state->c_profilefunc = tracing_data.c_profilefunc; + Py_XINCREF(tracing_data.c_profileobj.ptr()); + Py_XDECREF(py_thread_state->c_profileobj); + py_thread_state->c_profileobj = tracing_data.c_profileobj.ptr(); + + py_thread_state->c_tracefunc = tracing_data.c_tracefunc; + Py_XINCREF(tracing_data.c_traceobj.ptr()); + Py_XDECREF(py_thread_state->c_traceobj); + py_thread_state->c_traceobj = tracing_data.c_traceobj.ptr(); + +#if PY_VERSION_HEX >= 0x030A00B1 // >= Python 
3.10.0 b1 + py_thread_state->cframe->use_tracing = 1; +#else // < Python 3.10.0 b1 + py_thread_state->use_tracing = 1; +#endif // about Python 3.10.0 b1 +#endif // about Python 3.11.0 + } catch (const std::exception& e) { + HOLOSCAN_LOG_WARN("Exception occurred while setting trace/profile functions: {}", e.what()); + tracing_data.is_initialized = true; + tracing_data.is_pydevd = false; + tracing_data.in_tracing = false; // pretend that tracing is not enabled for this thread + } +} + +void PyOperator::initialize() { + Operator::initialize(); + // Get the initialize method of the Python Operator class and call it + py::gil_scoped_acquire scope_guard; + + set_py_tracing(); + + py_initialize_.operator()(); +} + +void PyOperator::start() { + // Get the start method of the Python Operator class and call it + py::gil_scoped_acquire scope_guard; + + set_py_tracing(); + + py_start_.operator()(); +} + +void PyOperator::stop() { + // Get the stop method of the Python Operator class and call it + py::gil_scoped_acquire scope_guard; + + set_py_tracing(); + + py_stop_.operator()(); +} + +void PyOperator::compute(InputContext& op_input, OutputContext& op_output, + ExecutionContext& context) { + auto gxf_context = context.context(); + + // Get the compute method of the Python Operator class and call it + py::gil_scoped_acquire scope_guard; + auto py_op_input = + std::make_shared(&context, op_input.op(), op_input.inputs(), this->py_op_); + auto py_op_output = std::make_shared( + &context, op_output.op(), op_output.outputs(), this->py_op_); + auto py_context = + std::make_shared(gxf_context, py_op_input, py_op_output, this->py_op_); + + set_py_tracing(); + + py_compute_.operator()(py::cast(py_op_input), py::cast(py_op_output), py::cast(py_context)); +} + } // namespace holoscan diff --git a/python/holoscan/core/operator.hpp b/python/holoscan/core/operator.hpp index 45817b82..684f9204 100644 --- a/python/holoscan/core/operator.hpp +++ b/python/holoscan/core/operator.hpp @@ -59,59 
+59,18 @@ class PyOperatorSpec : public OperatorSpec { using OperatorSpec::OperatorSpec; // Override the constructor to get the py::object for the Python class - explicit PyOperatorSpec(Fragment* fragment = nullptr, py::object op = py::none()) - : OperatorSpec(fragment), py_op_(op) {} + explicit PyOperatorSpec(Fragment* fragment = nullptr, py::object op = py::none()); // TOIMPROVE: Should we parse headline and description from kwargs or just // add them to the function signature? void py_param(const std::string& name, const py::object& default_value, const ParameterFlag& flag, - const py::kwargs& kwargs) { - using std::string_literals::operator""s; - - bool is_receivers = false; - std::string headline{""s}; - std::string description{""s}; - for (const auto& [name, value] : kwargs) { - std::string param_name = name.cast(); - if (param_name == "headline") { - headline = value.cast(); - } else if (param_name == "description") { - description = value.cast(); - } else if (param_name == "kind") { - auto kind_val = value.cast(); - if (kind_val == "receivers") { - is_receivers = true; - } else { - throw std::runtime_error("unknown kind: '"s + kind_val + - "'. 
Only `kind='receivers'` is currently supported."s); - } - } else { - throw std::runtime_error("unsupported kwarg: "s + param_name); - } - } - - if (is_receivers) { - // Create receivers object - py_receivers_params_.emplace_back(); - - // Register parameter - auto& parameter = py_receivers_params_.back(); - param(parameter, name.c_str(), headline.c_str(), description.c_str(), {}, flag); - } else { - // Create parameter object - py_params_.emplace_back(py_op()); - - // Register parameter - auto& parameter = py_params_.back(); - param(parameter, name.c_str(), headline.c_str(), description.c_str(), default_value, flag); - } - } - - py::object py_op() const { return py_op_; } - - std::list>& py_params() { return py_params_; } - - std::list>>& py_receivers() { return py_receivers_params_; } + const py::kwargs& kwargs); + + py::object py_op() const; + + std::list>& py_params(); + + std::list>>& py_receivers(); private: py::object py_op_ = py::none(); @@ -129,85 +88,10 @@ class PyOperator : public Operator { // Define a kwargs-based constructor that can create an ArgList // for passing on to the variadic-template based constructor. 
- PyOperator(py::object op, Fragment* fragment, const py::args& args, const py::kwargs& kwargs) - : Operator() { - using std::string_literals::operator""s; - - HOLOSCAN_LOG_TRACE("PyOperator::PyOperator()"); - py_op_ = op; - py_compute_ = py::getattr(op, "compute"); // cache the compute method - py_initialize_ = py::getattr(op, "initialize"); // cache the initialize method - py_start_ = py::getattr(op, "start"); // cache the start method - py_stop_ = py::getattr(op, "stop"); // cache the stop method - fragment_ = fragment; - - // Store the application object to access the trace/profile functions - auto app = fragment_->application(); - py_app_ = static_cast(app); - - // Parse args - for (auto& item : args) { - py::object arg_value = item.cast(); - if (py::isinstance(arg_value)) { - this->add_arg(arg_value.cast>()); - } else if (py::isinstance(arg_value)) { - this->add_arg(arg_value.cast>()); - } else if (py::isinstance(arg_value)) { - throw std::runtime_error("multiple Fragment objects provided"); - } else if (py::isinstance(arg_value)) { - this->add_arg(arg_value.cast()); - } else if (py::isinstance(arg_value)) { - this->add_arg(arg_value.cast()); - } else { - this->add_arg(py_object_to_arg(arg_value, ""s)); - } - } - - // Pars kwargs - for (const auto& [name, value] : kwargs) { - std::string kwarg_name = name.cast(); - py::object kwarg_value = value.cast(); - if (kwarg_name == "name"s) { - if (py::isinstance(kwarg_value)) { - this->name(kwarg_value.cast()); - } else { - throw std::runtime_error("name kwarg must be a string"); - } - } else if (kwarg_name == "fragment"s) { - if (py::isinstance(kwarg_value)) { - throw std::runtime_error( - "Cannot add kwarg fragment. 
Fragment can only be provided positionally"); - } else { - throw std::runtime_error("fragment kwarg must be a Fragment"); - } - } else if (py::isinstance(kwarg_value)) { - // Set the condition's name to the kwarg name - auto cond = kwarg_value.cast>(); - cond.get()->name(kwarg_name); - this->add_arg(cond); - } else if (py::isinstance(kwarg_value)) { - // Set the resource's name to the kwarg name - auto resource = kwarg_value.cast>(); - resource.get()->name(kwarg_name); - this->add_arg(resource); - } else { - this->add_arg(py_object_to_arg(kwarg_value, kwarg_name)); - } - } - - // Set name if needed - if (name_ == "") { - static size_t op_number; - op_number++; - this->name("unnamed_operator_" + std::to_string(op_number)); - } - } + PyOperator(py::object op, Fragment* fragment, const py::args& args, const py::kwargs& kwargs); // Override spec() method - std::shared_ptr py_shared_spec() { - auto spec_ptr = spec_shared(); - return std::static_pointer_cast(spec_ptr); - } + std::shared_ptr py_shared_spec(); /// Thread-local tracing data struct TracingThreadLocal { @@ -223,11 +107,8 @@ class PyOperator : public Operator { py::object pydevd_thread_idents; ///< thread identifiers for this thread // Fake frame object for the last python frame -#if PY_VERSION_HEX >= 0x030b0000 // >= Python 3.11.0 - _PyInterpreterFrame* py_last_frame = nullptr; -#else - PyFrameObject* py_last_frame = nullptr; -#endif + // Actual type is either _PyInterpreterFrame* (PY_VERSION_HEX >= 0x030b0000) or PyFrameObject*. + void* py_last_frame = nullptr; Py_tracefunc c_profilefunc = nullptr; Py_tracefunc c_tracefunc = nullptr; @@ -239,32 +120,8 @@ class PyOperator : public Operator { * @brief Thread-local data guarded by GIL. 
*/ struct GILGuardedThreadLocal { - GILGuardedThreadLocal() { - py::gil_scoped_acquire scope_guard; - data.pydevd_trace_func = py::none(); - data.pydevd_set_trace_to_threads_func = py::none(); - data.pydevd_thread_idents = py::none(); - data.dummy_thread = py::none(); - } - ~GILGuardedThreadLocal() { - // Since this destructor is called when exiting the thread, acquiring GIL and - // decreasing reference count of Python objects in the thread-local data - // may cause the interpreter to crash. So, we don't acquire GIL or handle - // reference count of Python objects here. - - // Just release the Python objects. - data.pydevd_trace_func.release(); - data.pydevd_set_trace_to_threads_func.release(); - data.pydevd_thread_idents.release(); - data.dummy_thread.release(); - - data.py_last_frame = nullptr; - - data.c_profilefunc = nullptr; - data.c_tracefunc = nullptr; - data.c_profileobj = nullptr; - data.c_traceobj = nullptr; - } + GILGuardedThreadLocal(); + ~GILGuardedThreadLocal(); TracingThreadLocal data{}; }; @@ -275,282 +132,25 @@ class PyOperator : public Operator { * * @return The reference to the thread-local tracing data object. */ - TracingThreadLocal& get_tracing_data() { - // Define a thread-local object for storing tracing data. - // Important: The type of a thread_local variable should be a pointer due to issues - // with Thread-Local Storage (TLS) when dynamically loading libraries using dlopen(). - // The TLS space is limited to 2048 bytes. - // For more details, refer to: https://fasterthanli.me/articles/a-dynamic-linker-murder-mystery. 
- static thread_local std::unique_ptr gil_guarded_thread_local = - std::make_unique(); - - py::gil_scoped_acquire scope_guard; - - auto& data = gil_guarded_thread_local->data; - - // Return the cached thread-local data if it is already initialized - if (data.is_initialized) { return data; } - - try { - if (data.dummy_thread.is_none()) { - // Create a dummy thread object for this thread by calling threading.current_thread() - // so that debugger can recognize this thread as a Python thread. - auto threading_module = py::module::import("threading"); - auto current_thread_func = py::getattr(threading_module, "current_thread"); - // Create py::object object having the result of current_thread_func() - data.dummy_thread = current_thread_func(); - } - - // Check if the module name starts with '_pydevd_bundle' which means that it is using - // PyDevd debugger. If so, then we need to store pydevd-specific data. - - // Get py_trace_func_'s class object using "__class__" attr - auto trace_module = py_app_->py_trace_func_.attr("__class__").attr("__module__"); - // Check if the module name starts with '_pydevd_bundle' which means that it is using - // PyDevd debugger. If so, then we need to set the trace function to the current frame. 
- auto module_name = trace_module.cast(); - if (module_name.find("_pydevd_bundle") != std::string::npos) { - if (data.pydevd_trace_func.is_none()) { - // Get the trace function from the debugger - auto pydevd_module = py::module::import("pydevd"); - auto debugger = py::getattr(pydevd_module, "GetGlobalDebugger")(); - // Get the trace function from the debugger - data.pydevd_trace_func = py::getattr(debugger, "get_thread_local_trace_func")(); - } - - if (data.pydevd_set_trace_to_threads_func.is_none()) { - auto pydevd_module = py::module::import("pydevd"); - - data.pydevd_set_trace_to_threads_func = - pydevd_module.attr("pydevd_tracing").attr("set_trace_to_threads"); - } - - if (data.pydevd_thread_idents.is_none()) { - auto thread_module = py::module::import("_thread"); - auto get_ident_func = py::getattr(thread_module, "get_ident"); - // Create py::list object having the result of get_ident_func() - auto thread_idents = py::list(); - thread_idents.append(get_ident_func()); - - data.pydevd_thread_idents = thread_idents; - } - } - - data.is_initialized = true; - data.in_tracing = (py_app_->c_tracefunc_ != nullptr) || (py_app_->c_profilefunc_ != nullptr); - data.is_pydevd = (!data.pydevd_trace_func.is_none()) && - (!data.pydevd_set_trace_to_threads_func.is_none()) && - (!data.pydevd_thread_idents.is_none()); - } catch (const std::exception& e) { - HOLOSCAN_LOG_WARN("Exception occurred while initializing tracing data: {}", e.what()); - data.is_initialized = true; - data.in_tracing = false; // pretend that tracing is not enabled for this thread - data.is_pydevd = false; - } - - return data; - } + TracingThreadLocal& get_tracing_data(); /** * @brief Set the tracing functions to the current thread. * * GIL must be acquired before calling this function. 
*/ - void set_py_tracing() { - auto& tracing_data = get_tracing_data(); - - try { - // If tracing is not enabled, do nothing and return - if (!tracing_data.in_tracing) { return; } - - auto py_thread_state = _PyThreadState_UncheckedGet(); - - // If tracing_data.is_func_set is false, cache the current trace/profile functions for - // the current thread. - if (!tracing_data.is_func_set) { - auto& py_last_frame = py_app_->py_last_frame_; - auto& py_profile_func = py_app_->py_profile_func_; - auto& py_trace_func = py_app_->py_trace_func_; - auto& c_profilefunc = py_app_->c_profilefunc_; - auto& c_profileobj = py_app_->c_profileobj_; - auto& c_tracefunc = py_app_->c_tracefunc_; - auto& c_traceobj = py_app_->c_traceobj_; - - tracing_data.py_last_frame = py_last_frame; - - // If pydevd is used, call pydevd.pydevd_tracing.set_trace_to_threads() to set - // the trace function to the current thread. - if (tracing_data.is_pydevd) { - tracing_data.pydevd_set_trace_to_threads_func( - tracing_data.pydevd_trace_func, - py::arg("thread_idents") = tracing_data.pydevd_thread_idents, - py::arg("create_dummy_thread") = py::bool_(false)); - - tracing_data.c_profilefunc = py_thread_state->c_profilefunc; - tracing_data.c_profileobj = py_thread_state->c_profileobj; - - tracing_data.c_tracefunc = py_thread_state->c_tracefunc; - tracing_data.c_traceobj = py_thread_state->c_traceobj; - } else { - // If pydevd is not used, call sys.settrace/setprofile() to set - // the trace/profile function to the current thread. - auto sys_module = py::module::import("sys"); - - // Check if py_profile_func is callable and call it. - // In case of cProfile.Profile object, it is not callable so should not be called. 
- if (!py_profile_func.is_none() && py::isinstance(py_profile_func)) { - sys_module.attr("setprofile")(py_profile_func); - tracing_data.c_profilefunc = py_thread_state->c_profilefunc; - tracing_data.c_profileobj = py_thread_state->c_profileobj; - } else { - HOLOSCAN_LOG_DEBUG("py_profile_func_ is not callable"); - tracing_data.c_profilefunc = c_profilefunc; - tracing_data.c_profileobj = c_profileobj; - } - - // Check if py_trace_func is callable and call it. - if (!py_trace_func.is_none() && py::isinstance(py_trace_func)) { - sys_module.attr("settrace")(py_trace_func); - tracing_data.c_tracefunc = py_thread_state->c_tracefunc; - tracing_data.c_traceobj = py_thread_state->c_traceobj; - } else { - HOLOSCAN_LOG_DEBUG("py_trace_func_ is not callable"); - tracing_data.c_tracefunc = c_tracefunc; - tracing_data.c_traceobj = c_traceobj; - } - } - tracing_data.is_func_set = true; - } - - // Set the trace/profile functions to the current thread. - // Depending on the Python version, the way to set the trace/profile functions is different. - - // Set current frame to the last valid Python frame -#if PY_VERSION_HEX >= 0x030B0000 // >= Python 3.11.0 - // https://github.com/python/cpython/blob/c184c6750e40ca4ffa4f62a5d145b892cbd066bc - // /Doc/whatsnew/3.11.rst#L2301 - // - tstate->frame is removed. 
- py_thread_state->cframe->current_frame = tracing_data.py_last_frame; -#else // < Python 3.11.0 - py_thread_state->frame = tracing_data.py_last_frame; -#endif - -#if PY_VERSION_HEX >= 0x030B0000 // >= Python 3.11.0 - // Recommended way to set the trace/profile functions in Python 3.11 - // (see https://discuss.python.org/t/python-3-11-frame-structure-and-various-changes/17895/19) - _PyEval_SetProfile( - py_thread_state, tracing_data.c_profilefunc, tracing_data.c_profileobj.ptr()); - _PyEval_SetTrace(py_thread_state, tracing_data.c_tracefunc, tracing_data.c_traceobj.ptr()); -#else // < Python 3.11.0 - py_thread_state->c_profilefunc = tracing_data.c_profilefunc; - Py_XINCREF(tracing_data.c_profileobj.ptr()); - Py_XDECREF(py_thread_state->c_profileobj); - py_thread_state->c_profileobj = tracing_data.c_profileobj.ptr(); - - py_thread_state->c_tracefunc = tracing_data.c_tracefunc; - Py_XINCREF(tracing_data.c_traceobj.ptr()); - Py_XDECREF(py_thread_state->c_traceobj); - py_thread_state->c_traceobj = tracing_data.c_traceobj.ptr(); - -#if PY_VERSION_HEX >= 0x030A00B1 // >= Python 3.10.0 b1 - py_thread_state->cframe->use_tracing = 1; -#else // < Python 3.10.0 b1 - py_thread_state->use_tracing = 1; -#endif // about Python 3.10.0 b1 -#endif // about Python 3.11.0 - } catch (const std::exception& e) { - HOLOSCAN_LOG_WARN("Exception occurred while setting trace/profile functions: {}", e.what()); - tracing_data.is_initialized = true; - tracing_data.is_pydevd = false; - tracing_data.in_tracing = false; // pretend that tracing is not enabled for this thread - } - } - - void initialize() override { - Operator::initialize(); - try { - // Get the initialize method of the Python Operator class and call it - py::gil_scoped_acquire scope_guard; - - set_py_tracing(); - - try { - py_initialize_.operator()(); - } catch (const py::error_already_set& e) { _handle_python_error(e, "initialize"); } - } catch (const std::exception& e) { - HOLOSCAN_LOG_ERROR("Exception occurred for operator: 
'{}' - {}", name(), e.what()); - } - } - - void start() override { - try { - // Get the start method of the Python Operator class and call it - py::gil_scoped_acquire scope_guard; - - set_py_tracing(); - - try { - py_start_.operator()(); - } catch (const py::error_already_set& e) { _handle_python_error(e, "start"); } - } catch (const std::exception& e) { - HOLOSCAN_LOG_ERROR("Exception occurred for operator: '{}' - {}", name(), e.what()); - } - } - - void stop() override { - try { - // Get the stop method of the Python Operator class and call it - py::gil_scoped_acquire scope_guard; - - set_py_tracing(); - - try { - py_stop_.operator()(); - } catch (const py::error_already_set& e) { _handle_python_error(e, "stop"); } - } catch (const std::exception& e) { - HOLOSCAN_LOG_ERROR("Exception occurred for operator: '{}' - {}", name(), e.what()); - } - } + void set_py_tracing(); + + void initialize() override; + + void start() override; + + void stop() override; void compute(InputContext& op_input, OutputContext& op_output, - ExecutionContext& context) override { - auto gxf_context = context.context(); - - try { - // Get the compute method of the Python Operator class and call it - py::gil_scoped_acquire scope_guard; - auto py_op_input = std::make_shared( - &context, op_input.op(), op_input.inputs(), this->py_op_); - auto py_op_output = std::make_shared( - &context, op_output.op(), op_output.outputs(), this->py_op_); - auto py_context = std::make_shared( - gxf_context, py_op_input, py_op_output, this->py_op_); - - set_py_tracing(); - - try { - py_compute_.operator()(py::cast(py_op_input), py::cast(py_op_output), py::cast(py_context)); - } catch (const py::error_already_set& e) { _handle_python_error(e, "compute"); } - } catch (const std::exception& e) { - HOLOSCAN_LOG_ERROR("Exception occurred for operator: '{}' - {}", name(), e.what()); - } - } + ExecutionContext& context) override; private: - void _handle_python_error(const py::error_already_set& e, std::string 
method_name) { - // Print the Python error to stderr - auto stderr = py::module::import("sys").attr("stderr"); - - py::print(fmt::format("Exception occurred in {} method of operator: '{}'", method_name, name_), - py::arg("file") = stderr); - py::module::import("traceback") - .attr("print_exception")(e.type(), e.value(), e.trace(), py::none(), stderr); - // Note:: We don't want to throw an exception here, because it will cause the Python - // interpreter to exit. Instead, we'll just log the error and continue. - // throw std::runtime_error(fmt::format("Python error in {} method: {}", method_name, - // e.what())); - } - py::object py_op_ = py::none(); py::object py_initialize_ = py::none(); py::object py_start_ = py::none(); diff --git a/python/holoscan/core/operator_pydoc.hpp b/python/holoscan/core/operator_pydoc.hpp index c9a62cb6..1681f520 100644 --- a/python/holoscan/core/operator_pydoc.hpp +++ b/python/holoscan/core/operator_pydoc.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -163,18 +163,14 @@ RuntimeError PYDOC(name, R"doc( The name of the operator. - -Returns -------- -name : str )doc") PYDOC(fragment, R"doc( -The fragment that the operator belongs to. +The fragment (``holoscan.core.Fragment``) that the operator belongs to. +)doc") -Returns -------- -name : holoscan.core.Fragment +PYDOC(spec, R"doc( +The operator spec (``holoscan.core.OperatorSpec``) associated with the operator. )doc") PYDOC(conditions, R"doc( @@ -246,6 +242,17 @@ YAML formatted string describing the operator. } // namespace Operator +namespace OperatorType { + +PYDOC(OperatorType, R"doc( +Enum class for operator types used by the executor. + +- NATIVE: Native operator. +- GXF: GXF operator. 
+- VIRTUAL: Virtual operator. (for internal use, not intended for use by application authors) +)doc") + +} // namespace OperatorType } // namespace holoscan::doc #endif // PYHOLOSCAN_CORE_OPERATOR_PYDOC_HPP diff --git a/python/holoscan/core/tensor.cpp b/python/holoscan/core/tensor.cpp index 3e8974bf..4825aa6e 100644 --- a/python/holoscan/core/tensor.cpp +++ b/python/holoscan/core/tensor.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -27,6 +27,7 @@ #include #include "dl_converter.hpp" +#include "gxf/std/dlpack_utils.hpp" // DLDeviceFromPointer, DLDataTypeFromTypeString #include "holoscan/core/domain/tensor.hpp" #include "kwarg_handling.hpp" #include "tensor.hpp" @@ -37,6 +38,28 @@ using pybind11::literals::operator""_a; namespace py = pybind11; +namespace { + +// A macro like CHECK_CUDA_ERROR from gxf/cuda/cuda_common.h, but it uses Holoscan-style +// logging and throws an exception instead of returning an nvidia::gxf::Unexpected. +#define CHECK_CUDA_THROW_ERROR(cu_result, stmt, ...) 
\ + do { \ + cudaError_t err = (cu_result); \ + if (err != cudaSuccess) { \ + HOLOSCAN_LOG_ERROR("Runtime call {} in line {} of file {} failed with '{}' ({})", \ + #stmt, \ + __LINE__, \ + __FILE__, \ + cudaGetErrorString(err), \ + err); \ + throw std::runtime_error("Error occurred in CUDA runtime API call"); \ + } \ + } while (0) + +static constexpr const char* dlpack_capsule_name{"dltensor"}; +static constexpr const char* used_dlpack_capsule_name{"used_dltensor"}; +} // namespace + namespace holoscan { void init_tensor(py::module_& m) { @@ -83,7 +106,9 @@ void init_tensor(py::module_& m) { .def("__dlpack_device__", &PyTensor::dlpack_device, doc::Tensor::doc_dlpack_device); py::class_>(m, "PyTensor", doc::Tensor::doc_Tensor) - .def_static("as_tensor", &PyTensor::as_tensor, "obj"_a, doc::Tensor::doc_as_tensor); + .def_static("as_tensor", &PyTensor::as_tensor, "obj"_a, doc::Tensor::doc_as_tensor) + .def_static( + "from_dlpack", &PyTensor::from_dlpack_pyobj, "obj"_a, doc::Tensor::doc_from_dlpack); py::enum_(m, "DLDataTypeCode", py::module_local()) .value("DLINT", kDLInt) @@ -130,6 +155,18 @@ LazyDLManagedTensorDeleter::LazyDLManagedTensorDeleter() { // Use std::memory_order_relaxed because there are no other memory operations that need to be // synchronized with the fetch_add operation. if (s_instance_count.fetch_add(1, std::memory_order_relaxed) == 0) { + // Wait until both s_stop and s_is_running are false (busy-waiting). + // s_stop being true indicates that the previous deleter thread is still in the process + // of deleting the object. 
+ while (true) { + { + std::lock_guard lock(s_mutex); + if (!s_stop && !s_is_running) { break; } + } + // Yield to other threads + std::this_thread::yield(); + } + std::lock_guard lock(s_mutex); // Register pthread_atfork() and std::atexit() handlers (registered only once) // @@ -151,7 +188,6 @@ LazyDLManagedTensorDeleter::LazyDLManagedTensorDeleter() { std::atexit(on_exit); } - s_stop = false; s_is_running = true; s_thread = std::thread(run); // Detach the thread so that it can be stopped when the application exits @@ -274,10 +310,10 @@ void LazyDLManagedTensorDeleter::on_fork_child() { // PyTensor definition //////////////////////////////////////////////////////////////////////////////////////////////////// -PyTensor::PyTensor(std::shared_ptr& ctx) : Tensor(ctx) {} +PyTensor::PyTensor(std::shared_ptr& ctx) : Tensor(ctx) {} PyTensor::PyTensor(DLManagedTensor* dl_managed_tensor_ptr) { - dl_ctx_ = std::make_shared(); + dl_ctx_ = std::make_shared(); // Create PyDLManagedMemoryBuffer to hold the DLManagedTensor and acquire GIL before calling // the deleter function dl_ctx_->memory_ref = std::make_shared(dl_managed_tensor_ptr); @@ -317,6 +353,20 @@ py::object PyTensor::as_tensor(const py::object& obj) { return py_tensor; } +py::object PyTensor::from_dlpack_pyobj(const py::object& obj) { + std::shared_ptr tensor; + if (py::hasattr(obj, "__dlpack__") && py::hasattr(obj, "__dlpack_device__")) { + tensor = PyTensor::from_dlpack(obj); + } else { + throw std::runtime_error("Unsupported Python object type"); + } + py::object py_tensor = py::cast(tensor); + + // Set array interface attributes + set_array_interface(py_tensor, tensor->dl_ctx()); + return py_tensor; +} + std::shared_ptr PyTensor::from_array_interface(const py::object& obj, bool cuda) { auto memory_buf = std::make_shared(); memory_buf->obj_ref = obj; // hold obj to prevent it from being garbage collected @@ -348,11 +398,17 @@ std::shared_ptr PyTensor::from_array_interface(const py::object& obj, // bool 
data_readonly = data_array[1] > 0; // auto version = array_interface["version"].cast(); + auto maybe_dldatatype = nvidia::gxf::DLDataTypeFromTypeString(typestr); + if (!maybe_dldatatype) { + throw std::runtime_error("Unable to determine DLDataType from NumPy typestr"); + } + auto maybe_device = nvidia::gxf::DLDeviceFromPointer(data_ptr); + if (!maybe_device) { throw std::runtime_error("Unable to determine DLDevice from data pointer"); } DLTensor local_dl_tensor{ .data = data_ptr, - .device = dldevice_from_pointer(data_ptr), + .device = maybe_device.value(), .ndim = static_cast(shape.size()), - .dtype = dldatatype_from_typestr(typestr), + .dtype = maybe_dldatatype.value(), .shape = shape.data(), .strides = nullptr, .byte_offset = 0, @@ -402,24 +458,31 @@ std::shared_ptr PyTensor::from_array_interface(const py::object& obj, if (stream_id >= 0 && curr_stream_ptr != stream_ptr) { cudaEvent_t curr_stream_event; - cudaEventCreateWithFlags(&curr_stream_event, cudaEventDisableTiming); - cudaEventRecord(curr_stream_event, stream_ptr); + cudaError_t cuda_status; + + cuda_status = cudaEventCreateWithFlags(&curr_stream_event, cudaEventDisableTiming); + CHECK_CUDA_THROW_ERROR(cuda_status, "Failure during call to cudaEventCreateWithFlags"); + + cuda_status = cudaEventRecord(curr_stream_event, stream_ptr); + CHECK_CUDA_THROW_ERROR(cuda_status, "Failure during call to cudaEventRecord"); // Make current stream (curr_stream_ptr) to wait until the given stream (stream_ptr) - // is finished. - // This is a reverse of py_dlpack() method (in dl_converter.hpp). - cudaStreamWaitEvent(curr_stream_ptr, curr_stream_event, 0); - cudaEventDestroy(curr_stream_event); + // is finished. This is a reverse of py_dlpack() method. 
+ cuda_status = cudaStreamWaitEvent(curr_stream_ptr, curr_stream_event, 0); + CHECK_CUDA_THROW_ERROR(cuda_status, "Failure during call to cudaStreamWaitEvent"); + + cuda_status = cudaEventDestroy(curr_stream_event); + CHECK_CUDA_THROW_ERROR(cuda_status, "Failure during call to cudaEventDestroy"); } } // Create DLManagedTensor object - auto dl_managed_tensor_ctx = new DLManagedTensorCtx; + auto dl_managed_tensor_ctx = new DLManagedTensorContext; auto& dl_managed_tensor = dl_managed_tensor_ctx->tensor; dl_managed_tensor_ctx->memory_ref = memory_buf; dl_managed_tensor.manager_ctx = dl_managed_tensor_ctx; dl_managed_tensor.deleter = [](DLManagedTensor* self) { - auto dl_managed_tensor_ctx = static_cast(self->manager_ctx); + auto dl_managed_tensor_ctx = static_cast(self->manager_ctx); // Note: since 'memory_ref' is maintaining python object reference, we should acquire GIL in // case this function is called from another non-python thread, before releasing 'memory_ref'. py::gil_scoped_acquire scope_guard; @@ -474,7 +537,7 @@ std::shared_ptr PyTensor::from_dlpack(const py::object& obj) { case kDLCUDA: case kDLCUDAManaged: { py::int_ stream_ptr(1); // legacy stream - dlpack_capsule = py::reinterpret_borrow(dlpack_func(stream_ptr)); + dlpack_capsule = py::reinterpret_borrow(dlpack_func("stream"_a = stream_ptr)); break; } case kDLCPU: @@ -492,7 +555,7 @@ std::shared_ptr PyTensor::from_dlpack(const py::object& obj) { PyObject* dlpack_capsule_ptr = dlpack_obj.ptr(); - if (!PyCapsule_IsValid(dlpack_capsule_ptr, "dltensor")) { + if (!PyCapsule_IsValid(dlpack_capsule_ptr, dlpack_capsule_name)) { const char* capsule_name = PyCapsule_GetName(dlpack_capsule_ptr); throw std::runtime_error( fmt::format("Received an invalid DLPack capsule ('{}'). 
You might have already consumed " @@ -501,7 +564,7 @@ std::shared_ptr PyTensor::from_dlpack(const py::object& obj) { } DLManagedTensor* dl_managed_tensor = - static_cast(PyCapsule_GetPointer(dlpack_capsule_ptr, "dltensor")); + static_cast(PyCapsule_GetPointer(dlpack_capsule_ptr, dlpack_capsule_name)); // Set device dl_managed_tensor->dl_tensor.device = device; @@ -510,7 +573,7 @@ std::shared_ptr PyTensor::from_dlpack(const py::object& obj) { std::shared_ptr tensor = std::make_shared(dl_managed_tensor); // Set the capsule name to 'used_dltensor' so that it will not be consumed again. - PyCapsule_SetName(dlpack_capsule_ptr, "used_dltensor"); + PyCapsule_SetName(dlpack_capsule_ptr, used_dlpack_capsule_name); // Steal the ownership of the capsule so that it will not be destroyed when the capsule object // goes out of scope. diff --git a/python/holoscan/core/tensor.hpp b/python/holoscan/core/tensor.hpp index 1658c483..856e0e55 100644 --- a/python/holoscan/core/tensor.hpp +++ b/python/holoscan/core/tensor.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -47,13 +47,13 @@ static const std::unordered_map dldatatypecode_name /** * @brief Class to wrap the deleter of a DLManagedTensor in Python. * - * This class is used with DLManagedTensorCtx class to wrap the DLManagedTensor. + * This class is used with DLManagedTensorContext class to wrap the DLManagedTensor. 
* - * A shared pointer to this class in DLManagedTensorCtx class is used as the deleter of the - * DLManagedTensorCtx::memory_ref + * A shared pointer to this class in DLManagedTensorContext class is used as the deleter of the + * DLManagedTensorContext::memory_ref * - * When the last reference to the DLManagedTensorCtx object is released, - * DLManagedTensorCtx::memory_ref will also be destroyed, which will call the deleter function + * When the last reference to the DLManagedTensorContext object is released, + * DLManagedTensorContext::memory_ref will also be destroyed, which will call the deleter function * of the DLManagedTensor object. * * Compared to the C++ version (DLManagedMemoryBuffer), this class is used to acquire the GIL @@ -148,11 +148,11 @@ class LazyDLManagedTensorDeleter { class PyTensor : public Tensor { public: /** - * @brief Construct a new Tensor from an existing DLManagedTensorCtx. + * @brief Construct a new Tensor from an existing DLManagedTensorContext. * - * @param ctx A shared pointer to the DLManagedTensorCtx to be used in Tensor construction. + * @param ctx A shared pointer to the DLManagedTensorContext to be used in Tensor construction. */ - explicit PyTensor(std::shared_ptr& ctx); + explicit PyTensor(std::shared_ptr& ctx); /** * @brief Construct a new Tensor from an existing DLManagedTensor pointer. 
@@ -177,6 +177,7 @@ class PyTensor : public Tensor {
     return from_array_interface(obj, true);
   }
   static std::shared_ptr from_dlpack(const py::object& obj);
+  static py::object from_dlpack_pyobj(const py::object& obj);
   static py::capsule dlpack(const py::object& obj, py::object stream);
   static py::tuple dlpack_device(const py::object& obj);
 };
diff --git a/python/holoscan/core/tensor_pydoc.hpp b/python/holoscan/core/tensor_pydoc.hpp
index 6f872a66..a139e065 100644
--- a/python/holoscan/core/tensor_pydoc.hpp
+++ b/python/holoscan/core/tensor_pydoc.hpp
@@ -1,5 +1,5 @@
 /*
- * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
  * SPDX-License-Identifier: Apache-2.0
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
@@ -57,6 +57,50 @@ Base class representing a Holoscan Tensor.
 
 PYDOC(as_tensor, R"doc(
 Convert a Python object to a Tensor.
+
+Parameters
+==========
+object : array-like
+    An object such as a NumPy array, CuPy array, PyTorch tensor, etc. supporting one of the
+    supported protocols.
+
+Returns
+=======
+holoscan.Tensor
+
+Notes
+=====
+For device arrays, this method first attempts to convert via ``__cuda_array_interface__`` [4]_,
+but falls back to the DLPack protocol [2]_, [3]_ if it is unavailable.
+
+For host arrays, this method first attempts to convert via the DLPack protocol, but falls back to
+the ``__array_interface__`` [1]_ if it is unavailable.
+
+References
+==========
+.. [1] https://numpy.org/doc/stable/reference/arrays.interface.html
+.. [2] https://dmlc.github.io/dlpack/latest/python_spec.html
+.. [3] https://data-apis.org/array-api/2022.12/API_specification/generated/array_api.array.__dlpack__.html
+.. [4] https://numba.readthedocs.io/en/stable/cuda/cuda_array_interface.html
+)doc")
+
+PYDOC(from_dlpack, R"doc(
+Convert a Python object to a Tensor via the DLPack protocol [1]_, [2]_.
+
+Parameters
+==========
+object : array-like
+    An object such as a NumPy array, CuPy array, PyTorch tensor, etc. supporting one of the
+    supported protocols.
+
+Returns
+=======
+holoscan.Tensor
+
+References
+==========
+.. [1] https://dmlc.github.io/dlpack/latest/python_spec.html
+.. [2] https://data-apis.org/array-api/2022.12/API_specification/generated/array_api.array.__dlpack__.html
 )doc")
 
 PYDOC(py_dlpack, R"doc(
diff --git a/python/holoscan/gxf/entity_pydoc.hpp b/python/holoscan/gxf/entity_pydoc.hpp
index 2cc75b06..65fc77a5 100644
--- a/python/holoscan/gxf/entity_pydoc.hpp
+++ b/python/holoscan/gxf/entity_pydoc.hpp
@@ -1,5 +1,5 @@
 /*
- * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
  * SPDX-License-Identifier: Apache-2.0
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
@@ -41,7 +41,7 @@ name : str
 
 Returns
 -------
-resource : GXFTensor
+resource : Tensor
     The resource with the given name.
 )doc")
 
diff --git a/python/holoscan/gxf/gxf.cpp b/python/holoscan/gxf/gxf.cpp
index 533bbbd4..28e72cb5 100644
--- a/python/holoscan/gxf/gxf.cpp
+++ b/python/holoscan/gxf/gxf.cpp
@@ -1,5 +1,5 @@
 /*
- * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -35,7 +35,6 @@ #include "holoscan/core/gxf/gxf_extension_registrar.hpp" #include "holoscan/core/gxf/gxf_io_context.hpp" #include "holoscan/core/gxf/gxf_resource.hpp" -#include "holoscan/core/gxf/gxf_tensor.hpp" #include "holoscan/core/gxf/gxf_wrapper.hpp" #include "gxf/core/gxf.h" @@ -100,10 +99,6 @@ PYBIND11_MODULE(_gxf, m) { "extension_filenames"_a = std::vector{}, "manifest_filenames"_a = std::vector{}); - py::class_>( - m, "GXFTensor", doc::GXFTensor::doc_GXFTensor) - .def(py::init<>(), doc::GXFTensor::doc_GXFTensor); - py::class_>( m, "GXFComponent", doc::GXFComponent::doc_GXFComponent) .def(py::init<>(), doc::GXFComponent::doc_GXFComponent) diff --git a/python/holoscan/gxf/gxf_pydoc.hpp b/python/holoscan/gxf/gxf_pydoc.hpp index 0e6dd78e..467cb619 100644 --- a/python/holoscan/gxf/gxf_pydoc.hpp +++ b/python/holoscan/gxf/gxf_pydoc.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -24,15 +24,6 @@ namespace holoscan::doc { -namespace GXFTensor { - -// Constructor -PYDOC(GXFTensor, R"doc( -Base class representing a GXF Tensor. -)doc") - -} // namespace GXFTensor - namespace GXFComponent { // Constructor diff --git a/python/holoscan/logger/__init__.py b/python/holoscan/logger/__init__.py index 984a395f..a433c738 100644 --- a/python/holoscan/logger/__init__.py +++ b/python/holoscan/logger/__init__.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
# SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,42 +17,21 @@ .. autosummary:: holoscan.logger.LogLevel - holoscan.logger.disable_backtrace - holoscan.logger.dump_backtrace - holoscan.logger.enable_backtrace - holoscan.logger.flush - holoscan.logger.flush_level - holoscan.logger.flush_on holoscan.logger.log_level holoscan.logger.set_log_level holoscan.logger.set_log_pattern - holoscan.logger.should_backtrace """ from ._logger import ( LogLevel, - disable_backtrace, - dump_backtrace, - enable_backtrace, - flush, - flush_level, - flush_on, log_level, set_log_level, set_log_pattern, - should_backtrace, ) __all__ = [ "LogLevel", - "disable_backtrace", - "dump_backtrace", - "enable_backtrace", - "flush", - "flush_level", - "flush_on", "log_level", "set_log_level", "set_log_pattern", - "should_backtrace", ] diff --git a/python/holoscan/logger/logger.cpp b/python/holoscan/logger/logger.cpp index ed0d1f38..6547da31 100644 --- a/python/holoscan/logger/logger.cpp +++ b/python/holoscan/logger/logger.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -56,12 +56,5 @@ PYBIND11_MODULE(_logger, m) { m.def("set_log_level", &set_log_level, doc::Logger::doc_set_log_level); m.def("log_level", &log_level, doc::Logger::doc_log_level); m.def("set_log_pattern", &set_log_pattern, doc::Logger::doc_set_log_pattern); - m.def("enable_backtrace", &Logger::enable_backtrace, doc::Logger::doc_enable_backtrace); - m.def("disable_backtrace", &Logger::disable_backtrace, doc::Logger::doc_disable_backtrace); - m.def("dump_backtrace", &Logger::dump_backtrace, doc::Logger::doc_enable_backtrace); - m.def("should_backtrace", &Logger::should_backtrace, doc::Logger::doc_enable_backtrace); - m.def("flush", &Logger::flush, doc::Logger::doc_flush); - m.def("flush_level", &Logger::flush_level, doc::Logger::doc_flush_level); - m.def("flush_on", &Logger::flush_on, doc::Logger::doc_flush_on); } // PYBIND11_MODULE } // namespace holoscan diff --git a/python/holoscan/logger/logger_pydoc.hpp b/python/holoscan/logger/logger_pydoc.hpp index 37718f76..7252b510 100644 --- a/python/holoscan/logger/logger_pydoc.hpp +++ b/python/holoscan/logger/logger_pydoc.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -57,56 +57,6 @@ References .. [1] https://spdlog.docsforge.com/v1.x/3.custom-formatting/ )doc") -PYDOC(enable_backtrace, R"doc( -Enable backtrace support on the logger. - -When enabled, all debug/trace messages are stored in a circular buffer until needed for debugging. - -Parameters ----------- -n_messages : str - The number of messages in the circular buffer. -)doc") - -PYDOC(disable_backtrace, R"doc( -Disable backtrace support on the logger. 
-)doc") - -PYDOC(dump_backtrace, R"doc( -Dump the loggers backtrace buffer. -)doc") - -PYDOC(should_backtrace, R"doc( -Boolean indicating whether backtracing is enabled. - -Returns -------- -bool - Whether backtracing are enabled. -)doc") - -PYDOC(flush, R"doc( -Force the logger to immediately flush its contents. -)doc") - -PYDOC(flush_level, R"doc( -Determine the minimum log level that will trigger an automatic flush. - -Returns -------- -level : holoscan.logger.LogLevel - The level at which the flush occurs. -)doc") - -PYDOC(flush_on, R"doc( -Sets the minimum log level that will trigger an automatic flush. - -Parameters ----------- -level : holoscan.logger.LogLevel - The level at which the logger should automatically flush. -)doc") - } // namespace Logger } // namespace holoscan::doc diff --git a/python/holoscan/operators/__init__.py b/python/holoscan/operators/__init__.py index eea9f52b..6285bdde 100644 --- a/python/holoscan/operators/__init__.py +++ b/python/holoscan/operators/__init__.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -22,7 +22,6 @@ holoscan.operators.HolovizOp holoscan.operators.InferenceOp holoscan.operators.InferenceProcessorOp - holoscan.operators.NTV2Channel holoscan.operators.PingRxOp holoscan.operators.PingTxOp holoscan.operators.SegmentationPostprocessorOp diff --git a/python/holoscan/operators/aja_source/aja_source.cpp b/python/holoscan/operators/aja_source/aja_source.cpp index 39defdac..4f3f3a36 100644 --- a/python/holoscan/operators/aja_source/aja_source.cpp +++ b/python/holoscan/operators/aja_source/aja_source.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -131,7 +131,7 @@ PYBIND11_MODULE(_aja_source, m) { "overlay_channel"_a = NTV2Channel::NTV2_CHANNEL2, "overlay_rdma"_a = true, "name"_a = "aja_source"s, - doc::AJASourceOp::doc_AJASourceOp_python) + doc::AJASourceOp::doc_AJASourceOp) .def("initialize", &AJASourceOp::initialize, doc::AJASourceOp::doc_initialize) .def("setup", &AJASourceOp::setup, "spec"_a, doc::AJASourceOp::doc_setup); } // PYBIND11_MODULE NOLINT diff --git a/python/holoscan/operators/aja_source/pydoc.hpp b/python/holoscan/operators/aja_source/pydoc.hpp index 32d606c6..ae03cb4a 100644 --- a/python/holoscan/operators/aja_source/pydoc.hpp +++ b/python/holoscan/operators/aja_source/pydoc.hpp @@ -24,54 +24,57 @@ namespace holoscan::doc::AJASourceOp { +// PyAJASourceOp Constructor PYDOC(AJASourceOp, R"doc( Operator to get a video stream from an AJA capture card. -)doc") -// PyAJASourceOp Constructor -PYDOC(AJASourceOp_python, R"doc( -Operator to get a video stream from an AJA capture card. +**==Named Inputs==** -Named inputs: - overlay_buffer_input: nvidia::gxf::VideoBuffer (optional) + overlay_buffer_input : nvidia::gxf::VideoBuffer (optional) The operator does not require a message on this input port in order for ``compute`` to be called. If a message is found, and ``enable_overlay`` is ``True``, the image will be mixed - with the image captured by the AJA card. If `enable_overlay` is ``False``, any message on + with the image captured by the AJA card. If ``enable_overlay`` is ``False``, any message on this port will be ignored. -Named outputs: - video_buffer_output: nvidia::gxf::VideoBuffer +**==Named Outputs==** + + video_buffer_output : nvidia::gxf::VideoBuffer The output video frame from the AJA capture card. 
If ``overlay_rdma`` is ``True``, this video buffer will be on the device, otherwise it will be in pinned host memory. - overlay_buffer_output: nvidia::gxf::VideoBuffer (optional) + overlay_buffer_output : nvidia::gxf::VideoBuffer (optional) This output port will only emit a video buffer when ``enable_overlay`` is ``True``. If ``overlay_rdma`` is ``True``, this video buffer will be on the device, otherwise it will be in pinned host memory. Parameters ---------- -fragment : holoscan.core.Fragment +fragment : holoscan.core.Fragment (constructor only) The fragment that the operator belongs to. device : str, optional - The device to target (e.g. "0" for device 0) + The device to target (e.g., "0" for device 0). Default value is ``"0"``. channel : holoscan.operators.NTV2Channel or int, optional - The camera NTV2Channel to use for output. + The camera ``NTV2Channel`` to use for output (e.g., ``NTV2Channel.NTV2_CHANNEL1`` (``0``) or + "NTV2_CHANNEL1" (in YAML) for the first channel). Default value is ``NTV2Channel.NTV2_CHANNEL1`` + (``"NTV2_CHANNEL1"`` in YAML). width : int, optional - Width of the video stream. + Width of the video stream. Default value is ``1920``. height : int, optional - Height of the video stream. + Height of the video stream. Default value is ``1080``. framerate : int, optional - Frame rate of the video stream. + Frame rate of the video stream. Default value is ``60``. rdma : bool, optional - Boolean indicating whether RDMA is enabled. + Boolean indicating whether RDMA is enabled. Default value is ``False`` (``"false"`` in YAML). enable_overlay : bool, optional - Boolean indicating whether a separate overlay channel is enabled. + Boolean indicating whether a separate overlay channel is enabled. Default value is ``False`` + (``"false"`` in YAML). overlay_channel : holoscan.operators.NTV2Channel or int, optional - The camera NTV2Channel to use for overlay output. + The camera NTV2Channel to use for overlay output. 
+ Default value is ``NTV2Channel.NTV2_CHANNEL2`` + (``"NTV2_CHANNEL2"`` in YAML). overlay_rdma : bool, optional - Boolean indicating whether RDMA is enabled for the overlay. -name : str, optional - The name of the operator. + Boolean indicating whether RDMA is enabled for the overlay. Default value is ``True`` + (``"true"`` in YAML). +name : str, optional (constructor only) + The name of the operator. Default value is ``"aja_source"``. )doc") PYDOC(setup, R"doc( diff --git a/python/holoscan/operators/bayer_demosaic/bayer_demosaic.cpp b/python/holoscan/operators/bayer_demosaic/bayer_demosaic.cpp index 7ea79342..e7854928 100644 --- a/python/holoscan/operators/bayer_demosaic/bayer_demosaic.cpp +++ b/python/holoscan/operators/bayer_demosaic/bayer_demosaic.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -117,7 +117,7 @@ PYBIND11_MODULE(_bayer_demosaic, m) { "generate_alpha"_a = false, "alpha_value"_a = 255, "name"_a = "format_converter"s, - doc::BayerDemosaicOp::doc_BayerDemosaicOp_python) + doc::BayerDemosaicOp::doc_BayerDemosaicOp) .def("initialize", &BayerDemosaicOp::initialize, doc::BayerDemosaicOp::doc_initialize) .def("setup", &BayerDemosaicOp::setup, "spec"_a, doc::BayerDemosaicOp::doc_setup); } // PYBIND11_MODULE NOLINT diff --git a/python/holoscan/operators/bayer_demosaic/pydoc.hpp b/python/holoscan/operators/bayer_demosaic/pydoc.hpp index d04c77fa..389d21a7 100644 --- a/python/holoscan/operators/bayer_demosaic/pydoc.hpp +++ b/python/holoscan/operators/bayer_demosaic/pydoc.hpp @@ -24,55 +24,73 @@ namespace holoscan::doc::BayerDemosaicOp { -// Constructor +// PyBayerDemosaicOp Constructor PYDOC(BayerDemosaicOp, R"doc( Bayer Demosaic operator. 
-)doc") -// PyBayerDemosaicOp Constructor -PYDOC(BayerDemosaicOp_python, R"doc( -Bayer Demosaic operator. +**==Named Inputs==** -Named inputs: - receiver: nvidia::gxf::Tensor or nvidia::gxf::VideoBuffer + receiver : nvidia::gxf::Tensor or nvidia::gxf::VideoBuffer The input video frame to process. If the input is a VideoBuffer it must be an 8-bit unsigned grayscale video (nvidia::gxf::VideoFormat::GXF_VIDEO_FORMAT_GRAY). The video buffer may be in either host or device memory (a host->device copy is performed if needed). If a video buffer is not found, the input port message is searched for a tensor with the - name specified by `in_tensor_name`. This must be a device tensor in either 8-bit or 16-bit + name specified by ``in_tensor_name``. This must be a device tensor in either 8-bit or 16-bit unsigned integer format. -Named outputs: - transmitter: nvidia::gxf::Tensor +**==Named Outputs==** + + transmitter : nvidia::gxf::Tensor The output video frame after demosaicing. This will be a 3-channel RGB image if - `alpha_value` is ``True``, otherwise it will be a 4-channel RGBA image. The data type will + ``alpha_value`` is ``True``, otherwise it will be a 4-channel RGBA image. The data type will be either 8-bit or 16-bit unsigned integer (matching the bit depth of the input). The - name of the tensor that is output is controlled by `out_tensor_name`. + name of the tensor that is output is controlled by ``out_tensor_name``. Parameters ---------- -fragment : holoscan.core.Fragment +fragment : holoscan.core.Fragment (constructor only) The fragment that the operator belongs to. pool : holoscan.resources.Allocator Memory pool allocator used by the operator. cuda_stream_pool : holoscan.resources.CudaStreamPool, optional - `holoscan.resources.CudaStreamPool` instance to allocate CUDA streams. + ``holoscan.resources.CudaStreamPool`` instance to allocate CUDA streams. Default value is ``None``. in_tensor_name : str, optional - The name of the input tensor. 
+ The name of the input tensor. Default value is ``""`` (empty string). out_tensor_name : str, optional - The name of the output tensor. + The name of the output tensor. Default value is ``""`` (empty string). interpolation_mode : int, optional The interpolation model to be used for demosaicing. Values available at: - https://docs.nvidia.com/cuda/npp/group__typedefs__npp.html#ga2b58ebd329141d560aa4367f1708f191 + https://docs.nvidia.com/cuda/npp/nppdefs.html?highlight=Two%20parameter%20cubic%20filter#c.NppiInterpolationMode + + - NPPI_INTER_UNDEFINED (``0``): Undefined filtering interpolation mode. + - NPPI_INTER_NN (``1``): Nearest neighbor filtering. + - NPPI_INTER_LINEAR (``2``): Linear interpolation. + - NPPI_INTER_CUBIC (``4``): Cubic interpolation. + - NPPI_INTER_CUBIC2P_BSPLINE (``5``): Two-parameter cubic filter (B=1, C=0) + - NPPI_INTER_CUBIC2P_CATMULLROM (``6``): Two-parameter cubic filter (B=0, C=1/2) + - NPPI_INTER_CUBIC2P_B05C03 (``7``): Two-parameter cubic filter (B=1/2, C=3/10) + - NPPI_INTER_SUPER (``8``): Super sampling. + - NPPI_INTER_LANCZOS (``16``): Lanczos filtering. + - NPPI_INTER_LANCZOS3_ADVANCED (``17``): Generic Lanczos filtering with order 3. + - NPPI_SMOOTH_EDGE (``0x8000000``): Smooth edge filtering. + + Default value is ``0`` (NPPI_INTER_UNDEFINED). bayer_grid_pos : int, optional - The Bayer grid position (default of 2 = GBRG). Values available at: - https://docs.nvidia.com/cuda/npp/group__typedefs__npp.html#ga5597309d6766fb2dffe155990d915ecb + The Bayer grid position. Values available at: + https://docs.nvidia.com/cuda/npp/nppdefs.html?highlight=Two%20parameter%20cubic%20filter#c.NppiBayerGridPosition + + - NPPI_BAYER_BGGR (``0``): Default registration position BGGR. + - NPPI_BAYER_RGGB (``1``): Registration position RGGB. + - NPPI_BAYER_GBRG (``2``): Registration position GBRG. + - NPPI_BAYER_GRBG (``3``): Registration position GRBG. + + Default value is ``2`` (NPPI_BAYER_GBRG). 
generate_alpha : bool, optional - Generate alpha channel. + Generate alpha channel. Default value is ``False``. alpha_value : int, optional - Alpha value to be generated if `generate_alpha` is set to ``True``. -name : str, optional - The name of the operator. + Alpha value to be generated if ``generate_alpha`` is set to ``True``. Default value is ``255``. +name : str, optional (constructor only) + The name of the operator. Default value is ``"bayer_demosaic"``. )doc") PYDOC(gxf_typename, R"doc( diff --git a/python/holoscan/operators/format_converter/format_converter.cpp b/python/holoscan/operators/format_converter/format_converter.cpp index 33e1af0c..e18f2644 100644 --- a/python/holoscan/operators/format_converter/format_converter.cpp +++ b/python/holoscan/operators/format_converter/format_converter.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -138,7 +138,7 @@ PYBIND11_MODULE(_format_converter, m) { "out_channel_order"_a = std::vector{}, "cuda_stream_pool"_a = py::none(), "name"_a = "format_converter"s, - doc::FormatConverterOp::doc_FormatConverterOp_python) + doc::FormatConverterOp::doc_FormatConverterOp) .def("initialize", &FormatConverterOp::initialize, doc::FormatConverterOp::doc_initialize) .def("setup", &FormatConverterOp::setup, "spec"_a, doc::FormatConverterOp::doc_setup); } // PYBIND11_MODULE NOLINT diff --git a/python/holoscan/operators/format_converter/pydoc.hpp b/python/holoscan/operators/format_converter/pydoc.hpp index 3361d4f8..90579dfc 100644 --- a/python/holoscan/operators/format_converter/pydoc.hpp +++ b/python/holoscan/operators/format_converter/pydoc.hpp @@ -24,63 +24,96 @@ namespace holoscan::doc::FormatConverterOp { +// PyFormatConverterOp Constructor PYDOC(FormatConverterOp, R"doc( Format conversion operator. -)doc") -// PyFormatConverterOp Constructor -PYDOC(FormatConverterOp_python, R"doc( -Format conversion operator. +**==Named Inputs==** -Named inputs: - source_video: nvidia::gxf::Tensor or nvidia::gxf::VideoBuffer + source_video : nvidia::gxf::Tensor or nvidia::gxf::VideoBuffer The input video frame to process. If the input is a VideoBuffer it must be in format GXF_VIDEO_FORMAT_RGBA, GXF_VIDEO_FORMAT_RGB or GXF_VIDEO_FORMAT_NV12. This video buffer may be in either host or device memory (a host->device copy is performed if needed). If a video buffer is not found, the input port message is searched for a tensor with the - name specified by `in_tensor_name`. This must be a device tensor in one of several + name specified by ``in_tensor_name``. This must be a device tensor in one of several supported formats (unsigned 8-bit int or float32 graycale, unsigned 8-bit int RGB or RGBA, YUV420 or NV12). 
-Named outputs: - tensor: nvidia::gxf::Tensor +**==Named Outputs==** + + tensor : nvidia::gxf::Tensor The output video frame after processing. The shape, data type and number of channels of this output tensor will depend on the specific parameters that were set for this operator. The - name of the Tensor transmitted on this port is determined by `out_tensor_name`. + name of the Tensor transmitted on this port is determined by ``out_tensor_name``. Parameters ---------- -fragment : holoscan.core.Fragment +fragment : holoscan.core.Fragment (constructor only) The fragment that the operator belongs to. pool : holoscan.resources.Allocator Memory pool allocator used by the operator. out_dtype : str - Destination data type (e.g. "rgb888" or "rgba8888"). + Destination data type. The available options are: + + - ``"rgb888"`` + - ``"uint8"`` + - ``"float32"`` + - ``"rgba8888"`` + - ``"yuv420"`` + - ``"nv12"`` in_dtype : str, optional - Source data type (e.g. "rgb888" or "rgba8888"). + Source data type. The available options are: + + - ``"rgb888"`` + - ``"uint8"`` + - ``"float32"`` + - ``"rgba8888"`` + - ``"yuv420"`` + - ``"nv12"`` in_tensor_name : str, optional - The name of the input tensor (default is the empty string, ""). + The name of the input tensor. Default value is ``""`` (empty string). out_tensor_name : str, optional - The name of the output tensor (default is the empty string, ""). + The name of the output tensor. Default value is ``""`` (empty string). scale_min : float, optional - Output will be clipped to this minimum value. + Output will be clipped to this minimum value. Default value is ``0.0``. scale_max : float, optional - Output will be clipped to this maximum value. + Output will be clipped to this maximum value. Default value is ``1.0``. alpha_value : int, optional Unsigned integer in range [0, 255], indicating the alpha channel value to use - when converting from RGB to RGBA. + when converting from RGB to RGBA. Default value is ``255``. 
resize_height : int, optional - Desired height for the (resized) output. Height will be unchanged if `resize_height` is 0. + Desired height for the (resized) output. Height will be unchanged if ``resize_height`` is ``0``. + Default value is ``0``. resize_width : int, optional - Desired width for the (resized) output. Width will be unchanged if `resize_width` is 0. + Desired width for the (resized) output. Width will be unchanged if ``resize_width`` is ``0``. + Default value is ``0``. resize_mode : int, optional - Resize mode enum value corresponding to NPP's nppiInterpolationMode (default=NPPI_INTER_CUBIC). + Resize mode enum value corresponding to NPP's NppiInterpolationMode. + Values available at: + https://docs.nvidia.com/cuda/npp/nppdefs.html?highlight=Two%20parameter%20cubic%20filter#c.NppiInterpolationMode + + - NPPI_INTER_UNDEFINED (``0``): Undefined filtering interpolation mode. + - NPPI_INTER_NN (``1``): Nearest neighbor filtering. + - NPPI_INTER_LINEAR (``2``): Linear interpolation. + - NPPI_INTER_CUBIC (``4``): Cubic interpolation. + - NPPI_INTER_CUBIC2P_BSPLINE (``5``): Two-parameter cubic filter (B=1, C=0) + - NPPI_INTER_CUBIC2P_CATMULLROM (``6``): Two-parameter cubic filter (B=0, C=1/2) + - NPPI_INTER_CUBIC2P_B05C03 (``7``): Two-parameter cubic filter (B=1/2, C=3/10) + - NPPI_INTER_SUPER (``8``): Super sampling. + - NPPI_INTER_LANCZOS (``16``): Lanczos filtering. + - NPPI_INTER_LANCZOS3_ADVANCED (``17``): Generic Lanczos filtering with order 3. + - NPPI_SMOOTH_EDGE (``0x8000000``): Smooth edge filtering. + + Default value is ``0`` (NPPI_INTER_UNDEFINED) which would be equivalent to ``4`` + (NPPI_INTER_CUBIC). channel_order : sequence of int Sequence of integers describing how channel values are permuted. + Default value is ``[0, 1, 2]`` for 3-channel images and ``[0, 1, 2, 3]`` for 4-channel images. cuda_stream_pool : holoscan.resources.CudaStreamPool, optional `holoscan.resources.CudaStreamPool` instance to allocate CUDA streams. 
-name : str, optional - The name of the operator. + Default value is ``None``. +name : str, optional (constructor only) + The name of the operator. Default value is ``"format_converter"``. )doc") PYDOC(initialize, R"doc( diff --git a/python/holoscan/operators/holoviz/holoviz.cpp b/python/holoscan/operators/holoviz/holoviz.cpp index 9f5c244e..f6b91a54 100644 --- a/python/holoscan/operators/holoviz/holoviz.cpp +++ b/python/holoscan/operators/holoviz/holoviz.cpp @@ -159,7 +159,7 @@ PYBIND11_MODULE(_holoviz, m) { "font_path"_a = "", "cuda_stream_pool"_a = py::none(), "name"_a = "holoviz_op"s, - doc::HolovizOp::doc_HolovizOp_python) + doc::HolovizOp::doc_HolovizOp) .def("initialize", &HolovizOp::initialize, doc::HolovizOp::doc_initialize) .def("setup", &HolovizOp::setup, "spec"_a, doc::HolovizOp::doc_setup); diff --git a/python/holoscan/operators/holoviz/pydoc.hpp b/python/holoscan/operators/holoviz/pydoc.hpp index a0850555..bd45e6d4 100644 --- a/python/holoscan/operators/holoviz/pydoc.hpp +++ b/python/holoscan/operators/holoviz/pydoc.hpp @@ -24,121 +24,121 @@ namespace holoscan::doc::HolovizOp { +// PyHolovizOp Constructor PYDOC(HolovizOp, R"doc( Holoviz visualization operator using Holoviz module. This is a Vulkan-based visualizer. -)doc") -// PyHolovizOp Constructor -PYDOC(HolovizOp_python, R"doc( -Holoviz visualization operator using Holoviz module. +**==Named Inputs==** -This is a Vulkan-based visualizer. - -Named inputs: - receivers: multi-receiver accepting nvidia::gxf::Tensor and/or nvidia::gxf::VideoBuffer - Any number of upstream ports may be connected to this `receivers` port. This port can + receivers : multi-receiver accepting nvidia::gxf::Tensor and/or nvidia::gxf::VideoBuffer + Any number of upstream ports may be connected to this ``receivers`` port. This port can accept either VideoBuffers or Tensors. These inputs can be in either host or device memory. Each tensor or video buffer will result in a layer. 
The operator autodetects the layer type for certain input types (e.g. a video buffer will result in an image layer). For other input types or more complex use cases, input specifications can be provided either at - initialization time as a parameter or dynamically at run time (via `input_specs`). On each - call to `compute`, tensors corresponding to all names specified in the `tensors` parameter + initialization time as a parameter or dynamically at run time (via ``input_specs``). On each + call to ``compute``, tensors corresponding to all names specified in the ``tensors`` parameter must be found or an exception will be raised. Any extra, named tensors not present in the - `tensors` parameter specification (or optional, dynamic `input_specs` input) will be + ``tensors`` parameter specification (or optional, dynamic ``input_specs`` input) will be ignored. - input_specs: list[holoscan.operators.HolovizOp.InputSpec] (optional) - A list of `InputSpec` objects. This port can be used to dynamically update the overlay + input_specs : list[holoscan.operators.HolovizOp.InputSpec] (optional) + A list of ``InputSpec`` objects. This port can be used to dynamically update the overlay specification at run time. No inputs are required on this port in order for the operator - to `compute`. - render_buffer_input: nvidia::gxf::VideoBuffer (optional) + to ``compute``. + render_buffer_input : nvidia::gxf::VideoBuffer (optional) An empty render buffer can optionally be provided. The video buffer must have format GXF_VIDEO_FORMAT_RGBA and be in device memory. This input port only exists if - `enable_render_buffer_input` was set to ``True``, in which case `compute` will only be + ``enable_render_buffer_input`` was set to ``True``, in which case ``compute`` will only be called when a message arrives on this input. 
-Named outputs: - render_buffer_output: nvidia::gxf::VideoBuffer (optional) +**==Named Outputs==** + + render_buffer_output : nvidia::gxf::VideoBuffer (optional) Output for a filled render buffer. If an input render buffer is specified, it is using that one, else it allocates a new buffer. The video buffer will have format GXF_VIDEO_FORMAT_RGBA and will be in device memory. This output is useful for offline - rendering or headless mode. This output port only exists if `enable_render_buffer_output` + rendering or headless mode. This output port only exists if ``enable_render_buffer_output`` was set to ``True``. - camera_pose_output: std::array (optional) + camera_pose_output : std::array (optional) The camera pose. The parameters returned represent the values of a 4x4 row major - projection matrix. This output port only exists if `enable_camera_pose_output` was set to + projection matrix. This output port only exists if ``enable_camera_pose_output`` was set to ``True``. Parameters ---------- -fragment : holoscan.core.Fragment +fragment : holoscan.core.Fragment (constructor only) The fragment that the operator belongs to. allocator : holoscan.core.Allocator, optional - Allocator used to allocate render buffer output. If None, will default to - `holoscan.core.UnboundedAllocator`. + Allocator used to allocate render buffer output. If ``None``, will default to + ``holoscan.core.UnboundedAllocator``. receivers : sequence of holoscan.core.IOSpec, optional List of input receivers. tensors : sequence of dict, optional - List of input tensors. Each tensor is defined by a dictionary where the 'name' key must + List of input tensors. Each tensor is defined by a dictionary where the ``"name"`` key must correspond to a tensor sent to the operator's input. See the notes section below for further details on how the tensor dictionary is defined. color_lut : list of list of float, optional - Color lookup table for tensors of type 'color_lut'. Should be shape `(n_colors, 4)`. 
+ Color lookup table for tensors of type ``color_lut``. Should be shape ``(n_colors, 4)``. window_title : str, optional - Title on window canvas. + Title on window canvas. Default value is ``"Holoviz"``. display_name : str, optional - In exclusive mode, name of display to use as shown with xrandr. + In exclusive mode, name of display to use as shown with xrandr. Default value is ``"DP-0"``. width : int, optional - Window width or display resolution width if in exclusive or fullscreen mode. + Window width or display resolution width if in exclusive or fullscreen mode. Default value is + ``1920``. height : int, optional - Window height or display resolution width if in exclusive or fullscreen mode. + Window height or display resolution height if in exclusive or fullscreen mode. Default value is + ``1080``. framerate : float, optional - Display framerate in Hz if in exclusive mode. + Display framerate in Hz if in exclusive mode. Default value is ``60.0``. use_exclusive_display : bool, optional - Enable exclusive display. + Enable exclusive display. Default value is ``False``. fullscreen : bool, optional - Enable fullscreen window. + Enable fullscreen window. Default value is ``False``. headless : bool, optional Enable headless mode. No window is opened, the render buffer is output to - port `render_buffer_output`. + port ``render_buffer_output``. Default value is ``False``. enable_render_buffer_input : bool, optional - If ``True``, an additional input port, named 'render_buffer_input' is added to the - operator. + If ``True``, an additional input port, named ``"render_buffer_input"`` is added to the + operator. Default value is ``False``. enable_render_buffer_output : bool, optional - If ``True``, an additional output port, named 'render_buffer_output' is added to the - operator. + If ``True``, an additional output port, named ``"render_buffer_output"`` is added to the + operator. Default value is ``False``. enable_camera_pose_output : bool, optional. 
- If ``True``, an additional output port, named 'camera_pose_output' is added to the - operator. + If ``True``, an additional output port, named ``"camera_pose_output"`` is added to the + operator. Default value is ``False``. font_path : str, optional - File path for the font used for rendering text. + File path for the font used for rendering text. Default value is ``""``. cuda_stream_pool : holoscan.resources.CudaStreamPool, optional - `holoscan.resources.CudaStreamPool` instance to allocate CUDA streams. -name : str, optional - The name of the operator. + ``holoscan.resources.CudaStreamPool`` instance to allocate CUDA streams. Default value is + ``None``. +name : str, optional (constructor only) + The name of the operator. Default value is ``"holoviz_op"``. Notes ----- -The `tensors` argument is used to specify the tensors to display. Each tensor is defined using a +The ``tensors`` argument is used to specify the tensors to display. Each tensor is defined using a dictionary, that must, at minimum include a 'name' key that corresponds to a tensor found on the operator's input. A 'type' key should also be provided to indicate the type of entry to display. -The 'type' key will be one of {"color", "color_lut", "crosses", "lines", "lines_3d", "line_strip", -"line_strip_3d", "ovals", "points", "points_3d", "rectangles", "text", "triangles", "triangles_3d", -"depth_map", "depth_map_color", "unknown"}. The default type is "unknown" which will attempt to +The 'type' key will be one of {``"color"``, ``"color_lut"``, ``"crosses"``, ``"lines"``, +``"lines_3d"``, ``"line_strip"``, ``"line_strip_3d"``, ``"ovals"``, ``"points"``, ``"points_3d"``, +``"rectangles"``, ``"text"``, ``"triangles"``, ``"triangles_3d"``, ``"depth_map"``, +``"depth_map_color"``, ``"unknown"``}. The default type is ``"unknown"`` which will attempt to guess the corresponding type based on the tensor dimensions. Concrete examples are given below. 
-To show a single 2D RGB or RGBA image, use a list containing a single tensor of type 'color'. +To show a single 2D RGB or RGBA image, use a list containing a single tensor of type ``"color"``. .. code-block:: python tensors = [dict(name="video", type="color", opacity=1.0, priority=0)] -Here, the optional key `opacity` is used to scale the opacity of the tensor. The `priority` key +Here, the optional key ``opacity`` is used to scale the opacity of the tensor. The ``priority`` key is used to specify the render priority for layers. Layers with a higher priority will be rendered on top of those with a lower priority. -If we also had a "boxes" tensor representing rectangular bounding boxes, we could display them +If we also had a ``"boxes"`` tensor representing rectangular bounding boxes, we could display them on top of the image like this. .. code-block:: python @@ -148,7 +148,164 @@ on top of the image like this. dict(name="boxes", type="rectangles", color=[1.0, 0.0, 0.0], line_width=2, priority=1), ] -where the `color` and `line_width` keys specify the color and line width of the bounding box. +where the ``color`` and ``line_width`` keys specify the color and line width of the bounding box. + +The details of the dictionary are as follows: + +- **name**: name of the tensor containing the input data to display + + - type: ``str`` +- **type**: input type (default ``"unknown"``) + + - type: ``str`` + - possible values: + + - **unknown**: unknown type, the operator tries to guess the type by inspecting the + tensor. + - **color**: RGB or RGBA color 2d image. + - **color_lut**: single channel 2d image, color is looked up. + - **points**: point primitives, one coordinate (x, y) per primitive. + - **lines**: line primitives, two coordinates (x0, y0) and (x1, y1) per primitive. + - **line_strip**: line strip primitive, a line primitive i is defined by each + coordinate (xi, yi) and the following (xi+1, yi+1). 
+ - **triangles**: triangle primitive, three coordinates (x0, y0), (x1, y1) and (x2, y2) + per primitive. + - **crosses**: cross primitive, a cross is defined by the center coordinate and the + size (xi, yi, si). + - **rectangles**: axis aligned rectangle primitive, each rectangle is defined by two + coordinates (xi, yi) and (xi+1, yi+1). + - **ovals**: oval primitive, an oval primitive is defined by the center coordinate and + the axis sizes (xi, yi, sxi, syi). + - **text**: text is defined by the top left coordinate and the size (x, y, s) per + string, text strings are defined by InputSpec member **text**. + - **depth_map**: single channel 2d array where each element represents a depth value. + The data is rendered as a 3d object using points, lines or triangles. The color for + the elements can be specified through ``depth_map_color``. Supported format: 8-bit + unsigned normalized format that has a single 8-bit depth component. + - **depth_map_color**: RGBA 2d image, same size as the depth map. One color value for + each element of the depth map grid. Supported format: 32-bit unsigned normalized + format that has an 8-bit R component in byte 0, an 8-bit G component in byte 1, an + 8-bit B component in byte 2, and an 8-bit A component in byte 3. 
+- **opacity**: layer opacity, 1.0 is fully opaque, 0.0 is fully transparent (default: + ``1.0``) + + - type: ``float`` +- **priority**: layer priority, determines the render order, layers with higher priority + values are rendered on top of layers with lower priority values (default: ``0``) + + - type: ``int`` +- **color**: RGBA color of rendered geometry (default: ``[1.f, 1.f, 1.f, 1.f]``) + + - type: ``List[float]`` +- **line_width**: line width for geometry made of lines (default: ``1.0``) + + - type: ``float`` +- **point_size**: point size for geometry made of points (default: ``1.0``) + + - type: ``float`` +- **text**: array of text strings, used when ``type`` is text (default: ``[]``) + + - type: ``List[str]`` +- **depth_map_render_mode**: depth map render mode (default: ``points``) + + - type: ``str`` + - possible values: + + - **points**: render as points + - **lines**: render as lines + - **triangles**: render as triangles + + +1. Displaying Color Images + + Image data can either be on host or device (GPU). Multiple image formats are supported + + - R 8 bit unsigned + - R 16 bit unsigned + - R 16 bit float + - R 32 bit unsigned + - R 32 bit float + - RGB 8 bit unsigned + - BGR 8 bit unsigned + - RGBA 8 bit unsigned + - BGRA 8 bit unsigned + - RGBA 16 bit unsigned + - RGBA 16 bit float + - RGBA 32 bit float + + When the ``type`` parameter is set to ``color_lut`` the final color is looked up using the values + from the ``color_lut`` parameter. For color lookups these image formats are supported + + - R 8 bit unsigned + - R 16 bit unsigned + - R 32 bit unsigned + +2. Drawing Geometry + + In all cases, ``x`` and ``y`` are normalized coordinates in the range ``[0, 1]``. The ``x`` and ``y`` + correspond to the horizontal and vertical axes of the display, respectively. The origin ``(0, + 0)`` is at the top left of the display. + Geometric primitives outside of the visible area are clipped. 
+ Coordinate arrays are expected to have the shape ``(N, C)`` where ``N`` is the coordinate count + and ``C`` is the component count for each coordinate. + + - Points are defined by a ``(x, y)`` coordinate pair. + - Lines are defined by a set of two ``(x, y)`` coordinate pairs. + - Line strips are defined by a sequence of ``(x, y)`` coordinate pairs. The first two + coordinates define the first line, each additional coordinate adds a line connecting to the + previous coordinate. + - Triangles are defined by a set of three ``(x, y)`` coordinate pairs. + - Crosses are defined by ``(x, y, size)`` tuples. ``size`` specifies the size of the cross in the + ``x`` direction and is optional, if omitted it's set to ``0.05``. The size in the ``y`` direction + is calculated using the aspect ratio of the window to make the crosses square. + - Rectangles (bounding boxes) are defined by a pair of 2-tuples defining the upper-left and + lower-right coordinates of a box: ``(x1, y1), (x2, y2)``. + - Ovals are defined by ``(x, y, size_x, size_y)`` tuples. ``size_x`` and ``size_y`` are optional, if + omitted they are set to ``0.05``. + - Texts are defined by ``(x, y, size)`` tuples. ``size`` specifies the size of the text in ``y`` + direction and is optional, if omitted it's set to ``0.05``. The size in the ``x`` direction is + calculated using the aspect ratio of the window. The index of each coordinate references a + text string from the ``text`` parameter and the index is clamped to the size of the text + array. For example, if there is one item set for the ``text`` parameter, e.g. + ``text=["my_text"]`` and three coordinates, then ``my_text`` is rendered three times. If + ``text=["first text", "second text"]`` and three coordinates are specified, then ``first text`` + is rendered at the first coordinate, ``second text`` at the second coordinate and then ``second + text`` again at the third coordinate. The ``text`` string array is fixed and can't be changed + after initialization. 
To hide text which should not be displayed, specify coordinates + greater than ``(1.0, 1.0)`` for the text item, the text is then clipped away. + - 3D Points are defined by a ``(x, y, z)`` coordinate tuple. + - 3D Lines are defined by a set of two ``(x, y, z)`` coordinate tuples. + - 3D Line strips are defined by a sequence of ``(x, y, z)`` coordinate tuples. The first two + coordinates define the first line, each additional coordinate adds a line connecting to the + previous coordinate. + - 3D Triangles are defined by a set of three ``(x, y, z)`` coordinate tuples. + +3. Displaying Depth Maps + + When ``type`` is ``depth_map`` the provided data is interpreted as a rectangular array of depth + values. Additionally a 2d array with a color value for each point in the grid can be specified + by setting ``type`` to ``depth_map_color``. + + The type of geometry drawn can be selected by setting ``depth_map_render_mode``. + + Depth maps are rendered in 3D and support camera movement. The camera is controlled using the + mouse: + + - Orbit (LMB) + - Pan (LMB + CTRL | MMB) + - Dolly (LMB + SHIFT | RMB | Mouse wheel) + - Look Around (LMB + ALT | LMB + CTRL + SHIFT) + - Zoom (Mouse wheel + SHIFT) + +4. Output + + By default a window is opened to display the rendering, but the extension can also be run in + headless mode with the ``headless`` parameter. + + Using a display in exclusive mode is also supported with the ``use_exclusive_display`` + parameter. This reduces the latency by avoiding the desktop compositor. + + The rendered framebuffer can be output to ``render_buffer_output``. )doc") PYDOC(initialize, R"doc( diff --git a/python/holoscan/operators/inference/inference.cpp b/python/holoscan/operators/inference/inference.cpp index bca80eb0..32fb13eb 100644 --- a/python/holoscan/operators/inference/inference.cpp +++ b/python/holoscan/operators/inference/inference.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. 
All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -209,7 +209,7 @@ PYBIND11_MODULE(_inference, m) { "is_engine_path"_a = false, "cuda_stream_pool"_a = py::none(), "name"_a = "inference"s, - doc::InferenceOp::doc_InferenceOp_python) + doc::InferenceOp::doc_InferenceOp) .def("initialize", &InferenceOp::initialize, doc::InferenceOp::doc_initialize) .def("setup", &InferenceOp::setup, "spec"_a, doc::InferenceOp::doc_setup); diff --git a/python/holoscan/operators/inference/pydoc.hpp b/python/holoscan/operators/inference/pydoc.hpp index d379d7c8..e80ddb14 100644 --- a/python/holoscan/operators/inference/pydoc.hpp +++ b/python/holoscan/operators/inference/pydoc.hpp @@ -24,33 +24,36 @@ namespace holoscan::doc::InferenceOp { +// PyInferenceOp Constructor PYDOC(InferenceOp, R"doc( Inference operator. -)doc") -// PyInferenceOp_python Constructor -PYDOC(InferenceOp_python, R"doc( -Inference operator. +**==Named Inputs==** + + receivers : multi-receiver accepting nvidia::gxf::Tensor(s) + Any number of upstream ports may be connected to this ``receivers`` port. The operator will + search across all messages for tensors matching those specified in ``in_tensor_names``. + These are the set of input tensors used by the models in ``inference_map``. -Named inputs: - receivers: multi-receiver accepting nvidia::gxf::Tensor(s) - Any number of upstream ports may be connected to this `receivers` port. The operator will - search across all messages for tensors matching those specified in `in_tensor_names`. - These are the set of input tensors used by the models in `inference_map`. +**==Named Outputs==** -Named outputs: - transmitter: nvidia::gxf::Tensor(s) + transmitter : nvidia::gxf::Tensor(s) A message containing tensors corresponding to the inference results from all models will be emitted. 
The names of the tensors transmitted correspond to those in - `out_tensor_names`. + ``out_tensor_names``. + + +For more details on ``InferenceOp`` parameters, see +[Customizing the Inference Operator](https://docs.nvidia.com/holoscan/sdk-user-guide/examples/byom.html#customizing-the-inference-operator) +or refer to [Inference](https://docs.nvidia.com/holoscan/sdk-user-guide/inference.html). Parameters ---------- -fragment : holoscan.core.Fragment +fragment : holoscan.core.Fragment (constructor only) The fragment that the operator belongs to. backend : {"trt", "onnxrt", "torch"} - Backend to use for inference. Set "trt" for TensorRT, "torch" for LibTorch and "onnxrt" for the - ONNX runtime. + Backend to use for inference. Set ``"trt"`` for TensorRT, ``"torch"`` for LibTorch and + ``"onnxrt"`` for the ONNX runtime. allocator : holoscan.resources.Allocator Memory allocator to use for the output. inference_map : holoscan.operators.InferenceOp.DataVecMap @@ -61,30 +64,31 @@ pre_processor_map : holoscan.operators.InferenceOp::DataVecMap Pre processed data to model map. device_map : holoscan.operators.InferenceOp.DataMap, optional Mapping of model to GPU ID for inference. -backend_map: holoscan.operators.InferenceOp.DataMap, optional - Mapping of model to backend type for inference. Backend options: "trt" or "torch" +backend_map : holoscan.operators.InferenceOp.DataMap, optional + Mapping of model to backend type for inference. Backend options: ``"trt"`` or ``"torch"`` in_tensor_names : sequence of str, optional Input tensors. out_tensor_names : sequence of str, optional Output tensors. infer_on_cpu : bool, optional - Whether to run the computation on the CPU instead of GPU. + Whether to run the computation on the CPU instead of GPU. Default value is ``False``. parallel_inference : bool, optional - Whether to enable parallel execution. + Whether to enable parallel execution. Default value is ``True``. 
input_on_cuda : bool, optional - Whether the input buffer is on the GPU. + Whether the input buffer is on the GPU. Default value is ``True``. output_on_cuda : bool, optional - Whether the output buffer is on the GPU. + Whether the output buffer is on the GPU. Default value is ``True``. transmit_on_cuda : bool, optional - Whether to transmit the message on the GPU. + Whether to transmit the message on the GPU. Default value is ``True``. enable_fp16 : bool, optional - Use 16-bit floating point computations. + Use 16-bit floating point computations. Default value is ``False``. is_engine_path : bool, optional - Whether the input model path mapping is for trt engine files + Whether the input model path mapping is for trt engine files. Default value is ``False``. cuda_stream_pool : holoscan.resources.CudaStreamPool, optional - `holoscan.resources.CudaStreamPool` instance to allocate CUDA streams. -name : str, optional - The name of the operator. + ``holoscan.resources.CudaStreamPool`` instance to allocate CUDA streams. Default value is + ``None``. +name : str, optional (constructor only) + The name of the operator. Default value is ``"inference"``. )doc") PYDOC(initialize, R"doc( diff --git a/python/holoscan/operators/inference_processor/inference_processor.cpp b/python/holoscan/operators/inference_processor/inference_processor.cpp index 6ef086ff..d076f671 100644 --- a/python/holoscan/operators/inference_processor/inference_processor.cpp +++ b/python/holoscan/operators/inference_processor/inference_processor.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -185,7 +185,7 @@ PYBIND11_MODULE(_inference_processor, m) { "cuda_stream_pool"_a = py::none(), "config_path"_a = ""s, "name"_a = "postprocessor"s, - doc::InferenceProcessorOp::doc_InferenceProcessorOp_python) + doc::InferenceProcessorOp::doc_InferenceProcessorOp) .def("initialize", &InferenceProcessorOp::initialize, doc::InferenceProcessorOp::doc_initialize) diff --git a/python/holoscan/operators/inference_processor/pydoc.hpp b/python/holoscan/operators/inference_processor/pydoc.hpp index f25bbd49..1cb0f482 100644 --- a/python/holoscan/operators/inference_processor/pydoc.hpp +++ b/python/holoscan/operators/inference_processor/pydoc.hpp @@ -24,30 +24,28 @@ namespace holoscan::doc::InferenceProcessorOp { +// PyInferenceProcessorOp Constructor PYDOC(InferenceProcessorOp, R"doc( Holoinfer Processing operator. -)doc") -// PyInferenceProcessorOp Constructor -PYDOC(InferenceProcessorOp_python, R"doc( -Holoinfer Processing operator. +**==Named Inputs==** -Named inputs: - receivers: multi-receiver accepting nvidia::gxf::Tensor(s) - Any number of upstream ports may be connected to this `receivers` port. The operator will - search across all messages for tensors matching those specified in `in_tensor_names`. + receivers : multi-receiver accepting nvidia::gxf::Tensor(s) + Any number of upstream ports may be connected to this ``receivers`` port. The operator will + search across all messages for tensors matching those specified in ``in_tensor_names``. These are the set of input tensors used by the processing operations specified in - `process_map`. + ``process_map``. + +**==Named Outputs==** -Named outputs: - transmitter: nvidia::gxf::Tensor(s) + transmitter : nvidia::gxf::Tensor(s) A message containing tensors corresponding to the processed results from operations will be emitted. The names of the tensors transmitted correspond to those in - `out_tensor_names`. 
+ ``out_tensor_names``. Parameters ---------- -fragment : holoscan.core.Fragment +fragment : holoscan.core.Fragment (constructor only) The fragment that the operator belongs to. allocator : holoscan.resources.Allocator Memory allocator to use for the output. @@ -60,19 +58,21 @@ in_tensor_names : sequence of str, optional out_tensor_names : sequence of str, optional Names of output tensors in the order to be fed into the operator. input_on_cuda : bool, optional - Whether the input buffer is on the GPU. + Whether the input buffer is on the GPU. Default value is ``False``. output_on_cuda : bool, optional - Whether the output buffer is on the GPU. + Whether the output buffer is on the GPU. Default value is ``False``. transmit_on_cuda : bool, optional - Whether to transmit the message on the GPU. + Whether to transmit the message on the GPU. Default value is ``False``. cuda_stream_pool : holoscan.resources.CudaStreamPool, optional - `holoscan.resources.CudaStreamPool` instance to allocate CUDA streams. + ``holoscan.resources.CudaStreamPool`` instance to allocate CUDA streams. + Default value is ``None``. config_path : str, optional - File path to the config file. + File path to the config file. Default value is ``""``. disable_transmitter : bool, optional If ``True``, disable the transmitter output port of the operator. -name : str, optional - The name of the operator. + Default value is ``False``. +name : str, optional (constructor only) + The name of the operator. Default value is ``"postprocessor"``. )doc") PYDOC(initialize, R"doc( diff --git a/python/holoscan/operators/ping_rx/__init__.py b/python/holoscan/operators/ping_rx/__init__.py index 2a9a4d3d..cd3b8f07 100644 --- a/python/holoscan/operators/ping_rx/__init__.py +++ b/python/holoscan/operators/ping_rx/__init__.py @@ -21,12 +21,13 @@ class PingRxOp(Operator): """Simple receiver operator. - Named inputs: - in: any - A received value. - This is an example of a native operator with one input port. 
On each tick, it receives an integer from the "in" port. + + **==Named Inputs==** + + in : any + A received value. """ def __init__(self, fragment, *args, **kwargs): diff --git a/python/holoscan/operators/ping_tx/__init__.py b/python/holoscan/operators/ping_tx/__init__.py index a7206b39..063b21f2 100644 --- a/python/holoscan/operators/ping_tx/__init__.py +++ b/python/holoscan/operators/ping_tx/__init__.py @@ -21,12 +21,13 @@ class PingTxOp(Operator): """Simple transmitter operator. - Named outputs: - out: int + On each tick, it transmits an integer to the "out" port. + + **==Named Outputs==** + + out : int An index value that increments by one on each call to `compute`. The starting value is 1. - - On each tick, it transmits an integer to the "out" port. """ def __init__(self, fragment, *args, **kwargs): diff --git a/python/holoscan/operators/segmentation_postprocessor/pydoc.hpp b/python/holoscan/operators/segmentation_postprocessor/pydoc.hpp index 034dc54d..40faacd1 100644 --- a/python/holoscan/operators/segmentation_postprocessor/pydoc.hpp +++ b/python/holoscan/operators/segmentation_postprocessor/pydoc.hpp @@ -24,42 +24,40 @@ namespace holoscan::doc::SegmentationPostprocessorOp { +// PySegmentationPostprocessorOp Constructor PYDOC(SegmentationPostprocessorOp, R"doc( Operator carrying out post-processing operations on segmentation outputs. -)doc") -// PySegmentationPostprocessorOp Constructor -PYDOC(SegmentationPostprocessorOp_python, R"doc( -Operator carrying out post-processing operations on segmentation outputs. +**==Named Inputs==** -Named inputs: - in_tensor: nvidia::gxf::Tensor - Expects a message containing a 32-bit floating point tensor with name `in_tensor_name`. + in_tensor : nvidia::gxf::Tensor + Expects a message containing a 32-bit floating point tensor with name ``in_tensor_name``. The expected data layout of this tensor is HWC, NCHW or NHWC format as specified via - `data_format`. + ``data_format``. 
+ +**==Named Outputs==** -Named outputs: - out_tensor: nvidia::gxf::Tensor + out_tensor : nvidia::gxf::Tensor Emits a message containing a tensor named "out_tensor" that contains the segmentation labels. This tensor will have unsigned 8-bit integer data type and shape (H, W, 1). Parameters ---------- -fragment : holoscan.core.Fragment +fragment : holoscan.core.Fragment (constructor only) The fragment that the operator belongs to. allocator : holoscan.resources.Allocator Memory allocator to use for the output. in_tensor_name : str, optional - Name of the input tensor. + Name of the input tensor. Default value is ``""``. network_output_type : str, optional - Network output type (e.g. 'softmax'). + Network output type (e.g. 'softmax'). Default value is ``"softmax"``. data_format : str, optional - Data format of network output. + Data format of network output. Default value is ``"hwc"``. cuda_stream_pool : holoscan.resources.CudaStreamPool, optional - `holoscan.resources.CudaStreamPool` instance to allocate CUDA streams. - -name : str, optional - The name of the operator. + ``holoscan.resources.CudaStreamPool`` instance to allocate CUDA streams. + Default value is ``None``. +name : str, optional (constructor only) + The name of the operator. Default value is ``"segmentation_postprocessor"``. )doc") PYDOC(initialize, R"doc( diff --git a/python/holoscan/operators/segmentation_postprocessor/segmentation_postprocessor.cpp b/python/holoscan/operators/segmentation_postprocessor/segmentation_postprocessor.cpp index 67a9cff8..b936767b 100644 --- a/python/holoscan/operators/segmentation_postprocessor/segmentation_postprocessor.cpp +++ b/python/holoscan/operators/segmentation_postprocessor/segmentation_postprocessor.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -113,7 +113,7 @@ PYBIND11_MODULE(_segmentation_postprocessor, m) { "data_format"_a = "hwc"s, "cuda_stream_pool"_a = py::none(), "name"_a = "segmentation_postprocessor"s, - doc::SegmentationPostprocessorOp::doc_SegmentationPostprocessorOp_python) + doc::SegmentationPostprocessorOp::doc_SegmentationPostprocessorOp) .def("setup", &SegmentationPostprocessorOp::setup, "spec"_a, diff --git a/python/holoscan/operators/v4l2_video_capture/CMakeLists.txt b/python/holoscan/operators/v4l2_video_capture/CMakeLists.txt index 7e55234a..cbc7e321 100644 --- a/python/holoscan/operators/v4l2_video_capture/CMakeLists.txt +++ b/python/holoscan/operators/v4l2_video_capture/CMakeLists.txt @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -18,4 +18,4 @@ holoscan_pybind11_module(v4l2_video_capture ) target_link_libraries(v4l2_video_capture_python PUBLIC holoscan::ops::v4l2 -) \ No newline at end of file +) diff --git a/python/holoscan/operators/v4l2_video_capture/pydoc.hpp b/python/holoscan/operators/v4l2_video_capture/pydoc.hpp index f61a1574..d062dbe1 100644 --- a/python/holoscan/operators/v4l2_video_capture/pydoc.hpp +++ b/python/holoscan/operators/v4l2_video_capture/pydoc.hpp @@ -15,8 +15,8 @@ * limitations under the License. 
*/ -#ifndef PYHOLOHUB_OPERATORS_V4L2_VIDEO_CAPTURE_PYDOC_HPP -#define PYHOLOHUB_OPERATORS_V4L2_VIDEO_CAPTURE_PYDOC_HPP +#ifndef HOLOSCAN_OPERATORS_V4L2_VIDEO_CAPTURE_PYDOC_HPP +#define HOLOSCAN_OPERATORS_V4L2_VIDEO_CAPTURE_PYDOC_HPP #include @@ -24,49 +24,65 @@ namespace holoscan::doc::V4L2VideoCaptureOp { -// Constructor +// V4L2VideoCaptureOp Constructor PYDOC(V4L2VideoCaptureOp, R"doc( Operator to get a video stream from a V4L2 source. -)doc") - -// V4L2VideoCaptureOp Constructor -PYDOC(V4L2VideoCaptureOp_python, R"doc( -Operator to get a video stream from a V4L2 source (e.g. Built-in HDMI capture card or USB camera) https://www.kernel.org/doc/html/v4.9/media/uapi/v4l/v4l2.html Inputs a video stream from a V4L2 node, including USB cameras and HDMI IN. + - Input stream is on host. If no pixel format is specified in the yaml configuration file, the - pixel format will be automatically selected. However, only `AB24` and `YUYV` are then supported. + pixel format will be automatically selected. However, only ``AB24`` and ``YUYV`` are then + supported. If a pixel format is specified in the yaml file, then this format will be used. However, note - that the operator then expects that this format can be encoded as RGBA32. If not, the behaviour + that the operator then expects that this format can be encoded as RGBA32. If not, the behavior is undefined. - Output stream is on host. Always RGBA32 at this time. -Use `holoscan.operators.FormatConverterOp` to move data from the host to a GPU device. +Use ``holoscan.operators.FormatConverterOp`` to move data from the host to a GPU device. + +**==Named Outputs==** -Named output: - signal: nvidia::gxf::VideoBuffer - Emits a message containing a video buffer on the host with format GXF_VIDEO_FORMAT_RGBA. + signal : nvidia::gxf::VideoBuffer + A message containing a video buffer on the host with format GXF_VIDEO_FORMAT_RGBA. 
Parameters ---------- -fragment : Fragment +fragment : Fragment (constructor only) The fragment that the operator belongs to. -allocator : ``holoscan.resources.Allocator`` +allocator : holoscan.resources.Allocator Memory allocator to use for the output. device : str - The device to target (e.g. "/dev/video0" for device 0) + The device to target (e.g. "/dev/video0" for device 0). Default value is ``"/dev/video0"``. width : int, optional - Width of the video stream. + Width of the video stream. Default value is ``0``. height : int, optional - Height of the video stream. + Height of the video stream. Default value is ``0``. num_buffers : int, optional - Number of V4L2 buffers to use. + Number of V4L2 buffers to use. Default value is ``4``. pixel_format : str - Video stream pixel format (little endian four character code (fourcc)) -name : str, optional - The name of the operator. + Video stream pixel format (little endian four character code (fourcc)). + Default value is ``"auto"``. +name : str, optional (constructor only) + The name of the operator. Default value is ``"v4l2_video_capture"``. +exposure_time : int, optional + Exposure time of the camera sensor in multiples of 100 μs (e.g. setting exposure_time to 100 is + 10 ms). + Default: auto exposure, or camera sensor default. + Use `v4l2-ctl -d /dev/ -L` for a range of values supported by your device. + When not set by the user, V4L2_CID_EXPOSURE_AUTO is set to V4L2_EXPOSURE_AUTO, or to + V4L2_EXPOSURE_APERTURE_PRIORITY if the former is not supported. + When set by the user, V4L2_CID_EXPOSURE_AUTO is set to V4L2_EXPOSURE_SHUTTER_PRIORITY, or to + V4L2_EXPOSURE_MANUAL if the former is not supported. The provided value is then used to set + V4L2_CID_EXPOSURE_ABSOLUTE. +gain : int, optional + Gain of the camera sensor. + Default: auto gain, or camera sensor default. + Use `v4l2-ctl -d /dev/ -L` for a range of values supported by your device. + When not set by the user, V4L2_CID_AUTOGAIN is set to true (if supported). 
+ When set by the user, V4L2_CID_AUTOGAIN is set to false (if supported). The provided value is + then used to set V4L2_CID_GAIN. )doc") PYDOC(setup, R"doc( @@ -87,4 +103,4 @@ and uses a light-weight initialization. } // namespace holoscan::doc::V4L2VideoCaptureOp -#endif // PYHOLOHUB_OPERATORS_V4L2_VIDEO_CAPTURE_PYDOC_HPP +#endif /* HOLOSCAN_OPERATORS_V4L2_VIDEO_CAPTURE_PYDOC_HPP */ diff --git a/python/holoscan/operators/v4l2_video_capture/v4l2_video_capture.cpp b/python/holoscan/operators/v4l2_video_capture/v4l2_video_capture.cpp index 577ff8c2..f21f89f9 100644 --- a/python/holoscan/operators/v4l2_video_capture/v4l2_video_capture.cpp +++ b/python/holoscan/operators/v4l2_video_capture/v4l2_video_capture.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -16,7 +16,7 @@ */ #include -#include // for unordered_map -> dict, etc. 
+#include // for std::optional support #include #include @@ -61,13 +61,22 @@ class PyV4L2VideoCaptureOp : public V4L2VideoCaptureOp { const std::string& device = "/dev/video0"s, uint32_t width = 0, uint32_t height = 0, uint32_t num_buffers = 4, const std::string& pixel_format = "auto", - const std::string& name = "v4l2_video_capture") + const std::string& name = "v4l2_video_capture", + std::optional exposure_time = std::nullopt, + std::optional gain = std::nullopt) : V4L2VideoCaptureOp(ArgList{Arg{"allocator", allocator}, Arg{"device", device}, Arg{"width", width}, Arg{"height", height}, Arg{"numBuffers", num_buffers}, Arg{"pixel_format", pixel_format}}) { + if (exposure_time.has_value()) { + this->add_arg(Arg{"exposure_time", exposure_time.value() }); + } + if (gain.has_value()) { + this->add_arg(Arg{"gain", gain.value() }); + } + name_ = name; fragment_ = fragment; spec_ = std::make_shared(fragment); @@ -104,7 +113,9 @@ PYBIND11_MODULE(_v4l2_video_capture, m) { uint32_t, uint32_t, const std::string&, - const std::string&>(), + const std::string&, + std::optional, + std::optional>(), "fragment"_a, "allocator"_a, "device"_a = "0"s, @@ -113,7 +124,9 @@ PYBIND11_MODULE(_v4l2_video_capture, m) { "num_buffers"_a = 4, "pixel_format"_a = "auto"s, "name"_a = "v4l2_video_capture"s, - doc::V4L2VideoCaptureOp::doc_V4L2VideoCaptureOp_python) + "exposure_time"_a = py::none(), + "gain"_a = py::none(), + doc::V4L2VideoCaptureOp::doc_V4L2VideoCaptureOp) .def("initialize", &V4L2VideoCaptureOp::initialize, doc::V4L2VideoCaptureOp::doc_initialize) .def("setup", &V4L2VideoCaptureOp::setup, "spec"_a, doc::V4L2VideoCaptureOp::doc_setup); } // PYBIND11_MODULE NOLINT diff --git a/python/holoscan/operators/video_stream_recorder/pydoc.hpp b/python/holoscan/operators/video_stream_recorder/pydoc.hpp index f9a23095..70ae4fd9 100644 --- a/python/holoscan/operators/video_stream_recorder/pydoc.hpp +++ b/python/holoscan/operators/video_stream_recorder/pydoc.hpp @@ -24,30 +24,30 @@ namespace 
holoscan::doc::VideoStreamRecorderOp { +// PyVideoStreamRecorderOp Constructor PYDOC(VideoStreamRecorderOp, R"doc( -Operator class to record the video stream to a file. -)doc") +Operator class to record a video stream to a file. -// PyVideoStreamRecorderOp Constructor -PYDOC(VideoStreamRecorderOp_python, R"doc( -Operator class to record the video stream to a file. +**==Named Inputs==** -Named inputs: - input: nvidia::gxf::Tensor - A message containing a video frame to serialize to disk. + input : nvidia::gxf::Tensor + A message containing a video frame to serialize to disk. The input tensor can be on either + the CPU or GPU. This data location will be recorded as part of the metadata serialized to + disk and if the data is later read back in via `VideoStreamReplayerOp`, the tensor output + of that operator will be on the same device (CPU or GPU). Parameters ---------- -fragment : holoscan.core.Fragment +fragment : holoscan.core.Fragment (constructor only) The fragment that the operator belongs to. directory : str Directory path for storing files. basename : str User specified file name without extension. flush_on_tick : bool, optional - Flushes output buffer on every tick when ``True``. -name : str, optional - The name of the operator. + Flushes output buffer on every tick when ``True``. Default value is ``False``. +name : str, optional (constructor only) + The name of the operator. Default value is ``"video_stream_recorder"``. )doc") PYDOC(initialize, R"doc( diff --git a/python/holoscan/operators/video_stream_recorder/video_stream_recorder.cpp b/python/holoscan/operators/video_stream_recorder/video_stream_recorder.cpp index 8697fd1c..df0ebee9 100644 --- a/python/holoscan/operators/video_stream_recorder/video_stream_recorder.cpp +++ b/python/holoscan/operators/video_stream_recorder/video_stream_recorder.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -96,7 +96,7 @@ PYBIND11_MODULE(_video_stream_recorder, m) { "basename"_a, "flush_on_tick"_a = false, "name"_a = "recorder"s, - doc::VideoStreamRecorderOp::doc_VideoStreamRecorderOp_python) + doc::VideoStreamRecorderOp::doc_VideoStreamRecorderOp) .def("initialize", &VideoStreamRecorderOp::initialize, doc::VideoStreamRecorderOp::doc_initialize) diff --git a/python/holoscan/operators/video_stream_replayer/pydoc.hpp b/python/holoscan/operators/video_stream_replayer/pydoc.hpp index aec0590c..cd7dee32 100644 --- a/python/holoscan/operators/video_stream_replayer/pydoc.hpp +++ b/python/holoscan/operators/video_stream_replayer/pydoc.hpp @@ -24,44 +24,43 @@ namespace holoscan::doc::VideoStreamReplayerOp { +// PyVideoStreamReplayerOp Constructor PYDOC(VideoStreamReplayerOp, R"doc( Operator class to replay a video stream from a file. -)doc") -// PyVideoStreamReplayerOp Constructor -PYDOC(VideoStreamReplayerOp_python, R"doc( -Operator class to replay a video stream from a file. +**==Named Outputs==** -Named output: - output: nvidia::gxf::Tensor - A message containing a video frame deserialized from disk. + output : nvidia::gxf::Tensor + A message containing a video frame deserialized from disk. Depending on the metadata in the + file being read, this tensor could be on either CPU or GPU. For the data used in examples + distributed with the SDK, the tensor will be an unnamed GPU tensor (name == ""). Parameters ---------- -fragment : holoscan.core.Fragment +fragment : holoscan.core.Fragment (constructor only) The fragment that the operator belongs to. directory : str Directory path for reading files from. basename : str User specified file name without extension. batch_size : int, optional - Number of entities to read and publish for one tick. 
+ Number of entities to read and publish for one tick. Default value is ``1``. ignore_corrupted_entities : bool, optional If an entity could not be deserialized, it is ignored by default; - otherwise a failure is generated. + otherwise a failure is generated. Default value is ``True``. frame_rate : float, optional Frame rate to replay. If zero value is specified, it follows timings in - timestamps. + timestamps. Default value is ``0.0``. realtime : bool, optional - Playback video in realtime, based on frame_rate or timestamps. + Playback video in realtime, based on frame_rate or timestamps. Default value is ``True``. repeat : bool, optional - Repeat video stream in a loop. + Repeat video stream in a loop. Default value is ``False``. count : int, optional Number of frame counts to playback. If zero value is specified, it is ignored. If the count is less than the number of frames in the video, it - would finish early. -name : str, optional - The name of the operator. + would finish early. Default value is ``0``. +name : str, optional (constructor only) + The name of the operator. Default value is ``"video_stream_replayer"``. )doc") PYDOC(initialize, R"doc( diff --git a/python/holoscan/operators/video_stream_replayer/video_stream_replayer.cpp b/python/holoscan/operators/video_stream_replayer/video_stream_replayer.cpp index 85b4a21c..01ee5217 100644 --- a/python/holoscan/operators/video_stream_replayer/video_stream_replayer.cpp +++ b/python/holoscan/operators/video_stream_replayer/video_stream_replayer.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -117,7 +117,7 @@ PYBIND11_MODULE(_video_stream_replayer, m) { "repeat"_a = false, "count"_a = 0UL, "name"_a = "format_converter"s, - doc::VideoStreamReplayerOp::doc_VideoStreamReplayerOp_python) + doc::VideoStreamReplayerOp::doc_VideoStreamReplayerOp) .def("initialize", &VideoStreamReplayerOp::initialize, doc::VideoStreamReplayerOp::doc_initialize) diff --git a/python/holoscan/resources/CMakeLists.txt b/python/holoscan/resources/CMakeLists.txt index e567a036..931c8d54 100644 --- a/python/holoscan/resources/CMakeLists.txt +++ b/python/holoscan/resources/CMakeLists.txt @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,6 +21,6 @@ holoscan_pybind11_module(resources receivers.cpp resources.cpp serialization_buffers.cpp + std_entity_serializer.cpp transmitters.cpp - video_stream_serializer.cpp ) diff --git a/python/holoscan/resources/__init__.py b/python/holoscan/resources/__init__.py index f32e50d8..92b5bcce 100644 --- a/python/holoscan/resources/__init__.py +++ b/python/holoscan/resources/__init__.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
# SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -28,6 +28,7 @@ holoscan.resources.Receiver holoscan.resources.SerializationBuffer holoscan.resources.StdComponentSerializer + holoscan.resources.StdEntitySerializer holoscan.resources.Transmitter holoscan.resources.UnboundedAllocator holoscan.resources.UcxComponentSerializer @@ -36,7 +37,6 @@ holoscan.resources.UcxReceiver holoscan.resources.UcxSerializationBuffer holoscan.resources.UcxTransmitter - holoscan.resources.VideoStreamSerializer """ from ._resources import ( @@ -52,6 +52,7 @@ Receiver, SerializationBuffer, StdComponentSerializer, + StdEntitySerializer, Transmitter, UcxComponentSerializer, UcxEntitySerializer, @@ -60,7 +61,6 @@ UcxSerializationBuffer, UcxTransmitter, UnboundedAllocator, - VideoStreamSerializer, ) __all__ = [ @@ -76,6 +76,7 @@ "Receiver", "SerializationBuffer", "StdComponentSerializer", + "StdEntitySerializer", "Transmitter", "UcxComponentSerializer", "UcxEntitySerializer", @@ -84,5 +85,4 @@ "UcxSerializationBuffer", "UcxTransmitter", "UnboundedAllocator", - "VideoStreamSerializer", ] diff --git a/python/holoscan/resources/allocators.cpp b/python/holoscan/resources/allocators.cpp index e2453a56..6661c3ea 100644 --- a/python/holoscan/resources/allocators.cpp +++ b/python/holoscan/resources/allocators.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -78,7 +78,6 @@ class PyBlockMemoryPool : public BlockMemoryPool { fragment_ = fragment; spec_ = std::make_shared(fragment); setup(*spec_.get()); - initialize(); } }; @@ -88,8 +87,8 @@ class PyCudaStreamPool : public CudaStreamPool { using CudaStreamPool::CudaStreamPool; // Define a constructor that fully initializes the object. - PyCudaStreamPool(Fragment* fragment, int32_t dev_id, uint32_t stream_flags, - int32_t stream_priority, uint32_t reserved_size, uint32_t max_size, + PyCudaStreamPool(Fragment* fragment, int32_t dev_id = 0, uint32_t stream_flags = 0, + int32_t stream_priority = 0, uint32_t reserved_size = 1, uint32_t max_size = 0, const std::string& name = "cuda_stream_pool") : CudaStreamPool(ArgList{ Arg{"dev_id", dev_id}, @@ -102,7 +101,6 @@ class PyCudaStreamPool : public CudaStreamPool { fragment_ = fragment; spec_ = std::make_shared(fragment); setup(*spec_.get()); - initialize(); } }; @@ -118,7 +116,6 @@ class PyUnboundedAllocator : public UnboundedAllocator { fragment_ = fragment; spec_ = std::make_shared(fragment); setup(*spec_.get()); - initialize(); } }; @@ -135,7 +132,8 @@ void init_allocators(py::module_& m) { "gxf_typename", &Allocator::gxf_typename, doc::Allocator::doc_gxf_typename) .def("is_available", &Allocator::is_available, "size"_a, doc::Allocator::doc_is_available) .def("allocate", &Allocator::allocate, "size"_a, "type"_a, doc::Allocator::doc_allocate) - .def("free", &Allocator::free, "pointer"_a, doc::Allocator::doc_free); + .def("free", &Allocator::free, "pointer"_a, doc::Allocator::doc_free) + .def_property_readonly("block_size", &Allocator::block_size, doc::Allocator::doc_block_size); // TODO(grelee): for allocate / free how does std::byte* get cast to/from Python? 
py::class_>( @@ -157,11 +155,11 @@ void init_allocators(py::module_& m) { .def( py::init(), "fragment"_a, - "dev_id"_a, - "stream_flags"_a, - "stream_priority"_a, - "reserved_size"_a, - "max_size"_a, + "dev_id"_a = 0, + "stream_flags"_a = 0u, + "stream_priority"_a = 0, + "reserved_size"_a = 1u, + "max_size"_a = 0u, "name"_a = "cuda_stream_pool"s, doc::CudaStreamPool::doc_CudaStreamPool_python) .def_property_readonly( diff --git a/python/holoscan/resources/allocators_pydoc.hpp b/python/holoscan/resources/allocators_pydoc.hpp index 7c35e41e..30d203cc 100644 --- a/python/holoscan/resources/allocators_pydoc.hpp +++ b/python/holoscan/resources/allocators_pydoc.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -48,6 +48,15 @@ bool Availability of the resource. )doc") +PYDOC(block_size, R"doc( +Get the block size of the allocator. + +Returns +------- +int + The block size of the allocator. Returns 1 for byte-based allocators. +)doc") + PYDOC(allocate, R"doc( Allocate the requested amount of memory. @@ -142,15 +151,25 @@ fragment : holoscan.core.Fragment dev_id : int CUDA device ID. Specifies the device on which to create the stream pool. stream_flags : int - Flag values used in creating CUDA streams. + Flags for CUDA streams in the pool. This will be passed to CUDA's + cudaStreamCreateWithPriority [1]_ when creating the streams. The default value of 0 corresponds + to ``cudaStreamDefault``. A value of 1 corresponds to ``cudaStreamNonBlocking``, indicating + that the stream can run concurrently with work in stream 0 (default stream) and should not + perform any implicit synchronization with it. stream_priority : int - Priority values used in creating CUDA streams. 
+ Priority value for CUDA streams in the pool. This is an integer value passed to + cudaStreamCreateWithPriority [1]_. Lower numbers represent higher priorities. reserved_size : int - TODO + The number of CUDA streams to initially reserve in the pool (prior to first request). max_size : int - Maximum stream size. + The maximum number of streams that can be allocated, unlimited by default. name : str, optional The name of the stream pool. + +References +---------- +.. [1] https://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART__STREAM.html + )doc") PYDOC(gxf_typename, R"doc( diff --git a/python/holoscan/resources/clocks.cpp b/python/holoscan/resources/clocks.cpp index 4a092000..c21d043d 100644 --- a/python/holoscan/resources/clocks.cpp +++ b/python/holoscan/resources/clocks.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -83,7 +83,6 @@ class PyRealtimeClock : public RealtimeClock { fragment_ = fragment; spec_ = std::make_shared(fragment); setup(*spec_.get()); - initialize(); } /* Trampolines (need one for each virtual function) */ @@ -118,7 +117,6 @@ class PyManualClock : public ManualClock { fragment_ = fragment; spec_ = std::make_shared(fragment); setup(*spec_.get()); - initialize(); } /* Trampolines (need one for each virtual function) */ diff --git a/python/holoscan/resources/component_serializers.cpp b/python/holoscan/resources/component_serializers.cpp index 365e6b97..8246c3bd 100644 --- a/python/holoscan/resources/component_serializers.cpp +++ b/python/holoscan/resources/component_serializers.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -49,7 +49,6 @@ class PyStdComponentSerializer : public StdComponentSerializer { fragment_ = fragment; spec_ = std::make_shared(fragment); setup(*spec_.get()); - initialize(); } }; @@ -68,7 +67,6 @@ class PyUcxComponentSerializer : public UcxComponentSerializer { fragment_ = fragment; spec_ = std::make_shared(fragment); setup(*spec_.get()); - initialize(); } }; @@ -87,7 +85,6 @@ class PyUcxHoloscanComponentSerializer : public UcxHoloscanComponentSerializer { fragment_ = fragment; spec_ = std::make_shared(fragment); setup(*spec_.get()); - initialize(); } }; diff --git a/python/holoscan/resources/entity_serializers.cpp b/python/holoscan/resources/entity_serializers.cpp index ba261e6e..19179062 100644 --- a/python/holoscan/resources/entity_serializers.cpp +++ b/python/holoscan/resources/entity_serializers.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -53,7 +53,6 @@ class PyUcxEntitySerializer : public UcxEntitySerializer { fragment_ = fragment; spec_ = std::make_shared(fragment); setup(*spec_.get()); - initialize(); } }; diff --git a/python/holoscan/resources/receivers.cpp b/python/holoscan/resources/receivers.cpp index 1b6b4e1c..acab5bdf 100644 --- a/python/holoscan/resources/receivers.cpp +++ b/python/holoscan/resources/receivers.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -49,7 +49,6 @@ class PyDoubleBufferReceiver : public DoubleBufferReceiver { fragment_ = fragment; spec_ = std::make_shared(fragment); setup(*spec_.get()); - initialize(); } }; @@ -72,7 +71,6 @@ class PyUcxReceiver : public UcxReceiver { fragment_ = fragment; spec_ = std::make_shared(fragment); setup(*spec_.get()); - initialize(); } }; diff --git a/python/holoscan/resources/resources.cpp b/python/holoscan/resources/resources.cpp index 1626bd66..a524fed5 100644 --- a/python/holoscan/resources/resources.cpp +++ b/python/holoscan/resources/resources.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -32,7 +32,7 @@ void init_clocks(py::module_&); void init_serialization_buffers(py::module_&); void init_component_serializers(py::module_&); void init_entity_serializers(py::module_&); -void init_video_stream_serializer(py::module_&); +void init_std_entity_serializer(py::module_&); PYBIND11_MODULE(_resources, m) { m.doc() = R"pbdoc( @@ -58,6 +58,6 @@ PYBIND11_MODULE(_resources, m) { init_serialization_buffers(m); init_component_serializers(m); init_entity_serializers(m); - init_video_stream_serializer(m); + init_std_entity_serializer(m); } // PYBIND11_MODULE } // namespace holoscan diff --git a/python/holoscan/resources/serialization_buffers.cpp b/python/holoscan/resources/serialization_buffers.cpp index 3ae80071..1cb0b8bd 100644 --- a/python/holoscan/resources/serialization_buffers.cpp +++ b/python/holoscan/resources/serialization_buffers.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -53,7 +53,6 @@ class PySerializationBuffer : public SerializationBuffer { fragment_ = fragment; spec_ = std::make_shared(fragment); setup(*spec_.get()); - initialize(); } }; @@ -75,7 +74,6 @@ class PyUcxSerializationBuffer : public UcxSerializationBuffer { fragment_ = fragment; spec_ = std::make_shared(fragment); setup(*spec_.get()); - initialize(); } }; diff --git a/python/holoscan/resources/video_stream_serializer.cpp b/python/holoscan/resources/std_entity_serializer.cpp similarity index 51% rename from python/holoscan/resources/video_stream_serializer.cpp rename to python/holoscan/resources/std_entity_serializer.cpp index 8906ecb7..8f1f5a45 100644 --- a/python/holoscan/resources/video_stream_serializer.cpp +++ b/python/holoscan/resources/std_entity_serializer.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,11 +21,11 @@ #include #include -#include "./video_stream_serializer_pydoc.hpp" +#include "./std_entity_serializer_pydoc.hpp" #include "holoscan/core/component_spec.hpp" #include "holoscan/core/fragment.hpp" #include "holoscan/core/gxf/gxf_resource.hpp" -#include "holoscan/core/resources/gxf/video_stream_serializer.hpp" +#include "holoscan/core/resources/gxf/std_entity_serializer.hpp" using std::string_literals::operator""s; using pybind11::literals::operator""_a; @@ -34,39 +34,37 @@ namespace py = pybind11; namespace holoscan { -class PyVideoStreamSerializer : public VideoStreamSerializer { +class PyStdEntitySerializer : public StdEntitySerializer { public: /* Inherit the constructors */ - using VideoStreamSerializer::VideoStreamSerializer; + using StdEntitySerializer::StdEntitySerializer; // Define a constructor that fully initializes the object. - explicit PyVideoStreamSerializer(Fragment* fragment, - const std::string& name = "video_stream_serializer") - : VideoStreamSerializer() { + explicit PyStdEntitySerializer(Fragment* fragment, + const std::string& name = "std_entity_serializer") + : StdEntitySerializer() { name_ = name; fragment_ = fragment; spec_ = std::make_shared(fragment); setup(*spec_.get()); - initialize(); } }; -void init_video_stream_serializer(py::module_& m) { - py::class_>( - m, "VideoStreamSerializer", doc::VideoStreamSerializer::doc_VideoStreamSerializer) + std::shared_ptr>( + m, "StdEntitySerializer", doc::StdEntitySerializer::doc_StdEntitySerializer) .def(py::init(), "fragment"_a, - "name"_a = "video_stream_serializer"s, - doc::VideoStreamSerializer::doc_VideoStreamSerializer_python) + "name"_a = "std_entity_serializer"s, + doc::StdEntitySerializer::doc_StdEntitySerializer_python) .def_property_readonly("gxf_typename", - &VideoStreamSerializer::gxf_typename, - doc::VideoStreamSerializer::doc_gxf_typename) - .def("setup", 
&VideoStreamSerializer::setup, "spec"_a, doc::VideoStreamSerializer::doc_setup) - .def("initialize", - &VideoStreamSerializer::initialize, - doc::VideoStreamSerializer::doc_initialize); + &StdEntitySerializer::gxf_typename, + doc::StdEntitySerializer::doc_gxf_typename) + .def("setup", &StdEntitySerializer::setup, "spec"_a, doc::StdEntitySerializer::doc_setup) + .def( + "initialize", &StdEntitySerializer::initialize, doc::StdEntitySerializer::doc_initialize); } } // namespace holoscan diff --git a/python/holoscan/resources/video_stream_serializer_pydoc.hpp b/python/holoscan/resources/std_entity_serializer_pydoc.hpp similarity index 82% rename from python/holoscan/resources/video_stream_serializer_pydoc.hpp rename to python/holoscan/resources/std_entity_serializer_pydoc.hpp index e486c084..6905c317 100644 --- a/python/holoscan/resources/video_stream_serializer_pydoc.hpp +++ b/python/holoscan/resources/std_entity_serializer_pydoc.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -24,15 +24,15 @@ namespace holoscan::doc { -namespace VideoStreamSerializer { +namespace StdEntitySerializer { -PYDOC(VideoStreamSerializer, R"doc( -Serializer for video streams. +PYDOC(StdEntitySerializer, R"doc( +Default serializer for GXF entities. )doc") // Constructor -PYDOC(VideoStreamSerializer_python, R"doc( -Serializer for video streams. +PYDOC(StdEntitySerializer_python, R"doc( +Default serializer for GXF entities. Parameters ---------- @@ -67,7 +67,7 @@ This method is called only once when the resource is created for the first time, and uses a light-weight initialization. 
)doc") -} // namespace VideoStreamSerializer +} // namespace StdEntitySerializer } // namespace holoscan::doc diff --git a/python/holoscan/resources/transmitters.cpp b/python/holoscan/resources/transmitters.cpp index 0ac9e2f6..0a9629c8 100644 --- a/python/holoscan/resources/transmitters.cpp +++ b/python/holoscan/resources/transmitters.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -49,7 +49,6 @@ class PyDoubleBufferTransmitter : public DoubleBufferTransmitter { fragment_ = fragment; spec_ = std::make_shared(fragment); setup(*spec_.get()); - initialize(); } }; @@ -78,7 +77,6 @@ class PyUcxTransmitter : public UcxTransmitter { fragment_ = fragment; spec_ = std::make_shared(fragment); setup(*spec_.get()); - initialize(); } }; diff --git a/python/holoscan/schedulers/CMakeLists.txt b/python/holoscan/schedulers/CMakeLists.txt index 7828d3bb..a7d6c5bc 100644 --- a/python/holoscan/schedulers/CMakeLists.txt +++ b/python/holoscan/schedulers/CMakeLists.txt @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -14,6 +14,7 @@ # limitations under the License. 
holoscan_pybind11_module(schedulers + event_based_scheduler.cpp greedy_scheduler.cpp multithread_scheduler.cpp schedulers.cpp diff --git a/python/holoscan/schedulers/__init__.py b/python/holoscan/schedulers/__init__.py index 1d30ed2b..96dc74f1 100644 --- a/python/holoscan/schedulers/__init__.py +++ b/python/holoscan/schedulers/__init__.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -16,15 +16,17 @@ .. autosummary:: + holoscan.schedulers.EventBasedScheduler holoscan.schedulers.GreedyScheduler holoscan.schedulers.MultiThreadScheduler """ # must first import Clock for the std::shared_ptr arguments in the __init__ methods from ..resources import Clock # noqa -from ._schedulers import GreedyScheduler, MultiThreadScheduler +from ._schedulers import EventBasedScheduler, GreedyScheduler, MultiThreadScheduler __all__ = [ + "EventBasedScheduler", "GreedyScheduler", "MultiThreadScheduler", ] diff --git a/python/holoscan/schedulers/event_based_scheduler.cpp b/python/holoscan/schedulers/event_based_scheduler.cpp new file mode 100644 index 00000000..d0b09497 --- /dev/null +++ b/python/holoscan/schedulers/event_based_scheduler.cpp @@ -0,0 +1,113 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include + +#include +#include +#include + +#include "./event_based_scheduler_pydoc.hpp" +#include "holoscan/core/component_spec.hpp" +#include "holoscan/core/fragment.hpp" +#include "holoscan/core/gxf/gxf_component.hpp" +#include "holoscan/core/gxf/gxf_scheduler.hpp" +#include "holoscan/core/resources/gxf/clock.hpp" +#include "holoscan/core/resources/gxf/realtime_clock.hpp" +#include "holoscan/core/schedulers/gxf/event_based_scheduler.hpp" + +using std::string_literals::operator""s; +using pybind11::literals::operator""_a; + +namespace py = pybind11; + +namespace holoscan { + +/* Trampoline classes for handling Python kwargs + * + * These add a constructor that takes a Fragment for which to initialize the scheduler. + * The explicit parameter list and default arguments take care of providing a Pythonic + * kwarg-based interface with appropriate default values matching the scheduler's + * default parameters in the C++ API `setup` method. + * + * The sequence of events in this constructor is based on Fragment::make_scheduler + */ + +class PyEventBasedScheduler : public EventBasedScheduler { + public: + /* Inherit the constructors */ + using EventBasedScheduler::EventBasedScheduler; + + // Define a constructor that fully initializes the object. 
+ explicit PyEventBasedScheduler(Fragment* fragment, std::shared_ptr clock = nullptr, + int64_t worker_thread_number = 1LL, bool stop_on_deadlock = true, + int64_t max_duration_ms = -1LL, + int64_t stop_on_deadlock_timeout = 0LL, + const std::string& name = "event_based_scheduler") + : EventBasedScheduler(ArgList{Arg{"worker_thread_number", worker_thread_number}, + Arg{"stop_on_deadlock", stop_on_deadlock}, + Arg{"stop_on_deadlock_timeout", stop_on_deadlock_timeout}}) { + // max_duration_ms is an optional argument in GXF. We use a negative value in this constructor + // to indicate that the argument should not be set. + if (max_duration_ms >= 0) { this->add_arg(Arg{"max_duration_ms", max_duration_ms}); } + name_ = name; + fragment_ = fragment; + if (clock) { + this->add_arg(Arg{"clock", clock}); + } else { + this->add_arg(Arg{"clock", fragment_->make_resource("realtime_clock")}); + } + spec_ = std::make_shared(fragment); + setup(*spec_.get()); + } +}; + +void init_event_based_scheduler(py::module_& m) { + py::class_>( + m, "EventBasedScheduler", doc::EventBasedScheduler::doc_EventBasedScheduler) + .def(py::init, + int64_t, + bool, + int64_t, + int64_t, + const std::string&>(), + "fragment"_a, + py::kw_only(), + "clock"_a = py::none(), + "worker_thread_number"_a = 1LL, + "stop_on_deadlock"_a = true, + "max_duration_ms"_a = -1LL, + "stop_on_deadlock_timeout"_a = 0LL, + "name"_a = "multithread_scheduler"s, + doc::EventBasedScheduler::doc_EventBasedScheduler_python) + .def_property_readonly("clock", &EventBasedScheduler::clock) + .def_property_readonly("worker_thread_number", &EventBasedScheduler::worker_thread_number) + .def_property_readonly("max_duration_ms", &EventBasedScheduler::max_duration_ms) + .def_property_readonly("stop_on_deadlock", &EventBasedScheduler::stop_on_deadlock) + .def_property_readonly("stop_on_deadlock_timeout", + &EventBasedScheduler::stop_on_deadlock_timeout) + .def_property_readonly("gxf_typename", + &EventBasedScheduler::gxf_typename, + 
doc::EventBasedScheduler::doc_gxf_typename); +} // PYBIND11_MODULE +} // namespace holoscan diff --git a/python/holoscan/schedulers/event_based_scheduler_pydoc.hpp b/python/holoscan/schedulers/event_based_scheduler_pydoc.hpp new file mode 100644 index 00000000..18a9cceb --- /dev/null +++ b/python/holoscan/schedulers/event_based_scheduler_pydoc.hpp @@ -0,0 +1,75 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef PYHOLOSCAN_SCHEDULERS_EVENT_BASED_SCHEDULER_PYDOC_HPP +#define PYHOLOSCAN_SCHEDULERS_EVENT_BASED_SCHEDULER_PYDOC_HPP + +#include + +#include "../macros.hpp" + +namespace holoscan::doc { + +namespace EventBasedScheduler { + +// Constructor +PYDOC(EventBasedScheduler, R"doc( +Event-based multi-thread scheduler class. +)doc") + +// PyEventBasedScheduler Constructor +PYDOC(EventBasedScheduler_python, R"doc( +Event-based multi-thread scheduler + +Parameters +---------- +fragment : Fragment + The fragment the scheduler will be associated with +clock : holoscan.resources.Clock or None, optional + The clock used by the scheduler to define the flow of time. If None, a default-constructed + `holoscan.resources.RealtimeClock` will be used. +worker_thread_number : int + The number of worker threads. 
+stop_on_deadlock : bool, optional + If enabled the scheduler will stop when all entities are in a waiting state, but no periodic + entity exists to break the dead end. Should be disabled when scheduling conditions can be + changed by external actors, for example by clearing queues manually. +max_duration_ms : int, optional + The maximum duration for which the scheduler will execute (in ms). If not specified (or if a + negative value is provided), the scheduler will run until all work is done. If periodic terms + are present, this means the application will run indefinitely. +stop_on_deadlock_timeout : int, optional + The scheduler will wait this amount of time before determining that it is in deadlock + and should stop. It will reset if a job comes in during the wait. A negative value means it + will not stop on deadlock. This parameter only applies when `stop_on_deadlock=true`. +name : str, optional + The name of the scheduler. +)doc") + +PYDOC(gxf_typename, R"doc( +The GXF type name of the scheduler. + +Returns +------- +str + The GXF type name of the scheduler +)doc") + +} // namespace EventBasedScheduler +} // namespace holoscan::doc + +#endif /* PYHOLOSCAN_SCHEDULERS_EVENT_BASED_SCHEDULER_PYDOC_HPP */ diff --git a/python/holoscan/schedulers/schedulers.cpp b/python/holoscan/schedulers/schedulers.cpp index a7f5bdef..46053127 100644 --- a/python/holoscan/schedulers/schedulers.cpp +++ b/python/holoscan/schedulers/schedulers.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -37,6 +37,7 @@ namespace py = pybind11; namespace holoscan { +void init_event_based_scheduler(py::module_&); void init_greedy_scheduler(py::module_&); void init_multithread_scheduler(py::module_&); @@ -57,6 +58,7 @@ PYBIND11_MODULE(_schedulers, m) { m.attr("__version__") = "dev"; #endif + init_event_based_scheduler(m); init_greedy_scheduler(m); init_multithread_scheduler(m); } // PYBIND11_MODULE diff --git a/python/requirements.txt b/python/requirements.txt index 07c08cad..b1ec5586 100644 --- a/python/requirements.txt +++ b/python/requirements.txt @@ -1,9 +1,9 @@ -pip==23.3.2 +pip>=20.3 cupy-cuda12x==12.2 cloudpickle==2.2.1 python-on-whales==0.60.1 -Jinja2==3.1.2 +Jinja2==3.1.3 packaging==23.1 pyyaml==6.0 -requests==2.28.2 +requests==2.31.0 psutil==5.9.6 diff --git a/python/tests/cli/unit/common/package-source.json b/python/tests/cli/unit/common/package-source.json new file mode 100644 index 00000000..7b4199c4 --- /dev/null +++ b/python/tests/cli/unit/common/package-source.json @@ -0,0 +1,43 @@ +{ + "1.0.3": { + "holoscan": { + "debian-packages": { + "linux/amd64": "https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/x86_64/holoscan_1.0.3.0-1_amd64.deb", + "linux/arm64": { + "igpu": "https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/arm64/holoscan_1.0.3.0-1_arm64.deb", + "dgpu": "https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/sbsa/holoscan_1.0.3.0-1_arm64.deb" + } + }, + "base-images": { + "igpu": { + "jetson-agx-orin-devkit": "nvcr.io/nvidia/clara-holoscan/holoscan:v1.0.3-igpu", + "igx-orin-devkit": "nvcr.io/nvidia/clara-holoscan/holoscan:v1.0.3-igpu" + }, + "dgpu": { + "x64-workstation": "nvcr.io/nvidia/clara-holoscan/holoscan:v1.0.3-dgpu", + "igx-orin-devkit": "nvcr.io/nvidia/clara-holoscan/holoscan:v1.0.3-dgpu" + }, + "cpu": { + "x64-workstation": "nvcr.io/nvidia/clara-holoscan/holoscan:v1.0.3-dgpu" 
+ } + }, + "build-images": { + "igpu": { + "jetson-agx-orin-devkit": "nvcr.io/nvidia/clara-holoscan/holoscan:v1.0.3-igpu", + "igx-orin-devkit": "nvcr.io/nvidia/clara-holoscan/holoscan:v1.0.3-igpu" + }, + "dgpu": { + "x64-workstation": "nvcr.io/nvidia/clara-holoscan/holoscan:v1.0.3-dgpu", + "igx-orin-devkit": "nvcr.io/nvidia/clara-holoscan/holoscan:v1.0.3-dgpu" + }, + "cpu": { + "x64-workstation": "nvcr.io/nvidia/clara-holoscan/holoscan:v1.0.3-dgpu" + } + } + }, + "health-probes": { + "linux/amd64": "https://github.com/grpc-ecosystem/grpc-health-probe/releases/download/v0.4.24/grpc_health_probe-linux-amd64", + "linux/arm64": "https://github.com/grpc-ecosystem/grpc-health-probe/releases/download/v0.4.24/grpc_health_probe-linux-arm64" + } + } +} \ No newline at end of file diff --git a/python/tests/cli/unit/common/test_artifact_source.py b/python/tests/cli/unit/common/test_artifact_source.py index 3f0ec18a..7731ea95 100644 --- a/python/tests/cli/unit/common/test_artifact_source.py +++ b/python/tests/cli/unit/common/test_artifact_source.py @@ -21,22 +21,18 @@ from holoscan.cli.common.artifact_sources import ArtifactSources from holoscan.cli.common.enum_types import Arch, PlatformConfiguration -from holoscan.cli.common.exceptions import InvalidSourceFileError class TestArtifactSource: - @pytest.fixture(autouse=True) - def _setup(self) -> None: + def _init(self) -> None: self._artifact_source = ArtifactSources() - - def test_default_top_level_attributes(self): - _ = ArtifactSources() - - def test_loads_sample(self): current_file_path = Path(__file__).parent.resolve() - source_file_sample = current_file_path / "../../../../holoscan/cli/package-source.json" - artifact_sources = ArtifactSources() - artifact_sources.load(source_file_sample) + source_file_sample = current_file_path / "./package-source.json" + self._artifact_source.load(str(source_file_sample)) + + def test_loads_from_edge(self, monkeypatch): + artifact_source = ArtifactSources() + 
artifact_source.download_manifest() def test_loads_invalid_file(self, monkeypatch): monkeypatch.setattr(Path, "read_text", lambda x: "{}") @@ -44,11 +40,29 @@ def test_loads_invalid_file(self, monkeypatch): source_file_sample = Path("some-bogus-file.json") artifact_sources = ArtifactSources() - with pytest.raises(InvalidSourceFileError): - artifact_sources.load(source_file_sample) + with pytest.raises(FileNotFoundError): + artifact_sources.load(str(source_file_sample)) + + @pytest.mark.parametrize( + "arch,platform_config", + [ + (Arch.amd64, PlatformConfiguration.dGPU), + (Arch.arm64, PlatformConfiguration.dGPU), + (Arch.arm64, PlatformConfiguration.iGPU), + ], + ) + def test_debian_package(self, arch, platform_config): + self._init() + assert self._artifact_source.debian_packages("1.0.3", arch, platform_config) is not None + + def test_base_images(self): + self._init() + assert self._artifact_source.base_images("1.0.3") is not None + + def test_build_images(self): + self._init() + assert self._artifact_source.build_images("1.0.3") is not None - def test_debian_package_zero_six_amd64_dgpu(self): - assert ( - self._artifact_source.debian_packages("1.0.3", Arch.amd64, PlatformConfiguration.dGPU) - is not None - ) + def test_health_probe(self): + self._init() + assert self._artifact_source.health_probe("1.0.3") is not None diff --git a/python/tests/cli/unit/common/test_sdk_utils.py b/python/tests/cli/unit/common/test_sdk_utils.py index 5b207915..8faa60ae 100644 --- a/python/tests/cli/unit/common/test_sdk_utils.py +++ b/python/tests/cli/unit/common/test_sdk_utils.py @@ -15,6 +15,7 @@ limitations under the License. 
""" # noqa: E501 + import pytest from packaging.version import Version @@ -76,22 +77,17 @@ class TestDetectHoloscanVersion: @pytest.fixture(autouse=True) def _setup(self) -> None: self._artifact_source = ArtifactSources() - self._artifact_source._data[SdkType.Holoscan.value][ArtifactSources.SectionVersion].append( - "0.6.0" - ) - self._artifact_source._data[SdkType.Holoscan.value][ArtifactSources.SectionVersion].append( - "1.0.0" - ) + self._artifact_source._supported_holoscan_versions = ["1.0.0"] def test_sdk_version_from_valid_user_input(self, monkeypatch): - assert detect_holoscan_version(self._artifact_source, Version("0.6.0")) == "0.6.0" + assert detect_holoscan_version(self._artifact_source, Version("1.0.0")) == "1.0.0" def test_sdk_version_from_invalid_user_input(self, monkeypatch): with pytest.raises(InvalidSdkError): detect_holoscan_version(self._artifact_source, Version("0.1.0")) def test_detect_sdk_version(self, monkeypatch): - version = "0.6.0" + version = "1.0.0" monkeypatch.setattr("importlib.metadata.version", lambda x: version) @@ -99,12 +95,12 @@ def test_detect_sdk_version(self, monkeypatch): assert result == version def test_detect_sdk_version_with_patch(self, monkeypatch): - version = "0.6.0-beta-1" + version = "1.0.0-beta-1" monkeypatch.setattr("importlib.metadata.version", lambda x: version) result = detect_holoscan_version(self._artifact_source) - assert result == "0.6.0" + assert result == "1.0.0" def test_detect_sdk_version_with_unsupported_version(self, monkeypatch): version = "0.1.2" diff --git a/python/tests/conftest.py b/python/tests/conftest.py index 9ce151cf..6a0ae824 100644 --- a/python/tests/conftest.py +++ b/python/tests/conftest.py @@ -50,6 +50,13 @@ def ping_config_file(): return config_file +@pytest.fixture() +def deprecated_extension_config_file(): + yaml_file_dir = os.path.dirname(__file__) + config_file = os.path.join(yaml_file_dir, "deprecated_stream_playback.yaml") + return config_file + + def pytest_configure(config): 
os.environ["HOLOSCAN_DISABLE_BACKTRACE"] = "1" config.addinivalue_line("markers", "slow: mark test as slow to run") diff --git a/python/tests/deprecated_stream_playback.yaml b/python/tests/deprecated_stream_playback.yaml new file mode 100644 index 00000000..3eaef295 --- /dev/null +++ b/python/tests/deprecated_stream_playback.yaml @@ -0,0 +1,19 @@ +%YAML 1.2 +# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +--- +extensions: + - libgxf_stream_playback.so + - /opt/nvidia/holoscan/lib/libgxf_stream_playback.so diff --git a/python/tests/operator_parameters.yaml b/python/tests/operator_parameters.yaml index 66d1231f..533834fb 100644 --- a/python/tests/operator_parameters.yaml +++ b/python/tests/operator_parameters.yaml @@ -1,5 +1,5 @@ %YAML 1.2 -# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
# SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -27,6 +27,7 @@ aja: rdma: true enable_overlay: false overlay_rdma: true + channel: "NTV2_CHANNEL1" replayer: basename: "racerx" @@ -176,3 +177,11 @@ demosaic: generate_alpha: false bayer_grid_pos: 2 interpolation_mode: 0 # this is the only interpolation mode supported by NPP currently + +v4l2_video_capture: + width: 320 + height: 240 + pixel_format: "auto" + device: "/dev/video0" + exposure_time: 500 + gain: 100 diff --git a/python/tests/run_cli_unit_tests.sh b/python/tests/run_cli_unit_tests.sh index 19c580df..19b62108 100755 --- a/python/tests/run_cli_unit_tests.sh +++ b/python/tests/run_cli_unit_tests.sh @@ -58,9 +58,12 @@ ensure_dir_exists() { platform=$(get_platform_str $(uname -p)) export APP_BUILD_PATH=${PUBLIC_DIR}/build-${platform}/python/lib +export APP_INSTALL_PATH=${PUBLIC_DIR}/install-${platform}/lib export PYTHONPATH=${APP_BUILD_PATH}:${PYTHONPATH} export HOLOSCAN_TESTS_DATA_PATH=${PUBLIC_DIR}/tests/data +export LD_LIBRARY_PATH=${APP_INSTALL_PATH}:${LD_LIBRARY_PATH} ensure_dir_exists $APP_BUILD_PATH "application build" +ensure_dir_exists $APP_INSTALL_PATH "application install" ensure_dir_exists $HOLOSCAN_TESTS_DATA_PATH "test data" echo "Running CLI unit test in ${PYTHONPATH}..." 
pytest cli/unit --cov ${PUBLIC_DIR}/build-${platform}/python/lib/holoscan/cli --cov-report=xml --cov-report term --capture=tee-sys diff --git a/python/tests/system/distributed/test_application_exception_distributed.py b/python/tests/system/distributed/test_application_exception_distributed.py index 4c73aa32..4da266c8 100644 --- a/python/tests/system/distributed/test_application_exception_distributed.py +++ b/python/tests/system/distributed/test_application_exception_distributed.py @@ -60,7 +60,7 @@ def compose(self): self.add_flow(tx_fragment, rx_fragment, {("tx.out", "rx.in")}) -def test_exception_handling_distributed(capfd): +def test_exception_handling_distributed(): app = BadDistributedApplication() # Global timeouts set for CTest Python distributed test runs are currently 2500, but this case @@ -70,16 +70,12 @@ def test_exception_handling_distributed(capfd): # set the stop on deadlock timeout to 5s to have enough time to run the test ("HOLOSCAN_STOP_ON_DEADLOCK_TIMEOUT", "5000"), } - with env_var_context(env_var_settings): - app.run() - - # assert that the exception was logged - captured = capfd.readouterr() - # temporarily print full stderr/stdout on failure to help debug on CI - if captured.err.count("ZeroDivisionError") != NUM_EXCEPTIONS: - print(f"\ncaptured stderr: {captured.err}") - print(f"captured stdout: {captured.out}") + exception_occurred = False + with env_var_context(env_var_settings): + try: + app.run() + except ZeroDivisionError: + exception_occurred = True - assert captured.err.count("ZeroDivisionError") == NUM_EXCEPTIONS - assert captured.err.count("Traceback") == NUM_EXCEPTIONS + assert exception_occurred diff --git a/python/tests/system/distributed/test_distributed_app_implicit_broadcast.py b/python/tests/system/distributed/test_distributed_app_implicit_broadcast.py index 8d33206b..6a6e123b 100644 --- a/python/tests/system/distributed/test_distributed_app_implicit_broadcast.py +++ 
b/python/tests/system/distributed/test_distributed_app_implicit_broadcast.py @@ -18,6 +18,7 @@ import cupy as cp import pytest from env_wrapper import env_var_context +from utils import remove_ignored_errors from holoscan.conditions import CountCondition from holoscan.core import Application, Fragment, Operator, OperatorSpec @@ -150,7 +151,7 @@ def test_distributed_implicit_broadcast_app(message_type, capfd): # avoid catching the expected error message # : "error handling callback was invoked with status -25 (Connection reset by remote peer)" captured_error = captured.err.replace("error handling callback", "ucx handling callback") - assert "error" not in captured_error + assert "error" not in remove_ignored_errors(captured_error) assert "Exception occurred" not in captured_error # assert that the expected number of messages were received diff --git a/python/tests/system/distributed/test_distributed_app_three_ucx_receivers.py b/python/tests/system/distributed/test_distributed_app_three_ucx_receivers.py index 7a2244f7..c833d924 100644 --- a/python/tests/system/distributed/test_distributed_app_three_ucx_receivers.py +++ b/python/tests/system/distributed/test_distributed_app_three_ucx_receivers.py @@ -20,6 +20,7 @@ import numpy as np from env_wrapper import env_var_context +from utils import remove_ignored_errors from holoscan.conditions import CountCondition from holoscan.core import Application, Fragment, Operator, OperatorSpec @@ -162,8 +163,8 @@ def compose(self): def launch_app(): env_var_settings = { - # set the recession period to 10 ms to reduce debug messages - ("HOLOSCAN_CHECK_RECESSION_PERIOD_MS", "10"), + # set the recession period to 5 ms to reduce debug messages + ("HOLOSCAN_CHECK_RECESSION_PERIOD_MS", "5"), # set the max duration to 10s to have enough time to run the test # (connection time takes ~5 seconds) ("HOLOSCAN_MAX_DURATION_MS", "10000"), @@ -193,7 +194,7 @@ def test_distributed_app_three_ucx_receivers(capfd): # avoid catching the expected error 
message # : "error handling callback was invoked with status -25 (Connection reset by remote peer)" captured_error = captured.err.replace("error handling callback", "ucx handling callback") - assert "error" not in captured_error + assert "error" not in remove_ignored_errors(captured_error) assert "Exception occurred" not in captured_error # assert that the expected number of messages were received diff --git a/python/tests/system/distributed/test_distributed_app_with_invalid_add_flow.py b/python/tests/system/distributed/test_distributed_app_with_invalid_add_flow.py index 3bbd47bd..bea5042e 100644 --- a/python/tests/system/distributed/test_distributed_app_with_invalid_add_flow.py +++ b/python/tests/system/distributed/test_distributed_app_with_invalid_add_flow.py @@ -16,6 +16,7 @@ """ # noqa: E501 import pytest +from utils import remove_ignored_errors from holoscan.conditions import CountCondition from holoscan.core import Application, Fragment @@ -78,7 +79,7 @@ def test_distributed_app_invalid_add_flow(ports_arg, invalid_port, invalid_type, captured = capfd.readouterr() if invalid_port is None: - assert "error" not in captured.err + assert "error" not in remove_ignored_errors(captured.err) assert "Exception occurred" not in captured.err else: assert "error" in captured.err diff --git a/python/tests/system/distributed/test_distributed_app_with_invalid_compose.py b/python/tests/system/distributed/test_distributed_app_with_invalid_compose.py index 47132050..10467717 100644 --- a/python/tests/system/distributed/test_distributed_app_with_invalid_compose.py +++ b/python/tests/system/distributed/test_distributed_app_with_invalid_compose.py @@ -14,6 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
""" # noqa: E501 +from utils import remove_ignored_errors from holoscan.conditions import CountCondition from holoscan.core import Application, Fragment @@ -57,5 +58,5 @@ def test_distributed_app_invalid_fragment_compose(capfd): # assert that no errors were logged captured = capfd.readouterr() - assert "error" in captured.err + assert "error" in remove_ignored_errors(captured.err) assert "Fragment 'rx_fragment' does not have any operators" in captured.err diff --git a/python/tests/system/distributed/test_ucx_message_serialization.py b/python/tests/system/distributed/test_ucx_message_serialization.py index ffe01334..24c70810 100644 --- a/python/tests/system/distributed/test_ucx_message_serialization.py +++ b/python/tests/system/distributed/test_ucx_message_serialization.py @@ -19,6 +19,7 @@ import sys import pytest +from utils import remove_ignored_errors from holoscan.conditions import CountCondition from holoscan.core import Application, Fragment, Operator, OperatorSpec @@ -210,7 +211,7 @@ def test_ucx_object_serialization_app(ping_config_file, value, capfd): # : "error handling callback was invoked with status -25 (Connection reset by remote peer)" captured_error = captured.err.replace("error handling callback", "ucx handling callback") assert "received expected value" in captured_error - assert "error" not in captured_error + assert "error" not in remove_ignored_errors(captured_error) assert "Exception occurred" not in captured_error @@ -301,5 +302,5 @@ def test_ucx_object_receivers_serialization_app(ping_config_file, value, capfd): # : "error handling callback was invoked with status -25 (Connection reset by remote peer)" captured_error = captured.err.replace("error handling callback", "ucx handling callback") assert "received expected value" in captured_error - assert "error" not in captured_error + assert "error" not in remove_ignored_errors(captured_error) assert "Exception occurred" not in captured_error diff --git a/python/tests/system/distributed/utils.py 
b/python/tests/system/distributed/utils.py new file mode 100644 index 00000000..c0faa7fa --- /dev/null +++ b/python/tests/system/distributed/utils.py @@ -0,0 +1,32 @@ +""" + SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + SPDX-License-Identifier: Apache-2.0 + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +""" # noqa: E501 + + +def remove_ignored_errors(captured_error: str) -> str: + """Utility to remove specific, known errors from a captured stderr string""" + + err_lines = captured_error.split("\n") + + errors_to_ignore = [ + # some versions of the UCX extension print this error during application shutdown + "Connection dropped with status -25", + ] + + for err in errors_to_ignore: + err_lines = [line for line in err_lines if err not in line] + + return "\n".join(err_lines) diff --git a/python/tests/system/test_application_exception.py b/python/tests/system/test_application_exception.py index 8fe76375..18d3308e 100644 --- a/python/tests/system/test_application_exception.py +++ b/python/tests/system/test_application_exception.py @@ -94,7 +94,7 @@ def compose(self): @pytest.mark.parametrize("method", ["compute", "initialize", "start", "stop", "all"]) -def test_exception_handling(capfd, method): +def test_exception_handling(method): if method == "compute": app = BadComputeOperatorApp() elif method == "initialize": @@ -107,9 +107,6 @@ def test_exception_handling(capfd, method): app = BadAllOperatorApp() else: raise 
ValueError("invalid method name") - app.run() - # assert that the exception was logged - captured = capfd.readouterr() - assert "ZeroDivisionError: division by zero" in captured.err - assert captured.err.count("Traceback") == 1 if method != "all" else 4 + with pytest.raises(ZeroDivisionError): + app.run() diff --git a/python/tests/system/test_application_minimal.py b/python/tests/system/test_application_minimal.py index c595e10b..ffb6dfa4 100644 --- a/python/tests/system/test_application_minimal.py +++ b/python/tests/system/test_application_minimal.py @@ -20,7 +20,7 @@ from holoscan.conditions import CountCondition from holoscan.core import Application, Operator, OperatorSpec from holoscan.resources import ManualClock, RealtimeClock -from holoscan.schedulers import GreedyScheduler, MultiThreadScheduler +from holoscan.schedulers import EventBasedScheduler, GreedyScheduler, MultiThreadScheduler class MinimalOp(Operator): @@ -72,7 +72,9 @@ def test_minimal_app(ping_config_file, SchedulerClass, capfd): # noqa: N803 assert captured.out.count("** stop method called **") == 1 -@pytest.mark.parametrize("SchedulerClass", [GreedyScheduler, MultiThreadScheduler]) +@pytest.mark.parametrize( + "SchedulerClass", [EventBasedScheduler, GreedyScheduler, MultiThreadScheduler] +) @pytest.mark.parametrize("ClockClass", [RealtimeClock, ManualClock]) def test_minimal_app_with_clock(ping_config_file, SchedulerClass, ClockClass): # noqa: N803 app = MinimalApp() @@ -89,6 +91,18 @@ def test_app_ping_config_keys(ping_config_file): assert keys == {"mx", "mx.multiplier"} +def test_deprecated_extension(deprecated_extension_config_file, capfd): + app = MinimalApp() + app.config(deprecated_extension_config_file) + + app.run() + + captured_error = capfd.readouterr().err + warning_msg = "no longer require specifying the libgxf_stream_playback.so extension" + # deprecated extension is listed twice in the config file (once with full path) + assert captured_error.count(warning_msg) == 2 + + def 
test_app_config_keys(config_file): app = MinimalApp() app.config(config_file) diff --git a/python/tests/system/test_multithread_tensor_message.py b/python/tests/system/test_multithread_tensor_message.py index e6e646f2..7c010ef6 100644 --- a/python/tests/system/test_multithread_tensor_message.py +++ b/python/tests/system/test_multithread_tensor_message.py @@ -18,13 +18,14 @@ import sys import cupy as cp +import pytest from env_wrapper import env_var_context import holoscan from holoscan.conditions import CountCondition from holoscan.operators import FormatConverterOp from holoscan.resources import UnboundedAllocator -from holoscan.schedulers import MultiThreadScheduler +from holoscan.schedulers import EventBasedScheduler, MultiThreadScheduler cuda_device = cp.cuda.Device() # disable CuPy memory pool @@ -124,7 +125,7 @@ def compose(self): # 1000: 2094333952 => 5636096, 18874368 (with some background processes running) -def launch_app(): +def launch_app(scheduler="multi_thread"): env_var_settings = { # set the recession period to 100 ms to reduce debug messages ("HOLOSCAN_CHECK_RECESSION_PERIOD_MS", "100"), @@ -134,25 +135,37 @@ def launch_app(): with env_var_context(env_var_settings): app = MultithreadTensorSenderApp() - scheduler = MultiThreadScheduler( - app, - worker_thread_number=4, - stop_on_deadlock=True, - stop_on_deadlock_timeout=500, - check_recession_period_ms=0.0, - name="multithread_scheduler", - ) + if scheduler == "multi_thread": + scheduler = MultiThreadScheduler( + app, + worker_thread_number=4, + stop_on_deadlock=True, + stop_on_deadlock_timeout=500, + check_recession_period_ms=0.0, + name="multithread_scheduler", + ) + elif scheduler == "event_based": + scheduler = EventBasedScheduler( + app, + worker_thread_number=4, + stop_on_deadlock=True, + stop_on_deadlock_timeout=500, + name="ebs", + ) + else: + raise ValueError("scheduler must be one of {'multi_thread', 'event_based'}") app.scheduler(scheduler) app.run() -def 
test_multithread_tensor_message(capfd): +@pytest.mark.parametrize("scheduler", ["event_based", "multi_thread"]) +def test_multithread_tensor_message(scheduler, capfd): # Issue 4293741: Python application having more than two operators, using MultiThreadScheduler # (including distributed app), and sending Tensor can deadlock at runtime. global NUM_MSGS, GPU_MEMORY_HISTORY - launch_app() + launch_app(scheduler) # assert that no errors were logged captured = capfd.readouterr() diff --git a/python/tests/system/test_pytracing.py b/python/tests/system/test_pytracing.py index 3f3a3aa9..05585706 100644 --- a/python/tests/system/test_pytracing.py +++ b/python/tests/system/test_pytracing.py @@ -19,7 +19,7 @@ from holoscan.conditions import CountCondition from holoscan.core import Application, Operator, OperatorSpec -from holoscan.schedulers import MultiThreadScheduler +from holoscan.schedulers import EventBasedScheduler, MultiThreadScheduler # The following example is extracted from the ping_vector example in the public/examples # directory (public/examples/ping_vector/python/ping_vector.py), adding some methods @@ -154,19 +154,25 @@ def compose(self): def main(scheduler_type="greedy"): app = MyPingApp() - if scheduler_type == "multithread": - # If the multithread scheduler is used, the cProfile or profile module may not work + if scheduler_type in ["multithread", "event_based"]: + # If a multithread scheduler is used, the cProfile or profile module may not work # properly and show some error messages. # For multithread scheduler, please use multithread-aware profilers such as # [pyinstrument](https://github.com/joerick/pyinstrument), # [pprofile](https://github.com/vpelletier/pprofile), or # [yappi](https://github.com/sumerc/yappi). 
- scheduler = MultiThreadScheduler( + if scheduler_type == "multithread": + scheduler_class = MultiThreadScheduler + name = ("multithread_scheduler",) + else: + scheduler_class = EventBasedScheduler + name = ("event_based_scheduler",) + scheduler = scheduler_class( app, worker_thread_number=3, stop_on_deadlock=True, stop_on_deadlock_timeout=500, - name="multithread_scheduler", + name=name, ) app.scheduler(scheduler) @@ -372,7 +378,7 @@ def yappi_main(scheduler_type="greedy"): "-s", "--scheduler", type=str, - choices=("greedy", "multithread"), + choices=("greedy", "multithread", "event_based"), default="greedy", help="The scheduler to use", ) diff --git a/python/tests/unit/test_core.py b/python/tests/unit/test_core.py index a63ea839..aa63aa9f 100644 --- a/python/tests/unit/test_core.py +++ b/python/tests/unit/test_core.py @@ -51,6 +51,7 @@ from holoscan.core._core import ParameterFlag, PyOperatorSpec from holoscan.executors import GXFExecutor from holoscan.graphs import FlowGraph, OperatorFlowGraph +from holoscan.operators.aja_source import NTV2Channel # noqa: F401 (needed to parse AJA 'channel') from holoscan.resources import ( DoubleBufferReceiver, DoubleBufferTransmitter, @@ -578,7 +579,7 @@ def test_from_config(self, fragment, config_file): aja_kwargs = fragment.from_config("aja") assert isinstance(aja_kwargs, ArgList) - assert aja_kwargs.size == len(aja_kwargs.args) == 5 + assert aja_kwargs.size == len(aja_kwargs.args) == 6 # all arguments in the ArgList are YAML nodes for arg in aja_kwargs.args: assert arg.arg_type.element_type == ArgElementType.YAML_NODE @@ -771,7 +772,7 @@ def test_from_config(self, app, config_file): aja_kwargs = app.from_config("aja") assert isinstance(aja_kwargs, ArgList) - assert aja_kwargs.size == 5 + assert aja_kwargs.size == 6 def test_kwargs(self, app, config_file): app.config(config_file) diff --git a/python/tests/unit/test_logger.py b/python/tests/unit/test_logger.py index 24d639d1..4e1664b0 100644 --- 
a/python/tests/unit/test_logger.py +++ b/python/tests/unit/test_logger.py @@ -21,16 +21,9 @@ from holoscan.logger import ( LogLevel, - disable_backtrace, - dump_backtrace, - enable_backtrace, - flush, - flush_level, - flush_on, log_level, set_log_level, set_log_pattern, - should_backtrace, ) @@ -74,33 +67,3 @@ def test_set_log_level(level): os.environ["HOLOSCAN_LOG_LEVEL"] = orig_env # restore the logging level prior to the test set_log_level(orig_level) - - -def test_logger_flush(): - level = flush_level() - - flush_on(LogLevel.TRACE) - assert flush_level() == LogLevel.TRACE - - flush() - - # restore original flush level - flush_on(level) - - -def test_logger_backtrace(): - # determine initial setting - enabled = should_backtrace() - - enable_backtrace(32) - assert should_backtrace() - dump_backtrace() - - disable_backtrace() - assert not should_backtrace() - - # restore original backtrace setting - if enabled: - enable_backtrace(32) - else: - disable_backtrace() diff --git a/python/tests/unit/test_operators_native.py b/python/tests/unit/test_operators_native.py index 7fbd20e5..47056ea1 100644 --- a/python/tests/unit/test_operators_native.py +++ b/python/tests/unit/test_operators_native.py @@ -1,18 +1,18 @@ """ - SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. - SPDX-License-Identifier: Apache-2.0 +SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +SPDX-License-Identifier: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 +http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. """ # noqa: E501 import os @@ -34,6 +34,7 @@ from holoscan.operators.inference import InferenceOp from holoscan.operators.inference_processor import InferenceProcessorOp from holoscan.operators.segmentation_postprocessor import SegmentationPostprocessorOp +from holoscan.operators.v4l2_video_capture import V4L2VideoCaptureOp from holoscan.operators.video_stream_recorder import VideoStreamRecorderOp from holoscan.operators.video_stream_replayer import VideoStreamReplayerOp from holoscan.resources import ( @@ -315,7 +316,8 @@ def test_tensor_round_trip(self, dtype, order, module): xp.testing.assert_array_equal(a, b) @pytest.mark.parametrize("module", ["cupy", "numpy"]) - def test_from_dlpack(self, module): + @pytest.mark.parametrize("convert_method", ["as_tensor", "from_dlpack"]) + def test_from_dlpack(self, module, convert_method): # Check if module is numpy and numpy version is less than 1.23 then skip the test # because numpy.from_dlpack is not available in numpy versions less than 1.23 if module == "numpy" and tuple(map(int, np.__version__.split("."))) < (1, 23): @@ -323,7 +325,8 @@ def test_from_dlpack(self, module): xp = pytest.importorskip(module) arr_in = xp.random.randn(1, 2, 3, 4).astype(xp.float32) - tensor = 
Tensor.as_tensor(arr_in) + converter = getattr(Tensor, convert_method) + tensor = converter(arr_in) arr_out1 = xp.asarray(tensor) arr_out2 = xp.from_dlpack(tensor) xp.testing.assert_array_equal(arr_in, arr_out1) @@ -341,7 +344,11 @@ def test_kwarg_based_initialization(self, app, config_file, capfd): fragment=app, name=name, channel=NTV2Channel.NTV2_CHANNEL1, - **app.kwargs("aja"), + width=1920, + height=1080, + rdma=True, + enable_overlay=False, + overlay_rdma=True, ) assert isinstance(op, _Operator) assert op.operator_type == Operator.OperatorType.NATIVE @@ -349,12 +356,24 @@ def test_kwarg_based_initialization(self, app, config_file, capfd): # assert no warnings or errors logged captured = capfd.readouterr() + assert "error" not in captured.err + assert "warning" not in captured.err - # Initializing outside the context of app.run() will result in the - # following error being logged because the GXFWrapper will not have - # been created for the operator: - # [error] [gxf_executor.cpp:452] Unable to get GXFWrapper for Operator 'segmentation_postprocessor # noqa: E501 - assert captured.err.count("[error]") <= 1 + def test_initialization_from_yaml(self, app, config_file, capfd): + app.config(config_file) + name = "source" + op = AJASourceOp( + fragment=app, + name=name, + **app.kwargs("aja"), + ) + assert isinstance(op, _Operator) + assert op.operator_type == Operator.OperatorType.NATIVE + assert f"name: {name}" in repr(op) + + # assert no warnings or errors logged + captured = capfd.readouterr() + assert "error" not in captured.err assert "warning" not in captured.err @@ -382,12 +401,8 @@ def test_kwarg_based_initialization(self, app, config_file, capfd): # assert no warnings or errors logged captured = capfd.readouterr() - - # Initializing outside the context of app.run() will result in the - # following error being logged because the GXFWrapper will not have - # been created for the operator: - # [error] [gxf_executor.cpp:452] Unable to get GXFWrapper for Operator 
'recorder_format_converter' # noqa: E501 - assert captured.err.count("[error]") <= 1 + assert "error" not in captured.err + assert "warning" not in captured.err class TestInferenceOp: @@ -413,12 +428,7 @@ def test_kwarg_based_initialization(self, app, config_file, capfd): # assert no warnings or errors logged captured = capfd.readouterr() - - # Initializing outside the context of app.run() will result in the - # following error being logged because the GXFWrapper will not have - # been created for the operator: - # [error] [gxf_executor.cpp:452] Unable to get GXFWrapper for Operator 'inference' # noqa: E501 - assert captured.err.count("[error]") <= 1 + assert "error" not in captured.err assert "warning" not in captured.err @@ -438,12 +448,7 @@ def test_kwarg_based_initialization(self, app, config_file, capfd): # assert no warnings or errors logged captured = capfd.readouterr() - - # Initializing outside the context of app.run() will result in the - # following error being logged because the GXFWrapper will not have - # been created for the operator: - # [error] [gxf_executor.cpp:452] Unable to get GXFWrapper for Operator 'processor' # noqa: E501 - assert captured.err.count("[error]") <= 1 + assert "error" not in captured.err assert "warning" not in captured.err @@ -461,12 +466,7 @@ def test_kwarg_based_initialization(self, app, capfd): # assert no warnings or errors logged captured = capfd.readouterr() - - # Initializing outside the context of app.run() will result in the - # following error being logged because the GXFWrapper will not have - # been created for the operator: - # [error] [gxf_executor.cpp:452] Unable to get GXFWrapper for Operator 'segmentation_postprocessor' # noqa: E501 - assert captured.err.count("[error]") <= 1 + assert "error" not in captured.err assert "warning" not in captured.err @@ -481,11 +481,7 @@ def test_kwarg_based_initialization(self, app, config_file, capfd): # assert no warnings or errors logged captured = capfd.readouterr() - # 
Initializing outside the context of app.run() will result in the - # following error being logged because the GXFWrapper will not have - # been created for the operator: - # [error] [gxf_executor.cpp:452] Unable to get GXFWrapper for Operator 'recorder' # noqa: E501 - assert captured.err.count("[error]") <= 1 + assert "error" not in captured.err assert "warning" not in captured.err @@ -506,11 +502,7 @@ def test_kwarg_based_initialization(self, app, config_file, capfd): # assert no warnings or errors logged captured = capfd.readouterr() - # Initializing outside the context of app.run() will result in the - # following error being logged because the GXFWrapper will not have - # been created for the operator: - # [error] [gxf_executor.cpp:452] Unable to get GXFWrapper for Operator 'replayer' # noqa: E501 - assert captured.err.count("[error]") <= 1 + assert "error" not in captured.err assert "warning" not in captured.err @@ -699,11 +691,7 @@ def test_kwarg_based_initialization(self, app, config_file, capfd): # assert no warnings or errors logged captured = capfd.readouterr() - # Initializing outside the context of app.run() will result in the - # following error being logged because the GXFWrapper will not have - # been created for the operator: - # [error] [gxf_executor.cpp:452] Unable to get GXFWrapper for Operator 'visualizer # noqa: E501 - assert captured.err.count("[error]") <= 1 + assert "error" not in captured.err assert "warning" not in captured.err @pytest.mark.parametrize( @@ -890,10 +878,67 @@ def test_kwarg_based_initialization(self, app, config_file, capfd): # assert no warnings or errors logged captured = capfd.readouterr() + assert "error" not in captured.err + assert "warning" not in captured.err - # Initializing outside the context of app.run() will result in the - # following error being logged because the GXFWrapper will not have - # been created for the operator: - # [error] [gxf_executor.cpp:452] Unable to get GXFWrapper for Operator 'demosaic' # 
noqa: E501 - assert captured.err.count("[error]") <= 1 - assert captured.err.count("[warning]") <= 1 + +class TestV4L2VideoCaptureOp: + def test_kwarg_based_initialization(self, app, capfd): + name = "video_capture" + op = V4L2VideoCaptureOp( + app, + name=name, + width=320, + height=240, + pixel_format="auto", + device="/dev/video0", + allocator=UnboundedAllocator(app, name="pool"), + exposure_time=500, + gain=100, + ) + assert isinstance(op, _Operator) + assert len(op.args) == 8 + assert op.operator_type == Operator.OperatorType.NATIVE + assert f"name: {name}" in repr(op) + + # assert no warnings or errors logged + captured = capfd.readouterr() + assert "error" not in captured.err + assert "warning" not in captured.err + + def test_default_initialization(self, app, capfd): + name = "video_capture" + op = V4L2VideoCaptureOp( + app, + name=name, + allocator=UnboundedAllocator(app, name="pool"), + ) + assert isinstance(op, _Operator) + assert len(op.args) == 6 # No hardcoded defaults for exposure and gain + assert op.operator_type == Operator.OperatorType.NATIVE + assert f"name: {name}" in repr(op) + + # assert no warnings or errors logged + captured = capfd.readouterr() + assert "error" not in captured.err + assert "warning" not in captured.err + + def test_initialization_from_yaml(self, app, config_file, capfd): + app.config(config_file) + name = "video_capture" + print(f"{app.kwargs('v4l2_video_capture')}") + op = V4L2VideoCaptureOp( + app, + name=name, + allocator=UnboundedAllocator(app, name="pool"), + **app.kwargs("v4l2_video_capture"), + ) + assert isinstance(op, _Operator) + assert len(op.args) == 8 + assert op.operator_type == Operator.OperatorType.NATIVE + assert f"name: {name}" in repr(op) + + # assert no warnings or errors logged + captured = capfd.readouterr() + assert "error" not in captured.err + assert "warning" not in captured.err diff --git a/python/tests/unit/test_resources.py b/python/tests/unit/test_resources.py index af857490..2efd0ad5 100644 
--- a/python/tests/unit/test_resources.py +++ b/python/tests/unit/test_resources.py @@ -30,6 +30,7 @@ Receiver, SerializationBuffer, StdComponentSerializer, + StdEntitySerializer, Transmitter, UcxComponentSerializer, UcxEntitySerializer, @@ -38,7 +39,6 @@ UcxSerializationBuffer, UcxTransmitter, UnboundedAllocator, - VideoStreamSerializer, ) @@ -55,7 +55,7 @@ def test_kwarg_based_initialization(self, app, capfd): assert isinstance(pool, Allocator) assert isinstance(pool, GXFResource) assert isinstance(pool, Resource) - assert pool.id != -1 + assert pool.id == -1 assert pool.gxf_typename == "nvidia::gxf::BlockMemoryPool" assert f"name: {name}" in repr(pool) @@ -69,6 +69,20 @@ def test_positional_initialization(self, app): class TestCudaStreamPool: + def test_default_initialization(self, app, capfd): + name = "cuda_stream" + pool = CudaStreamPool(fragment=app, name=name) + assert isinstance(pool, Allocator) + assert isinstance(pool, GXFResource) + assert isinstance(pool, Resource) + assert pool.id == -1 + assert pool.gxf_typename == "nvidia::gxf::CudaStreamPool" + assert f"name: {name}" in repr(pool) + + # assert no warnings or errors logged + captured = capfd.readouterr() + assert "error" not in captured.err + def test_kwarg_based_initialization(self, app, capfd): name = "cuda_stream" pool = CudaStreamPool( @@ -83,7 +97,7 @@ def test_kwarg_based_initialization(self, app, capfd): assert isinstance(pool, Allocator) assert isinstance(pool, GXFResource) assert isinstance(pool, Resource) - assert pool.id != -1 + assert pool.id == -1 assert pool.gxf_typename == "nvidia::gxf::CudaStreamPool" assert f"name: {name}" in repr(pool) @@ -105,7 +119,7 @@ def test_kwarg_based_initialization(self, app, capfd): assert isinstance(alloc, Allocator) assert isinstance(alloc, GXFResource) assert isinstance(alloc, Resource) - assert alloc.id != -1 + assert alloc.id == -1 assert alloc.gxf_typename == "nvidia::gxf::UnboundedAllocator" assert f"name: {name}" in repr(alloc) @@ -130,7 +144,7 @@ 
def test_kwarg_based_initialization(self, app, capfd): assert isinstance(r, Receiver) assert isinstance(r, GXFResource) assert isinstance(r, Resource) - assert r.id != -1 + assert r.id == -1 assert r.gxf_typename == "nvidia::gxf::DoubleBufferReceiver" assert f"name: {name}" in repr(r) @@ -155,7 +169,7 @@ def test_kwarg_based_initialization(self, app, capfd): assert isinstance(r, Transmitter) assert isinstance(r, GXFResource) assert isinstance(r, Resource) - assert r.id != -1 + assert r.id == -1 assert r.gxf_typename == "nvidia::gxf::DoubleBufferTransmitter" assert f"name: {name}" in repr(r) @@ -177,7 +191,7 @@ def test_kwarg_based_initialization(self, app, capfd): ) assert isinstance(r, GXFResource) assert isinstance(r, Resource) - assert r.id != -1 + assert r.id == -1 assert r.gxf_typename == "nvidia::gxf::StdComponentSerializer" assert f"name: {name}" in repr(r) @@ -190,17 +204,17 @@ def test_default_initialization(self, app): StdComponentSerializer(app) -class TestVideoStreamSerializer: +class TestStdEntitySerializer: def test_kwarg_based_initialization(self, app, capfd): - name = "vid-serializer" - r = VideoStreamSerializer( + name = "std-entity-serializer" + r = StdEntitySerializer( fragment=app, name=name, ) assert isinstance(r, GXFResource) assert isinstance(r, Resource) - assert r.id != -1 - assert r.gxf_typename == "nvidia::holoscan::stream_playback::VideoStreamSerializer" + assert r.id == -1 + assert r.gxf_typename == "nvidia::gxf::StdEntitySerializer" assert f"name: {name}" in repr(r) # assert no warnings or errors logged @@ -209,7 +223,7 @@ def test_kwarg_based_initialization(self, app, capfd): assert "warning" not in captured.err def test_default_initialization(self, app): - VideoStreamSerializer(app) + StdEntitySerializer(app) class TestManualClock: @@ -223,7 +237,7 @@ def test_kwarg_based_initialization(self, app, capfd): assert isinstance(clk, Clock) assert isinstance(clk, GXFResource) assert isinstance(clk, Resource) - assert clk.id != -1 + assert 
clk.id == -1 assert clk.gxf_typename == "nvidia::gxf::ManualClock" assert f"name: {name}" in repr(clk) @@ -249,7 +263,7 @@ def test_kwarg_based_initialization(self, app, capfd): assert isinstance(clk, Clock) assert isinstance(clk, GXFResource) assert isinstance(clk, Resource) - assert clk.id != -1 + assert clk.id == -1 assert clk.gxf_typename == "nvidia::gxf::RealtimeClock" assert f"name: {name}" in repr(clk) @@ -273,7 +287,7 @@ def test_kwarg_based_initialization(self, app, capfd): ) assert isinstance(res, GXFResource) assert isinstance(res, Resource) - assert res.id != -1 + assert res.id == -1 # -1 because initialize() isn't called assert res.gxf_typename == "nvidia::gxf::SerializationBuffer" assert f"name: {name}" in repr(res) @@ -294,7 +308,7 @@ def test_kwarg_based_initialization(self, app, capfd): ) assert isinstance(res, GXFResource) assert isinstance(res, Resource) - assert res.id != -1 + assert res.id == -1 assert res.gxf_typename == "nvidia::gxf::UcxSerializationBuffer" assert f"name: {name}" in repr(res) @@ -314,7 +328,7 @@ def test_kwarg_based_initialization(self, app, capfd): ) assert isinstance(res, GXFResource) assert isinstance(res, Resource) - assert res.id != -1 + assert res.id == -1 assert res.gxf_typename == "nvidia::gxf::UcxComponentSerializer" assert f"name: {name}" in repr(res) @@ -334,7 +348,7 @@ def test_kwarg_based_initialization(self, app, capfd): ) assert isinstance(res, GXFResource) assert isinstance(res, Resource) - assert res.id != -1 + assert res.id == -1 assert res.gxf_typename == "nvidia::gxf::UcxHoloscanComponentSerializer" assert f"name: {name}" in repr(res) @@ -354,7 +368,7 @@ def test_intialization_default_serializers(self, app, capfd): ) assert isinstance(res, GXFResource) assert isinstance(res, Resource) - assert res.id != -1 + assert res.id == -1 assert res.gxf_typename == "nvidia::gxf::UcxEntitySerializer" assert f"name: {name}" in repr(res) @@ -385,7 +399,7 @@ def test_kwarg_based_initialization(self, app, capfd): assert 
isinstance(res, GXFResource) assert isinstance(res, Resource) assert isinstance(res, Receiver) - assert res.id != -1 + assert res.id == -1 assert res.gxf_typename == "nvidia::gxf::UcxReceiver" assert f"name: {name}" in repr(res) @@ -419,7 +433,7 @@ def test_kwarg_based_initialization(self, app, capfd): assert isinstance(res, GXFResource) assert isinstance(res, Resource) assert isinstance(res, Transmitter) - assert res.id != -1 + assert res.id == -1 assert res.gxf_typename == "nvidia::gxf::UcxTransmitter" assert f"name: {name}" in repr(res) diff --git a/python/tests/unit/test_schedulers.py b/python/tests/unit/test_schedulers.py index 5e2f6595..71124b3b 100644 --- a/python/tests/unit/test_schedulers.py +++ b/python/tests/unit/test_schedulers.py @@ -20,7 +20,7 @@ from holoscan.core import ComponentSpec, Scheduler from holoscan.gxf import GXFScheduler from holoscan.resources import ManualClock, RealtimeClock -from holoscan.schedulers import GreedyScheduler, MultiThreadScheduler +from holoscan.schedulers import EventBasedScheduler, GreedyScheduler, MultiThreadScheduler class TestGreedyScheduler: @@ -135,3 +135,57 @@ def test_stop_on_deadlock_timeout(self, app): with pytest.raises(RuntimeError): # value will only be initialized by executor once app.run() is called scheduler.stop_on_deadlock_timeout # noqa: B018 + + +class TestEventBasedScheduler: + def test_default_init(self, app): + scheduler = EventBasedScheduler(app) + assert isinstance(scheduler, GXFScheduler) + assert isinstance(scheduler, Scheduler) + assert isinstance(scheduler.spec, ComponentSpec) + + @pytest.mark.parametrize("ClockClass", [ManualClock, RealtimeClock]) + def test_init_kwargs(self, app, ClockClass): # noqa: N803 + name = "event-based-scheduler" + scheduler = EventBasedScheduler( + app, + clock=ClockClass(app), + worker_thread_number=4, + stop_on_deadlock=True, + max_duration_ms=10000, + stop_on_deadlock_timeout=10, + name=name, + ) + assert isinstance(scheduler, GXFScheduler) + assert f"name: 
{name}" in repr(scheduler) + + def test_clock(self, app): + scheduler = EventBasedScheduler(app) + with pytest.raises(RuntimeError) as err: + # value will only be initialized by executor once app.run() is called + scheduler.clock # noqa: B018 + assert "'clock' is not set" in str(err.value) + + def test_worker_thread_number(self, app): + scheduler = EventBasedScheduler(app) + with pytest.raises(RuntimeError): + # value will only be initialized by executor once app.run() is called + scheduler.worker_thread_number # noqa: B018 + + def test_max_duration_ms(self, app): + scheduler = EventBasedScheduler(app) + + # max_duration_ms is optional and will report -1 if not set + assert scheduler.max_duration_ms == -1 + + def test_stop_on_deadlock(self, app): + scheduler = EventBasedScheduler(app) + with pytest.raises(RuntimeError): + # value will only be initialized by executor once app.run() is called + scheduler.stop_on_deadlock # noqa: B018 + + def test_stop_on_deadlock_timeout(self, app): + scheduler = EventBasedScheduler(app) + with pytest.raises(RuntimeError): + # value will only be initialized by executor once app.run() is called + scheduler.stop_on_deadlock_timeout # noqa: B018 diff --git a/run b/run index 68d8c5ca..c53130e5 100755 --- a/run +++ b/run @@ -199,6 +199,11 @@ get_host_gpu() { fi } +is_cross_gpu() { + [ "$GPU" != $(get_host_gpu) ] +} + + get_arch+gpu_str() { local suffix="" if [ "$ARCH" = "aarch64" ]; then @@ -485,12 +490,17 @@ Arguments: build() { # Prerequisite steps if [ "$DO_STANDALONE" != "true" ]; then - build_image + # Adjust final stage for igpu to support nvdla_compiler + local stage="build" + if [ "$GPU" = "igpu" ]; then + stage="build-igpu" + fi + build_image --target $stage fi # Default cuda architecture local default_cuda_archs="native" - if is_cross_compiling; then + if is_cross_compiling || is_cross_gpu; then default_cuda_archs="all" fi @@ -508,9 +518,13 @@ build() { while [[ $# -gt 0 ]]; do case $1 in --build_libtorch) - build_libtorch='OFF' 
+ build_libtorch_val=$(get_boolean "$2") + if [ "$build_libtorch_val" == "false" ]; then + build_libtorch='OFF' + fi reconfigure=true shift + shift ;; --cudaarchs) cuda_archs=$(get_cuda_archs "$2") @@ -551,17 +565,13 @@ build() { done # Error if requesting native cuda arch explicitly when cross-compiling - if [ "$cuda_archs" = "native" ] && is_cross_compiling; then + if [ "$cuda_archs" = "native" ] && (is_cross_compiling || is_cross_gpu); then fatal Y "Cross-compiling " W "(host: $(get_host_arch), target: $ARCH)" R " does not support 'native' cuda architecture." fi - # - Tegra (iGPU) limitation requires to mount the /usr/lib/aarch64-linux-gnu/tegra folder - # in the container for buildtime, which is handled by the NVIDIA runtime from the container - # toolkit. This is not ideal as it needs these drivers libraries at buildtime, which prevents - # cross-compilation to aarch64-igpu mode from aarch64-dgpu or x86_64 at this time. - # - native means we need the container to access the GPU for CMake to choose the architecture + # Native means we need the container to access the GPU for CMake to choose the architecture # to use for nvcc. - if [ "$GPU" = "igpu" ] || [ "$cuda_archs" = "native" ]; then + if [ "$cuda_archs" = "native" ]; then runtime=nvidia else runtime=runc @@ -585,8 +595,7 @@ build() { # Override the default entrypoint to hide the regular message from the base image # # --runtime ${runtime} - # Docker runtime. Should ideally be runc for build (no need for drivers) but tegra/igpu needs - # nvidia runtime due to limitation described above. + # Docker runtime. Should ideally be runc for build (no need for drivers). 
# # --platform $(get_platform_str) # Platform to build @@ -881,7 +890,7 @@ Arguments: launch() { # Prerequisite steps if [ "$DO_STANDALONE" != "true" ]; then - build_image + build_image --target build fi local container_mount="/workspace/holoscan-sdk" @@ -964,6 +973,9 @@ launch() { groups+=" --group-add $render_id" fi + # Add docker group to enable DooD + groups+=" --group-add $(get_group_id docker)" + # DOCKER PARAMETERS # # --rm @@ -987,6 +999,10 @@ launch() { # ${groups} (--group-add render; --group-add video) # Run the container as a non-root user. See details above for `groups` variable. # + # -v /var/run/docker.sock:/var/run/docker.sock + # Enable the use of Holoscan CLI with Docker outside of Docker (DooD) for packaging and + # running applications inside the container. + # # -v ${TOP}:/workspace/holoscan-sdk # Mount the source directory # @@ -1034,6 +1050,7 @@ launch() { -v /etc/group:/etc/group:ro \ -v /etc/passwd:/etc/passwd:ro \ ${groups} \ + -v /var/run/docker.sock:/var/run/docker.sock \ -v ${mount_point}:${container_mount} \ ${extra_args[@]} \ -w ${container_top}/${working_dir} \ @@ -1051,7 +1068,7 @@ launch() { --cap-add=CAP_SYS_PTRACE \ --ulimit memlock=-1 \ --ulimit stack=67108864 \ - $img -c "$run_cmd" + $img -c "export PATH=\$PATH:${container_top}/${working_dir}/bin; $run_cmd" # Append the Holoscan bin folder to the existing `PATH` before running } vscode_desc() { c_echo 'Launch VSCode in DevContainer @@ -1240,10 +1257,28 @@ run_html_builder() { } build_html_desc() { c_echo 'Generate HTML pages of the user guide + +Arguments: + --no-api - Skip C++/Python api docs ' } build_html() { - run_html_builder "make -j html" + local tags="" + + # Parse CLI arguments + while [[ $# -gt 0 ]]; do + case $1 in + --no-api) + tags="-t=noapi" + shift + ;; + *) + shift + ;; + esac + done + + run_html_builder "make -j html SPHINXOPTS=${tags}" } live_html_desc() { c_echo "Generate and serve HTML pages of the user guide @@ -1253,11 +1288,13 @@ Auto-reload on updates 
Arguments: -i|--ip - Specifics IP address to serve the HTML guide (default: $LIVE_HTML_IP) -p|--port - Specifics address port to serve the HTML guide (default: $LIVE_HTML_PORT) + --no-api - Skip C++/Python api docs " } live_html() { local ip=$LIVE_HTML_IP local port=$LIVE_HTML_PORT + local tags="-t=noexhale" # Parse CLI arguments while [[ $# -gt 0 ]]; do @@ -1272,13 +1309,17 @@ live_html() { shift shift ;; + --no-api) + tags="-t=noapi" + shift + ;; *) shift ;; esac done - run_html_builder "make -j livehtml SPHINXOPTS='-t=noexhale --host ${ip} --port ${port}'" + run_html_builder "make -j livehtml SPHINXOPTS='${tags} --host ${ip} --port ${port}'" } build_pdf_desc() { c_echo 'Generate PDF of the user guide diff --git a/runtime_docker/Dockerfile b/runtime_docker/Dockerfile index 7eb3ef18..2bd0124c 100644 --- a/runtime_docker/Dockerfile +++ b/runtime_docker/Dockerfile @@ -15,15 +15,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -############################################################ -# Versions -############################################################ -ARG ONNX_RUNTIME_VERSION=1.15.1 -ARG LIBTORCH_VERSION=2.1.0_23.08 -ARG TORCHVISION_VERSION=0.16.0_23.08 -ARG GRPC_VERSION=1.54.2 -ARG GXF_VERSION=3.1_20231009_b3d4e027 - ############################################################ # SDK Build Image, used to copy installs ############################################################ @@ -57,6 +48,7 @@ FROM base as runtime_cpp_no_mkl # libvulkan1 - for Vulkan apps (Holoviz) # libegl1 - to run headless Vulkan apps # libopenblas0 - libtorch dependency +# libnuma1 - libtorch dependency # libgomp1 - libtorch & CuPy dependency # libv4l2 - V4L2 operator dependency # libpng16-16 - torchvision dependency @@ -85,6 +77,7 @@ RUN apt-get update \ libvulkan1="1.3.204.1-*" \ libegl1="1.4.0-*" \ libopenblas0="0.3.20+ds-*" \ + libnuma1="2.0.14-*" \ libgomp1="12.3.0-*" \ libv4l-0="1.22.1-*" \ libpng16-16="1.6.37-*" \ @@ 
-106,25 +99,28 @@ RUN apt-get update \ && rm -f /usr/lib/*/libcudnn*train.so* # Copy ONNX Runtime -ARG ONNX_RUNTIME_VERSION +ARG ONNX_RUNTIME_VERSION=1.15.1_23.08 ENV ONNX_RUNTIME=/opt/onnxruntime/${ONNX_RUNTIME_VERSION}/lib COPY --from=build ${ONNX_RUNTIME} ${ONNX_RUNTIME} ENV CMAKE_PREFIX_PATH="${CMAKE_PREFIX_PATH}:${ONNX_RUNTIME}" # Copy Libtorch -ARG LIBTORCH_VERSION +ARG LIBTORCH_VERSION=2.1.0_23.08 ENV LIBTORCH=/opt/libtorch/${LIBTORCH_VERSION}/lib COPY --from=build ${LIBTORCH} ${LIBTORCH} ENV CMAKE_PREFIX_PATH="${CMAKE_PREFIX_PATH}:${LIBTORCH}" # Copy TorchVision -ARG TORCHVISION_VERSION +ARG TORCHVISION_VERSION=0.16.0_23.08 ENV TORCHVISION=/opt/torchvision/${TORCHVISION_VERSION}/lib COPY --from=build ${TORCHVISION} ${TORCHVISION} ENV CMAKE_PREFIX_PATH="${CMAKE_PREFIX_PATH}:${TORCHVISION}" # Copy inherited Torch library dependency that result from building with NGC PyTorch container COPY --from=build /opt/hpcx/ucc/lib/libucc.so.1 $INSTALL_PATH/lib/libucc.so.1 +COPY --from=build /opt/hpcx/ompi/lib/libmpi.so.40 $INSTALL_PATH/lib/libmpi.so.40 +COPY --from=build /opt/hpcx/ompi/lib/libopen-rte.so.40 $INSTALL_PATH/lib/libopen-rte.so.40 +COPY --from=build /opt/hpcx/ompi/lib/libopen-pal.so.40 $INSTALL_PATH/lib/libopen-pal.so.40 # Install GRDAPI (needed by Holoscan-core) ENV os=ubuntu2204 @@ -208,12 +204,14 @@ FROM runtime_cpp_pip_mkl as runtime_cpp_py # Install pip run dependencies # requirements -# cuda-cupy - dependency for holoscan-core +# pip - 20.3+ needed for PEP 600 +# cupy-cuda - dependency for holoscan python + examples # cloudpickle - dependency for distributed apps # python-on-whales - dependency for holoscan CLI # Jinja2 - dependency for holoscan CLI # packaging - dependency for holoscan CLI # pyyaml - dependency for holoscan CLI # requests - dependency for holoscan CLI +# psutil - dependency for holoscan CLI COPY python/requirements.txt /tmp/requirements.txt RUN python3 -m pip install --no-cache-dir -r /tmp/requirements.txt diff --git 
a/scripts/CMakeLists.txt b/scripts/CMakeLists.txt new file mode 100644 index 00000000..b7c03005 --- /dev/null +++ b/scripts/CMakeLists.txt @@ -0,0 +1,36 @@ +# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Install useful scripts for developers +install( + FILES + convert_gxf_entities_to_images.py + convert_gxf_entities_to_video.py + convert_video_to_gxf_entities.py + download_ngc_data + generate_extension_uuids.py + graph_surgeon.py + gxf_entity_codec.py + video_validation.py + DESTINATION "bin" + PERMISSIONS OWNER_EXECUTE OWNER_WRITE OWNER_READ + COMPONENT "holoscan-core" +) + +install( + FILES README.md + DESTINATION "bin" + COMPONENT "holoscan-core" +) \ No newline at end of file diff --git a/scripts/README.md b/scripts/README.md index 11558168..6d8b087f 100644 --- a/scripts/README.md +++ b/scripts/README.md @@ -1,37 +1,48 @@ # Holoscan utility scripts This folder includes the following scripts: -- [`convert_video_to_gxf_entities.py`](#convert_video_to_gxf_entitiespy) + +- [`convert_gxf_entities_to_images.py`](#convert_gxf_entities_to_imagespy) - [`convert_gxf_entities_to_video.py`](#convert_gxf_entities_to_videopy) +- [`convert_video_to_gxf_entities.py`](#convert_video_to_gxf_entitiespy) +- [`download_ngc_data`](#download_ngc_data) - [`generate_extension_uuids.py`](#generate_extension_uuidspy) - 
[`graph_surgeon.py`](#graph_surgeonpy) +- [`gxf_entity_codec.py`](#gxf_entity_codecpy) +- [`video_validation.py`](#video_validationpy) -## convert_video_to_gxf_entities.py +> Note: these will be included in the SDK installation at `/opt/nvidia/holoscan/bin` + +____ + +## convert_gxf_entities_to_images.py -Takes in a raw video feed and emits encoded gxf entities for playback with the `stream_playback` codelet. +Takes in the encoded GXF tensor files generated by the `video_stream_recorder` and export raw frames in .png files. ### Prerequisites ```sh -pip install numpy~=1.21 +pip install numpy~=1.21 pillow ``` This script depends on `gxf_entity_codec.py` which is located in the same folder. ### Usage -Example usage converting the output of a tool like `ffmpeg` to encoded gxf entities: +The command below will read the `racerx.gxf_entities` and `racerx.gxf_index` files from the existing racerx dataset under `data/racerx` (which is a 854x480 video with framerate 25fps and 3 channels) and convert them to PNG files. ```sh -ffmpeg -i video_1920x1080.avi -pix_fmt rgb24 -f rawvideo pipe:1 | python scripts/convert_video_to_gxf_entities.py --width 1920 --height 1080 --channels 3 --framerate 30 --basename my_video +python3 scripts/convert_gxf_entities_to_images.py --directory data/racerx --basename racerx ``` -Above command will create two files: `my_video.gxf_entities` and `my_video.gxf_index` from the `video_1920x1080.avi` video file. -Please use `--directory` to specify the directory where the files will be created. +Use `--outputdir` to specify the directory where the files will be created. +Use `--outputname` to specify a different output name than the default `tensor` prefix. + +____ ## convert_gxf_entities_to_video.py -Takes in the encoded gxf entities (`.gxf_entities` and `.gxf_index` files) and emit the raw video feed. +Takes in the encoded GXF tensor files generated by the `video_stream_recorder` (`.gxf_entities` and `.gxf_index`) and emit the raw video feed. 
### Prerequisites @@ -43,40 +54,73 @@ This script depends on `gxf_entity_codec.py` which is located in the same folder ### Usage -Example usage reading encoded gxf entities and converting them to a video file: +The command below will read the `racerx.gxf_entities` and `racerx.gxf_index` files from the existing racerx dataset under `data/racerx` (which is a 854x480 video with framerate 25fps and 3 channels) and use `ffmpeg` to encode the emitted video stream to a video file, `converted_video.mp4`: ```sh -python scripts/convert_gxf_entities_to_video.py --base_name my_video | ffmpeg -f rawvideo -pix_fmt rgb24 -s 1920x1080 -r 30 -i - -f mp4 -vcodec libx264 -pix_fmt yuv420p -r 30 -y converted_video.mp4 +python3 scripts/convert_gxf_entities_to_video.py --directory data/racerx --basename racerx | ffmpeg -f rawvideo -pix_fmt rgb24 -s 854x480 -r 25 -i - -f mp4 -vcodec libx264 -pix_fmt yuv420p -r 25 -y racerx-medium.mp4 ``` -Above command will read the `my_video.gxf_entities` and `my_video.gxf_index` files and convert them to a video file `converted_video.mp4`. +____ + +## convert_video_to_gxf_entities.py +Takes in a raw video feed and emits encoded GXF tensor files entities for playback with the `video_stream_replayer` operator. The tensors will be saved with metadata indicating that the data should be copied to the GPU on read. -With the existing racerx dataset under `data/racerx` (which is a 854x480 video with framerate 25fps and 3 channels), we can run the following command to convert the gxf entities to a video file: +### Prerequisites ```sh -python scripts/convert_gxf_entities_to_video.py --directory data/racerx --basename racerx | ffmpeg -f rawvideo -pix_fmt rgb24 -s 854x480 -r 25 -i - -f mp4 -vcodec libx264 -pix_fmt yuv420p -r 25 -y racerx-medium.mp4 +pip install numpy~=1.21 ``` -The output video (`racerx-medium.mp4`) can be encoded again into gxf entities with the following command: +This script depends on `gxf_entity_codec.py` which is located in the same folder. 
+ +### Usage + +Example usage converting the output of a tool like `ffmpeg` to encoded GXF tensors: ```sh -ffmpeg -i racerx-medium.mp4 -pix_fmt rgb24 -f rawvideo pipe:1 | python scripts/convert_video_to_gxf_entities.py --width 854 --height 480 --channels 3 --framerate 25 +ffmpeg -i video_1920x1080.avi -pix_fmt rgb24 -f rawvideo pipe:1 | python3 scripts/convert_video_to_gxf_entities.py --width 1920 --height 1080 --channels 3 --framerate 30 +``` + +Above command will create two files: `tensor.gxf_entities` and `tensor.gxf_index` from the `video_1920x1080.avi` video file. +Use `--directory` to specify the directory where the files will be created. +Use `--basename` to specify a different output name than the default `tensor` + +____ + +## download_ngc_data + +Download and unzip datasets from NGC. This can optionally run a script to convert video files to GXF tensor files compatible with the `video_stream_replayer` operator. + +## Prerequisites + +- `wget` or `curl` to download datasets using the format `https://api.ngc.nvidia.com/v2/resources/nvidia/[team]//versions//zip` +- [NGC CLI](https://ngc.nvidia.com/setup/installers/cli) to download datasets using the `/[team]/:` format (useful if the above fails or for private registries) -# tensor.gxf_entities and tensor.gxf_index will be created ('tensor' is the default basename) -ls tensor* -# tensor.gxf_entities tensor.gxf_index +## Usage + +The example below will download and unzip the RacerX video from NGC: + +```sh +./scripts/download_ngc_data --url https://api.ngc.nvidia.com/v2/resources/nvidia/clara-holoscan/holoscan_racerx_video/versions/20231009/zip ``` +Use `--help` for more options such as output dir/name or conversion to GXF tensor files. + +____ + ## generate_extension_uuids.py Provides a set of UUIDs to be used by `GXF_EXT_FACTORY_SET_INFO` and `GXF_EXT_FACTORY_ADD` to declare a new GXF extension. 
``` sh -python scripts/generate_extension_uuids.py +python3 scripts/generate_extension_uuids.py ``` +____ + ## graph_surgeon.py + When converting a model from PyTorch to ONNX, it is likely that the input of the model is in the form NCHW (batch, channels, height, width), and needs to be converted to NHWC (batch, height, width, channels). This script performs the conversion and generates a modified model. Note that this script modifies the names of the output by appending `_old` to the input and output. @@ -85,3 +129,27 @@ Note that this script modifies the names of the output by appending `_old` to th ```bash python3 scripts/graph_surgeon.py input_model.onnx output_model.onnx ``` + +____ + +## gxf_entity_codec.py + +Utility classes used by `convert_gxf_entities_to_images.py`, `convert_gxf_entities_to_video.py` and `convert_video_to_gxf_entities.py`. + +____ + +## video_validation.py + +This script converts GXF tensor files to frame images to compare each frame with a set of baselines. The difference between them is computed using SSD (Sum of Square difference) for each pixel and an average is reported for a frame. + +### Prerequisites + +```sh +pip install numpy~=1.21 pillow +``` + +This script depends on `convert_gxf_entity_to_images.py` which is located in the same folder. + +### Usage + +See `python3 ./scripts/video_validation.py --help` diff --git a/scripts/convert_gxf_entities_to_images.py b/scripts/convert_gxf_entities_to_images.py index 7032681d..b850ee29 100644 --- a/scripts/convert_gxf_entities_to_images.py +++ b/scripts/convert_gxf_entities_to_images.py @@ -1,18 +1,19 @@ +#!/usr/bin/env python3 """ - SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. - SPDX-License-Identifier: Apache-2.0 +SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+SPDX-License-Identifier: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 +http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. """ # noqa: E501 import argparse @@ -49,8 +50,8 @@ def convert_gxf_entity_to_images(entity_dir, entity_basename, output_dir, output def main(): parser = argparse.ArgumentParser( description=( - "Command line utility for reading raw video frames in GXF Tensors for " - "stream recording." + "Command line utility for exporting raw video frames from GXF Tensors files " + "as PNG files." ) ) parser.add_argument("--basename", default="tensor", help="Basename for gxf entities to read") diff --git a/scripts/convert_gxf_entities_to_video.py b/scripts/convert_gxf_entities_to_video.py index f5808e0b..e7701e81 100644 --- a/scripts/convert_gxf_entities_to_video.py +++ b/scripts/convert_gxf_entities_to_video.py @@ -1,18 +1,19 @@ +#!/usr/bin/env python3 """ - SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
- SPDX-License-Identifier: Apache-2.0 +SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +SPDX-License-Identifier: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 +http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. """ # noqa: E501 import argparse diff --git a/scripts/convert_video_to_gxf_entities.py b/scripts/convert_video_to_gxf_entities.py index ac439308..aebc8a5b 100644 --- a/scripts/convert_video_to_gxf_entities.py +++ b/scripts/convert_video_to_gxf_entities.py @@ -1,18 +1,19 @@ +#!/usr/bin/env python3 """ - SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. - SPDX-License-Identifier: Apache-2.0 +SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +SPDX-License-Identifier: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 +http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. """ # noqa: E501 import argparse diff --git a/scripts/download_example_data b/scripts/download_example_data index ceeff815..ae64c1f3 100644 --- a/scripts/download_example_data +++ b/scripts/download_example_data @@ -25,19 +25,8 @@ mkdir ${DATA_DIR} racerx_version="20231009" racerx_md5="86cd7e5477bb9eaa0cfc0547912d77b2" -${SCRIPT_DIR}/../lib/cmake/holoscan/download_ngc_data \ +${SCRIPT_DIR}/../bin/download_ngc_data \ --url https://api.ngc.nvidia.com/v2/resources/nvidia/clara-holoscan/holoscan_racerx_video/versions/${racerx_version}/zip \ --download_dir ${DATA_DIR} \ --download_name racerx \ --md5 ${racerx_md5} - - -# Download the endoscopy sample data -endoscopy_version="20230128" -endoscopy_md5="9732a54944589f7ca4f1337e8adf0838" - -${SCRIPT_DIR}/../lib/cmake/holoscan/download_ngc_data \ - --url https://api.ngc.nvidia.com/v2/resources/nvidia/clara-holoscan/holoscan_endoscopy_sample_data/versions/${endoscopy_version}/zip \ - --download_dir ${DATA_DIR} \ - --download_name endoscopy \ - --md5 ${endoscopy_md5} diff --git a/scripts/generate_extension_uuids.py 
b/scripts/generate_extension_uuids.py index 8571e708..4defa314 100644 --- a/scripts/generate_extension_uuids.py +++ b/scripts/generate_extension_uuids.py @@ -1,18 +1,19 @@ +#!/usr/bin/env python3 """ - SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. - SPDX-License-Identifier: Apache-2.0 +SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +SPDX-License-Identifier: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 +http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. """ # noqa: E501 from uuid import uuid4 diff --git a/scripts/graph_surgeon.py b/scripts/graph_surgeon.py index 298cc83d..3e419786 100644 --- a/scripts/graph_surgeon.py +++ b/scripts/graph_surgeon.py @@ -1,18 +1,19 @@ +#!/usr/bin/env python3 """ - SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
- SPDX-License-Identifier: Apache-2.0 +SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +SPDX-License-Identifier: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 +http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. """ # noqa: E501 import sys diff --git a/scripts/gxf_entity_codec.py b/scripts/gxf_entity_codec.py index 7da92b38..ebb3ba34 100644 --- a/scripts/gxf_entity_codec.py +++ b/scripts/gxf_entity_codec.py @@ -1,18 +1,19 @@ +#!/usr/bin/env python3 """ - SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. - SPDX-License-Identifier: Apache-2.0 +SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +SPDX-License-Identifier: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 +http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. """ # noqa: E501 import os diff --git a/scripts/video_validation.py b/scripts/video_validation.py index a4fef1e9..e714ea64 100755 --- a/scripts/video_validation.py +++ b/scripts/video_validation.py @@ -1,18 +1,19 @@ +#!/usr/bin/env python3 """ - SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. - SPDX-License-Identifier: Apache-2.0 +SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +SPDX-License-Identifier: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 +http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. """ # noqa: E501 import argparse diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 73f9bf05..1ba73e5b 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
# SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -56,6 +56,22 @@ function(add_holoscan_operator operator) target_link_libraries(${target} PRIVATE holoscan_security_flags) endfunction() +# ############################################################################## +# # Add library holoscan::spdlog_logger +# ############################################################################## +add_holoscan_library(spdlog_logger common/logger/spdlog_logger.cpp) +target_link_libraries(spdlog_logger + PUBLIC + fmt::fmt-header-only + GXF::logger + PRIVATE + spdlog::spdlog_header_only +) +target_compile_definitions(spdlog_logger + INTERFACE + FMT_HEADER_ONLY=1 +) + # ############################################################################## # # Add library holoscan::logger # ############################################################################## @@ -63,8 +79,7 @@ add_holoscan_library(logger logger/logger.cpp) target_link_libraries(logger PUBLIC fmt::fmt-header-only - PRIVATE - spdlog::spdlog_header_only + spdlog_logger ) target_compile_definitions(logger INTERFACE @@ -123,6 +138,7 @@ add_holoscan_library(core core/fragment_scheduler.cpp core/graphs/flow_graph.cpp core/gxf/entity.cpp + core/gxf/gxf_component.cpp core/gxf/gxf_condition.cpp core/gxf/gxf_execution_context.cpp core/gxf/gxf_extension_manager.cpp @@ -131,7 +147,7 @@ add_holoscan_library(core core/gxf/gxf_operator.cpp core/gxf/gxf_resource.cpp core/gxf/gxf_scheduler.cpp - core/gxf/gxf_tensor.cpp + core/gxf/gxf_utils.cpp core/gxf/gxf_wrapper.cpp core/io_spec.cpp core/messagelabel.cpp @@ -154,6 +170,7 @@ add_holoscan_library(core core/resources/gxf/receiver.cpp core/resources/gxf/serialization_buffer.cpp core/resources/gxf/std_component_serializer.cpp + core/resources/gxf/std_entity_serializer.cpp core/resources/gxf/transmitter.cpp core/resources/gxf/ucx_component_serializer.cpp core/resources/gxf/ucx_entity_serializer.cpp @@ -161,9 +178,9 @@ 
add_holoscan_library(core core/resources/gxf/ucx_receiver.cpp core/resources/gxf/ucx_serialization_buffer.cpp core/resources/gxf/ucx_transmitter.cpp - core/resources/gxf/video_stream_serializer.cpp core/scheduler.cpp core/schedulers/greedy_fragment_allocation.cpp + core/schedulers/gxf/event_based_scheduler.cpp core/schedulers/gxf/greedy_scheduler.cpp core/schedulers/gxf/multithread_scheduler.cpp core/services/app_driver/client.cpp @@ -181,6 +198,7 @@ add_holoscan_library(core core/system/network_utils.cpp core/system/system_resource_manager.cpp core/system/topology.cpp + utils/cuda_stream_handler.cpp # keep here instead of separate lib for backwards compatibility with 1.0 ${CORE_GRPC_SRCS} ) @@ -190,10 +208,12 @@ target_link_libraries(core holoscan::logger CUDA::cudart fmt::fmt-header-only + GXF::app GXF::core GXF::cuda GXF::serialization # for nvidia::gxf::Endpoint GXF::std + GXF::ucx yaml-cpp PRIVATE hwloc @@ -207,6 +227,7 @@ target_include_directories(core $ $ $ + $ $ ) diff --git a/src/common/logger/spdlog_logger.cpp b/src/common/logger/spdlog_logger.cpp new file mode 100644 index 00000000..9f34526d --- /dev/null +++ b/src/common/logger/spdlog_logger.cpp @@ -0,0 +1,185 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "common/logger/spdlog_logger.hpp" + +#include +#include +#include + +#include +#include +#include +#include +#include + +namespace spdlog { + +namespace sinks { + +template +class ansicolor_file_sink : public ansicolor_sink { + public: + explicit ansicolor_file_sink(FILE* file, color_mode mode = color_mode::automatic) + : ansicolor_sink(file, mode) {} +}; + +} // namespace sinks + +static inline std::shared_ptr create_file_logger(std::string name, FILE* file) { + // Do not register to spdlog registry + spdlog::details::registry::instance().set_automatic_registration(false); + + return spdlog::synchronous_factory::template create< + spdlog::sinks::ansicolor_file_sink>( + name, file, spdlog::color_mode::automatic); +} + +} // namespace spdlog + +namespace nvidia { + +/// Namespace for the NVIDIA logger functionality. +namespace logger { + +static void ensure_log_level(int level) { + if (level < 0 || level > spdlog::level::n_levels - 2) { + std::fprintf(stderr, "SpdlogLogger: Invalid log level %d\n", level); + std::abort(); + } +} + +/// Default spdlog Logger implementation. 
+class DefaultSpdlogLogger : public ILogger { + public: + DefaultSpdlogLogger(std::string& name, std::string& pattern, int& level, + std::vector& sinks); + void log(const char* file, int line, const char* name, int level, const char* message, + void* arg = nullptr) override; + + void pattern(const char* pattern) override; + const char* pattern() const override; + + void level(int level) override; + int level() const override; + + void redirect(int level, void* output) override; + void* redirect(int level) const override; + + protected: + std::string& name_; ///< logger name + std::string& pattern_; ///< log pattern + int& level_; ///< log level + std::vector& sinks_; ///< log sinks + std::shared_ptr loggers_[6]; ///< spdlog loggers +}; + +std::string& SpdlogLogger::pattern_string() { + return pattern_; +} + +SpdlogLogger::SpdlogLogger(const char* name, const std::shared_ptr& logger, + const LogFunction& func) + : Logger(logger, func), name_(name) { + if (logger_ == nullptr && func_ == nullptr) { + logger_ = std::make_shared(name_, pattern_, level_, sinks_); + } + + // Set default sinks (stderr) + for (int level = spdlog::level::n_levels - 2; level >= 0; --level) { redirect(level, stderr); } + + // Set default log level and pattern + level(spdlog::level::info); + pattern("[%^%l%$] [%s:%#] %v"); +} + +DefaultSpdlogLogger::DefaultSpdlogLogger(std::string& name, std::string& pattern, int& level, + std::vector& sinks) + : name_(name), pattern_(pattern), level_(level), sinks_(sinks) {} + +void DefaultSpdlogLogger::log(const char* file, int line, const char* name, int level, + const char* log, void*) { + auto logger = std::static_pointer_cast(loggers_[level]); + if (logger) { + if (file != nullptr) { + logger->log( + spdlog::source_loc{file, line, name}, static_cast(level), log); + } else { + logger->log(static_cast(level), log); + } + } +} + +void DefaultSpdlogLogger::pattern(const char* pattern) { + std::shared_ptr old_logger; + for (int level = spdlog::level::n_levels 
- 2; level >= 0; --level) { + auto logger = std::static_pointer_cast(loggers_[level]); + if (old_logger != logger) { + old_logger = logger; + logger->set_pattern(pattern); + } + } +} + +const char* DefaultSpdlogLogger::pattern() const { + return pattern_.c_str(); +} + +void DefaultSpdlogLogger::level(int level) { + std::shared_ptr old_logger; + for (int lv = spdlog::level::n_levels - 2; lv >= 0; --lv) { + auto logger = std::static_pointer_cast(loggers_[lv]); + if (old_logger != logger) { + old_logger = logger; + logger->set_level(static_cast(level)); + } + } +} + +int DefaultSpdlogLogger::level() const { + return level_; +} + +void DefaultSpdlogLogger::redirect(int level, void* output) { + ensure_log_level(level); + + bool logger_exists = false; + std::shared_ptr logger; + for (int lv = spdlog::level::n_levels - 2; lv >= 0; --lv) { + if (sinks_[lv] == output) { + logger_exists = true; + logger = std::static_pointer_cast(loggers_[lv]); + break; + } + } + + if (!logger_exists && output != nullptr) { + logger = spdlog::create_file_logger(name_, reinterpret_cast(output)); + // Set pattern and level for the new logger + logger->set_pattern(pattern_); + logger->set_level(static_cast(level_)); + } + loggers_[level] = logger; +} + +void* DefaultSpdlogLogger::redirect(int level) const { + return sinks_[level]; +} + +} // namespace logger + +} // namespace nvidia diff --git a/src/core/app_driver.cpp b/src/core/app_driver.cpp index 81cb3b0b..c2f8495d 100644 --- a/src/core/app_driver.cpp +++ b/src/core/app_driver.cpp @@ -41,6 +41,7 @@ #include "holoscan/core/gxf/gxf_resource.hpp" #include "holoscan/core/network_contexts/gxf/ucx_context.hpp" #include "holoscan/core/schedulers/greedy_fragment_allocation.hpp" +#include "holoscan/core/schedulers/gxf/event_based_scheduler.hpp" #include "holoscan/core/schedulers/gxf/greedy_scheduler.hpp" #include "holoscan/core/schedulers/gxf/multithread_scheduler.hpp" #include "holoscan/core/services/app_driver/server.hpp" @@ -48,7 +49,7 @@ 
#include "holoscan/core/services/common/network_constants.hpp" #include "holoscan/core/signal_handler.hpp" #include "holoscan/core/system/network_utils.hpp" -#include +#include "holoscan/core/system/system_resource_manager.hpp" #include "services/app_worker/client.hpp" #include "holoscan/logger/logger.hpp" @@ -191,7 +192,7 @@ void AppDriver::run() { if (is_local_) { auto target_fragments = fragment_graph.get_nodes(); auto future = launch_fragments_async(target_fragments); - future.wait(); + future.get(); return; } else { if (need_worker_) { @@ -294,17 +295,16 @@ std::future AppDriver::run_async() { // If there are no separate fragments to run, we run the entire graph. if (fragment_graph.is_empty()) { if (driver_server_) { - auto executor_future = app_->executor().run_async(app_->graph()); auto future = - std::async(std::launch::async, - [future = std::move(executor_future), &driver_server = driver_server_]() { - future.wait(); - if (driver_server) { - driver_server->stop(); - driver_server->wait(); - driver_server = nullptr; - } - }); + std::async(std::launch::async, [app = app_, &driver_server = driver_server_]() { + auto executor_future = app->executor().run_async(app->graph()); + executor_future.get(); + if (driver_server) { + driver_server->stop(); + driver_server->wait(); + driver_server = nullptr; + } + }); return future; } // Application's graph is already composed in check_configuration() so we can run it directly. 
@@ -1261,28 +1261,35 @@ std::future AppDriver::launch_fragments_async( futures.push_back(std::make_pair(fragment, fragment->executor().run_async(fragment->graph()))); } - auto future = std::async(std::launch::async, - [futures = std::move(futures), &driver_server = driver_server_]() { - bool any_fragment_finished = false; - while (!any_fragment_finished) { - // Check every 500 ms - std::this_thread::sleep_for(std::chrono::milliseconds(500)); - for (auto& [fragment, future_obj] : futures) { - const auto status = future_obj.wait_for(std::chrono::seconds(0)); - if (status == std::future_status::ready) { - any_fragment_finished = true; - break; - } - } - } - - // Stop driver server - if (driver_server) { - driver_server->stop(); - driver_server->wait(); - driver_server = nullptr; - } - }); + auto future = + std::async(std::launch::async, + [futures = std::move(futures), app = app_, &driver_server = driver_server_]() { + // Wait until all fragments have finished + for (auto& [fragment, future_obj] : futures) { future_obj.wait(); } + + // Stop driver server + if (driver_server) { + driver_server->stop(); + driver_server->wait(); + driver_server = nullptr; + } + + // Set the exception if any of the fragments raises an exception + for (auto& [fragment, future_obj] : futures) { + try { + // FIXME: how can I call get() without const_cast? + const_cast&>(future_obj).get(); + } catch (const std::exception&) { + // Store the current exception + app->executor().exception(std::current_exception()); + break; + } + } + + // Rethrow the exception if any + auto& stored_exception = app->executor().exception(); + if (stored_exception) { std::rethrow_exception(stored_exception); } + }); return future; } diff --git a/src/core/application.cpp b/src/core/application.cpp index bee5696b..cbfd16bf 100644 --- a/src/core/application.cpp +++ b/src/core/application.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -30,6 +30,7 @@ #include "holoscan/core/executor.hpp" #include "holoscan/core/graphs/flow_graph.hpp" #include "holoscan/core/operator.hpp" +#include "holoscan/core/schedulers/gxf/event_based_scheduler.hpp" #include "holoscan/core/schedulers/gxf/greedy_scheduler.hpp" #include "holoscan/core/schedulers/gxf/multithread_scheduler.hpp" @@ -104,7 +105,12 @@ namespace holoscan { Application::Application(const std::vector& argv) : Fragment(), argv_(argv) { // Set the log level from the environment variable if it exists. // Or, set the default log level to INFO if it hasn't been set by the user. - if (!Logger::log_level_set_by_user) { holoscan::set_log_level(LogLevel::INFO); } + if (!Logger::log_level_set_by_user) { + holoscan::set_log_level(LogLevel::INFO); + } else { + // Allow log level to be reset from the environment variable if overridden. + holoscan::set_log_level(holoscan::log_level()); + } // Set the log format from the environment variable if it exists. // Or, set the default log format depending on the log level if it hasn't been set by the user. 
holoscan::set_log_pattern(); @@ -242,8 +248,11 @@ expected Application::get_distributed_app_scheduler_en if (env_value != nullptr && env_value[0] != '\0') { if (std::strcmp(env_value, "greedy") == 0) { return SchedulerType::kGreedy; - } else if (std::strcmp(env_value, "multithread") == 0) { + } else if (std::strcmp(env_value, "multithread") == 0 || + std::strcmp(env_value, "multi_thread") == 0) { return SchedulerType::kMultiThread; + } else if (std::strcmp(env_value, "event_based") == 0) { + return SchedulerType::kEventBased; } else { HOLOSCAN_LOG_ERROR("Invalid value for HOLOSCAN_DISTRIBUTED_APP_SCHEDULER: {}", env_value); return make_unexpected(ErrorCode::kInvalidArgument); @@ -359,6 +368,8 @@ void Application::set_scheduler_for_fragments(std::vector& tar // Check if holoscan::MultiThreadScheduler is already set to the fragment. // If it is, then we should use the default scheduler. // Otherwise, we should set new multi-thread scheduler. + + // TODO: consider use of event-based scheduler? 
auto multi_thread_scheduler = std::dynamic_pointer_cast(scheduler); if (!multi_thread_scheduler) { scheduler_setting = SchedulerType::kMultiThread; } @@ -396,6 +407,21 @@ void Application::set_scheduler_for_fragments(std::vector& tar scheduler->add_arg(holoscan::Arg("check_recession_period_ms", check_recession_period_ms)); scheduler->add_arg(holoscan::Arg("worker_thread_number", worker_thread_number)); } break; + case SchedulerType::kEventBased: { + scheduler = + fragment->make_scheduler("event-based-scheduler"); + unsigned int num_processors = std::thread::hardware_concurrency(); + // TODO: check number of threads setting needed for event-based scheduler + // Currently, we use the number of operators in the fragment as the number of worker threads + int64_t worker_thread_number = + std::min(fragment->graph().get_nodes().size(), static_cast(num_processors)); + scheduler->add_arg(holoscan::Arg("stop_on_deadlock", stop_on_deadlock)); + scheduler->add_arg(holoscan::Arg("stop_on_deadlock_timeout", stop_on_deadlock_timeout)); + if (max_duration_ms >= 0) { + scheduler->add_arg(holoscan::Arg("max_duration_ms", max_duration_ms)); + } + scheduler->add_arg(holoscan::Arg("worker_thread_number", worker_thread_number)); + } break; } // Override arguments from environment variables diff --git a/src/core/component.cpp b/src/core/component.cpp index 33a85ad3..23fd7079 100644 --- a/src/core/component.cpp +++ b/src/core/component.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -16,13 +16,16 @@ */ #include "holoscan/core/component.hpp" +#include #include +#include +#include #include "holoscan/core/fragment.hpp" namespace holoscan { -YAML::Node Component::to_yaml_node() const { +YAML::Node ComponentBase::to_yaml_node() const { YAML::Node node; node["id"] = id_; node["name"] = name_; @@ -32,16 +35,102 @@ YAML::Node Component::to_yaml_node() const { node["fragment"] = YAML::Null; } node["args"] = YAML::Node(YAML::NodeType::Sequence); - for (const Arg& arg : args_) { - node["args"].push_back(arg.to_yaml_node()); - } + for (const Arg& arg : args_) { node["args"].push_back(arg.to_yaml_node()); } return node; } -std::string Component::description() const { +std::string ComponentBase::description() const { YAML::Emitter emitter; emitter << to_yaml_node(); return emitter.c_str(); } +void ComponentBase::update_params_from_args( + std::unordered_map& params) { + // Set arguments + for (auto& arg : args_) { + // Find if arg.name() is in spec_->params() + if (params.find(arg.name()) == params.end()) { + HOLOSCAN_LOG_WARN("Arg '{}' not found in spec_.params()", arg.name()); + continue; + } + + // Set arg.value() to spec_->params()[arg.name()] + auto& param_wrap = params[arg.name()]; + + HOLOSCAN_LOG_TRACE("GXFOperator '{}':: setting argument '{}'", name_, arg.name()); + + ArgumentSetter::set_param(param_wrap, arg); + } +} + +void Component::update_params_from_args() { + update_params_from_args(spec_->params()); +} + +void ComponentBase::reset_graph_entities() { + HOLOSCAN_LOG_TRACE("Component '{}'::reset_graph_entities", name_); + // Note: Should NOT also be necessary to reset graph entities in spec_->params() as the + // params are filled in via args. 
+ for (auto& arg : args_) { + auto arg_type = arg.arg_type(); + auto element_type = arg_type.element_type(); + if ((element_type != ArgElementType::kResource) && + (element_type != ArgElementType::kCondition)) { + continue; + } + auto container_type = arg_type.container_type(); + if ((container_type != ArgContainerType::kNative) && + (container_type != ArgContainerType::kVector)) { + HOLOSCAN_LOG_ERROR( + "Error setting GXF entity for argument '{}': Operator currently only supports scalar and " + "vector containers for arguments of Condition or Resource type.", + arg.name()); + continue; + } + if (element_type == ArgElementType::kCondition) { + switch (container_type) { + case ArgContainerType::kNative: { + auto condition = std::any_cast>(arg.value()); + auto gxf_condition = std::dynamic_pointer_cast(condition); + if (gxf_condition) { gxf_condition->reset_gxf_graph_entity(); } + } break; + case ArgContainerType::kVector: { + auto conditions = std::any_cast>>(arg.value()); + for (auto& condition : conditions) { + auto gxf_condition = std::dynamic_pointer_cast(condition); + if (gxf_condition) { gxf_condition->reset_gxf_graph_entity(); } + } + } break; + default: + break; + } + } else if (element_type == ArgElementType::kResource) { + // Only GXF resources will use the GraphEntity + switch (container_type) { + case ArgContainerType::kNative: { + auto resource = std::any_cast>(arg.value()); + auto gxf_resource = std::dynamic_pointer_cast(resource); + if (gxf_resource) { + gxf_resource->reset_gxf_graph_entity(); + continue; + } + } break; + case ArgContainerType::kVector: { + auto resources = std::any_cast>>(arg.value()); + for (auto& resource : resources) { + auto gxf_resource = std::dynamic_pointer_cast(resource); + if (gxf_resource) { + gxf_resource->reset_gxf_graph_entity(); + continue; + } + } + } break; + default: + break; + } + } + } +} + } // namespace holoscan diff --git a/src/core/conditions/gxf/asynchronous.cpp b/src/core/conditions/gxf/asynchronous.cpp index 
27b4acdc..61d65802 100644 --- a/src/core/conditions/gxf/asynchronous.cpp +++ b/src/core/conditions/gxf/asynchronous.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -33,25 +33,23 @@ AsynchronousCondition::AsynchronousCondition(const std::string& name, } } +nvidia::gxf::AsynchronousSchedulingTerm* AsynchronousCondition::get() const { + return static_cast(gxf_cptr_); +} + void AsynchronousCondition::setup(ComponentSpec& spec) { (void)spec; // no parameters to set } void AsynchronousCondition::event_state(AsynchronousEventState state) { - if (gxf_cptr_) { - nvidia::gxf::AsynchronousSchedulingTerm* asynchronous_scheduling_term = - static_cast(gxf_cptr_); - asynchronous_scheduling_term->setEventState(state); - } + auto asynchronous_scheduling_term = get(); + if (asynchronous_scheduling_term) { asynchronous_scheduling_term->setEventState(state); } event_state_ = state; } AsynchronousEventState AsynchronousCondition::event_state() const { - if (gxf_cptr_) { - nvidia::gxf::AsynchronousSchedulingTerm* asynchronous_scheduling_term = - static_cast(gxf_cptr_); - return asynchronous_scheduling_term->getEventState(); - } + auto asynchronous_scheduling_term = get(); + if (asynchronous_scheduling_term) { return asynchronous_scheduling_term->getEventState(); } return event_state_; } diff --git a/src/core/conditions/gxf/boolean.cpp b/src/core/conditions/gxf/boolean.cpp index 14d2ba47..343ab2de 100644 --- a/src/core/conditions/gxf/boolean.cpp +++ b/src/core/conditions/gxf/boolean.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. 
All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -31,6 +31,17 @@ BooleanCondition::BooleanCondition(const std::string& name, enable_tick_ = term->checkTickEnabled(); } +nvidia::gxf::BooleanSchedulingTerm* BooleanCondition::get() const { + // Could use Component APIs, but keep gxf_cptr_ for now to handle any case + // where gxf_graph_entity_ is null. + + // return gxf_component_.is_null() + // ? nullptr + // : dynamic_cast(gxf_component_.get()); + + return static_cast(gxf_cptr_); +} + void BooleanCondition::setup(ComponentSpec& spec) { spec.param(enable_tick_, "enable_tick", @@ -40,30 +51,20 @@ void BooleanCondition::setup(ComponentSpec& spec) { } void BooleanCondition::enable_tick() { - if (gxf_cptr_) { - nvidia::gxf::BooleanSchedulingTerm* boolean_condition = - static_cast(gxf_cptr_); - boolean_condition->enable_tick(); - } + auto boolean_condition = get(); + if (boolean_condition) { boolean_condition->enable_tick(); } enable_tick_ = true; } void BooleanCondition::disable_tick() { - if (gxf_cptr_) { - nvidia::gxf::BooleanSchedulingTerm* boolean_condition = - static_cast(gxf_cptr_); - boolean_condition->disable_tick(); - } + auto boolean_condition = get(); + if (boolean_condition) { boolean_condition->disable_tick(); } enable_tick_ = false; } bool BooleanCondition::check_tick_enabled() { - if (gxf_cptr_) { - nvidia::gxf::BooleanSchedulingTerm* boolean_condition = - static_cast(gxf_cptr_); - enable_tick_ = boolean_condition->checkTickEnabled(); - } - return enable_tick_.get(); + auto boolean_condition = get(); + return boolean_condition ? 
boolean_condition->checkTickEnabled() : enable_tick_.get(); } } // namespace holoscan diff --git a/src/core/conditions/gxf/message_available.cpp b/src/core/conditions/gxf/message_available.cpp index 7a8af0ae..c1ae26fd 100644 --- a/src/core/conditions/gxf/message_available.cpp +++ b/src/core/conditions/gxf/message_available.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -43,4 +43,20 @@ void MessageAvailableCondition::setup(ComponentSpec& spec) { ParameterFlag::kOptional); } +nvidia::gxf::MessageAvailableSchedulingTerm* MessageAvailableCondition::get() const { + return static_cast(gxf_cptr_); +} + +void MessageAvailableCondition::min_size(uint64_t min_size) { + auto cond = get(); + if (cond) { cond->setMinSize(min_size); } + min_size_ = min_size; +} + +void MessageAvailableCondition::front_stage_max_size(size_t front_stage_max_size) { + auto cond = get(); + if (cond) { cond->setFrontStageMaxSize(front_stage_max_size); } + front_stage_max_size_ = front_stage_max_size; +} + } // namespace holoscan diff --git a/src/core/conditions/gxf/periodic.cpp b/src/core/conditions/gxf/periodic.cpp index 253f4ba1..0ebafdfc 100644 --- a/src/core/conditions/gxf/periodic.cpp +++ b/src/core/conditions/gxf/periodic.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -24,6 +24,10 @@ namespace holoscan { +nvidia::gxf::PeriodicSchedulingTerm* PeriodicCondition::get() const { + return static_cast(gxf_cptr_); +} + PeriodicCondition::PeriodicCondition(int64_t recess_period_ns) { recess_period_ = std::to_string(recess_period_ns); recess_period_ns_ = recess_period_ns; @@ -52,19 +56,16 @@ void PeriodicCondition::setup(ComponentSpec& spec) { void PeriodicCondition::recess_period(int64_t recess_period_ns) { std::string recess_period = std::to_string(recess_period_ns); - if (gxf_cid_) { - HOLOSCAN_GXF_CALL_FATAL( - GxfParameterSetStr(gxf_context_, gxf_cid_, "recess_period", recess_period.c_str())); - } + auto periodic = get(); + if (periodic) { periodic->setParameter("recess_period", recess_period); } recess_period_ = recess_period; recess_period_ns_ = recess_period_ns; } int64_t PeriodicCondition::recess_period_ns() { - if (gxf_cptr_) { - nvidia::gxf::PeriodicSchedulingTerm* periodic_scheduling_term = - static_cast(gxf_cptr_); - auto recess_period_ns = periodic_scheduling_term->recess_period_ns(); + auto periodic = get(); + if (periodic) { + auto recess_period_ns = periodic->recess_period_ns(); if (recess_period_ns != recess_period_ns_) { recess_period_ns_ = recess_period_ns; recess_period_ = std::to_string(recess_period_ns_); @@ -75,17 +76,16 @@ int64_t PeriodicCondition::recess_period_ns() { int64_t PeriodicCondition::last_run_timestamp() { int64_t last_run_timestamp = 0; - if (gxf_cptr_) { - nvidia::gxf::PeriodicSchedulingTerm* periodic_scheduling_term = - static_cast(gxf_cptr_); - auto result = periodic_scheduling_term->last_run_timestamp(); + auto periodic = get(); + if (periodic) { + auto result = periodic->last_run_timestamp(); if (result) { last_run_timestamp = result.value(); } else { HOLOSCAN_LOG_ERROR("PeriodicCondition: Unable to get the result of 'last_run_timestamp()'"); } } else { - 
HOLOSCAN_LOG_ERROR("PeriodicCondition: gxf_cptr_ is null"); + HOLOSCAN_LOG_ERROR("PeriodicCondition: GXF component pointer is null"); } return last_run_timestamp; } diff --git a/src/core/dataflow_tracker.cpp b/src/core/dataflow_tracker.cpp index 51569c09..6840e4bd 100644 --- a/src/core/dataflow_tracker.cpp +++ b/src/core/dataflow_tracker.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -40,8 +40,7 @@ DataFlowTracker::~DataFlowTracker() { } void DataFlowTracker::end_logging() { - if (!logger_ofstream_.is_open()) - return; + if (!logger_ofstream_.is_open()) return; // Write out the remaining messages from the log buffer and close ofstream for (auto it : buffered_messages_) { logger_ofstream_ << it << "\n"; } diff --git a/src/core/domain/tensor.cpp b/src/core/domain/tensor.cpp index ef9762cf..b12bef18 100644 --- a/src/core/domain/tensor.cpp +++ b/src/core/domain/tensor.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,39 +15,19 @@ * limitations under the License. 
*/ -#include - #include #include #include #include +#include "gxf/std/dlpack_utils.hpp" #include "holoscan/core/common.hpp" #include "holoscan/core/domain/tensor.hpp" -#define CUDA_TRY(stmt) \ - { \ - cuda_status = stmt; \ - if (cudaSuccess != cuda_status) { \ - HOLOSCAN_LOG_ERROR("Runtime call {} in line {} of file {} failed with '{}' ({})", \ - #stmt, \ - __LINE__, \ - __FILE__, \ - cudaGetErrorString(cuda_status), \ - cuda_status); \ - } \ - } - namespace holoscan { -DLManagedMemoryBuffer::DLManagedMemoryBuffer(DLManagedTensor* self) : self_(self) {} - -DLManagedMemoryBuffer::~DLManagedMemoryBuffer() { - if (self_ && self_->deleter != nullptr) { self_->deleter(self_); } -} - Tensor::Tensor(DLManagedTensor* dl_managed_tensor_ptr) { - dl_ctx_ = std::make_shared(); + dl_ctx_ = std::make_shared(); dl_ctx_->memory_ref = std::make_shared(dl_managed_tensor_ptr); auto& dl_managed_tensor = dl_ctx_->tensor; @@ -55,14 +35,14 @@ Tensor::Tensor(DLManagedTensor* dl_managed_tensor_ptr) { } DLManagedTensor* Tensor::to_dlpack() { - auto dl_managed_tensor_ctx = new DLManagedTensorCtx; + auto dl_managed_tensor_ctx = new DLManagedTensorContext; auto& dl_managed_tensor = dl_managed_tensor_ctx->tensor; dl_managed_tensor_ctx->memory_ref = dl_ctx_->memory_ref; dl_managed_tensor.manager_ctx = dl_managed_tensor_ctx; dl_managed_tensor.deleter = [](DLManagedTensor* self) { - auto dl_managed_tensor_ctx = static_cast(self->manager_ctx); + auto dl_managed_tensor_ctx = static_cast(self->manager_ctx); dl_managed_tensor_ctx->memory_ref.reset(); delete dl_managed_tensor_ctx; }; @@ -99,31 +79,13 @@ int64_t Tensor::size() const { } DLDevice dldevice_from_pointer(void* ptr) { - cudaError_t cuda_status; - - DLDevice device{.device_type = kDLCUDA, .device_id = 0}; - - cudaPointerAttributes attributes; - CUDA_TRY(cudaPointerGetAttributes(&attributes, ptr)); - if (cuda_status != cudaSuccess) { - throw std::runtime_error(fmt::format("Unable to get pointer attributes from 0x{:x}", ptr)); + auto 
maybe_device = nvidia::gxf::DLDeviceFromPointer(ptr); + if (!maybe_device) { + throw std::runtime_error( + fmt::format("Failed to determine DLDevice based on pointer with error: {}", + GxfResultStr(maybe_device.error()))); } - - switch (attributes.type) { - case cudaMemoryTypeUnregistered: - device.device_type = kDLCPU; - break; - case cudaMemoryTypeHost: - device = {.device_type = kDLCUDAHost, .device_id = attributes.device}; - break; - case cudaMemoryTypeDevice: - device = {.device_type = kDLCUDA, .device_id = attributes.device}; - break; - case cudaMemoryTypeManaged: - device = {.device_type = kDLCUDAManaged, .device_id = attributes.device}; - break; - } - return device; + return maybe_device.value(); } void calc_strides(const DLTensor& tensor, std::vector& strides, bool to_num_elements) { @@ -144,83 +106,23 @@ void calc_strides(const DLTensor& tensor, std::vector& strides, bool to } DLDataType dldatatype_from_typestr(const std::string& typestr) { - uint8_t code; - uint8_t bits; - uint16_t lanes = 1; - if (typestr.substr(0, 1) == ">") { throw std::runtime_error("big endian types not supported"); } - std::string kind = typestr.substr(1, 1); - if (kind == "i") { - code = kDLInt; - } else if (kind == "u") { - code = kDLUInt; - } else if (kind == "f") { - code = kDLFloat; - } else if (kind == "c") { - code = kDLComplex; - } else { - throw std::logic_error(fmt::format("dtype.kind: {} is not supported!", kind)); + auto maybe_dtype = nvidia::gxf::DLDataTypeFromTypeString(typestr); + if (!maybe_dtype) { + throw std::runtime_error( + fmt::format("Failed to determine DLDataType from type string with error: {}", + GxfResultStr(maybe_dtype.error()))); } - bits = std::stoi(typestr.substr(2)) * 8; - DLDataType data_type{code, bits, lanes}; - return data_type; + return maybe_dtype.value(); } const char* numpy_dtype(const DLDataType dtype) { - // TODO: consider bfloat16: https://github.com/dmlc/dlpack/issues/45 - // TODO: consider other byte-order - uint8_t code = dtype.code; - 
uint8_t bits = dtype.bits; - switch (code) { - case kDLInt: - switch (bits) { - case 8: - return "|i1"; - case 16: - return " #include #include +#include #include #include #include @@ -35,6 +36,8 @@ #include #include "holoscan/core/application.hpp" +#include "holoscan/core/arg.hpp" +#include "holoscan/core/condition.hpp" #include "holoscan/core/conditions/gxf/downstream_affordable.hpp" #include "holoscan/core/conditions/gxf/message_available.hpp" #include "holoscan/core/config.hpp" @@ -49,12 +52,12 @@ #include "holoscan/core/gxf/gxf_operator.hpp" #include "holoscan/core/gxf/gxf_resource.hpp" #include "holoscan/core/gxf/gxf_scheduler.hpp" -#include "holoscan/core/gxf/gxf_tensor.hpp" #include "holoscan/core/gxf/gxf_utils.hpp" #include "holoscan/core/gxf/gxf_wrapper.hpp" #include "holoscan/core/message.hpp" #include "holoscan/core/messagelabel.hpp" #include "holoscan/core/operator.hpp" +#include "holoscan/core/resource.hpp" #include "holoscan/core/resources/gxf/annotated_double_buffer_receiver.hpp" #include "holoscan/core/resources/gxf/annotated_double_buffer_transmitter.hpp" #include "holoscan/core/resources/gxf/dfft_collector.hpp" @@ -64,14 +67,46 @@ #include "holoscan/core/services/common/virtual_operator.hpp" #include "holoscan/core/signal_handler.hpp" +#include "gxf/app/arg.hpp" #include "gxf/std/default_extension.hpp" - #include "gxf/std/extension_factory_helper.hpp" #include "gxf/std/monitor.hpp" #include "gxf/test/components/entity_monitor.hpp" namespace holoscan::gxf { +namespace { +std::pair get_capacity_and_policy( + nvidia::gxf::Handle component) { + uint64_t capacity = 1; + uint64_t policy = 2; + if (component.is_null()) { + HOLOSCAN_LOG_ERROR("Null component handle"); + return std::make_pair(capacity, policy); + } + auto maybe_capacity = component->getParameter("capacity"); + if (maybe_capacity) { + capacity = maybe_capacity.value(); + } else { + HOLOSCAN_LOG_ERROR("Failed to get capacity, using default value of {}", capacity); + } + auto maybe_policy = 
component->getParameter("policy"); + if (maybe_policy) { + policy = maybe_policy.value(); + } else { + HOLOSCAN_LOG_ERROR("Failed to get policy, using default value of {}", policy); + } + return std::make_pair(capacity, policy); +} + +bool has_ucx_connector(std::shared_ptr graph_entity) { + auto has_ucx_receiver = graph_entity->try_get("nvidia::gxf::UcxReceiver"); + auto has_ucx_transmitter = graph_entity->try_get("nvidia::gxf::UcxTransmitter"); + return has_ucx_receiver || has_ucx_transmitter; +} + +} // namespace + static const std::vector kDefaultGXFExtensions{ "libgxf_std.so", "libgxf_cuda.so", @@ -81,8 +116,7 @@ static const std::vector kDefaultGXFExtensions{ }; static const std::vector kDefaultHoloscanGXFExtensions{ - "libgxf_stream_playback.so", // keep for use of VideoStreamSerializer - "libgxf_ucx_holoscan.so", // serialize holoscan::gxf::GXFTensor and holoscan::Message + "libgxf_ucx_holoscan.so", // serialize holoscan::Message }; static nvidia::Severity s_gxf_log_level = nvidia::Severity::INFO; @@ -208,7 +242,9 @@ GXFExecutor::GXFExecutor(holoscan::Fragment* fragment, bool create_gxf_context) // GxfGetSharedContext(application->executor().context(), &shared_context)); // HOLOSCAN_GXF_CALL_FATAL(GxfContextCreate1(shared_context, &context_)); // } else { - GXF_LOG_INFO("Creating context"); + auto frag_name_display = fragment_->name(); + if (!frag_name_display.empty()) { frag_name_display = "[" + frag_name_display + "] "; } + HOLOSCAN_LOG_INFO("{}Creating context", frag_name_display); HOLOSCAN_GXF_CALL_FATAL(GxfContextCreate(&context_)); // } own_gxf_context_ = true; @@ -225,9 +261,19 @@ GXFExecutor::GXFExecutor(holoscan::Fragment* fragment, bool create_gxf_context) } GXFExecutor::~GXFExecutor() { + implicit_broadcast_entities_.clear(); + util_entity_.reset(); + gpu_device_entity_.reset(); + scheduler_entity_.reset(); + network_context_entity_.reset(); + connections_entity_.reset(); + // Deinitialize GXF context only if `own_gxf_context_` is true if 
(own_gxf_context_) { - GXF_LOG_INFO("Destroying context"); + auto frag_name_display = fragment_->name(); + if (!frag_name_display.empty()) { frag_name_display = "[" + frag_name_display + "] "; } + HOLOSCAN_LOG_INFO("{}Destroying context", frag_name_display); + // Unregister signal handlers if any SignalHandler::unregister_signal_handler(context_, SIGINT); SignalHandler::unregister_signal_handler(context_, SIGTERM); @@ -241,17 +287,32 @@ GXFExecutor::~GXFExecutor() { } } +void GXFExecutor::initialize_gxf_resources( + std::unordered_map>& resources, gxf_uid_t eid, + std::shared_ptr graph_entity) { + for (const auto& [name, resource] : resources) { + // Note: native resources are only supported on Operator, not for NetworkContext or Scheduler + auto gxf_resource = std::dynamic_pointer_cast(resource); + // Initialize GXF component if it is not already initialized. + if (gxf_resource->gxf_context() == nullptr) { + gxf_resource->fragment(fragment()); + if (graph_entity) { gxf_resource->gxf_graph_entity(graph_entity); } + gxf_resource->gxf_eid(eid); // set GXF entity id + gxf_resource->initialize(); + } else { + HOLOSCAN_LOG_ERROR("Resource '{}' is not a holoscan::gxf::GXFResource and will be ignored", + name); + } + } +} + void GXFExecutor::add_operator_to_entity_group(gxf_context_t context, gxf_uid_t entity_group_gid, std::shared_ptr op) { - gxf_uid_t op_eid = kNullUid; - if (op->operator_type() == Operator::OperatorType::kGXF) { - op_eid = std::dynamic_pointer_cast(op)->gxf_eid(); - } else { - // get the GXF entity ID corresponding to the native operator's GXF Codelet - const std::string op_entity_name = fmt::format("{}{}", entity_prefix_, op->name()); - HOLOSCAN_GXF_CALL_FATAL(GxfEntityFind(context, op_entity_name.c_str(), &op_eid)); + auto graph_entity = op->graph_entity(); + gxf_uid_t op_eid = graph_entity->eid(); + if (!graph_entity) { + HOLOSCAN_LOG_ERROR("null GraphEntity found during add_operator_to_entity_group"); } - HOLOSCAN_LOG_DEBUG("Adding operator eid 
'{}' to entity group '{}'", op_eid, entity_group_gid); HOLOSCAN_GXF_CALL_FATAL(GxfUpdateEntityGroup(context, entity_group_gid, op_eid)); } @@ -262,25 +323,16 @@ void GXFExecutor::run(OperatorGraph& graph) { return; } - if (!run_gxf_graph()) { - HOLOSCAN_LOG_ERROR("Failed to run GXF graph"); - return; - } + // Note that run_gxf_graph() can raise an exception. + run_gxf_graph(); } std::future GXFExecutor::run_async(OperatorGraph& graph) { if (!is_gxf_graph_initialized_) { initialize_gxf_graph(graph); } return std::async(std::launch::async, [this, &graph]() { - try { - this->run_gxf_graph(); - } catch (const RuntimeError& e) { - // Do not propagate the exception to the caller because the failure is already logged - // by the GXF and the failure on GxfGraphWait() is expected when the graph is interrupted. - // (Normal execution with the distributed application.) - HOLOSCAN_LOG_DEBUG("Exception in GXFExecutor::run_gxf_graph - {}", e.what()); - } - HOLOSCAN_LOG_INFO("Fragment '{}' Terminated", this->fragment()->name()); + // Note that run_gxf_graph() can raise an exception. + this->run_gxf_graph(); }); } @@ -302,6 +354,100 @@ std::shared_ptr GXFExecutor::extension_manager() { return gxf_extension_manager_; } +namespace { +/* @brief Utility function used internally by the GXFExecutor::create_input_port static method. + * + * This function will only be called in the case of a GXF application that is wrapping a + * holoscan::Operator as a GXF codelet (as in examples/wrap_operator_as_gxf_extension). + * + * It will update the native operator's connectors to use the existing GXF receiver. + * + * Note: cannot use GXF GraphEntity C++ APIs here as the Operator here wraps a codelet which does + * not have a GraphEntity data member. 
+ */ +void bind_input_port(Fragment* fragment, gxf_context_t gxf_context, gxf_uid_t eid, IOSpec* io_spec, + const char* rx_name, IOSpec::ConnectorType rx_type, Operator* op) { + // Can't currently use GraphEntity API for this OperatorWrapper/bind_port code path + if (rx_type != IOSpec::ConnectorType::kDefault) { + // TODO: update bind_port code path for types other than ConnectorType::kDefault + throw std::runtime_error(fmt::format( + "Unable to support types other than ConnectorType::kDefault (rx_name: '{}')", rx_name)); + } + const char* entity_name = ""; + HOLOSCAN_GXF_CALL_FATAL(GxfComponentName(gxf_context, eid, &entity_name)); + + gxf_tid_t receiver_find_tid{}; + HOLOSCAN_GXF_CALL_FATAL( + GxfComponentTypeId(gxf_context, "nvidia::gxf::Receiver", &receiver_find_tid)); + + gxf_uid_t receiver_cid = 0; + HOLOSCAN_GXF_CALL_FATAL( + GxfComponentFind(gxf_context, eid, receiver_find_tid, rx_name, nullptr, &receiver_cid)); + + gxf_tid_t receiver_tid{}; + HOLOSCAN_GXF_CALL_FATAL(GxfComponentType(gxf_context, receiver_cid, &receiver_tid)); + + gxf_tid_t double_buffer_receiver_tid{}; + + if (fragment->data_flow_tracker()) { + HOLOSCAN_GXF_CALL_FATAL(GxfComponentTypeId( + gxf_context, "holoscan::AnnotatedDoubleBufferReceiver", &double_buffer_receiver_tid)); + } else { + HOLOSCAN_GXF_CALL_FATAL(GxfComponentTypeId( + gxf_context, "nvidia::gxf::DoubleBufferReceiver", &double_buffer_receiver_tid)); + } + + if (receiver_tid == double_buffer_receiver_tid) { + // It could be made more succinct by casting appropriately at the + // std::make_shared call, but, I don't have an example to test if it is working + if (fragment->data_flow_tracker()) { + holoscan::AnnotatedDoubleBufferReceiver* double_buffer_receiver_ptr = nullptr; + HOLOSCAN_GXF_CALL_FATAL( + GxfComponentPointer(gxf_context, + receiver_cid, + receiver_tid, + reinterpret_cast(&double_buffer_receiver_ptr))); + + if (double_buffer_receiver_ptr) { + auto receiver = + std::make_shared(rx_name, double_buffer_receiver_ptr); + 
// Set the existing DoubleBufferReceiver for this input + io_spec->connector(receiver); + double_buffer_receiver_ptr->op(op); + } else { + HOLOSCAN_LOG_ERROR( + "Unable to get AnnotatedDoubleBufferReceiver pointer for the handle: '{}' in '{}' " + "entity", + rx_name, + entity_name); + } + } else { + nvidia::gxf::DoubleBufferReceiver* double_buffer_receiver_ptr = nullptr; + GxfComponentPointer(gxf_context, + receiver_cid, + receiver_tid, + reinterpret_cast(&double_buffer_receiver_ptr)); + + if (double_buffer_receiver_ptr) { + auto receiver = + std::make_shared(rx_name, double_buffer_receiver_ptr); + // Set the existing DoubleBufferReceiver for this input + io_spec->connector(receiver); + } else { + HOLOSCAN_LOG_ERROR( + "Unable to get DoubleBufferReceiver pointer for the handle: '{}' in '{}' entity", + rx_name, + entity_name); + } + } + } else { + HOLOSCAN_LOG_ERROR( + "Unsupported GXF receiver type for the handle: '{}' in '{}' entity", rx_name, entity_name); + } + return; +} +} // namespace + void GXFExecutor::create_input_port(Fragment* fragment, gxf_context_t gxf_context, gxf_uid_t eid, IOSpec* io_spec, bool bind_port, Operator* op) { const char* rx_name = io_spec->name().c_str(); // input port name @@ -315,93 +461,23 @@ void GXFExecutor::create_input_port(Fragment* fragment, gxf_context_t gxf_contex "ConnectorType::kDoubleBuffer."); } } + auto graph_entity = op->graph_entity(); // If this executor is used by OperatorWrapper (bind_port == true) to wrap Native Operator, - // then we need to call `io_spec->connector(...)` to set the existing GXF Receiver for this - // input. + // then we need to call `bind_input_port` to set the existing GXF Receiver for this input. 
if (bind_port) { - if (rx_type != IOSpec::ConnectorType::kDefault) { - throw std::runtime_error( - "TODO: update bind_port code path for types other than ConnectorType::kDefault"); - } - const char* entity_name = ""; - HOLOSCAN_GXF_CALL_FATAL(GxfComponentName(gxf_context, eid, &entity_name)); - - gxf_tid_t receiver_find_tid{}; - HOLOSCAN_GXF_CALL_FATAL( - GxfComponentTypeId(gxf_context, "nvidia::gxf::Receiver", &receiver_find_tid)); - - gxf_uid_t receiver_cid = 0; - HOLOSCAN_GXF_CALL_FATAL( - GxfComponentFind(gxf_context, eid, receiver_find_tid, rx_name, nullptr, &receiver_cid)); - - gxf_tid_t receiver_tid{}; - HOLOSCAN_GXF_CALL_FATAL(GxfComponentType(gxf_context, receiver_cid, &receiver_tid)); - - gxf_tid_t double_buffer_receiver_tid{}; - - if (fragment->data_flow_tracker()) { - HOLOSCAN_GXF_CALL_FATAL(GxfComponentTypeId( - gxf_context, "holoscan::AnnotatedDoubleBufferReceiver", &double_buffer_receiver_tid)); - } else { - HOLOSCAN_GXF_CALL_FATAL(GxfComponentTypeId( - gxf_context, "nvidia::gxf::DoubleBufferReceiver", &double_buffer_receiver_tid)); - } - - if (receiver_tid == double_buffer_receiver_tid) { - // It could be made more succinct by casting appropriately at the - // std::make_shared call, but, I don't have an example to test if it is working - if (fragment->data_flow_tracker()) { - holoscan::AnnotatedDoubleBufferReceiver* double_buffer_receiver_ptr = nullptr; - HOLOSCAN_GXF_CALL_FATAL( - GxfComponentPointer(gxf_context, - receiver_cid, - receiver_tid, - reinterpret_cast(&double_buffer_receiver_ptr))); - - if (double_buffer_receiver_ptr) { - auto receiver = - std::make_shared(rx_name, double_buffer_receiver_ptr); - // Set the existing DoubleBufferReceiver for this input - io_spec->connector(receiver); - double_buffer_receiver_ptr->op(op); - } else { - HOLOSCAN_LOG_ERROR( - "Unable to get AnnotatedDoubleBufferReceiver pointer for the handle: '{}' in '{}' " - "entity", - rx_name, - entity_name); - } - } else { - nvidia::gxf::DoubleBufferReceiver* 
double_buffer_receiver_ptr = nullptr; - GxfComponentPointer(gxf_context, - receiver_cid, - receiver_tid, - reinterpret_cast(&double_buffer_receiver_ptr)); - - if (double_buffer_receiver_ptr) { - auto receiver = - std::make_shared(rx_name, double_buffer_receiver_ptr); - // Set the existing DoubleBufferReceiver for this input - io_spec->connector(receiver); - } else { - HOLOSCAN_LOG_ERROR( - "Unable to get DoubleBufferReceiver pointer for the handle: '{}' in '{}' entity", - rx_name, - entity_name); - } - } - } else { - HOLOSCAN_LOG_ERROR("Unsupported GXF receiver type for the handle: '{}' in '{}' entity", - rx_name, - entity_name); - } + bind_input_port(fragment, gxf_context, eid, io_spec, rx_name, rx_type, op); return; } auto connector = std::dynamic_pointer_cast(io_spec->connector()); - - if (!connector || (connector->gxf_cptr() == nullptr)) { + if (connector && (connector->gxf_cptr() != nullptr)) { + auto gxf_receiver = std::dynamic_pointer_cast(connector); + if (gxf_receiver && graph_entity) { + gxf_receiver->gxf_eid(graph_entity->eid()); + gxf_receiver->gxf_graph_entity(graph_entity); + } + } else { // Create Receiver component for this input std::shared_ptr rx_resource; switch (rx_type) { @@ -433,8 +509,22 @@ void GXFExecutor::create_input_port(Fragment* fragment, gxf_context_t gxf_contex rx_resource->setup(*rx_spec); rx_resource->spec(std::move(rx_spec)); - rx_resource->gxf_eid(eid); - rx_resource->initialize(); + // Note: had to make sure GXFComponent calls addComponent and not addReceiver or addTransmitter + // or errors will occur as follows: + // [error] [component.hpp:160] Expression 'parameter_registrar_->getComponentParameterInfoPtr( + // tid, key)' failed with error 'GXF_ENTITY_COMPONENT_NOT_FOUND'. + // [error] [graph_entity.cpp:52] Expression 'codelet_->getParameterInfo(rx_name)' failed with + // error 'GXF_ENTITY_COMPONENT_NOT_FOUND'. 
+ // [error] [gxf_component.cpp:112] Failed to add component 'values:27' of type: + // 'nvidia::gxf::DoubleBufferReceiver' + // [info] [gxf_component.cpp:119] Initializing component '__condition_input__1' in entity + // '370' via GxfComponentAdd + // [error] [gxf_condition.cpp:97] GXF call ::holoscan::gxf::GXFParameterAdaptor::set_param( + // gxf_context_, gxf_cid_, key.c_str(), param_wrap) in line 97 of file + + // Add to the same entity as the operator and initialize + // Note: it is important that GXFComponent calls addComponent and not addTransmitter for this + rx_resource->add_to_graph_entity(op); if (fragment->data_flow_tracker()) { holoscan::AnnotatedDoubleBufferReceiver* dbl_ptr; @@ -476,17 +566,17 @@ void GXFExecutor::create_input_port(Fragment* fragment, gxf_context_t gxf_contex case ConditionType::kMessageAvailable: { std::shared_ptr message_available_condition = std::dynamic_pointer_cast(condition); - + // Note: GraphEntity::addSchedulingTerm requires a unique name here + std::string cond_name = + fmt::format("__{}_{}_cond_{}", op->name(), rx_name, condition_index); message_available_condition->receiver(connector); - message_available_condition->name( - ::holoscan::gxf::create_name("__condition_input_", condition_index).c_str()); + message_available_condition->name(cond_name); message_available_condition->fragment(fragment); auto rx_condition_spec = std::make_shared(fragment); message_available_condition->setup(*rx_condition_spec); message_available_condition->spec(std::move(rx_condition_spec)); - - message_available_condition->gxf_eid(eid); - message_available_condition->initialize(); + // Add to the same entity as the operator and initialize + message_available_condition->add_to_graph_entity(op); break; } case ConditionType::kNone: @@ -498,6 +588,97 @@ void GXFExecutor::create_input_port(Fragment* fragment, gxf_context_t gxf_contex } } +namespace { +/* @brief Utility function used internally by the GXFExecutor::create_output_port static method. 
+ * + * This function will only be called in the case of a GXF application that is wrapping a + * holoscan::Operator as a GXF codelet (as in examples/wrap_operator_as_gxf_extension). + * + * It will update the native operator's connectors to use the existing GXF transmitter/ + * + * Note: cannot use GXF GraphEntity C++ APIs here as the Operator here wraps a codelet which does + * not have a GraphEntity data member. + */ +void bind_output_port(Fragment* fragment, gxf_context_t gxf_context, gxf_uid_t eid, IOSpec* io_spec, + const char* tx_name, IOSpec::ConnectorType tx_type, Operator* op) { + if (tx_type != IOSpec::ConnectorType::kDefault) { + // TODO: update bind_port code path for types other than ConnectorType::kDefault + throw std::runtime_error(fmt::format( + "Unable to support types other than ConnectorType::kDefault (tx_name: '{}')", tx_name)); + } + const char* entity_name = ""; + HOLOSCAN_GXF_CALL_FATAL(GxfComponentName(gxf_context, eid, &entity_name)); + + gxf_tid_t transmitter_find_tid{}; + HOLOSCAN_GXF_CALL_FATAL( + GxfComponentTypeId(gxf_context, "nvidia::gxf::Transmitter", &transmitter_find_tid)); + + gxf_uid_t transmitter_cid = 0; + HOLOSCAN_GXF_CALL_FATAL( + GxfComponentFind(gxf_context, eid, transmitter_find_tid, tx_name, nullptr, &transmitter_cid)); + + gxf_tid_t transmitter_tid{}; + HOLOSCAN_GXF_CALL_FATAL(GxfComponentType(gxf_context, transmitter_cid, &transmitter_tid)); + + gxf_tid_t double_buffer_transmitter_tid{}; + + if (fragment->data_flow_tracker()) { + HOLOSCAN_GXF_CALL_FATAL(GxfComponentTypeId( + gxf_context, "holoscan::AnnotatedDoubleBufferTransmitter", &double_buffer_transmitter_tid)); + } else { + HOLOSCAN_GXF_CALL_FATAL(GxfComponentTypeId( + gxf_context, "nvidia::gxf::DoubleBufferTransmitter", &double_buffer_transmitter_tid)); + } + + if (transmitter_tid == double_buffer_transmitter_tid) { + if (fragment->data_flow_tracker()) { + holoscan::AnnotatedDoubleBufferTransmitter* double_buffer_transmitter_ptr = nullptr; + 
HOLOSCAN_GXF_CALL_FATAL( + GxfComponentPointer(gxf_context, + transmitter_cid, + transmitter_tid, + reinterpret_cast(&double_buffer_transmitter_ptr))); + + if (double_buffer_transmitter_ptr) { + auto transmitter = std::make_shared( + tx_name, double_buffer_transmitter_ptr); + // Set the existing DoubleBufferTransmitter for this output + io_spec->connector(transmitter); + double_buffer_transmitter_ptr->op(op); + } else { + HOLOSCAN_LOG_ERROR( + "Unable to get AnnotatedDoubleBufferTransmitter pointer for the handle: '{}' in '{}' " + "entity", + tx_name, + entity_name); + } + } else { + nvidia::gxf::DoubleBufferTransmitter* double_buffer_transmitter_ptr = nullptr; + GxfComponentPointer(gxf_context, + transmitter_cid, + transmitter_tid, + reinterpret_cast(&double_buffer_transmitter_ptr)); + + if (double_buffer_transmitter_ptr) { + auto transmitter = std::make_shared( + tx_name, double_buffer_transmitter_ptr); + // Set the existing DoubleBufferTransmitter for this output + io_spec->connector(transmitter); + } else { + HOLOSCAN_LOG_ERROR( + "Unable to get DoubleBufferTransmitter pointer for the handle: '{}' in '{}' entity", + tx_name, + entity_name); + } + } + } else { + HOLOSCAN_LOG_ERROR("Unsupported GXF transmitter type for the handle: '{}' in '{}' entity", + tx_name, + entity_name); + } +} +} // namespace + void GXFExecutor::create_output_port(Fragment* fragment, gxf_context_t gxf_context, gxf_uid_t eid, IOSpec* io_spec, bool bind_port, Operator* op) { const char* tx_name = io_spec->name().c_str(); @@ -511,91 +692,22 @@ void GXFExecutor::create_output_port(Fragment* fragment, gxf_context_t gxf_conte "ConnectorType::kDoubleBuffer."); } } + auto graph_entity = op->graph_entity(); // If this executor is used by OperatorWrapper (bind_port == true) to wrap Native Operator, - // then we need to call `io_spec->connector(...)` to set the existing GXF Transmitter for this - // output. 
+ // then we need to call `bind_output_port` to set the existing GXF Transmitter for this output. if (bind_port) { - if (tx_type != IOSpec::ConnectorType::kDefault) { - throw std::runtime_error( - "TODO: update bind_port code path for types other than ConnectorType::kDefault"); - } - const char* entity_name = ""; - HOLOSCAN_GXF_CALL_FATAL(GxfComponentName(gxf_context, eid, &entity_name)); - - gxf_tid_t transmitter_find_tid{}; - HOLOSCAN_GXF_CALL_FATAL( - GxfComponentTypeId(gxf_context, "nvidia::gxf::Transmitter", &transmitter_find_tid)); - - gxf_uid_t transmitter_cid = 0; - HOLOSCAN_GXF_CALL_FATAL(GxfComponentFind( - gxf_context, eid, transmitter_find_tid, tx_name, nullptr, &transmitter_cid)); - - gxf_tid_t transmitter_tid{}; - HOLOSCAN_GXF_CALL_FATAL(GxfComponentType(gxf_context, transmitter_cid, &transmitter_tid)); - - gxf_tid_t double_buffer_transmitter_tid{}; - - if (fragment->data_flow_tracker()) { - HOLOSCAN_GXF_CALL_FATAL(GxfComponentTypeId(gxf_context, - "holoscan::AnnotatedDoubleBufferTransmitter", - &double_buffer_transmitter_tid)); - } else { - HOLOSCAN_GXF_CALL_FATAL(GxfComponentTypeId( - gxf_context, "nvidia::gxf::DoubleBufferTransmitter", &double_buffer_transmitter_tid)); - } - - if (transmitter_tid == double_buffer_transmitter_tid) { - if (fragment->data_flow_tracker()) { - holoscan::AnnotatedDoubleBufferTransmitter* double_buffer_transmitter_ptr = nullptr; - HOLOSCAN_GXF_CALL_FATAL( - GxfComponentPointer(gxf_context, - transmitter_cid, - transmitter_tid, - reinterpret_cast(&double_buffer_transmitter_ptr))); - - if (double_buffer_transmitter_ptr) { - auto transmitter = std::make_shared( - tx_name, double_buffer_transmitter_ptr); - // Set the existing DoubleBufferTransmitter for this output - io_spec->connector(transmitter); - double_buffer_transmitter_ptr->op(op); - } else { - HOLOSCAN_LOG_ERROR( - "Unable to get AnnotatedDoubleBufferTransmitter pointer for the handle: '{}' in '{}' " - "entity", - tx_name, - entity_name); - } - } else { - 
nvidia::gxf::DoubleBufferTransmitter* double_buffer_transmitter_ptr = nullptr; - GxfComponentPointer(gxf_context, - transmitter_cid, - transmitter_tid, - reinterpret_cast(&double_buffer_transmitter_ptr)); - - if (double_buffer_transmitter_ptr) { - auto transmitter = std::make_shared( - tx_name, double_buffer_transmitter_ptr); - // Set the existing DoubleBufferTransmitter for this output - io_spec->connector(transmitter); - } else { - HOLOSCAN_LOG_ERROR( - "Unable to get DoubleBufferTransmitter pointer for the handle: '{}' in '{}' entity", - tx_name, - entity_name); - } - } - } else { - HOLOSCAN_LOG_ERROR("Unsupported GXF transmitter type for the handle: '{}' in '{}' entity", - tx_name, - entity_name); - } + bind_output_port(fragment, gxf_context, eid, io_spec, tx_name, tx_type, op); return; } auto connector = std::dynamic_pointer_cast(io_spec->connector()); - - if (!connector || (connector->gxf_cptr() == nullptr)) { + if (connector && (connector->gxf_cptr() != nullptr)) { + auto gxf_transmitter = std::dynamic_pointer_cast(connector); + if (gxf_transmitter && graph_entity) { + gxf_transmitter->gxf_eid(graph_entity->eid()); + gxf_transmitter->gxf_graph_entity(graph_entity); + } + } else { // Create Transmitter component for this output std::shared_ptr tx_resource; switch (tx_type) { @@ -626,9 +738,9 @@ void GXFExecutor::create_output_port(Fragment* fragment, gxf_context_t gxf_conte auto tx_spec = std::make_shared(fragment); tx_resource->setup(*tx_spec); tx_resource->spec(std::move(tx_spec)); - - tx_resource->gxf_eid(eid); - tx_resource->initialize(); + // add to the same entity as the operator and initialize + // Note: it is important that GXFComponent calls addComponent and not addTransmitter for this + tx_resource->add_to_graph_entity(op); if (fragment->data_flow_tracker()) { holoscan::AnnotatedDoubleBufferTransmitter* dbl_ptr; @@ -671,17 +783,17 @@ void GXFExecutor::create_output_port(Fragment* fragment, gxf_context_t gxf_conte case 
ConditionType::kDownstreamMessageAffordable: { std::shared_ptr downstream_msg_affordable_condition = std::dynamic_pointer_cast(condition); - + // Note: GraphEntity::addSchedulingTerm requires a unique name here + std::string cond_name = + fmt::format("__{}_{}_cond_{}", op->name(), tx_name, condition_index); downstream_msg_affordable_condition->transmitter(connector); - downstream_msg_affordable_condition->name( - ::holoscan::gxf::create_name("__condition_output_", condition_index).c_str()); + downstream_msg_affordable_condition->name(cond_name); downstream_msg_affordable_condition->fragment(fragment); auto tx_condition_spec = std::make_shared(fragment); downstream_msg_affordable_condition->setup(*tx_condition_spec); downstream_msg_affordable_condition->spec(std::move(tx_condition_spec)); - - downstream_msg_affordable_condition->gxf_eid(eid); - downstream_msg_affordable_condition->initialize(); + // add to the same entity as the operator and initialize + downstream_msg_affordable_condition->add_to_graph_entity(op); break; } case ConditionType::kNone: @@ -875,27 +987,38 @@ void connect_ucx_transmitters_to_virtual_ops( } } -using BroadcastEidMapType = std::unordered_map>; +} // unnamed namespace -/** - * @brief Add connection between the prior Broadcast component and the current operator's input - * port(s). - * - * Creates a transmitter on the broadcast component and connects it to the input port of `op`. 
- * - * Any connected ports of the operator are removed from port_map_val - */ -void connect_broadcast_to_previous_op(gxf_context_t context, // context_ - Fragment* fragment_, // fragment_ - const BroadcastEidMapType& broadcast_eids, - holoscan::OperatorGraph::NodeType op, - holoscan::OperatorGraph::NodeType prev_op, - holoscan::OperatorGraph::EdgeDataType port_map_val) { +gxf_result_t GXFExecutor::add_connection(gxf_uid_t source_cid, gxf_uid_t target_cid) { + gxf_result_t code; + + auto connection = connections_entity_->addComponent("nvidia::gxf::Connection"); + if (!connection) { + HOLOSCAN_LOG_ERROR( + "Failed to add nvidia::gxf::Connection between source cid['{}'] and target cid['{}']", + source_cid, + target_cid); + return GXF_FAILURE; + } + // Use C API instead of Connection::setReceiver and Connection::setTransmitter since we don't + // already have Handle for source and target. + gxf_uid_t connect_cid = connection->cid(); + gxf_context_t context = connections_entity_->context(); + HOLOSCAN_GXF_CALL(GxfParameterSetHandle(context, connect_cid, "source", source_cid)); + code = GxfParameterSetHandle(context, connect_cid, "target", target_cid); + return code; +} + +void GXFExecutor::connect_broadcast_to_previous_op( + const BroadcastEntityMapType& broadcast_entities, holoscan::OperatorGraph::NodeType op, + holoscan::OperatorGraph::NodeType prev_op, holoscan::OperatorGraph::EdgeDataType port_map_val) { auto op_type = op->operator_type(); + // counter to ensure unique broadcast component names as required by nvidia::gxf::GraphEntity + static uint32_t btx_count = 0; + // A Broadcast component was added for prev_op - for (const auto& [port_name, broadcast_eid] : broadcast_eids.at(prev_op)) { + for (const auto& [port_name, broadcast_entity] : broadcast_entities.at(prev_op)) { // Find the Broadcast component's source port name in the port-map. 
if (port_map_val->find(port_name) != port_map_val->end()) { // There is an output port of the prev_op that is associated with a Broadcast component. @@ -905,7 +1028,6 @@ void connect_broadcast_to_previous_op(gxf_context_t context, // context_ auto target_ports = port_map_val->at(port_name); for (const auto& target_port : target_ports) { // Create a Transmitter in the Broadcast entity. - gxf_uid_t tx_cid; auto& prev_op_io_spec = prev_op->spec()->outputs()[port_name]; auto prev_connector_type = prev_op_io_spec->connector_type(); auto prev_connector = prev_op_io_spec->connector(); @@ -925,26 +1047,36 @@ void connect_broadcast_to_previous_op(gxf_context_t context, // context_ uint64_t prev_connector_policy = 2; // fault // Create a transmitter based on the prev_connector_type. - // TODO(gbae): create a special resource for the broadcast codelet and use it. switch (prev_connector_type) { case IOSpec::ConnectorType::kDefault: case IOSpec::ConnectorType::kDoubleBuffer: { // We don't create a AnnotatedDoubleBufferTransmitter even if DFFT is on because // we don't want to annotate a message at the Broadcast component. 
- auto prev_double_buffer_connector = - std::dynamic_pointer_cast(prev_connector); - prev_connector_capacity = prev_double_buffer_connector->capacity_; - prev_connector_policy = prev_double_buffer_connector->policy_; - create_gxf_component( - context, "nvidia::gxf::DoubleBufferTransmitter", "", broadcast_eid, &tx_cid); - HOLOSCAN_GXF_CALL_FATAL( - GxfParameterSetUInt64(context, tx_cid, "capacity", prev_connector_capacity)); - HOLOSCAN_GXF_CALL_FATAL( - GxfParameterSetUInt64(context, tx_cid, "policy", prev_connector_policy)); + // Clone the capacity and policy from previous connector + auto prev_double_buffer_connector = + std::dynamic_pointer_cast(prev_connector); + if (prev_double_buffer_connector) { + prev_connector_capacity = prev_double_buffer_connector->capacity_; + prev_connector_policy = prev_double_buffer_connector->policy_; + } else { + HOLOSCAN_LOG_ERROR( + "Failed to cast connector to DoubleBufferTransmitter, using default capacity and " + "policy"); + } - // Clone the condition of the prev_op's output port and set it as the - // transmitter's condition for the broadcast entity. + // Note: have to use add instead of addTransmitter because the + // Transmitter is not a Parameter on the Broadcast codelet. + std::string btx_name = fmt::format("btx_{}", btx_count); + auto btx_handle = broadcast_entity->add( + btx_name.c_str(), + nvidia::gxf::Arg("capacity", prev_connector_capacity), + nvidia::gxf::Arg("policy", prev_connector_policy)); + if (!btx_handle) { + HOLOSCAN_LOG_ERROR("Failed to create broadcast transmitter for entity {}", + broadcast_entity->name()); + } + btx_count += 1; // increment to ensure unique names // 1. Find the output port's condition. // (ConditionType::kDownstreamMessageAffordable) @@ -960,29 +1092,32 @@ void connect_broadcast_to_previous_op(gxf_context_t context, // context_ // 2. If it exists, clone it and set it as the transmitter's condition unless // the connector type is kUCX.
+ uint64_t prev_min_size = 1; if (prev_condition) { auto prev_downstream_condition = std::dynamic_pointer_cast(prev_condition); - auto min_size = prev_downstream_condition->min_size(); - - gxf_uid_t tx_term_cid; - create_gxf_component(context, - "nvidia::gxf::DownstreamReceptiveSchedulingTerm", - "", - broadcast_eid, - &tx_term_cid); - HOLOSCAN_GXF_CALL_FATAL( - GxfParameterSetHandle(context, tx_term_cid, "transmitter", tx_cid)); - HOLOSCAN_GXF_CALL_FATAL( - GxfParameterSetUInt64(context, tx_term_cid, "min_size", min_size)); + prev_min_size = prev_downstream_condition->min_size(); + + // use add to get the specific Handle so setTransmitter method can be used + std::string btx_term_name = fmt::format("btx_sched_term_{}", btx_count); + auto btx_term_handle = + broadcast_entity->add( + btx_term_name.c_str(), nvidia::gxf::Arg("min_size", prev_min_size)); + if (!btx_term_handle) { + HOLOSCAN_LOG_ERROR( + "Failed to create broadcast transmitter scheduling term for entity {}", + broadcast_entity->name()); + } + btx_term_handle->setTransmitter(btx_handle); } + // Get the current Operator's input port auto target_gxf_resource = std::dynamic_pointer_cast( op->spec()->inputs()[target_port]->connector()); gxf_uid_t target_cid = target_gxf_resource->gxf_cid(); // Connect the newly created Transmitter with current operator's input port - ::holoscan::gxf::add_connection(context, tx_cid, target_cid); + add_connection(btx_handle->cid(), target_cid); HOLOSCAN_LOG_DEBUG( "Connected DownstreamReceptiveSchedulingTerm for Broadcast source : {} -> " "target : {}", @@ -1003,20 +1138,17 @@ void connect_broadcast_to_previous_op(gxf_context_t context, // context_ arg_list); } else { auto prev_ucx_connector = std::dynamic_pointer_cast(prev_connector); - prev_connector_capacity = prev_ucx_connector->capacity_; - prev_connector_policy = prev_ucx_connector->policy_; - - auto prev_connector_receiver_address = prev_ucx_connector->receiver_address(); - auto prev_connector_port = 
prev_ucx_connector->port(); - auto prev_connector_local_address = prev_ucx_connector->local_address(); - auto prev_connector_local_port = prev_ucx_connector->local_port(); + if (!prev_ucx_connector) { + throw std::runtime_error("failed to cast connector to UcxTransmitter"); + } + // could also get these via prev_tx_handle->getParameter(name) calls transmitter = std::make_shared( - Arg("capacity", prev_connector_capacity), - Arg("policy", prev_connector_policy), - Arg("receiver_address", prev_connector_receiver_address), - Arg("port", prev_connector_port), - Arg("local_address", prev_connector_local_address), - Arg("local_port", prev_connector_local_port)); + Arg("capacity", prev_ucx_connector->capacity_), + Arg("policy", prev_ucx_connector->policy_), + Arg("receiver_address", prev_ucx_connector->receiver_address()), + Arg("port", prev_ucx_connector->port()), + Arg("local_address", prev_ucx_connector->local_address()), + Arg("local_port", prev_ucx_connector->local_port())); } auto broadcast_out_port_name = fmt::format("{}_{}", op->name(), port_name); transmitter->name(broadcast_out_port_name); @@ -1026,7 +1158,8 @@ void connect_broadcast_to_previous_op(gxf_context_t context, // context_ transmitter->spec(spec); // Set eid to the broadcast entity's eid so that this component is bound to the // broadcast entity. - transmitter->gxf_eid(broadcast_eid); + transmitter->gxf_eid(broadcast_entity->eid()); + transmitter->gxf_graph_entity(broadcast_entity); // Create a transmitter in the broadcast entity. transmitter->initialize(); } break; @@ -1043,38 +1176,15 @@ void connect_broadcast_to_previous_op(gxf_context_t context, // context_ } } -// Map of connections indexed by source port uid and stores a pair of the target operator name -// and target port name -using TargetPort = std::pair; -using TargetsInfo = std::pair>; -using TargetConnectionsMapType = std::unordered_map; - -/** - * @brief Create Broadcast components and add their IDs to broadcast_eids. 
- * - * Creates broadcast components for any output ports of `op` that connect to more than one - * input port. - * - * Does not add any transmitter to the Broadcast entity. The transmitters will be added later - * when the incoming edges to the respective operators are processed. - * - * Any connected ports of the operator are removed from port_map_val - */ -void create_broadcast_components(gxf_context_t context, // context_ - const std::string& entity_prefix, // entity_prefix_ - holoscan::OperatorGraph::NodeType op, - std::list& implicit_broadcast_entities, - BroadcastEidMapType& broadcast_eids, - const TargetConnectionsMapType& connections) { - gxf_tid_t broadcast_tid = GxfTidNull(); - gxf_tid_t rx_term_tid = GxfTidNull(); - - gxf_tid_t rx_double_buffer_tid = GxfTidNull(); - +void GXFExecutor::create_broadcast_components(holoscan::OperatorGraph::NodeType op, + BroadcastEntityMapType& broadcast_entities, + const TargetConnectionsMapType& connections) { auto& op_name = op->name(); + auto context = context_; + auto entity_prefix = entity_prefix_; for (const auto& [source_cid, target_info] : connections) { - auto& [connector_type, target_ports] = target_info; + auto& [source_cname, connector_type, target_ports] = target_info; if (target_ports.empty()) { HOLOSCAN_LOG_ERROR("No target component found for source_id: {}", source_cid); continue; @@ -1082,10 +1192,7 @@ void create_broadcast_components(gxf_context_t context, // context_ // Insert GXF's Broadcast component if source port is connected to multiple targets if (target_ports.size() > 1) { - gxf_tid_t rx_tid = GxfTidNull(); - - const char* source_cname = ""; - HOLOSCAN_GXF_CALL_FATAL(GxfComponentName(context, source_cid, &source_cname)); + std::string rx_type_name; uint64_t curr_min_size = 1; uint64_t curr_connector_capacity = 1; @@ -1111,21 +1218,19 @@ void create_broadcast_components(gxf_context_t context, // context_ curr_min_size = curr_downstream_condition->min_size(); } - gxf_uid_t broadcast_eid; + auto 
broadcast_entity = std::make_shared(); auto broadcast_entity_name = fmt::format("{}_broadcast_{}_{}", entity_prefix, op_name, source_cname); - // TODO(gbae): create an operator class for the broadcast codelet and use it here - // instead of using GXF API directly. - const GxfEntityCreateInfo broadcast_entity_create_info = {broadcast_entity_name.c_str(), - GXF_ENTITY_CREATE_PROGRAM_BIT}; - HOLOSCAN_GXF_CALL_FATAL( - GxfCreateEntity(context, &broadcast_entity_create_info, &broadcast_eid)); - - // Add the broadcast_eid to the list of implicit broadcast entities - implicit_broadcast_entities.push_back(broadcast_eid); + auto maybe = broadcast_entity->setup(context, broadcast_entity_name.c_str()); + if (!maybe) { + throw std::runtime_error( + fmt::format("Failed to create broadcast entity: '{}'", broadcast_entity_name)); + } + // Add the broadcast_entity to the list of implicit broadcast entities + implicit_broadcast_entities_.push_back(broadcast_entity); - // Add the broadcast_eid for the current operator and the source port name - broadcast_eids[op][source_cname] = broadcast_eid; + // Add the broadcast_entity for the current operator and the source port name + broadcast_entities[op][source_cname] = broadcast_entity; switch (connector_type) { case IOSpec::ConnectorType::kDefault: @@ -1135,76 +1240,51 @@ void create_broadcast_components(gxf_context_t context, // context_ // We don't create a holoscan::AnnotatedDoubleBufferReceiver even if data flow // tracking is on because we don't want to mark annotations for the Broadcast // component. 
- if (rx_double_buffer_tid == GxfTidNull()) { - HOLOSCAN_GXF_CALL_FATAL(GxfComponentTypeId( - context, "nvidia::gxf::DoubleBufferReceiver", &rx_double_buffer_tid)); + rx_type_name = "nvidia::gxf::DoubleBufferReceiver"; + auto curr_tx_handle = + op->graph_entity()->get(source_cname.c_str()); + if (curr_tx_handle.is_null()) { + HOLOSCAN_LOG_ERROR( + "Failed to get nvidia::gxf::DoubleBufferTransmitter, a default receive capacity " + "and policy will be used for the inserted broadcast component."); + } else { + HOLOSCAN_LOG_TRACE("getting capacity and policy from curr_tx_handle"); + auto p = get_capacity_and_policy(curr_tx_handle); + curr_connector_capacity = p.first; + curr_connector_policy = p.second; } - rx_tid = rx_double_buffer_tid; - - // Get the connector capacity and policy of the current operator's output port. - nvidia::gxf::DoubleBufferReceiver* curr_double_buffer_connector = nullptr; - - HOLOSCAN_GXF_CALL_FATAL( - GxfComponentPointer(context, - source_cid, - rx_tid, - reinterpret_cast(&curr_double_buffer_connector))); - - curr_connector_capacity = curr_double_buffer_connector->capacity_; - curr_connector_policy = curr_double_buffer_connector->policy_; } break; default: HOLOSCAN_LOG_ERROR("Unrecognized connector_type '{}' for source name '{}'", static_cast(connector_type), source_cname); } - gxf_uid_t rx_cid; - HOLOSCAN_GXF_CALL_FATAL(GxfComponentAdd(context, broadcast_eid, rx_tid, "", &rx_cid)); - // Set capacity and policy of the receiver component. 
- HOLOSCAN_GXF_CALL_FATAL( - GxfParameterSetUInt64(context, rx_cid, "capacity", curr_connector_capacity)); - HOLOSCAN_GXF_CALL_FATAL( - GxfParameterSetUInt64(context, rx_cid, "policy", curr_connector_policy)); - - if (rx_term_tid == GxfTidNull()) { - HOLOSCAN_GXF_CALL_FATAL(GxfComponentTypeId( - context, "nvidia::gxf::MessageAvailableSchedulingTerm", &rx_term_tid)); - } - gxf_uid_t rx_term_cid; - HOLOSCAN_GXF_CALL_FATAL( - GxfComponentAdd(context, broadcast_eid, rx_term_tid, "", &rx_term_cid)); - HOLOSCAN_GXF_CALL_FATAL(GxfParameterSetHandle(context, rx_term_cid, "receiver", rx_cid)); - HOLOSCAN_GXF_CALL_FATAL( - GxfParameterSetUInt64(context, rx_term_cid, "min_size", curr_min_size)); - - if (broadcast_tid == GxfTidNull()) { - HOLOSCAN_GXF_CALL_FATAL( - GxfComponentTypeId(context, "nvidia::gxf::Broadcast", &broadcast_tid)); - } - gxf_uid_t broadcast_cid; auto broadcast_component_name = fmt::format("{}_broadcast_component_{}_{}", entity_prefix, op_name, source_cname); - HOLOSCAN_GXF_CALL_FATAL(GxfComponentAdd( - context, broadcast_eid, broadcast_tid, broadcast_component_name.c_str(), &broadcast_cid)); - HOLOSCAN_GXF_CALL_FATAL(GxfParameterSetHandle(context, broadcast_cid, "source", rx_cid)); - - HOLOSCAN_LOG_DEBUG( - "Connected MessageAvailableSchedulingTerm to receiver for Broadcast entity : {}", - broadcast_entity_name); + auto broadcast_codelet = + broadcast_entity->addCodelet("nvidia::gxf::Broadcast", broadcast_component_name.c_str()); + if (broadcast_codelet.is_null()) { + HOLOSCAN_LOG_ERROR("Failed to create broadcast codelet for entity: {}", + broadcast_entity->name()); + } + // Broadcast component's receiver Parameter is named "source" so have to use that here + auto broadcast_rx = broadcast_entity->addReceiver(rx_type_name.c_str(), "source"); + if (broadcast_rx.is_null()) { + HOLOSCAN_LOG_ERROR("Failed to create receiver for broadcast component: {}", + broadcast_entity->name()); + } + broadcast_entity->configReceiver( + "source", curr_connector_capacity, 
curr_connector_policy, curr_min_size); // Connect Broadcast entity's receiver with the transmitter of the current operator - ::holoscan::gxf::add_connection(context, source_cid, rx_cid); + add_connection(source_cid, broadcast_rx->cid()); } } } -} // unnamed namespace - bool GXFExecutor::initialize_fragment() { HOLOSCAN_LOG_DEBUG("Initializing Fragment."); - auto context = context_; - // Initialize the GXF graph by creating GXF entities related to the Holoscan operators in a // topologically sorted order. Operators are created as nodes in the graph of the fragment are // visited. @@ -1237,10 +1317,10 @@ bool GXFExecutor::initialize_fragment() { std::unordered_set visited_nodes; visited_nodes.reserve(operators.size()); - // Keep a list of all the broadcast entity ids, if an operator's output port is connected to - // multiple inputs. The map is indexed by the operators. Each value in the map is indexed by the - // source port name - BroadcastEidMapType broadcast_eids; + // Keep a list of all the nvidia::gxf::GraphEntity entities holding broadcast codelets, if an + // operator's output port is connected to multiple inputs. The map is indexed by the operators. + // Each value in the map is indexed by the source port name. + BroadcastEntityMapType broadcast_entities; // Initialize the indegrees of all nodes in the graph and add root operators to the worklist. 
for (auto& node : operators) { @@ -1283,7 +1363,14 @@ bool GXFExecutor::initialize_fragment() { HOLOSCAN_LOG_DEBUG("Operator: {}", op_name); // Initialize the operator while we are visiting a node in the graph - op->initialize(); + try { + op->initialize(); + } catch (const std::exception& e) { + HOLOSCAN_LOG_ERROR( + "Exception occurred during initialization of operator: '{}' - {}", op->name(), e.what()); + throw; + } + auto op_type = op->operator_type(); HOLOSCAN_LOG_DEBUG("Connecting earlier operators of Op: {}", op_name); @@ -1303,11 +1390,10 @@ bool GXFExecutor::initialize_fragment() { // If the previous operator is found to be one that is connected to the current operator via // the Broadcast component, then add the connection between the Broadcast component and the // current operator's input port. - if (broadcast_eids.find(prev_op) != broadcast_eids.end()) { + if (broadcast_entities.find(prev_op) != broadcast_entities.end()) { // Add transmitter to the prev_op's broadcast component and connect it to op's input port. // Any connected ports are removed from port_map_val. - connect_broadcast_to_previous_op( - context_, fragment_, broadcast_eids, op, prev_op, port_map_val); + connect_broadcast_to_previous_op(broadcast_entities, op, prev_op, port_map_val); } if (port_map_val->size()) { @@ -1351,7 +1437,7 @@ bool GXFExecutor::initialize_fragment() { // visit the operator which is connected to the current operator. 
if (prev_op->id() != -1) { gxf_uid_t target_cid = target_gxf_resource->gxf_cid(); - ::holoscan::gxf::add_connection(context, source_cid, target_cid); + add_connection(source_cid, target_cid); HOLOSCAN_LOG_DEBUG( "Connected directly source : {} -> target : {}", source_port, *target_port); } else { @@ -1391,14 +1477,17 @@ bool GXFExecutor::initialize_fragment() { auto source_gxf_resource = std::dynamic_pointer_cast( op_spec->outputs()[source_port]->connector()); gxf_uid_t source_cid = source_gxf_resource->gxf_cid(); + std::string source_cname = source_gxf_resource->name(); auto connector_type = op_spec->outputs()[source_port]->connector_type(); if (connections.find(source_cid) == connections.end()) { - connections[source_cid] = TargetsInfo{connector_type, std::set{}}; + connections[source_cid] = + TargetsInfo{source_cname, connector_type, std::set{}}; } // For the source port, add a target in the tuple form (next operator, receiving port // name) - connections[source_cid].second.insert(std::make_pair(next_op, target_port)); + std::get>(connections[source_cid]) + .insert(std::make_pair(next_op, target_port)); } } } @@ -1415,19 +1504,20 @@ bool GXFExecutor::initialize_fragment() { // Iterate through downstream connections and find the direct ones to connect, only if // downstream operator is already initialized. This is to handle cycles in the graph. 
for (auto [source_cid, target_info] : connections) { - if (target_info.second.size() == 1) { + auto& [source_cname, connector_type, target_ports] = target_info; + if (target_ports.size() == 1) { // There is a direct connection without Broadcast // Check if next op is already initialized, that means, it's a cycle and we can add the // connection now - auto tmp_next_op = target_info.second.begin()->first; - if (tmp_next_op->id() != -1 && target_info.first != IOSpec::ConnectorType::kUCX) { + auto tmp_next_op = target_ports.begin()->first; + if (tmp_next_op->id() != -1 && connector_type != IOSpec::ConnectorType::kUCX) { // Operator is already initialized HOLOSCAN_LOG_DEBUG("next op {} is already initialized, due to a cycle.", tmp_next_op->name()); auto target_gxf_resource = std::dynamic_pointer_cast( - tmp_next_op->spec()->inputs()[target_info.second.begin()->second]->connector()); + tmp_next_op->spec()->inputs()[target_ports.begin()->second]->connector()); gxf_uid_t target_cid = target_gxf_resource->gxf_cid(); - ::holoscan::gxf::add_connection(context, source_cid, target_cid); + add_connection(source_cid, target_cid); HOLOSCAN_LOG_TRACE( "Next Op {} is connected to the current Op {} as a downstream connection due to a " "cycle.", @@ -1437,11 +1527,10 @@ bool GXFExecutor::initialize_fragment() { } } - // Create the Broadcast components and add their IDs to broadcast_eids, but do not add any + // Create the Broadcast components and add their IDs to broadcast_entities, but do not add any // transmitter to the Broadcast entity. The transmitters will be added later when the incoming // edges to the respective operators are processed. 
- create_broadcast_components( - context, entity_prefix_, op, implicit_broadcast_entities_, broadcast_eids, connections); + create_broadcast_components(op, broadcast_entities, connections); if (op_type != Operator::OperatorType::kVirtual) { for (auto& next_op : graph.get_next_nodes(op)) { if (next_op->id() != -1 && next_op->operator_type() != Operator::OperatorType::kVirtual) { @@ -1454,9 +1543,8 @@ bool GXFExecutor::initialize_fragment() { HOLOSCAN_LOG_ERROR("Could not find port map for {} -> {}", op_name, next_op->name()); return false; } - if (broadcast_eids.find(op) != broadcast_eids.end()) { - connect_broadcast_to_previous_op( - context_, fragment_, broadcast_eids, next_op, op, port_map.value()); + if (broadcast_entities.find(op) != broadcast_entities.end()) { + connect_broadcast_to_previous_op(broadcast_entities, next_op, op, port_map.value()); } } } @@ -1482,63 +1570,14 @@ bool GXFExecutor::initialize_operator(Operator* op) { return false; } - // If the type name is not set, the operator is assumed to be a Holoscan native operator and use - // `holoscan::gxf::GXFWrapper` as the GXF Codelet. - const bool is_native_operator = (op->operator_type() == Operator::OperatorType::kNative); - ops::GXFOperator* gxf_op = static_cast(op); - - const char* codelet_typename = nullptr; - if (is_native_operator) { - codelet_typename = "holoscan::gxf::GXFWrapper"; - } else { - codelet_typename = gxf_op->gxf_typename(); - } - auto& spec = *(op->spec()); - gxf_uid_t eid = 0; - - // Create Entity for the operator if `op_eid_` is 0 - if (op_eid_ == 0) { - const std::string op_entity_name = fmt::format("{}{}", entity_prefix_, op->name()); - const GxfEntityCreateInfo entity_create_info = {op_entity_name.c_str(), - GXF_ENTITY_CREATE_PROGRAM_BIT}; - HOLOSCAN_GXF_CALL_MSG_FATAL( - GxfCreateEntity(context_, &entity_create_info, &eid), - "Unable to create GXF entity for operator '{}'. 
Please check if the " - "operator name is unique.", - op->name()); - } else { - eid = op_eid_; - } + // op_eid_ should only be nonzero if OperatorWrapper wraps a codelet created by GXF. + // In that case GXF has already created the entity and we can't create a GraphEntity. + gxf_uid_t eid = (op_eid_ == 0) ? op->initialize_graph_entity(context_, entity_prefix_) : op_eid_; - gxf_uid_t codelet_cid; // Create Codelet component if `op_cid_` is 0 - if (op_cid_ == 0) { - gxf_tid_t codelet_tid; - HOLOSCAN_GXF_CALL(GxfComponentTypeId(context_, codelet_typename, &codelet_tid)); - HOLOSCAN_GXF_CALL_FATAL( - GxfComponentAdd(context_, eid, codelet_tid, op->name().c_str(), &codelet_cid)); - - // Set the operator to the GXFWrapper if it is a native operator - if (is_native_operator) { - holoscan::gxf::GXFWrapper* gxf_wrapper = nullptr; - HOLOSCAN_GXF_CALL_FATAL(GxfComponentPointer( - context_, codelet_cid, codelet_tid, reinterpret_cast(&gxf_wrapper))); - if (gxf_wrapper) { - gxf_wrapper->set_operator(op); - } else { - HOLOSCAN_LOG_ERROR("Unable to get GXFWrapper for Operator '{}'", op->name()); - } - } else { - // Set the entity id - gxf_op->gxf_eid(eid); - // Set the codelet component id - gxf_op->gxf_cid(codelet_cid); - } - } else { - codelet_cid = op_cid_; - } + gxf_uid_t codelet_cid = (op_cid_ == 0) ? op->add_codelet_to_graph_entity() : op_cid_; // Set GXF Codelet ID as the ID of the operator op->id(codelet_cid); @@ -1556,68 +1595,17 @@ bool GXFExecutor::initialize_operator(Operator* op) { fragment(), context_, eid, io_spec.get(), op_eid_ != 0, op); } - // Create Components for condition - for (const auto& [name, condition] : op->conditions()) { - auto gxf_condition = std::dynamic_pointer_cast(condition); - // Initialize GXF component if it is not already initialized. 
- if (gxf_condition != nullptr && gxf_condition->gxf_context() == nullptr) { - gxf_condition->fragment(fragment()); - - gxf_condition->gxf_eid(eid); // set GXF entity id - } - // Initialize condition - gxf_condition->initialize(); - } - - // Create Components for resource - for (const auto& [name, resource] : op->resources()) { - auto gxf_resource = std::dynamic_pointer_cast(resource); - // Initialize GXF component if it is not already initialized. - if (gxf_resource != nullptr && gxf_resource->gxf_context() == nullptr) { - gxf_resource->fragment(fragment()); + HOLOSCAN_LOG_TRACE("Configuring operator: {}", op->name()); - gxf_resource->gxf_eid(eid); // set GXF entity id - } - // Initialize resource - resource->initialize(); - } + // add Component(s) and/or Resource(s) added as Arg/ArgList to the graph entity + add_component_args_to_graph_entity(op->args(), op->graph_entity()); - // Set arguments - auto& params = spec.params(); - for (auto& arg : op->args()) { - // Find if arg.name() is in spec_->params() - if (params.find(arg.name()) == params.end()) { - HOLOSCAN_LOG_WARN("Argument '{}' is not defined in spec", arg.name()); - continue; - } + // Initialize components and resources (and add any GXF components to the Operator's graph_entity) + op->initialize_conditions(); + op->initialize_resources(); - // Set arg.value() to spec_->params()[arg.name()] - auto& param_wrap = params[arg.name()]; - - HOLOSCAN_LOG_TRACE("GXFOperator '{}':: setting argument '{}'", op->name(), arg.name()); - - ArgumentSetter::set_param(param_wrap, arg); - } - - // Set Handler parameters if it is an operator that wraps an existing GXF Codelet. 
- if (!is_native_operator) { - // Set Handler parameters - for (auto& [key, param_wrap] : params) { - HOLOSCAN_GXF_CALL_WARN_MSG(::holoscan::gxf::GXFParameterAdaptor::set_param( - context_, codelet_cid, key.c_str(), param_wrap), - "GXFOperator '{}':: failed to set GXF parameter '{}'", - op->name(), - key); - HOLOSCAN_LOG_TRACE("GXFOperator '{}':: setting GXF parameter '{}'", op->name(), key); - } - } else { - // Set only default parameter values - for (auto& [key, param_wrap] : params) { - // If no value is specified, the default value will be used by setting an empty argument. - Arg empty_arg(""); - ArgumentSetter::set_param(param_wrap, empty_arg); - } - } + // Set any parameters based on the specified arguments and parameter value defaults. + op->set_parameters(); return true; } @@ -1633,10 +1621,6 @@ bool GXFExecutor::add_receivers(const std::shared_ptr& op, const std::string& new_input_label = fmt::format("{}:{}", receivers_name, iospec_vector.size()); HOLOSCAN_LOG_TRACE("add_receivers: Creating new input port with label '{}'", new_input_label); auto& input_port = downstream_op_spec->input(new_input_label); - // TODO: Currently, there is no convenient API to set the condition of the receivers (input - // ports) - // from the setup() method of the operator. We need to add a new API to set the condition - // of the receivers (input ports) from the setup() method of the operator. // Add the new input port to the vector. iospec_vector.push_back(&input_port); @@ -1650,6 +1634,20 @@ bool GXFExecutor::add_receivers(const std::shared_ptr& op, return true; } +bool GXFExecutor::is_holoscan() const { + bool zero_eid = (op_eid_ == 0); + bool zero_cid = (op_cid_ == 0); + if (zero_eid ^ zero_cid) { + // Both will be zero for Holoscan applications, but nonzero for GXF applications + HOLOSCAN_LOG_ERROR( + "Both op_eid_ and op_cid_ should be zero or nonzero. 
op_eid_: {}, op_cid_: {}", + op_eid_, + op_cid_); + return false; + } + return zero_eid && zero_cid; +} + bool GXFExecutor::initialize_gxf_graph(OperatorGraph& graph) { if (is_gxf_graph_initialized_) { HOLOSCAN_LOG_WARN("GXF graph is already initialized. Skipping initialization."); @@ -1684,16 +1682,27 @@ bool GXFExecutor::initialize_gxf_graph(OperatorGraph& graph) { std::scoped_lock lock{gxf_execution_mutex}; // Additional setup for GXF Application - gxf_uid_t eid; const std::string utility_entity_name = fmt::format("{}_holoscan_util_entity", entity_prefix_); - const GxfEntityCreateInfo entity_create_info = {utility_entity_name.c_str(), - GXF_ENTITY_CREATE_PROGRAM_BIT}; - HOLOSCAN_GXF_CALL_FATAL(GxfCreateEntity(context, &entity_create_info, &eid)); + util_entity_ = std::make_shared(); + auto maybe = util_entity_->setup(context, utility_entity_name.c_str()); + if (!maybe) { + throw std::runtime_error( + fmt::format("Failed to create utility entity: '{}'", utility_entity_name)); + } + gxf_uid_t eid = util_entity_->eid(); + + connections_entity_ = std::make_shared(); + const std::string connections_entity_name = + fmt::format("{}_holoscan_connections_entity", entity_prefix_); + connections_entity_ = std::make_shared(); + maybe = connections_entity_->setup(context_, connections_entity_name.c_str()); + if (!maybe) { + throw std::runtime_error( + "Failed to create entity to hold nvidia::gxf::Connection components."); + } auto scheduler = std::dynamic_pointer_cast(fragment_->scheduler()); - // have to set the application eid before initialize() can be called - scheduler->gxf_eid(eid); - scheduler->initialize(); + scheduler->initialize(); // will call GXFExecutor::initialize_scheduler // Initialize the fragment and its operators if (!initialize_fragment()) { @@ -1703,21 +1712,12 @@ bool GXFExecutor::initialize_gxf_graph(OperatorGraph& graph) { // If DFFT is on, then attach the DFFTCollector EntityMonitor to the main entity if (fragment_->data_flow_tracker()) { - gxf_tid_t 
monitor_tid; - HOLOSCAN_GXF_CALL_FATAL(GxfComponentTypeId(context, "holoscan::DFFTCollector", &monitor_tid)); - - gxf_uid_t monitor_cid; - HOLOSCAN_GXF_CALL_FATAL( - GxfComponentAdd(context, eid, monitor_tid, "dft_tracker", &monitor_cid)); - - holoscan::DFFTCollector* dfft_collector_ptr = nullptr; - HOLOSCAN_GXF_CALL_FATAL(GxfComponentPointer( - context, monitor_cid, monitor_tid, reinterpret_cast(&dfft_collector_ptr))); - if (!dfft_collector_ptr) { - throw std::runtime_error( - fmt::format("Unable to retrieve holoscan::DFFTCollector pointer.")); + auto dft_tracker_handle = util_entity_->add("dft_tracker", {}); + if (dft_tracker_handle.is_null()) { + throw std::runtime_error(fmt::format("Unable to add holoscan::DFFTCollector component.")); } + holoscan::DFFTCollector* dfft_collector_ptr = dft_tracker_handle.get(); dfft_collector_ptr->data_flow_tracker(fragment_->data_flow_tracker()); // Identify leaf and root operators and add to the DFFTCollector object @@ -1730,14 +1730,6 @@ bool GXFExecutor::initialize_gxf_graph(OperatorGraph& graph) { } } - // Cache TIDs for UcxReceiver and UcxTransmitter - gxf_tid_t ucx_receiver_tid = GxfTidNull(); - gxf_tid_t ucx_transmitter_tid = GxfTidNull(); - HOLOSCAN_GXF_CALL_FATAL( - GxfComponentTypeId(context, "nvidia::gxf::UcxReceiver", &ucx_receiver_tid)); - HOLOSCAN_GXF_CALL_FATAL( - GxfComponentTypeId(context, "nvidia::gxf::UcxTransmitter", &ucx_transmitter_tid)); - // network context initialization after connection entities were created (see GXF's program.cpp) if (fragment_->network_context()) { HOLOSCAN_LOG_DEBUG("GXFExecutor::run: initializing NetworkContext"); @@ -1750,26 +1742,35 @@ bool GXFExecutor::initialize_gxf_graph(OperatorGraph& graph) { std::string entity_group_name = "network_entity_group"; auto entity_group_gid = ::holoscan::gxf::add_entity_group(context_, entity_group_name); - std::string device_entity_name = fmt::format("{}gpu_device_entity", entity_prefix_); - std::string device_component_name = 
"gpu_device_component"; - auto [gpu_device_tid, gpu_device_eid] = - ::holoscan::gxf::create_gpu_device_entity(context, device_entity_name); - int32_t gpu_id = static_cast(AppDriver::get_int_env_var("HOLOSCAN_UCX_DEVICE_ID", 0)); - ::holoscan::gxf::create_gpu_device_component( - context, gpu_device_tid, gpu_device_eid, device_component_name, gpu_id); + std::string device_entity_name = fmt::format("{}gpu_device_entity", entity_prefix_); + gpu_device_entity_ = std::make_shared(); + auto maybe = gpu_device_entity_->setup(context, device_entity_name.c_str()); + if (!maybe) { + throw std::runtime_error( + fmt::format("Failed to create GPU device entity: '{}'", device_entity_name)); + } + // TODO (GXF4): should have an addResource to add to resources_ member instead of components_? + auto device_handle = gpu_device_entity_->addComponent( + "nvidia::gxf::GPUDevice", "gpu_device_component", {nvidia::gxf::Arg("dev_id", gpu_id)}); + if (device_handle.is_null()) { + HOLOSCAN_LOG_ERROR("Failed to create GPU device resource for device {}", gpu_id); + } // Note: GxfUpdateEntityGroup // calls Runtime::GxfUpdateEntityGroup(gid, eid) - // which calls EntityGroups::groupAddEntity(gid, eid); (entity_groups_ in SharedContext) - // which calls EntityGroupItem::addEntity for the EntityGroupItem corresponding to gid + // which calls EntityGroups::groupAddEntity(gid, eid); (entity_groups_ in + // SharedContext) + // which calls EntityGroupItem::addEntity for the EntityGroupItem corresponding to + // gid // any eid corresponding to a ResourceBase class like GPUDevice or ThreadPool is // stored in internal resources_ vector // all other eid are stored in the entities vector // add GPUDevice resource to the networking entity group - GXF_ASSERT_SUCCESS(GxfUpdateEntityGroup(context_, entity_group_gid, gpu_device_eid)); + GXF_ASSERT_SUCCESS( + GxfUpdateEntityGroup(context_, entity_group_gid, gpu_device_entity_->eid())); // add the network context to the entity group auto gxf_network_context = @@ 
-1799,16 +1800,10 @@ bool GXFExecutor::initialize_gxf_graph(OperatorGraph& graph) { } // Add implicit broadcast entities to the network entity group if they have a UCX connector - for (auto& broadcast_eid : implicit_broadcast_entities_) { - bool has_ucx_connector = false; - - if (gxf::has_component(context, broadcast_eid, ucx_receiver_tid) || - gxf::has_component(context, broadcast_eid, ucx_transmitter_tid)) { - has_ucx_connector = true; - } - + for (auto& broadcast_entity : implicit_broadcast_entities_) { // Add the entity to the entity group if it has a UCX connector - if (has_ucx_connector) { + if (has_ucx_connector(broadcast_entity)) { + auto broadcast_eid = broadcast_entity->eid(); HOLOSCAN_LOG_DEBUG("Adding implicit broadcast eid '{}' to entity group '{}'", broadcast_eid, entity_group_gid); @@ -1818,36 +1813,19 @@ bool GXFExecutor::initialize_gxf_graph(OperatorGraph& graph) { } else { HOLOSCAN_LOG_DEBUG("GXFExecutor::run: no NetworkContext to initialize"); - // Loop through all operator ports and raise an error if any are UCX-based. - // (UCX-based connections require a UcxContext). + const std::string ucx_error_msg{ + "UCX-based connection found, but there is no NetworkContext."}; + + // Raise an error if any operator has a UCX connector. 
auto operator_graph = static_cast(fragment_->graph()); for (auto& node : operator_graph.get_nodes()) { - auto op_spec = node->spec(); - for (const auto& [_, io_spec] : op_spec->inputs()) { - if (io_spec->connector_type() == IOSpec::ConnectorType::kUCX) { - throw std::runtime_error("UCX-based connection found, but there is no NetworkContext."); - } - } - for (const auto& [_, io_spec] : op_spec->outputs()) { - if (io_spec->connector_type() == IOSpec::ConnectorType::kUCX) { - throw std::runtime_error("UCX-based connection found, but there is no NetworkContext."); - } - } + if (node->has_ucx_connector()) { throw std::runtime_error(ucx_error_msg); } } - // Find any implicit broadcast entities with a UCX connector and raise an error. - for (auto& broadcast_eid : implicit_broadcast_entities_) { - bool has_ucx_connector = false; - - if (gxf::has_component(context, broadcast_eid, ucx_receiver_tid) || - gxf::has_component(context, broadcast_eid, ucx_transmitter_tid)) { - has_ucx_connector = true; - } - + // Raise an error if any broadcast entity has a UCX connector + for (auto& broadcast_entity : implicit_broadcast_entities_) { // Add the entity to the entity group if it has a UCX connector - if (has_ucx_connector) { - throw std::runtime_error("UCX-based connection found, but there is no NetworkContext."); - } + if (has_ucx_connector(broadcast_entity)) { throw std::runtime_error(ucx_error_msg); } } } } @@ -1867,7 +1845,7 @@ void GXFExecutor::activate_gxf_graph() { } } -bool GXFExecutor::run_gxf_graph() { +void GXFExecutor::run_gxf_graph() { auto context = context_; // Install signal handler @@ -1890,25 +1868,37 @@ bool GXFExecutor::run_gxf_graph() { SignalHandler::register_signal_handler(context, SIGTERM, sig_handler); // Run the graph + auto frag_name_display = fragment_->name(); + if (!frag_name_display.empty()) { frag_name_display = "[" + frag_name_display + "] "; } activate_gxf_graph(); - HOLOSCAN_LOG_INFO("Running Graph..."); + HOLOSCAN_LOG_INFO("{}Running Graph...", 
frag_name_display); HOLOSCAN_GXF_CALL_FATAL(GxfGraphRunAsync(context)); - HOLOSCAN_LOG_INFO("Waiting for completion..."); - HOLOSCAN_LOG_INFO("Graph execution waiting. Fragment: {}", fragment_->name()); + HOLOSCAN_LOG_INFO("{}Waiting for completion...", frag_name_display); auto wait_result = HOLOSCAN_GXF_CALL_WARN(GxfGraphWait(context)); - if (wait_result != GXF_SUCCESS) { - // Usually the graph is already deactivated when GxfGraphWait() fails. - is_gxf_graph_activated_ = false; - HOLOSCAN_LOG_ERROR("GxfGraphWait Error: {}", GxfResultStr(wait_result)); - throw RuntimeError(ErrorCode::kFailure, "Failed to wait for graph to complete"); + if (wait_result == GXF_SUCCESS) { + HOLOSCAN_LOG_INFO("{}Deactivating Graph...", frag_name_display); + // Usually the graph is already deactivated by the GXF framework (program.cpp) + // when GxfGraphWait() fails. + HOLOSCAN_GXF_CALL_WARN(GxfGraphDeactivate(context)); } - - HOLOSCAN_LOG_INFO("Graph execution deactivating. Fragment: {}", fragment_->name()); - HOLOSCAN_LOG_INFO("Deactivating Graph..."); - HOLOSCAN_GXF_CALL_WARN(GxfGraphDeactivate(context)); is_gxf_graph_activated_ = false; - HOLOSCAN_LOG_INFO("Graph execution finished. Fragment: {}", fragment_->name()); - return true; + + // TODO: do we want to move the log level of these info messages to debug? 
+ HOLOSCAN_LOG_INFO("{}Graph execution finished.", frag_name_display); + + // clean up any shared pointers to graph entities within operators, scheduler, network context + fragment_->reset_graph_entities(); + + if (wait_result != GXF_SUCCESS) { + const std::string error_msg = + fmt::format("{}Graph execution error: {}", frag_name_display, GxfResultStr(wait_result)); + HOLOSCAN_LOG_ERROR(error_msg); + auto& stored_exception = exception_; + if (stored_exception) { + // Rethrow the stored exception if there is one + std::rethrow_exception(stored_exception); + } + } } bool GXFExecutor::connection_items( @@ -1954,9 +1944,6 @@ void GXFExecutor::register_extensions() { "GXF wrapper to support Holoscan SDK native operators"); extension_factory.add_type("Holoscan message type", {0x61510ca06aa9493b, 0x8a777d0bf87476b7}); - - extension_factory.add_component( - "Holoscan's GXF Tensor type", {0xa02945eaf20e418c, 0x8e6992b68672ce40}); extension_factory.add_type("Holoscan's Tensor type", {0xa5eb0ed57d7f4aa2, 0xb5865ccca0ef955c}); @@ -1992,83 +1979,39 @@ bool GXFExecutor::initialize_scheduler(Scheduler* sch) { } gxf::GXFScheduler* gxf_sch = static_cast(sch); - - auto& spec = *(sch->spec()); - - gxf_uid_t eid = 0; - // Create Entity for the scheduler if `op_eid_` is 0 - if (op_eid_ == 0) { + gxf_sch->gxf_context(context_); + + // op_eid_ and op_cid_ will only be nonzero if OperatorWrapper wraps a codelet created by GXF. + // (i.e. this executor belongs to a GXF application using a Holoscan operator as a codelet) + // In that case we do not create a GraphEntity or a Component for the scheduler.
+ gxf_uid_t eid = op_eid_; + gxf_uid_t scheduler_cid = op_cid_; + if (is_holoscan()) { const std::string scheduler_entity_name = fmt::format("{}{}", entity_prefix_, sch->name()); - const GxfEntityCreateInfo entity_create_info = {scheduler_entity_name.c_str(), - GXF_ENTITY_CREATE_PROGRAM_BIT}; - HOLOSCAN_GXF_CALL_MSG_FATAL( - GxfCreateEntity(context_, &entity_create_info, &eid), - "Unable to create GXF entity for scheduler '{}'. Please check if the " - "scheduler name is unique.", - sch->name()); - } else { - eid = op_eid_; - } - - gxf_uid_t scheduler_cid; - // Create Codelet component if `op_cid_` is 0 - if (op_cid_ == 0) { - gxf_tid_t scheduler_tid; - HOLOSCAN_GXF_CALL_FATAL(GxfComponentTypeId(context_, gxf_sch->gxf_typename(), &scheduler_tid)); - HOLOSCAN_GXF_CALL_FATAL( - GxfComponentAdd(context_, eid, scheduler_tid, sch->name().c_str(), &scheduler_cid)); - - // Set the entity id - gxf_sch->gxf_eid(eid); - // Set the scheduler component id - gxf_sch->gxf_cid(scheduler_cid); - } else { - scheduler_cid = op_cid_; - } - - // Set GXF Scheduler ID as the ID of the scheduler - sch->id(scheduler_cid); - - // Create Components for resource - for (const auto& [name, resource] : sch->resources()) { - auto gxf_resource = std::dynamic_pointer_cast(resource); - // Initialize GXF component if it is not already initialized. 
- if (gxf_resource->gxf_context() == nullptr) { - gxf_resource->fragment(fragment()); - - gxf_resource->gxf_eid(eid); // set GXF entity id - gxf_resource->initialize(); - } - } - - // Set arguments - auto& params = spec.params(); - for (auto& arg : sch->args()) { - // Find if arg.name() is in spec_->params() - if (params.find(arg.name()) == params.end()) { - HOLOSCAN_LOG_WARN("Argument '{}' is not defined in spec", arg.name()); - continue; + scheduler_entity_ = std::make_shared(); + auto maybe = scheduler_entity_->setup(context_, scheduler_entity_name.c_str()); + if (!maybe) { + throw std::runtime_error( + fmt::format("Failed to create entity for scheduler: '{}'", scheduler_entity_name)); } + eid = scheduler_entity_->eid(); + // Set the entity id and graph entity shared pointer + gxf_sch->gxf_graph_entity(scheduler_entity_); + gxf_sch->gxf_eid(eid); - // Set arg.value() to spec_->params()[arg.name()] - auto& param_wrap = params[arg.name()]; + // Create Scheduler component + gxf_sch->gxf_initialize(); + scheduler_cid = gxf_sch->gxf_cid(); - HOLOSCAN_LOG_TRACE("GXFScheduler '{}':: setting argument '{}'", sch->name(), arg.name()); + // initialize all GXF resources and assign them to a graph entity + initialize_gxf_resources(sch->resources(), eid, scheduler_entity_); - ArgumentSetter::set_param(param_wrap, arg); + // Set any parameters based on the specified arguments and parameter value defaults. 
+ add_component_args_to_graph_entity(sch->args(), scheduler_entity_); + sch->set_parameters(); } - - // Set Handler parameters - for (auto& [key, param_wrap] : params) { - HOLOSCAN_LOG_TRACE("GXFScheduler '{}':: setting GXF parameter '{}'", sch->name(), key); - HOLOSCAN_GXF_CALL_WARN_MSG(::holoscan::gxf::GXFParameterAdaptor::set_param( - context_, scheduler_cid, key.c_str(), param_wrap), - "GXFScheduler '{}':: failed to set GXF parameter '{}'", - sch->name(), - key); - // TODO: handle error - } - + // Set GXF Scheduler ID as the ID of the scheduler + sch->id(scheduler_cid); return true; } @@ -2080,88 +2023,155 @@ bool GXFExecutor::initialize_network_context(NetworkContext* network_context) { gxf::GXFNetworkContext* gxf_network_context = static_cast(network_context); - - auto& spec = *(network_context->spec()); - - gxf_uid_t eid = 0; - - // Create Entity for the network_context if `op_eid_` is 0 - if (op_eid_ == 0) { + gxf_network_context->gxf_context(context_); + + // op_eid_ and op_cid_ will only be nonzero if OperatorWrapper wraps a codelet created by GXF. + // (i.e. this executor belongs to a GXF application using a Holoscan operator as a codelet) + // In that case we do not create a GraphEntity or a Component for the network context. + gxf_uid_t eid = op_eid_; + gxf_uid_t network_context_cid = op_cid_; + if (is_holoscan()) { const std::string network_context_entity_name = fmt::format("{}{}", entity_prefix_, network_context->name()); - const GxfEntityCreateInfo entity_create_info = {network_context_entity_name.c_str(), - GXF_ENTITY_CREATE_PROGRAM_BIT}; - HOLOSCAN_GXF_CALL_MSG_FATAL( - GxfCreateEntity(context_, &entity_create_info, &eid), - "Unable to create GXF entity for scheduler '{}'. 
Please check if the " - "scheduler name is unique.", - network_context->name()); - } else { - eid = op_eid_; - } - - gxf_uid_t network_context_cid; - // Create Codelet component if `op_cid_` is 0 - if (op_cid_ == 0) { - gxf_tid_t network_context_tid; - HOLOSCAN_GXF_CALL_FATAL( - GxfComponentTypeId(context_, gxf_network_context->gxf_typename(), &network_context_tid)); - HOLOSCAN_GXF_CALL_FATAL(GxfComponentAdd( - context_, eid, network_context_tid, network_context->name().c_str(), &network_context_cid)); - - // Set the entity id + // TODO (GXF4): add way to check error code and throw runtime_error if setup call failed + network_context_entity_ = std::make_shared(); + auto maybe = network_context_entity_->setup(context_, network_context_entity_name.c_str()); + if (!maybe) { + throw std::runtime_error(fmt::format("Failed to create entity for network context: '{}'", + network_context_entity_name)); + } + eid = network_context_entity_->eid(); + // Set the entity id and graph entity shared pointer + gxf_network_context->gxf_graph_entity(network_context_entity_); gxf_network_context->gxf_eid(eid); - // Set the network_context component id - gxf_network_context->gxf_cid(network_context_cid); - } else { - network_context_cid = op_cid_; - } + // Create NetworkContext component + gxf_network_context->gxf_initialize(); + network_context_cid = gxf_network_context->gxf_cid(); + + // initialize all GXF resources and assign them to a graph entity + initialize_gxf_resources(network_context->resources(), eid, network_context_entity_); + + // Set any parameters based on the specified arguments and parameter value defaults. 
+ add_component_args_to_graph_entity(network_context->args(), network_context_entity_); + network_context->set_parameters(); + } // Set GXF NetworkContext ID as the ID of the network_context network_context->id(network_context_cid); + return true; +} - // Create Components for resource - for (const auto& [name, resource] : network_context->resources()) { - auto gxf_resource = std::dynamic_pointer_cast(resource); - // Initialize GXF component if it is not already initialized. - if (gxf_resource->gxf_context() == nullptr) { - gxf_resource->fragment(fragment()); +bool GXFExecutor::add_condition_to_graph_entity( + std::shared_ptr condition, std::shared_ptr graph_entity) { + if (condition && graph_entity) { + add_component_args_to_graph_entity(condition->args(), graph_entity); + auto gxf_condition = std::dynamic_pointer_cast(condition); - gxf_resource->gxf_eid(eid); // set GXF entity id - gxf_resource->initialize(); + // do not overwrite previous graph entity if this condition is already associated with one + if (gxf_condition && !gxf_condition->gxf_graph_entity()) { + HOLOSCAN_LOG_TRACE( + "Adding Condition '{}' to graph entity '{}'", condition->name(), graph_entity->name()); + gxf_condition->gxf_eid(graph_entity->eid()); + gxf_condition->gxf_graph_entity(graph_entity); + // Don't have to call initialize() here, ArgumentSetter already calls it later. + return true; + } else { + // Non-GXF condition isn't supported, so log an error if this unexpected path is reached. 
+ HOLOSCAN_LOG_ERROR("Failed to cast condition '{}' to holoscan::gxf::GXFCondition", + condition->name()); } } + return false; +} - // Set arguments - auto& params = spec.params(); - for (auto& arg : network_context->args()) { - // Find if arg.name() is in spec_->params() - if (params.find(arg.name()) == params.end()) { - HOLOSCAN_LOG_WARN("Argument '{}' is not defined in spec", arg.name()); - continue; - } - - // Set arg.value() to spec_->params()[arg.name()] - auto& param_wrap = params[arg.name()]; - - HOLOSCAN_LOG_TRACE( - "GXFNetworkContext '{}':: setting argument '{}'", network_context->name(), arg.name()); +bool GXFExecutor::add_resource_to_graph_entity( + std::shared_ptr resource, std::shared_ptr graph_entity) { + if (resource && graph_entity) { + add_component_args_to_graph_entity(resource->args(), graph_entity); + // Native Resources will not be added to the GraphEntity + auto gxf_resource = std::dynamic_pointer_cast(resource); - ArgumentSetter::set_param(param_wrap, arg); + // do not overwrite previous graph entity if this resource is already associated with one + // (e.g. sometimes the same allocator may be used across multiple operators) + if (gxf_resource && !gxf_resource->gxf_graph_entity()) { + HOLOSCAN_LOG_TRACE( + "Adding Resource '{}' to graph entity '{}'", resource->name(), graph_entity->name()); + gxf_resource->gxf_eid(graph_entity->eid()); + gxf_resource->gxf_graph_entity(graph_entity); + // Don't have to call initialize() here, ArgumentSetter already calls it later. 
+ return true; + } } return false; +} - // Set arguments - auto& params = spec.params(); - for (auto& arg : network_context->args()) { - // Find if arg.name() is in spec_->params() - if (params.find(arg.name()) == params.end()) { - HOLOSCAN_LOG_WARN("Argument '{}' is not defined in spec", arg.name()); - continue; - } - - // Set arg.value() to spec_->params()[arg.name()] - auto& param_wrap = params[arg.name()]; - - HOLOSCAN_LOG_TRACE( - "GXFNetworkContext '{}':: setting argument '{}'", network_context->name(), arg.name()); +bool GXFExecutor::add_iospec_to_graph_entity( + IOSpec* io_spec, std::shared_ptr graph_entity) { + if (!io_spec || !graph_entity) { return false; } + auto resource = io_spec->connector(); + bool overall_status = false; + if (!resource) { + HOLOSCAN_LOG_ERROR("IOSpec: failed to cast io_spec->connector() to GXFResource"); + return overall_status; + } + overall_status = add_resource_to_graph_entity(resource, graph_entity); + if (!overall_status) { + HOLOSCAN_LOG_ERROR("IOSpec: failed to add connector '{}' to graph entity", resource->name()); + } + for (auto& [_, condition] : io_spec->conditions()) { + bool condition_status = add_condition_to_graph_entity(condition, graph_entity); + if (!condition_status) { + HOLOSCAN_LOG_ERROR("IOSpec: failed to add condition '{}' to graph entity", condition->name()); + } + overall_status = overall_status && condition_status; } + return overall_status; +} - return true; +void GXFExecutor::add_component_args_to_graph_entity( + std::vector& args, std::shared_ptr graph_entity) { + for (auto& arg : args) { + auto arg_type = arg.arg_type(); + auto element_type = arg_type.element_type(); + if ((element_type != ArgElementType::kResource) && + (element_type != ArgElementType::kCondition) && (element_type != ArgElementType::kIOSpec)) { + continue; + } + auto container_type = arg_type.container_type(); + if ((container_type != ArgContainerType::kNative) && + (container_type != ArgContainerType::kVector)) { + HOLOSCAN_LOG_ERROR( + "Error setting GXF entity for argument '{}': Operator currently 
only supports scalar and " + "vector containers for arguments of Condition, Resource or IOSpec type.", + arg.name()); + continue; + } + if (container_type == ArgContainerType::kNative) { + if (element_type == ArgElementType::kCondition) { + auto condition = std::any_cast>(arg.value()); + add_condition_to_graph_entity(condition, graph_entity); + } else if (element_type == ArgElementType::kResource) { + auto resource = std::any_cast>(arg.value()); + add_resource_to_graph_entity(resource, graph_entity); + } else if (element_type == ArgElementType::kIOSpec) { + auto io_spec = std::any_cast(arg.value()); + add_iospec_to_graph_entity(io_spec, graph_entity); + } + } else if (container_type == ArgContainerType::kVector) { + if (element_type == ArgElementType::kCondition) { + auto conditions = std::any_cast>>(arg.value()); + for (auto& condition : conditions) { + add_condition_to_graph_entity(condition, graph_entity); + } + } else if (element_type == ArgElementType::kResource) { + auto resources = std::any_cast>>(arg.value()); + for (auto& resource : resources) { add_resource_to_graph_entity(resource, graph_entity); } + } else if (element_type == ArgElementType::kIOSpec) { + auto io_specs = std::any_cast>(arg.value()); + for (auto& io_spec : io_specs) { add_iospec_to_graph_entity(io_spec, graph_entity); } + } + } + } } } // namespace holoscan::gxf diff --git a/src/core/executors/gxf/gxf_parameter_adaptor.cpp b/src/core/executors/gxf/gxf_parameter_adaptor.cpp index 39143317..1f784be5 100644 --- a/src/core/executors/gxf/gxf_parameter_adaptor.cpp +++ b/src/core/executors/gxf/gxf_parameter_adaptor.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,7 +21,6 @@ #include #include - namespace holoscan::gxf { GXFParameterAdaptor& GXFParameterAdaptor::get_instance() { diff --git a/src/core/fragment.cpp b/src/core/fragment.cpp index 03698425..a4714e97 100644 --- a/src/core/fragment.cpp +++ b/src/core/fragment.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -35,6 +35,8 @@ #include "holoscan/core/executors/gxf/gxf_executor.hpp" #include "holoscan/core/graphs/flow_graph.hpp" #include "holoscan/core/operator.hpp" +#include "holoscan/core/gxf/gxf_network_context.hpp" +#include "holoscan/core/gxf/gxf_scheduler.hpp" #include "holoscan/core/schedulers/gxf/greedy_scheduler.hpp" using std::string_literals::operator""s; @@ -165,9 +167,7 @@ std::unordered_set nested_yaml_map_keys_(YAML::Node yaml_node) { keys.emplace(key); if (value.IsMap()) { std::unordered_set inner_keys = nested_yaml_map_keys_(it->second); - for (const auto& inner_key : inner_keys) { - keys.emplace(key + "."s + inner_key); - } + for (const auto& inner_key : inner_keys) { keys.emplace(key + "."s + inner_key); } } } return keys; @@ -182,9 +182,7 @@ std::unordered_set Fragment::config_keys() { for (const auto& yaml_node : yaml_nodes) { if (yaml_node.IsMap()) { auto node_keys = nested_yaml_map_keys_(yaml_node); - for (const auto& k : node_keys) { - all_keys.insert(k); - } + for (const auto& k : node_keys) { all_keys.insert(k); } } } return all_keys; @@ -492,9 +490,9 @@ void Fragment::compose_graph() { // Protect against the case where no add_operator or add_flow calls were made if (!graph_) { HOLOSCAN_LOG_ERROR(fmt::format( - "Fragment '{}' does not have any 
operators. Please check that there is at least one call to" - "`add_operator` or `add_flow` during `Fragment::compose`.", - name())); + "Fragment '{}' does not have any operators. Please check that there is at least one call to" + "`add_operator` or `add_flow` during `Fragment::compose`.", + name())); graph(); } } @@ -540,4 +538,15 @@ FragmentPortMap Fragment::port_info() const { return fragment_port_info; } +void Fragment::reset_graph_entities() { + // Explicitly clean up graph entities. This is necessary for Python apps, because the Python + // object lifetime may outlive the Application runtime and these must be released prior to the + // call to `GxfContextDestroy` to avoid a segfault in the `nvidia::gxf::GraphEntity` destructor. + for (auto& op : graph().get_nodes()) { op->reset_graph_entities(); } + auto gxf_sch = std::dynamic_pointer_cast(scheduler()); + if (gxf_sch) { gxf_sch->reset_graph_entities(); } + auto gxf_network_context = std::dynamic_pointer_cast(network_context()); + if (gxf_network_context) { gxf_network_context->reset_graph_entities(); } +} + } // namespace holoscan diff --git a/src/core/graphs/flow_graph.cpp b/src/core/graphs/flow_graph.cpp index 5a1ffef1..a8b1c389 100644 --- a/src/core/graphs/flow_graph.cpp +++ b/src/core/graphs/flow_graph.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -59,41 +59,9 @@ void FlowGraph::add_node(const NodeT& node) { template void FlowGraph::add_flow(const NodeType& node_u, const NodeType& node_v, const EdgeDataType& port_map) { - if (succ_.find(node_u) == succ_.end()) { - if (!node_u) { - HOLOSCAN_LOG_ERROR("Calling add_flow() with nullptr (node_u is nullptr)"); - return; - } - // If there is already a node with the same name, it will raise an error. - if (name_map_.find(node_u->name()) != name_map_.end()) { - HOLOSCAN_LOG_ERROR("Calling add_flow() with a node ('{}') that has a duplicate name", - node_u->name()); - throw RuntimeError(ErrorCode::kDuplicateName); - } - - succ_[node_u] = std::unordered_map(); - pred_[node_u] = std::unordered_map(); - ordered_nodes_.push_back(node_u); - name_map_[node_u->name()] = node_u; - } - if (succ_.find(node_v) == succ_.end()) { - if (!node_v) { - HOLOSCAN_LOG_ERROR("Calling add_flow() with nullptr (node_v is nullptr)"); - return; - } - // If there is already a node with the same name, it will raise an error. 
- if (name_map_.find(node_v->name()) != name_map_.end()) { - HOLOSCAN_LOG_ERROR("Calling add_flow() with a node ('{}') that has a duplicate name", - node_v->name()); - throw RuntimeError(ErrorCode::kDuplicateName); - } - - succ_[node_v] = std::unordered_map(); - pred_[node_v] = std::unordered_map(); - ordered_nodes_.push_back(node_v); - name_map_[node_v->name()] = node_v; - } - + // Note: add_node does nothing if the node was already added + add_node(node_u); + add_node(node_v); auto it_edgedata = succ_[node_u].find(node_v); if (it_edgedata != succ_[node_u].end()) { const auto& datadict = it_edgedata->second; diff --git a/src/core/gxf/gxf_component.cpp b/src/core/gxf/gxf_component.cpp new file mode 100644 index 00000000..a928a9fd --- /dev/null +++ b/src/core/gxf/gxf_component.cpp @@ -0,0 +1,145 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "holoscan/core/gxf/gxf_component.hpp" + +#include + +#include +#include +#include + +#include "gxf/app/arg.hpp" +#include "gxf/app/graph_entity.hpp" +#include "holoscan/core/component_spec.hpp" +#include "holoscan/core/executors/gxf/gxf_executor.hpp" +#include "holoscan/core/fragment.hpp" +#include "holoscan/core/gxf/gxf_utils.hpp" + +namespace holoscan::gxf { + +namespace { + +nvidia::gxf::Handle add_component_to_graph_entity( + gxf_context_t context, std::shared_ptr graph_entity, + const char* type_name, const char* name, const std::vector& arg_list = {}) { + auto null_component = nvidia::gxf::Handle::Null(); + gxf_tid_t derived_tid = GxfTidNull(); + gxf_tid_t base_tid = GxfTidNull(); + bool is_derived = false; + gxf_result_t result; + result = GxfComponentTypeId(context, type_name, &derived_tid); + if (result != GXF_SUCCESS) { return null_component; } + result = GxfComponentTypeId(context, "nvidia::gxf::Codelet", &base_tid); + if (result != GXF_SUCCESS) { return null_component; } + result = GxfComponentIsBase(context, derived_tid, base_tid, &is_derived); + if (result != GXF_SUCCESS) { return null_component; } + if (is_derived) { return graph_entity->addCodelet(type_name, name); } + result = GxfComponentTypeId(context, "nvidia::gxf::SchedulingTerm", &base_tid); + if (result != GXF_SUCCESS) { return null_component; } + result = GxfComponentIsBase(context, derived_tid, base_tid, &is_derived); + if (result != GXF_SUCCESS) { return null_component; } + if (is_derived) { return graph_entity->addSchedulingTerm(type_name, name, arg_list); } + + // Commented out use of addTransmitter or addReceiver as this have additional restrictions or + // defaults in GXF that we don't currently want for Holoscan. Transmitters and receivers will + // just be added via addComponent below instead. 
+ + // result = GxfComponentTypeId(context, "nvidia::gxf::Transmitter", &base_tid); + // if (result != GXF_SUCCESS) { return null_component; } + // result = GxfComponentIsBase(context, derived_tid, base_tid, &is_derived); + // if (result != GXF_SUCCESS) { return null_component; } + // bool omit_term = true; // do not automatically add a scheduling term for rx/tx + // if (is_derived) { return graph_entity->addTransmitter(type_name, name, arg_list, omit_term); } + // result = GxfComponentTypeId(context, "nvidia::gxf::Receiver", &base_tid); + // if (result != GXF_SUCCESS) { return null_component; } + // result = GxfComponentIsBase(context, derived_tid, base_tid, &is_derived); + // if (result != GXF_SUCCESS) { return null_component; } + // if (is_derived) { return graph_entity->addReceiver(type_name, name, arg_list, omit_term); } + + result = GxfComponentTypeId(context, "nvidia::gxf::Clock", &base_tid); + if (result != GXF_SUCCESS) { return null_component; } + result = GxfComponentIsBase(context, derived_tid, base_tid, &is_derived); + if (result != GXF_SUCCESS) { return null_component; } + if (is_derived) { return graph_entity->addClock(type_name, name, arg_list); } + result = GxfComponentTypeId(context, "nvidia::gxf::Component", &base_tid); + if (result != GXF_SUCCESS) { return null_component; } + result = GxfComponentIsBase(context, derived_tid, base_tid, &is_derived); + if (result != GXF_SUCCESS) { return null_component; } + if (is_derived) { return graph_entity->addComponent(type_name, name, arg_list); } + HOLOSCAN_LOG_ERROR("type_name {} is not of Component type", type_name); + return nvidia::gxf::Handle::Null(); +} + +} // namespace + +void GXFComponent::gxf_initialize() { + if (gxf_context_ == nullptr) { + HOLOSCAN_LOG_ERROR("Initializing with null GXF context"); + return; + } + if (gxf_eid_ == 0) { + HOLOSCAN_LOG_ERROR("Initializing with null GXF Entity"); + return; + } + + const char* type_name = gxf_typename(); + // set the type id + 
HOLOSCAN_GXF_CALL(GxfComponentTypeId(gxf_context_, type_name, &gxf_tid_)); + + if (gxf_graph_entity_) { + HOLOSCAN_LOG_TRACE("Initializing component '{}' in entity '{}' via GraphEntity", + gxf_cname_, + gxf_graph_entity_->eid()); + const char* name = gxf_cname_.c_str(); + auto handle = add_component_to_graph_entity(gxf_context_, gxf_graph_entity_, type_name, name); + if (handle.is_null()) { + HOLOSCAN_LOG_ERROR("Failed to add component '{}' of type: '{}'", name, type_name); + return; + } + gxf_component_ = handle; + gxf_cid_ = handle->cid(); + } else { + // TODO: make sure all components always get initialized via GraphEntity so we can + // remove this code path. Some cases such as passing Arg of type Condition or + // Resource to make_operator will currently still use this code path. + HOLOSCAN_LOG_TRACE( + "Initializing component '{}' in entity '{}' via GxfComponentAdd", gxf_cname_, gxf_eid_); + HOLOSCAN_GXF_CALL( + GxfComponentAdd(gxf_context_, gxf_eid_, gxf_tid_, gxf_cname().c_str(), &gxf_cid_)); + } + + // TODO: replace gxf_cptr_ with Handle? + HOLOSCAN_GXF_CALL( + GxfComponentPointer(gxf_context_, gxf_cid_, gxf_tid_, reinterpret_cast(&gxf_cptr_))); +} + +void GXFComponent::set_gxf_parameter(const std::string& component_name, const std::string& key, + ParameterWrapper& param_wrap) { + HOLOSCAN_LOG_TRACE("GXF component '{}' of type '{}': setting GXF parameter '{}'", + component_name, + gxf_typename(), + key); + HOLOSCAN_GXF_CALL_WARN_MSG(::holoscan::gxf::GXFParameterAdaptor::set_param( + gxf_context_, gxf_cid_, key.c_str(), param_wrap), + "component '{}':: failed to set GXF parameter '{}'", + component_name, + key); + // TODO: handle error +} + +} // namespace holoscan::gxf diff --git a/src/core/gxf/gxf_condition.cpp b/src/core/gxf/gxf_condition.cpp index 6cc3b456..fea5184d 100644 --- a/src/core/gxf/gxf_condition.cpp +++ b/src/core/gxf/gxf_condition.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. 
All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -19,7 +19,9 @@ #include +#include #include +#include #include "holoscan/core/component_spec.hpp" #include "holoscan/core/executors/gxf/gxf_executor.hpp" @@ -67,33 +69,28 @@ void GXFCondition::initialize() { HOLOSCAN_LOG_ERROR("No component spec for GXFCondition '{}'", name()); return; } - auto& spec = *spec_; // Set arguments - auto& params = spec.params(); - for (auto& arg : args_) { - // Find if arg.name() is in spec.params() - if (params.find(arg.name()) == params.end()) { - HOLOSCAN_LOG_WARN("Argument '{}' not found in spec.params()", arg.name()); - continue; - } + update_params_from_args(); - // Set arg.value() to spec.params()[arg.name()] - auto& param_wrap = params[arg.name()]; + // Set Handler parameters + for (auto& [key, param_wrap] : spec_->params()) { set_gxf_parameter(name_, key, param_wrap); } + is_initialized_ = true; +} - HOLOSCAN_LOG_TRACE("GXFCondition '{}':: setting argument '{}'", name(), arg.name()); +void GXFCondition::add_to_graph_entity(Operator* op) { + if (gxf_context_ == nullptr) { + // cannot reassign to a different graph entity if the condition was already initialized with GXF + if (gxf_graph_entity_ && is_initialized_) { return; } - ArgumentSetter::set_param(param_wrap, arg); - } - - // Set Handler parameters - for (auto& [key, param_wrap] : params) { - HOLOSCAN_GXF_CALL(::holoscan::gxf::GXFParameterAdaptor::set_param( - gxf_context_, gxf_cid_, key.c_str(), param_wrap)); - // TODO: handle error - HOLOSCAN_LOG_TRACE("GXFCondition '{}':: setting GXF parameter '{}'", name(), key); + gxf_graph_entity_ = op->graph_entity(); + fragment_ = op->fragment(); + if (gxf_graph_entity_) { + gxf_context_ = gxf_graph_entity_->context(); + gxf_eid_ = gxf_graph_entity_->eid(); + } } - is_initialized_ = true; + 
this->initialize(); } } // namespace holoscan::gxf diff --git a/src/core/gxf/gxf_execution_context.cpp b/src/core/gxf/gxf_execution_context.cpp index d903108e..58d34f68 100644 --- a/src/core/gxf/gxf_execution_context.cpp +++ b/src/core/gxf/gxf_execution_context.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -37,7 +37,7 @@ GXFExecutionContext::GXFExecutionContext(gxf_context_t context, std::shared_ptr gxf_input_context, std::shared_ptr gxf_output_context) : gxf_input_context_(gxf_input_context), gxf_output_context_(gxf_output_context) { - context_ = context; + context_ = context; } } // namespace holoscan::gxf diff --git a/src/core/gxf/gxf_extension_manager.cpp b/src/core/gxf/gxf_extension_manager.cpp index fcadc318..ebe55ddd 100644 --- a/src/core/gxf/gxf_extension_manager.cpp +++ b/src/core/gxf/gxf_extension_manager.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -178,6 +178,14 @@ bool GXFExtensionManager::load_extensions_from_yaml(const YAML::Node& node, bool try { for (const auto& entry : node[key.c_str()]) { auto file_name = entry.as(); + // Warn regarding extension removed in Holoscan 2.0. + if (file_name.find("libgxf_stream_playback.so") != std::string::npos) { + HOLOSCAN_LOG_WARN( + "As of Holoscan 2.0, VideoStreamReplayerOp and VideoStreamRecorderOp no longer require " + "specifying the libgxf_stream_playback.so extension. 
This extension is no longer " + "shipped with Holoscan and should be removed from the application's YAML config file."); + continue; + } auto result = load_extension(file_name, no_error_message, search_path_envs); if (!result) { return false; } } diff --git a/src/core/gxf/gxf_network_context.cpp b/src/core/gxf/gxf_network_context.cpp index fc95b7e4..0dc17e2b 100644 --- a/src/core/gxf/gxf_network_context.cpp +++ b/src/core/gxf/gxf_network_context.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,6 +17,28 @@ #include "holoscan/core/gxf/gxf_network_context.hpp" +#include "holoscan/core/component_spec.hpp" + // network context initialization is delayed until runtime via // `GXFExecutor::initialize_network_context` -namespace holoscan::gxf {} +namespace holoscan::gxf { + +void GXFNetworkContext::set_parameters() { + update_params_from_args(); + + // Set Handler parameters + for (auto& [key, param_wrap] : spec_->params()) { set_gxf_parameter(name_, key, param_wrap); } +} + +void GXFNetworkContext::reset_graph_entities() { + HOLOSCAN_LOG_TRACE( + "GXFNetworkContext '{}' of type '{}'::reset_graph_entities", gxf_cname_, gxf_typename()); + + // Reset GraphEntity of resources_ and spec_->args() of Scheduler + NetworkContext::reset_graph_entities(); + + // Reset the GraphEntity of this GXFNetworkContext itself + reset_gxf_graph_entity(); +} + +} // namespace holoscan::gxf diff --git a/src/core/gxf/gxf_operator.cpp b/src/core/gxf/gxf_operator.cpp index 446b3b45..ec3fc3d4 100644 --- a/src/core/gxf/gxf_operator.cpp +++ b/src/core/gxf/gxf_operator.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,6 +15,8 @@ * limitations under the License. */ +#include + #include "holoscan/core/gxf/gxf_operator.hpp" namespace holoscan::ops { @@ -24,4 +26,33 @@ void GXFOperator::initialize() { Operator::initialize(); } +gxf_uid_t GXFOperator::add_codelet_to_graph_entity() { + HOLOSCAN_LOG_TRACE("calling graph_entity()->addCodelet for {}", name_); + if (!graph_entity_) { throw std::runtime_error("graph entity is not initialized"); } + auto codelet_handle = graph_entity_->addCodelet(gxf_typename(), name_.c_str()); + if (!codelet_handle) { + throw std::runtime_error("Failed to add codelet of type " + std::string(gxf_typename())); + } + gxf_uid_t codelet_cid = codelet_handle->cid(); + gxf_eid_ = graph_entity_->eid(); + gxf_cid_ = codelet_cid; + gxf_context_ = graph_entity_->context(); + HOLOSCAN_LOG_TRACE("\tadded codelet with cid = {}", codelet_handle->cid()); + return codelet_cid; +} + +void GXFOperator::set_parameters() { + update_params_from_args(); + + // Set Handler parameters + for (auto& [key, param_wrap] : spec_->params()) { + HOLOSCAN_GXF_CALL_WARN_MSG(::holoscan::gxf::GXFParameterAdaptor::set_param( + gxf_context_, gxf_cid_, key.c_str(), param_wrap), + "GXFOperator '{}':: failed to set GXF parameter '{}'", + name_, + key); + HOLOSCAN_LOG_TRACE("GXFOperator '{}':: setting GXF parameter '{}'", name_, key); + } +} + } // namespace holoscan::ops diff --git a/src/core/gxf/gxf_resource.cpp b/src/core/gxf/gxf_resource.cpp index 7155b31f..546ca0c1 100644 --- a/src/core/gxf/gxf_resource.cpp +++ b/src/core/gxf/gxf_resource.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -67,6 +67,12 @@ void GXFResource::initialize() { // operator, the following code wouldn't be executed unless user explicitly calls // Resource::initialize() in Fragment::compose() method. if (gxf_eid_ == 0) { + HOLOSCAN_LOG_WARN( + "Resource '{}' of type '{}' initialized independent of a parent entity. This typically " + "occurs if initialize() was called directly rather than allowing GXFExecutor to " + "automatically initialize the resource.", + gxf_cname_, + gxf_typename()); const GxfEntityCreateInfo entity_create_info = {nullptr, GXF_ENTITY_CREATE_PROGRAM_BIT}; HOLOSCAN_GXF_CALL_FATAL(GxfCreateEntity(gxf_context_, &entity_create_info, &gxf_eid_)); } @@ -85,45 +91,45 @@ void GXFResource::initialize() { return; } - // Set arguments - auto& params = spec_->params(); - - for (auto& arg : args()) { - // Find if arg.name() is in spec_->params() - if (params.find(arg.name()) == params.end()) { - HOLOSCAN_LOG_WARN("Argument '{}' is not defined in spec", arg.name()); - continue; - } - - // Set arg.value() to spec_->params()[arg.name()] - auto& param_wrap = params[arg.name()]; - - HOLOSCAN_LOG_TRACE("GXFResource '{}':: setting argument '{}'", name(), arg.name()); - - ArgumentSetter::set_param(param_wrap, arg); - } + update_params_from_args(); static gxf_tid_t allocator_tid = GxfTidNull(); // issue 4336947 // Set Handler parameters - for (auto& [key, param_wrap] : params) { + for (auto& [key, param_wrap] : spec_->params()) { // Issue 4336947: dev_id parameter for allocator needs to be handled manually bool dev_id_handled = false; if (key.compare(std::string("dev_id")) == 0) { + if (!gxf_graph_entity_) { + HOLOSCAN_LOG_ERROR( + "`dev_id` parameter found, but gxf_graph_entity_ was not initialized so it could not " + "be added to the entity group. 
This parameter will be ignored and default GPU device 0 " + "will be used"); + continue; + } + gxf_tid_t derived_tid = GxfTidNull(); + bool is_derived = false; + gxf_result_t tid_result; + tid_result = GxfComponentTypeId(gxf_context_, gxf_typename(), &derived_tid); + if (tid_result != GXF_SUCCESS) { + HOLOSCAN_LOG_ERROR( + "Unable to get component type id of '{}': {}", gxf_typename(), tid_result); + } if (GxfTidIsNull(allocator_tid)) { - gxf_result_t tid_result = - GxfComponentTypeId(gxf_context_, "nvidia::gxf::Allocator", &allocator_tid); + tid_result = GxfComponentTypeId(gxf_context_, "nvidia::gxf::Allocator", &allocator_tid); if (tid_result != GXF_SUCCESS) { - HOLOSCAN_LOG_ERROR("Unable to get component type id of 'nvidia::gxf::Allocator' : {}", + HOLOSCAN_LOG_ERROR("Unable to get component type id of 'nvidia::gxf::Allocator': {}", tid_result); } } - - gxf_uid_t allocator_cid; - // Check if this resource (component) is subclass of nvidia::gxf::Allocator - auto cid_result = GxfComponentFind( - gxf_context_, gxf_eid_, allocator_tid, gxf_cname_.c_str(), nullptr, &allocator_cid); - if (cid_result == GXF_SUCCESS) { + tid_result = GxfComponentIsBase(gxf_context_, derived_tid, allocator_tid, &is_derived); + if (tid_result != GXF_SUCCESS) { + HOLOSCAN_LOG_ERROR( + "Unable to get determine if '{}' is derived from 'nvidia::gxf::Allocator': {}", + gxf_typename(), + tid_result); + } + if (is_derived) { HOLOSCAN_LOG_DEBUG( "The dev_id parameter is deprecated by GXF and will be removed from " "Holoscan SDK in the future."); @@ -131,37 +137,42 @@ void GXFResource::initialize() { auto dev_id_param = *std::any_cast*>(param_wrap.value()); if (dev_id_param.has_value()) { int32_t device_id = dev_id_param.get(); - // TODO: is this name guaranteed unique for creating the entity group? 
+ + auto devices = gxf_graph_entity_->findAll(); + if (devices.size() > 0) { + HOLOSCAN_LOG_WARN("Existing entity already has a GPUDevice resource"); + } + + // Create an EntityGroup to associate the GPUDevice with this resource std::string entity_group_name = fmt::format("{}_eid{}_dev_id{}_group", name(), gxf_eid_, device_id); auto entity_group_gid = ::holoscan::gxf::add_entity_group(gxf_context_, entity_group_name); - std::string device_entity_name = - fmt::format("{}_eid{}_gpu_device_id{}_entity", name(), gxf_eid_, device_id); + // Add GPUDevice component to the same entity as this resource + // TODO (GXF4): requested an addResource method to handle nvidia::gxf::ResourceBase types std::string device_component_name = fmt::format("{}_eid{}_gpu_device_id{}_component", name(), gxf_eid_, device_id); - auto [gpu_device_tid, gpu_device_eid] = - ::holoscan::gxf::create_gpu_device_entity(gxf_context_, device_entity_name); - HOLOSCAN_LOG_DEBUG("{} eid = {}", device_entity_name, gpu_device_eid); - - ::holoscan::gxf::create_gpu_device_component( - gxf_context_, gpu_device_tid, gpu_device_eid, device_component_name, device_id); - HOLOSCAN_LOG_DEBUG("Adding GPUDevice with id {} to entity group containing resource '{}'", - device_id, - name()); - // Add this Resource and the newly created GPUDevice to this new entity group - GXF_ASSERT_SUCCESS(GxfUpdateEntityGroup(gxf_context_, entity_group_gid, gxf_eid_)); - GXF_ASSERT_SUCCESS(GxfUpdateEntityGroup(gxf_context_, entity_group_gid, gpu_device_eid)); + auto dev_handle = + gxf_graph_entity_->addComponent("nvidia::gxf::GPUDevice", + device_component_name.c_str(), + {nvidia::gxf::Arg("dev_id", device_id)}); + if (dev_handle.is_null()) { + HOLOSCAN_LOG_ERROR("Failed to create GPUDevice for resource '{}'", name_); + } else { + // TODO: warn and handle case if the resource was already in a different entity group + + // The GPUDevice and this resource have the same eid. + // Make sure their eid is added to the newly created entity group.
+ GXF_ASSERT_SUCCESS(GxfUpdateEntityGroup(gxf_context_, entity_group_gid, gxf_eid_)); + } dev_id_handled = true; } } } - if (!dev_id_handled) { - HOLOSCAN_GXF_CALL(::holoscan::gxf::GXFParameterAdaptor::set_param( - gxf_context_, gxf_cid_, key.c_str(), param_wrap)); - } - + HOLOSCAN_LOG_TRACE( + "GXF component '{}' of type '{}': setting GXF parameter '{}'", name_, gxf_typename(), key); + if (!dev_id_handled) { set_gxf_parameter(name_, key, param_wrap); } // TODO: handle error HOLOSCAN_LOG_TRACE("GXFResource '{}':: setting GXF parameter '{}'", name(), key); } @@ -169,4 +180,19 @@ void GXFResource::initialize() { is_initialized_ = true; } +void GXFResource::add_to_graph_entity(Operator* op) { + if (gxf_context_ == nullptr) { + // cannot reassign to a different graph entity if the resource was already initialized with GXF + if (gxf_graph_entity_ && is_initialized_) { return; } + + gxf_graph_entity_ = op->graph_entity(); + fragment_ = op->fragment(); + if (gxf_graph_entity_) { + gxf_context_ = gxf_graph_entity_->context(); + gxf_eid_ = gxf_graph_entity_->eid(); + } + } + this->initialize(); +} + } // namespace holoscan::gxf diff --git a/src/core/gxf/gxf_scheduler.cpp b/src/core/gxf/gxf_scheduler.cpp index fb320923..b9310eaf 100644 --- a/src/core/gxf/gxf_scheduler.cpp +++ b/src/core/gxf/gxf_scheduler.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -18,6 +18,7 @@ #include #include +#include "holoscan/core/component_spec.hpp" #include "holoscan/core/gxf/gxf_scheduler.hpp" namespace holoscan::gxf { @@ -32,4 +33,22 @@ nvidia::gxf::Clock* GXFScheduler::gxf_clock() { } } +void GXFScheduler::set_parameters() { + update_params_from_args(); + + // Set Handler parameters + for (auto& [key, param_wrap] : spec_->params()) { set_gxf_parameter(name_, key, param_wrap); } +} + +void GXFScheduler::reset_graph_entities() { + HOLOSCAN_LOG_TRACE( + "GXFScheduler '{}' of type '{}'::reset_graph_entities", gxf_cname_, gxf_typename()); + + // Reset GraphEntity of resources_ and spec_->args() of Scheduler + Scheduler::reset_graph_entities(); + + // Reset the GraphEntity of this GXFScheduler itself + reset_gxf_graph_entity(); +} + } // namespace holoscan::gxf diff --git a/src/core/gxf/gxf_tensor.cpp b/src/core/gxf/gxf_tensor.cpp deleted file mode 100644 index 916c233a..00000000 --- a/src/core/gxf/gxf_tensor.cpp +++ /dev/null @@ -1,340 +0,0 @@ -/* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. - * SPDX-License-Identifier: Apache-2.0 - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "holoscan/core/gxf/gxf_tensor.hpp" - -#include -#include -#include - -#include "holoscan/core/common.hpp" - -namespace holoscan::gxf { - -struct GXFDataType { - nvidia::gxf::PrimitiveType element_type; - int64_t bytes_per_element; -}; - -static GXFDataType dtype2gxfdtype(DLDataType dtype) { - GXFDataType gxf_dtype; - int64_t bits = dtype.bits; - gxf_dtype.bytes_per_element = dtype.bits / 8; - - switch (dtype.code) { - case kDLInt: - switch (bits) { - case 8: - gxf_dtype.element_type = nvidia::gxf::PrimitiveType::kInt8; - break; - case 16: - gxf_dtype.element_type = nvidia::gxf::PrimitiveType::kInt16; - break; - case 32: - gxf_dtype.element_type = nvidia::gxf::PrimitiveType::kInt32; - break; - case 64: - gxf_dtype.element_type = nvidia::gxf::PrimitiveType::kInt64; - break; - default: - throw std::runtime_error( - fmt::format("Unsupported DLPack data type (code: {}, bits: {}, lanes: {})", - dtype.code, - dtype.bits, - dtype.lanes)); - } - break; - case kDLUInt: - switch (bits) { - case 8: - gxf_dtype.element_type = nvidia::gxf::PrimitiveType::kUnsigned8; - break; - case 16: - gxf_dtype.element_type = nvidia::gxf::PrimitiveType::kUnsigned16; - break; - case 32: - gxf_dtype.element_type = nvidia::gxf::PrimitiveType::kUnsigned32; - break; - case 64: - gxf_dtype.element_type = nvidia::gxf::PrimitiveType::kUnsigned64; - break; - default: - throw std::runtime_error( - fmt::format("Unsupported DLPack data type (code: {}, bits: {}, lanes: {})", - dtype.code, - dtype.bits, - dtype.lanes)); - } - break; - case kDLFloat: - switch (bits) { - case 32: - gxf_dtype.element_type = nvidia::gxf::PrimitiveType::kFloat32; - break; - case 64: - gxf_dtype.element_type = nvidia::gxf::PrimitiveType::kFloat64; - break; - default: - throw std::runtime_error( - fmt::format("Unsupported DLPack data type (code: {}, bits: {}, lanes: {})", - dtype.code, - dtype.bits, - dtype.lanes)); - } - break; - case kDLComplex: - switch (bits) { - case 64: - gxf_dtype.element_type = 
nvidia::gxf::PrimitiveType::kComplex64; - break; - case 128: - gxf_dtype.element_type = nvidia::gxf::PrimitiveType::kComplex128; - break; - default: - throw std::runtime_error( - fmt::format("Unsupported DLPack data type (code: {}, bits: {}, lanes: {})", - dtype.code, - dtype.bits, - dtype.lanes)); - } - break; - default: - throw std::runtime_error( - fmt::format("Unsupported DLPack data type (code: {}, bits: {}, lanes: {})", - dtype.code, - dtype.bits, - dtype.lanes)); - } - return gxf_dtype; -} - -GXFTensor::GXFTensor(std::shared_ptr& dl_ctx) : dl_ctx_(dl_ctx) { - auto& dl_managed_tensor = dl_ctx_->tensor; - auto& dl_tensor = dl_managed_tensor.dl_tensor; - - const uint32_t rank = dl_tensor.ndim; - const auto shape = [&dl_tensor, &rank]() { - std::array shape; - for (uint32_t index = 0; index < rank; ++index) { shape[index] = dl_tensor.shape[index]; } - return nvidia::gxf::Shape(shape, rank); - }(); - - const GXFDataType gxf_dtype = dtype2gxfdtype(dl_tensor.dtype); - const nvidia::gxf::PrimitiveType element_type = gxf_dtype.element_type; - const uint64_t bytes_per_element = gxf_dtype.bytes_per_element; - - const auto strides = [&dl_tensor, &rank, &shape, &bytes_per_element]() { - nvidia::gxf::Tensor::stride_array_t strides; - // If strides is not set, set it to the default strides - if (dl_tensor.strides == nullptr) { - strides = nvidia::gxf::ComputeTrivialStrides(shape, bytes_per_element); - } else { - for (uint32_t index = 0; index < rank; ++index) { - // GXF Tensor's stride is in bytes, but DLPack's stride is in elements - strides[index] = dl_tensor.strides[index] * bytes_per_element; - } - } - return strides; - }(); - - nvidia::gxf::MemoryStorageType storage_type = nvidia::gxf::MemoryStorageType::kDevice; - switch (dl_tensor.device.device_type) { - case kDLCUDAHost: - storage_type = nvidia::gxf::MemoryStorageType::kHost; - break; - case kDLCUDA: - storage_type = nvidia::gxf::MemoryStorageType::kDevice; - break; - case kDLCPU: - storage_type = 
nvidia::gxf::MemoryStorageType::kSystem; - break; - default: - throw std::runtime_error(fmt::format("Unsupported DLPack device type (device_type: {})", - dl_tensor.device.device_type)); - } - - this->wrapMemory(shape, - element_type, - bytes_per_element, - strides, - storage_type, - static_cast(dl_tensor.data) + - dl_tensor.byte_offset, // shift the pointer by the byte offset - [dl_ctx = dl_ctx_](void*) mutable { - dl_ctx.reset(); - return nvidia::gxf::Success; - }); -} - -// The number of mutexes to be used for GXFTensor::GXFTensor(nvidia::gxf::Tensor&, int64_t). -constexpr int HASH_MUTEX_COUNT = 257; ///< A large-enough prime number - -/** - * @brief Generate a hash from the provided value using a straightforward index hash function. - * - * This function is primarily designed to select a mutex in the - * `GXFTensor::GXFTensor(nvidia::gxf::Tensor&, int64_t)` method. - * The hashing can be particularly beneficial when the 'value' represents a tensor's address. - * - * @param value The input value to hash. - * @return The resulting hash value. - */ -static uint64_t simple_index_hash(uint64_t value) { - value ^= (value >> 32); - value ^= (value >> 16); - return (value % HASH_MUTEX_COUNT) + 1; -} - -GXFTensor::GXFTensor(nvidia::gxf::Tensor& tensor, int64_t id) { - // Note:: Issue 4272363 - static std::mutex mutexes_[HASH_MUTEX_COUNT + 1]; - // Skip mutex usage if the user explicitly sets the 'id' to -1. 
- std::optional> lock; - if (id != -1) { - auto mutex_index = simple_index_hash(static_cast(id)); - lock.emplace(mutexes_[mutex_index]); - } - - // Get the tensor info - const auto shape = tensor.shape(); - const auto element_type = tensor.element_type(); - const auto bytes_per_element = tensor.bytes_per_element(); - const auto storage_type = tensor.storage_type(); - const auto pointer = tensor.pointer(); - const auto shape_rank = shape.rank(); - - // Move the memory buffer from 'tensor' to 'buffer' variable with a shared pointer - auto buffer = std::make_shared(std::move(tensor.move_buffer())); - - dl_ctx_ = std::make_shared(); - dl_ctx_->memory_ref = buffer; - auto& dl_managed_tensor = dl_ctx_->tensor; - auto& dl_tensor = dl_managed_tensor.dl_tensor; - - auto& buffer_shape = buffer->dl_shape; - auto& buffer_strides = buffer->dl_strides; - - stride_array_t strides; - buffer_shape.reserve(shape_rank); - buffer_strides.reserve(shape_rank); - - for (uint32_t index = 0; index < shape_rank; ++index) { - const auto stride = tensor.stride(index); - strides[index] = stride; - - buffer_shape.push_back(shape.dimension(index)); - // DLPack's stride (buffer_strides) is in elements but GXF Tensor's stride is in bytes - buffer_strides.push_back(stride / bytes_per_element); - } - - // Reinsert the MemoryBuffer into the 'tensor' the new deallocator (just holding - // a shared pointer to the memory buffer so that releasing it would be handled by the shared - // pointer's destructor). 
- tensor.wrapMemory(shape, - element_type, - bytes_per_element, - strides, - storage_type, - pointer, - [buffer = buffer](void*) mutable { - buffer.reset(); - return nvidia::gxf::Success; - }); - - // Do the same for the 'this' object - this->wrapMemory(shape, - element_type, - bytes_per_element, - strides, - storage_type, - pointer, - [buffer = buffer](void*) mutable { - buffer.reset(); - return nvidia::gxf::Success; - }); - - // Set the DLManagedTensorCtx - dl_managed_tensor.manager_ctx = nullptr; // not used - dl_managed_tensor.deleter = nullptr; // not used - - dl_tensor.data = this->pointer(); - dl_tensor.device = this->device(); - dl_tensor.ndim = this->shape().rank(); - dl_tensor.dtype = this->dtype(); - dl_tensor.shape = buffer_shape.data(); - dl_tensor.strides = buffer_strides.data(); - dl_tensor.byte_offset = 0; -} - -DLDevice GXFTensor::device() const { - switch (storage_type()) { - case nvidia::gxf::MemoryStorageType::kSystem: - return DLDevice{kDLCPU, 0}; - case nvidia::gxf::MemoryStorageType::kHost: - case nvidia::gxf::MemoryStorageType::kDevice: - return dldevice_from_pointer(pointer()); - default: - throw std::runtime_error(fmt::format("Unsupported GXF storage type (storage_type: {})", - static_cast(storage_type()))); - } -} - -DLDataType GXFTensor::dtype() const { - DLDataType dtype; - dtype.lanes = 1; - dtype.bits = bytes_per_element() * 8; - - auto element_type = this->element_type(); - switch (element_type) { - case nvidia::gxf::PrimitiveType::kInt8: - case nvidia::gxf::PrimitiveType::kInt16: - case nvidia::gxf::PrimitiveType::kInt32: - case nvidia::gxf::PrimitiveType::kInt64: - dtype.code = kDLInt; - break; - case nvidia::gxf::PrimitiveType::kUnsigned8: - case nvidia::gxf::PrimitiveType::kUnsigned16: - case nvidia::gxf::PrimitiveType::kUnsigned32: - case nvidia::gxf::PrimitiveType::kUnsigned64: - dtype.code = kDLUInt; - break; - case nvidia::gxf::PrimitiveType::kFloat32: - case nvidia::gxf::PrimitiveType::kFloat64: - dtype.code = kDLFloat; - 
break; - case nvidia::gxf::PrimitiveType::kComplex64: - case nvidia::gxf::PrimitiveType::kComplex128: - dtype.code = kDLComplex; - break; - default: - throw std::runtime_error( - fmt::format("Unsupported GXF element type: {}", static_cast(element_type))); - } - return dtype; -} - -std::shared_ptr GXFTensor::as_tensor() { - auto tensor = std::make_shared(dl_ctx_); - return tensor; -} - -std::shared_ptr GXFTensor::from_tensor(std::shared_ptr tensor) { - auto gxf_tensor = std::make_shared(tensor->dl_ctx()); - return gxf_tensor; -} - -} // namespace holoscan::gxf diff --git a/src/core/gxf/gxf_utils.cpp b/src/core/gxf/gxf_utils.cpp new file mode 100644 index 00000000..760ae48c --- /dev/null +++ b/src/core/gxf/gxf_utils.cpp @@ -0,0 +1,78 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include +#include + +#include "holoscan/core/gxf/gxf_utils.hpp" + +#include "holoscan/core/common.hpp" +#include "holoscan/core/gxf/gxf_execution_context.hpp" +#include "holoscan/core/io_context.hpp" + +#include "gxf/std/transmitter.hpp" + +namespace holoscan::gxf { + +gxf_uid_t get_component_eid(gxf_context_t context, gxf_uid_t cid) { + gxf_uid_t eid; + HOLOSCAN_GXF_CALL_FATAL(GxfComponentEntity(context, cid, &eid)); + return eid; +} + +std::string get_full_component_name(gxf_context_t context, gxf_uid_t cid) { + const char* cname; + HOLOSCAN_GXF_CALL_FATAL(GxfComponentName(context, cid, &cname)); + gxf_uid_t eid; + HOLOSCAN_GXF_CALL_FATAL(GxfComponentEntity(context, cid, &eid)); + const char* ename; + HOLOSCAN_GXF_CALL_FATAL(GxfComponentName(context, eid, &ename)); + + std::stringstream sstream; + sstream << ename << "/" << cname; + return sstream.str(); +} + +std::string create_name(const char* prefix, int index) { + std::stringstream sstream; + sstream << prefix << "_" << index; + return sstream.str(); +} + +std::string create_name(const char* prefix, const std::string& name) { + std::stringstream sstream; + sstream << prefix << "_" << name; + return sstream.str(); +} + +bool has_component(gxf_context_t context, gxf_uid_t eid, gxf_tid_t tid, const char* name, + int32_t* offset, gxf_uid_t* cid) { + gxf_uid_t temp_cid = 0; + auto result = GxfComponentFind(context, eid, tid, name, offset, cid ? 
cid : &temp_cid); + if (result == GXF_SUCCESS) { + return true; + } else { + return false; + } +} + +gxf_uid_t add_entity_group(void* context, std::string name) { + gxf_uid_t entity_group_gid = kNullUid; + HOLOSCAN_GXF_CALL_FATAL(GxfCreateEntityGroup(context, name.c_str(), &entity_group_gid)); + return entity_group_gid; +} + +} // namespace holoscan::gxf diff --git a/src/core/gxf/gxf_wrapper.cpp b/src/core/gxf/gxf_wrapper.cpp index dd450e16..831addf6 100644 --- a/src/core/gxf/gxf_wrapper.cpp +++ b/src/core/gxf/gxf_wrapper.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -18,6 +18,7 @@ #include "holoscan/core/gxf/gxf_wrapper.hpp" #include "holoscan/core/common.hpp" +#include "holoscan/core/fragment.hpp" #include "holoscan/core/gxf/gxf_execution_context.hpp" #include "holoscan/core/io_context.hpp" @@ -46,7 +47,18 @@ gxf_result_t GXFWrapper::start() { HOLOSCAN_LOG_ERROR("GXFWrapper::start() - Operator is not set"); return GXF_FAILURE; } - op_->start(); + + HOLOSCAN_LOG_TRACE("Starting operator: {}", op_->name()); + + try { + op_->start(); + } catch (const std::exception& e) { + store_exception(); + HOLOSCAN_LOG_ERROR( + "Exception occurred when starting operator: '{}' - {}", op_->name(), e.what()); + return GXF_FAILURE; + } + return GXF_SUCCESS; } @@ -65,6 +77,10 @@ gxf_result_t GXFWrapper::tick() { try { op_->compute(*op_input, *op_output, exec_context); } catch (const std::exception& e) { + // Note: Rethrowing the exception (using `throw;`) would cause the Python interpreter to exit. + // To avoid this, we store the exception and return GXF_FAILURE. + // The exception is then rethrown in GXFExecutor::run_gxf_graph(). 
+ store_exception(); HOLOSCAN_LOG_ERROR("Exception occurred for operator: '{}' - {}", op_->name(), e.what()); return GXF_FAILURE; } @@ -78,8 +94,24 @@ gxf_result_t GXFWrapper::stop() { HOLOSCAN_LOG_ERROR("GXFWrapper::stop() - Operator is not set"); return GXF_FAILURE; } - op_->stop(); + + HOLOSCAN_LOG_TRACE("Stopping operator: {}", op_->name()); + + try { + op_->stop(); + } catch (const std::exception& e) { + store_exception(); + HOLOSCAN_LOG_ERROR( + "Exception occurred when stopping operator: '{}' - {}", op_->name(), e.what()); + return GXF_FAILURE; + } + return GXF_SUCCESS; } +void GXFWrapper::store_exception() { + auto stored_exception = std::current_exception(); + if (stored_exception != nullptr) { op_->fragment()->executor().exception(stored_exception); } +} + } // namespace holoscan::gxf diff --git a/src/core/network_context.cpp b/src/core/network_context.cpp index 511299e7..81d5f479 100644 --- a/src/core/network_context.cpp +++ b/src/core/network_context.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -32,6 +32,18 @@ void NetworkContext::initialize() { } } +void NetworkContext::reset_graph_entities() { + HOLOSCAN_LOG_TRACE("NetworkContext '{}'::reset_graph_entities", name_); + for (auto& [_, resource] : resources_) { + if (resource) { + auto gxf_resource = std::dynamic_pointer_cast(resource); + if (gxf_resource) { gxf_resource->reset_gxf_graph_entity(); } + resource->reset_graph_entities(); + } + } + Component::reset_graph_entities(); +} + YAML::Node NetworkContext::to_yaml_node() const { YAML::Node node = Component::to_yaml_node(); if (spec_) { diff --git a/src/core/network_contexts/gxf/ucx_context.cpp b/src/core/network_contexts/gxf/ucx_context.cpp index e212b0b9..d243434a 100644 --- a/src/core/network_contexts/gxf/ucx_context.cpp +++ b/src/core/network_contexts/gxf/ucx_context.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -36,6 +36,10 @@ void UcxContext::setup(ComponentSpec& spec) { // spec.resource(gpu_device_, "Optional GPU device resource"); } +nvidia::gxf::UcxContext* UcxContext::get() const { + return static_cast(gxf_cptr_); +} + void UcxContext::initialize() { HOLOSCAN_LOG_DEBUG("UcxContext::initialize"); // Set up prerequisite parameters before calling GXFNetworkContext::initialize() @@ -51,7 +55,8 @@ void UcxContext::initialize() { // fragment that sends the message and the sequence_number is not synchronized across // fragments. 
auto entity_serializer = frag->make_resource( - "ucx_entity_serializer", Arg("verbose_warning") = false); + "ucx_context_ucxentity_serializer", Arg("verbose_warning") = false); + entity_serializer->gxf_cname(entity_serializer->name().c_str()); // Note: Activation sequence of entities in GXF: // 1. System entities diff --git a/src/core/operator.cpp b/src/core/operator.cpp index e52372f8..084d2090 100644 --- a/src/core/operator.cpp +++ b/src/core/operator.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -27,6 +27,7 @@ #include "holoscan/core/fragment.hpp" #include "holoscan/core/gxf/gxf_operator.hpp" #include "holoscan/core/gxf/gxf_scheduler.hpp" +#include "holoscan/core/gxf/gxf_wrapper.hpp" #include "holoscan/core/messagelabel.hpp" #include "holoscan/core/operator.hpp" @@ -156,6 +157,32 @@ void Operator::set_op_backend() { } } +gxf_uid_t Operator::initialize_graph_entity(void* context, const std::string& entity_prefix) { + const std::string op_entity_name = fmt::format("{}{}", entity_prefix, name_); + HOLOSCAN_LOG_TRACE( + "initialize_graph_entity called for Operator {}, entity_name: {}", name_, op_entity_name); + graph_entity_ = std::make_shared(); + auto maybe = graph_entity_->setup(context, op_entity_name.c_str()); + if (!maybe) { + throw std::runtime_error(fmt::format("Failed to create operator entity: '{}'", op_entity_name)); + } + return graph_entity_->eid(); +} + +gxf_uid_t Operator::add_codelet_to_graph_entity() { + HOLOSCAN_LOG_TRACE("calling graph_entity()->addCodelet for {}", name()); + if (!graph_entity_) { throw std::runtime_error("graph entity is not initialized"); } + auto codelet_handle = graph_entity_->addCodelet(name().c_str()); + if (!codelet_handle) { + 
throw std::runtime_error("Failed to create GXFWrapper codelet corresponding to this operator"); + } + codelet_handle->set_operator(this); + HOLOSCAN_LOG_TRACE("\tadded codelet with cid {} to entity with eid {}", + codelet_handle->cid(), + graph_entity_->eid()); + return codelet_handle->cid(); +} + YAML::Node Operator::to_yaml_node() const { std::unordered_map operatortype_namemap{ {OperatorType::kGXF, "kGXF"s}, @@ -163,7 +190,7 @@ YAML::Node Operator::to_yaml_node() const { {OperatorType::kVirtual, "kVirtual"s}, }; - YAML::Node node = Component::to_yaml_node(); + YAML::Node node = ComponentBase::to_yaml_node(); node["type"] = operatortype_namemap[operator_type_]; node["conditions"] = YAML::Node(YAML::NodeType::Sequence); for (const auto& c : conditions_) { node["conditions"].push_back(c.second->to_yaml_node()); } @@ -177,4 +204,90 @@ YAML::Node Operator::to_yaml_node() const { return node; } +void Operator::initialize_conditions() { + for (const auto& [name, condition] : conditions_) { + HOLOSCAN_LOG_TRACE("\top '{}': initializing condition: {}", name_, condition->name()); + auto gxf_condition = std::dynamic_pointer_cast(condition); + if (gxf_condition) { + // assign the condition to the same entity as the operator and initialize it + gxf_condition->add_to_graph_entity(this); + } else { + // currently no native Condition support so raise if it is not a GXFCondition + throw std::runtime_error( + fmt::format("condition {} was not a holoscan::gxf::GXFCondition", condition->name())); + } + } +} + +void Operator::initialize_resources() { + for (const auto& [name, resource] : resources_) { + HOLOSCAN_LOG_TRACE("\top '{}': initializing resource: {}", name_, resource->name()); + auto gxf_resource = std::dynamic_pointer_cast(resource); + if (gxf_resource) { + // assign the resource to the same entity as the operator and initialize it + gxf_resource->add_to_graph_entity(this); + } else { + // initialize as a native (non-GXF) resource + resource->initialize(); + } + } +} + 
+void Operator::update_params_from_args() { + update_params_from_args(spec_->params()); +} + +void Operator::set_parameters() { + update_params_from_args(); + + // Set only default parameter values + for (auto& [key, param_wrap] : spec_->params()) { + // If no value is specified, the default value will be used by setting an empty argument. + Arg empty_arg(""); + ArgumentSetter::set_param(param_wrap, empty_arg); + } +} + +bool Operator::has_ucx_connector() { + for (const auto& [_, io_spec] : spec_->inputs()) { + if (io_spec->connector_type() == IOSpec::ConnectorType::kUCX) { return true; } + } + for (const auto& [_, io_spec] : spec_->outputs()) { + if (io_spec->connector_type() == IOSpec::ConnectorType::kUCX) { return true; } + } + return false; +} + +void Operator::reset_graph_entities() { + HOLOSCAN_LOG_TRACE("Operator '{}'::reset_graph_entities", name_); + auto reset_resource = [](std::shared_ptr resource) { + if (resource) { + auto gxf_resource = std::dynamic_pointer_cast(resource); + if (gxf_resource) { gxf_resource->reset_gxf_graph_entity(); } + resource->reset_graph_entities(); + } + }; + auto reset_condition = [](std::shared_ptr condition) { + if (condition) { + auto gxf_condition = std::dynamic_pointer_cast(condition); + if (gxf_condition) { gxf_condition->reset_gxf_graph_entity(); } + condition->reset_graph_entities(); + } + }; + auto reset_iospec = + [reset_resource, + reset_condition](const std::unordered_map>& io_specs) { + for (auto& [_, io_spec] : io_specs) { + reset_resource(io_spec->connector()); + for (auto& [_, condition] : io_spec->conditions()) { reset_condition(condition); } + } + }; + for (auto& [_, resource] : resources_) { reset_resource(resource); } + for (auto& [_, condition] : conditions_) { reset_condition(condition); } + reset_iospec(spec_->inputs()); + reset_iospec(spec_->outputs()); + ComponentBase::reset_graph_entities(); + graph_entity_.reset(); +} + } // namespace holoscan diff --git a/src/core/resource.cpp b/src/core/resource.cpp 
index 378af765..265b4306 100644 --- a/src/core/resource.cpp +++ b/src/core/resource.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -38,20 +38,7 @@ void Resource::initialize() { // Set arguments auto& params = spec.params(); - for (auto& arg : args_) { - // Find if arg.name() is in spec.params() - if (params.find(arg.name()) == params.end()) { - HOLOSCAN_LOG_WARN("Argument '{}' not found in spec_->params()", arg.name()); - continue; - } - - // Set arg.value() to spec.params()[arg.name()] - auto& param_wrap = params[arg.name()]; - - HOLOSCAN_LOG_TRACE("GXFResource '{}':: setting argument '{}'", name(), arg.name()); - - ArgumentSetter::set_param(param_wrap, arg); - } + update_params_from_args(params); // Set default values for unspecified arguments if the resource is native if (resource_type_ == ResourceType::kNative) { diff --git a/src/core/resources/gxf/allocator.cpp b/src/core/resources/gxf/allocator.cpp index 254698d3..e2ae41d1 100644 --- a/src/core/resources/gxf/allocator.cpp +++ b/src/core/resources/gxf/allocator.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -22,21 +22,22 @@ namespace holoscan { Allocator::Allocator(const std::string& name, nvidia::gxf::Allocator* component) - : GXFResource(name, component) {} + : gxf::GXFResource(name, component) {} + +nvidia::gxf::Allocator* Allocator::get() const { + return static_cast(gxf_cptr_); +} bool Allocator::is_available(uint64_t size) { - if (gxf_cptr_) { - nvidia::gxf::Allocator* allocator = static_cast(gxf_cptr_); - return allocator->is_available(size); - } + auto allocator = get(); + if (allocator) { return allocator->is_available(size); } return false; } nvidia::byte* Allocator::allocate(uint64_t size, MemoryStorageType type) { - if (gxf_cptr_) { - nvidia::gxf::Allocator* allocator = static_cast(gxf_cptr_); - + auto allocator = get(); + if (allocator) { auto result = allocator->allocate(size, static_cast(type)); if (result) { return result.value(); } } @@ -48,11 +49,17 @@ nvidia::byte* Allocator::allocate(uint64_t size, MemoryStorageType type) { } void Allocator::free(nvidia::byte* pointer) { - if (gxf_cptr_) { - nvidia::gxf::Allocator* allocator = static_cast(gxf_cptr_); + auto allocator = get(); + if (allocator) { auto result = allocator->free(pointer); if (!result) { HOLOSCAN_LOG_ERROR("Failed to free memory at {}", static_cast(pointer)); } } } +uint64_t Allocator::block_size() { + auto allocator = get(); + if (!allocator) { throw std::runtime_error("null GXF component pointer"); } + return allocator->block_size(); +} + } // namespace holoscan diff --git a/src/core/resources/gxf/annotated_double_buffer_receiver.cpp b/src/core/resources/gxf/annotated_double_buffer_receiver.cpp index aee76f8d..ec03ca86 100644 --- a/src/core/resources/gxf/annotated_double_buffer_receiver.cpp +++ b/src/core/resources/gxf/annotated_double_buffer_receiver.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -27,7 +27,7 @@ namespace holoscan { -gxf_result_t holoscan::AnnotatedDoubleBufferReceiver::receive_abi(gxf_uid_t* uid) { +gxf_result_t AnnotatedDoubleBufferReceiver::receive_abi(gxf_uid_t* uid) { gxf_result_t code = nvidia::gxf::DoubleBufferReceiver::receive_abi(uid); static gxf_tid_t message_label_tid = GxfTidNull(); diff --git a/src/core/resources/gxf/block_memory_pool.cpp b/src/core/resources/gxf/block_memory_pool.cpp index ab28993e..c38c4338 100644 --- a/src/core/resources/gxf/block_memory_pool.cpp +++ b/src/core/resources/gxf/block_memory_pool.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,8 +17,10 @@ #include "holoscan/core/resources/gxf/block_memory_pool.hpp" +#include #include +#include "gxf/std/resources.hpp" // for GPUDevice #include "holoscan/core/component_spec.hpp" #include "holoscan/core/gxf/gxf_utils.hpp" @@ -30,18 +32,27 @@ constexpr int32_t kDefaultDeviceId = 0; BlockMemoryPool::BlockMemoryPool(const std::string& name, nvidia::gxf::BlockMemoryPool* component) : Allocator(name, component) { - int32_t storage_type = 0; - HOLOSCAN_GXF_CALL_FATAL( - GxfParameterGetInt32(gxf_context_, gxf_cid_, "storage_type", &storage_type)); - storage_type_ = storage_type; - uint64_t block_size = 0; - HOLOSCAN_GXF_CALL_FATAL(GxfParameterGetUInt64(gxf_context_, gxf_cid_, "block_size", &block_size)); - block_size_ = block_size; - uint64_t num_blocks = 0; - HOLOSCAN_GXF_CALL_FATAL(GxfParameterGetUInt64(gxf_context_, gxf_cid_, "num_blocks", &num_blocks)); - num_blocks_ = num_blocks; - // TODO: how to get the device ID now that it is a GPUDevice Resource and not a parameter? 
- dev_id_ = 0; + auto maybe_storage_type = component->getParameter("storage_type"); + if (!maybe_storage_type) { throw std::runtime_error("Failed to get storage_type"); } + storage_type_ = maybe_storage_type.value(); + + auto maybe_block_size = component->getParameter("block_size"); + if (!maybe_block_size) { throw std::runtime_error("Failed to get block_size"); } + block_size_ = maybe_block_size.value(); + + auto maybe_num_blocks = component->getParameter("num_blocks"); + if (!maybe_num_blocks) { throw std::runtime_error("Failed to get num_blocks"); } + num_blocks_ = maybe_num_blocks.value(); + + auto maybe_gpu_device = + component->getParameter>("dev_id"); + if (!maybe_gpu_device) { throw std::runtime_error("Failed to get dev_id"); } + auto gpu_device_handle = maybe_gpu_device.value(); + dev_id_ = gpu_device_handle->device_id(); +} + +nvidia::gxf::BlockMemoryPool* BlockMemoryPool::get() const { + return static_cast(gxf_cptr_); } void BlockMemoryPool::setup(ComponentSpec& spec) { @@ -70,4 +81,24 @@ void BlockMemoryPool::setup(ComponentSpec& spec) { kDefaultDeviceId); } +nvidia::gxf::MemoryStorageType BlockMemoryPool::storage_type() const { + auto pool = get(); + if (pool) { + return pool->storage_type(); + } else { + // TODO: throw error or return Unexpected? + HOLOSCAN_LOG_ERROR("BlockMemoryPool component not yet registered with GXF"); + return nvidia::gxf::MemoryStorageType::kSystem; + } +} + +uint64_t BlockMemoryPool::num_blocks() const { + auto pool = get(); + if (!pool) { + HOLOSCAN_LOG_ERROR("BlockMemoryPool component not yet registered with GXF"); + return 0; + } + return pool->num_blocks(); +} + } // namespace holoscan diff --git a/src/core/resources/gxf/clock.cpp b/src/core/resources/gxf/clock.cpp index 86c423b7..abdf08a8 100644 --- a/src/core/resources/gxf/clock.cpp +++ b/src/core/resources/gxf/clock.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -24,4 +24,8 @@ namespace holoscan { Clock::Clock(const std::string& name, nvidia::gxf::Clock* component) : GXFResource(name, component) {} +nvidia::gxf::Clock* Clock::get() const { + return static_cast(gxf_cptr_); +} + } // namespace holoscan diff --git a/src/core/resources/gxf/cuda_stream_pool.cpp b/src/core/resources/gxf/cuda_stream_pool.cpp index 1db83850..58e7c367 100644 --- a/src/core/resources/gxf/cuda_stream_pool.cpp +++ b/src/core/resources/gxf/cuda_stream_pool.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,8 +17,10 @@ #include "holoscan/core/resources/gxf/cuda_stream_pool.hpp" +#include #include +#include "gxf/std/resources.hpp" // for GPUDevice #include "holoscan/core/component_spec.hpp" #include "holoscan/core/gxf/gxf_utils.hpp" @@ -34,23 +36,31 @@ constexpr int32_t kDefaultDeviceId = 0; CudaStreamPool::CudaStreamPool(const std::string& name, nvidia::gxf::CudaStreamPool* component) : Allocator(name, component) { - // TODO: how to get the device ID now that it is a GPUDevice Resource and not a parameter? 
- dev_id_ = 0; - uint32_t stream_flags = 0; - HOLOSCAN_GXF_CALL_FATAL( - GxfParameterGetUInt32(gxf_context_, gxf_cid_, "stream_flags", &stream_flags)); - stream_flags_ = stream_flags; - int32_t stream_priority = 0; - HOLOSCAN_GXF_CALL_FATAL( - GxfParameterGetInt32(gxf_context_, gxf_cid_, "stream_priority", &stream_priority)); - stream_priority_ = stream_priority; - uint32_t reserved_size = 0; - HOLOSCAN_GXF_CALL_FATAL( - GxfParameterGetUInt32(gxf_context_, gxf_cid_, "reserved_size", &reserved_size)); - reserved_size_ = reserved_size; - uint32_t max_size = 0; - HOLOSCAN_GXF_CALL_FATAL(GxfParameterGetUInt32(gxf_context_, gxf_cid_, "max_size", &max_size)); - max_size_ = max_size; + auto maybe_stream_flags = component->getParameter("stream_flags"); + if (!maybe_stream_flags) { throw std::runtime_error("Failed to get stream_flags"); } + stream_flags_ = maybe_stream_flags.value(); + + auto maybe_stream_priority = component->getParameter("stream_priority"); + if (!maybe_stream_priority) { throw std::runtime_error("Failed to get stream_priority"); } + stream_priority_ = maybe_stream_priority.value(); + + auto maybe_reserved_size = component->getParameter("reserved_size"); + if (!maybe_reserved_size) { throw std::runtime_error("Failed to get reserved_size"); } + reserved_size_ = maybe_reserved_size.value(); + + auto maybe_max_size = component->getParameter("max_size"); + if (!maybe_max_size) { throw std::runtime_error("Failed to get max_size"); } + max_size_ = maybe_max_size.value(); + + auto maybe_gpu_device = + component->getParameter>("dev_id"); + if (!maybe_gpu_device) { throw std::runtime_error("Failed to get dev_id"); } + auto gpu_device_handle = maybe_gpu_device.value(); + dev_id_ = gpu_device_handle->device_id(); +} + +nvidia::gxf::CudaStreamPool* CudaStreamPool::get() const { + return static_cast(gxf_cptr_); } void CudaStreamPool::setup(ComponentSpec& spec) { @@ -62,22 +72,30 @@ void CudaStreamPool::setup(ComponentSpec& spec) { spec.param(stream_flags_, 
"stream_flags", "Stream Flags", - "Create CUDA streams with flags.", + "Flags for CUDA streams in the pool. The flag value will be passed to CUDA's " + "cudaStreamCreateWithPriority when creating the streams. A value of 0 corresponds to " + "`cudaStreamDefault` while a value of 1 corresponds to `cudaStreamNonBlocking`, " + "indicating that the stream can run concurrently with work in stream 0 (default " + "stream) and should not perform any implicit synchronization with it. See: " + "https://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART__STREAM.html.", kDefaultStreamFlags); spec.param(stream_priority_, "stream_priority", "Stream Priority", - "Create CUDA streams with priority.", + "Priority of the CUDA streams in the pool. This is an integer value passed to " + "cudaSreamCreateWithPriority . Lower numbers represent higher priorities. See: " + "https://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART__STREAM.html.", kDefaultStreamPriority); - spec.param(reserved_size_, - "reserved_size", - "Reserved Stream Size", - "Reserve several CUDA streams before 1st request coming", - kDefaultReservedSize); + spec.param( + reserved_size_, + "reserved_size", + "Reserved Stream Size", + "The number of CUDA streams to initially reserve in the pool (prior to first request).", + kDefaultReservedSize); spec.param(max_size_, "max_size", - "Maximum Stream Size", - "The maximum stream size for the pool to allocate, unlimited by default", + "Maximum Pool Size", + "The maximum number of streams that can be allocated, unlimited by default", kDefaultMaxSize); } diff --git a/src/core/resources/gxf/double_buffer_receiver.cpp b/src/core/resources/gxf/double_buffer_receiver.cpp index aee0e07f..8832276e 100644 --- a/src/core/resources/gxf/double_buffer_receiver.cpp +++ b/src/core/resources/gxf/double_buffer_receiver.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -28,26 +28,30 @@ namespace holoscan { DoubleBufferReceiver::DoubleBufferReceiver(const std::string& name, nvidia::gxf::DoubleBufferReceiver* component) : Receiver(name, component) { - uint64_t capacity = 0; - HOLOSCAN_GXF_CALL_FATAL(GxfParameterGetUInt64(gxf_context_, gxf_cid_, "capacity", &capacity)); - capacity_ = capacity; - uint64_t policy = 0; - HOLOSCAN_GXF_CALL_FATAL(GxfParameterGetUInt64(gxf_context_, gxf_cid_, "policy", &policy)); - policy_ = policy; + auto maybe_capacity = component->getParameter("capacity"); + if (!maybe_capacity) { throw std::runtime_error("Failed to get capacity"); } + auto maybe_policy = component->getParameter("policy"); + if (!maybe_policy) { throw std::runtime_error("Failed to get policy"); } + capacity_ = maybe_capacity.value(); + policy_ = maybe_policy.value(); } DoubleBufferReceiver::DoubleBufferReceiver(const std::string& name, AnnotatedDoubleBufferReceiver* component) : Receiver(name, component) { - uint64_t capacity = 0; - HOLOSCAN_GXF_CALL_FATAL(GxfParameterGetUInt64(gxf_context_, gxf_cid_, "capacity", &capacity)); - capacity_ = capacity; - uint64_t policy = 0; - HOLOSCAN_GXF_CALL_FATAL(GxfParameterGetUInt64(gxf_context_, gxf_cid_, "policy", &policy)); - policy_ = policy; + auto maybe_capacity = component->getParameter("capacity"); + if (!maybe_capacity) { throw std::runtime_error("Failed to get capacity"); } + auto maybe_policy = component->getParameter("policy"); + if (!maybe_policy) { throw std::runtime_error("Failed to get policy"); } + capacity_ = maybe_capacity.value(); + policy_ = maybe_policy.value(); tracking_ = true; } +nvidia::gxf::DoubleBufferReceiver* DoubleBufferReceiver::get() const { + return static_cast(gxf_cptr_); +} + const char* DoubleBufferReceiver::gxf_typename() const { if (tracking_) { 
return "holoscan::AnnotatedDoubleBufferReceiver"; diff --git a/src/core/resources/gxf/double_buffer_transmitter.cpp b/src/core/resources/gxf/double_buffer_transmitter.cpp index d5dd11d9..42a3007b 100644 --- a/src/core/resources/gxf/double_buffer_transmitter.cpp +++ b/src/core/resources/gxf/double_buffer_transmitter.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -28,26 +28,30 @@ namespace holoscan { DoubleBufferTransmitter::DoubleBufferTransmitter(const std::string& name, nvidia::gxf::DoubleBufferTransmitter* component) : Transmitter(name, component) { - uint64_t capacity = 0; - HOLOSCAN_GXF_CALL_FATAL(GxfParameterGetUInt64(gxf_context_, gxf_cid_, "capacity", &capacity)); - capacity_ = capacity; - uint64_t policy = 0; - HOLOSCAN_GXF_CALL_FATAL(GxfParameterGetUInt64(gxf_context_, gxf_cid_, "policy", &policy)); - policy_ = policy; + auto maybe_capacity = component->getParameter("capacity"); + if (!maybe_capacity) { throw std::runtime_error("Failed to get capacity"); } + auto maybe_policy = component->getParameter("policy"); + if (!maybe_policy) { throw std::runtime_error("Failed to get policy"); } + capacity_ = maybe_capacity.value(); + policy_ = maybe_policy.value(); } DoubleBufferTransmitter::DoubleBufferTransmitter(const std::string& name, AnnotatedDoubleBufferTransmitter* component) : Transmitter(name, component) { - uint64_t capacity = 0; - HOLOSCAN_GXF_CALL_FATAL(GxfParameterGetUInt64(gxf_context_, gxf_cid_, "capacity", &capacity)); - capacity_ = capacity; - uint64_t policy = 0; - HOLOSCAN_GXF_CALL_FATAL(GxfParameterGetUInt64(gxf_context_, gxf_cid_, "policy", &policy)); - policy_ = policy; + auto maybe_capacity = component->getParameter("capacity"); + if 
(!maybe_capacity) { throw std::runtime_error("Failed to get capacity"); } + auto maybe_policy = component->getParameter("policy"); + if (!maybe_policy) { throw std::runtime_error("Failed to get policy"); } + capacity_ = maybe_capacity.value(); + policy_ = maybe_policy.value(); tracking_ = true; } +nvidia::gxf::DoubleBufferTransmitter* DoubleBufferTransmitter::get() const { + return static_cast(gxf_cptr_); +} + const char* DoubleBufferTransmitter::gxf_typename() const { if (tracking_) { return "holoscan::AnnotatedDoubleBufferTransmitter"; diff --git a/src/core/resources/gxf/manual_clock.cpp b/src/core/resources/gxf/manual_clock.cpp index feb39914..11c969c8 100644 --- a/src/core/resources/gxf/manual_clock.cpp +++ b/src/core/resources/gxf/manual_clock.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -26,31 +26,30 @@ namespace holoscan { ManualClock::ManualClock(const std::string& name, nvidia::gxf::ManualClock* component) : Clock(name, component) { - uint64_t initial_timestamp = 0L; - HOLOSCAN_GXF_CALL_FATAL( - GxfParameterGetUInt64(gxf_context_, gxf_cid_, "initial_timestamp", &initial_timestamp)); - initial_timestamp_ = initial_timestamp; + auto maybe_initial_timestamp = component->getParameter("initial_timestamp"); + if (!maybe_initial_timestamp) { throw std::runtime_error("Failed to get initial_timestamp"); } + initial_timestamp_ = maybe_initial_timestamp.value(); +} + +nvidia::gxf::ManualClock* ManualClock::get() const { + return static_cast(gxf_cptr_); } double ManualClock::time() const { - if (gxf_cptr_) { - nvidia::gxf::ManualClock* clock = static_cast(gxf_cptr_); - return clock->time(); - } + auto clock = get(); + if (clock) { return clock->time(); } return 0.0; } int64_t 
ManualClock::timestamp() const { - if (gxf_cptr_) { - nvidia::gxf::ManualClock* clock = static_cast(gxf_cptr_); - return clock->timestamp(); - } + auto clock = get(); + if (clock) { return clock->timestamp(); } return 0; } void ManualClock::sleep_for(int64_t duration_ns) { - if (gxf_cptr_) { - nvidia::gxf::ManualClock* clock = static_cast(gxf_cptr_); + auto clock = get(); + if (clock) { clock->sleepFor(duration_ns); } else { HOLOSCAN_LOG_ERROR("RealtimeClock component not yet registered with GXF"); @@ -58,8 +57,8 @@ void ManualClock::sleep_for(int64_t duration_ns) { } void ManualClock::sleep_until(int64_t target_time_ns) { - if (gxf_cptr_) { - nvidia::gxf::ManualClock* clock = static_cast(gxf_cptr_); + auto clock = get(); + if (clock) { clock->sleepUntil(target_time_ns); } else { HOLOSCAN_LOG_ERROR("RealtimeClock component not yet registered with GXF"); diff --git a/src/core/resources/gxf/realtime_clock.cpp b/src/core/resources/gxf/realtime_clock.cpp index d1cf5161..2443c7f9 100644 --- a/src/core/resources/gxf/realtime_clock.cpp +++ b/src/core/resources/gxf/realtime_clock.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -26,39 +26,38 @@ namespace holoscan { RealtimeClock::RealtimeClock(const std::string& name, nvidia::gxf::RealtimeClock* component) : Clock(name, component) { - double initial_time_offset = 0.0; - HOLOSCAN_GXF_CALL_FATAL( - GxfParameterGetFloat64(gxf_context_, gxf_cid_, "initial_time_offset", &initial_time_offset)); - initial_time_offset_ = initial_time_offset; - double initial_time_scale = 1.0; - HOLOSCAN_GXF_CALL_FATAL( - GxfParameterGetFloat64(gxf_context_, gxf_cid_, "initial_time_scale", &initial_time_scale)); - initial_time_scale_ = initial_time_scale; - bool use_time_since_epoch = false; - HOLOSCAN_GXF_CALL_FATAL( - GxfParameterGetBool(gxf_context_, gxf_cid_, "use_time_since_epoch", &use_time_since_epoch)); - use_time_since_epoch_ = use_time_since_epoch; + auto maybe_offset = component->getParameter("initial_time_offset"); + if (!maybe_offset) { throw std::runtime_error("Failed to get initial_time_offset"); } + initial_time_offset_ = maybe_offset.value(); + + auto maybe_scale = component->getParameter("initial_time_scale"); + if (!maybe_scale) { throw std::runtime_error("Failed to get initial_time_scale"); } + initial_time_scale_ = maybe_scale.value(); + + auto maybe_use_epoch = component->getParameter("use_time_since_epoch"); + if (!maybe_use_epoch) { throw std::runtime_error("Failed to get use_time_since_epoch"); } + use_time_since_epoch_ = maybe_use_epoch.value(); +} + +nvidia::gxf::RealtimeClock* RealtimeClock::get() const { + return static_cast(gxf_cptr_); } double RealtimeClock::time() const { - if (gxf_cptr_) { - nvidia::gxf::RealtimeClock* clock = static_cast(gxf_cptr_); - return clock->time(); - } + auto clock = get(); + if (clock) { return clock->time(); } return 0.0; } int64_t RealtimeClock::timestamp() const { - if (gxf_cptr_) { - nvidia::gxf::RealtimeClock* clock = static_cast(gxf_cptr_); - return clock->timestamp(); - } + auto clock = get(); + if (clock) { return
clock->timestamp(); } return 0; } void RealtimeClock::sleep_for(int64_t duration_ns) { - if (gxf_cptr_) { - nvidia::gxf::RealtimeClock* clock = static_cast(gxf_cptr_); + auto clock = get(); + if (clock) { clock->sleepFor(duration_ns); } else { HOLOSCAN_LOG_ERROR("RealtimeClock component not yet registered with GXF"); @@ -66,8 +65,8 @@ void RealtimeClock::sleep_for(int64_t duration_ns) { } void RealtimeClock::sleep_until(int64_t target_time_ns) { - if (gxf_cptr_) { - nvidia::gxf::RealtimeClock* clock = static_cast(gxf_cptr_); + auto clock = get(); + if (clock) { clock->sleepUntil(target_time_ns); } else { HOLOSCAN_LOG_ERROR("RealtimeClock component not yet registered with GXF"); @@ -75,8 +74,8 @@ void RealtimeClock::sleep_until(int64_t target_time_ns) { } void RealtimeClock::set_time_scale(double time_scale) { - if (gxf_cptr_) { - nvidia::gxf::RealtimeClock* clock = static_cast(gxf_cptr_); + auto clock = get(); + if (clock) { clock->setTimeScale(time_scale); } else { HOLOSCAN_LOG_ERROR("RealtimeClock component not yet registered with GXF"); diff --git a/src/core/resources/gxf/receiver.cpp b/src/core/resources/gxf/receiver.cpp index 2fe2a180..aa7aea6b 100644 --- a/src/core/resources/gxf/receiver.cpp +++ b/src/core/resources/gxf/receiver.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -24,4 +24,8 @@ namespace holoscan { Receiver::Receiver(const std::string& name, nvidia::gxf::Receiver* component) : GXFResource(name, component) {} +nvidia::gxf::Receiver* Receiver::get() const { + return static_cast(gxf_cptr_); +} + } // namespace holoscan diff --git a/src/core/resources/gxf/serialization_buffer.cpp b/src/core/resources/gxf/serialization_buffer.cpp index 1057f58a..ca8b0d6a 100644 --- a/src/core/resources/gxf/serialization_buffer.cpp +++ b/src/core/resources/gxf/serialization_buffer.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -20,6 +20,7 @@ #include #include +#include "gxf/std/allocator.hpp" #include "holoscan/core/component_spec.hpp" #include "holoscan/core/fragment.hpp" @@ -28,24 +29,16 @@ namespace holoscan { SerializationBuffer::SerializationBuffer(const std::string& name, nvidia::gxf::SerializationBuffer* component) : GXFResource(name, component) { + auto maybe_buffer_size = component->getParameter("buffer_size"); + if (!maybe_buffer_size) { throw std::runtime_error("Failed to get maybe_buffer_size"); } + buffer_size_ = maybe_buffer_size.value(); - // using GxfParameterGetUInt64 since no method specific to size_t is available - uint64_t buffer_size = 0; - HOLOSCAN_GXF_CALL_FATAL( - GxfParameterGetUInt64(gxf_context_, gxf_cid_, "buffer_size", &buffer_size)); - buffer_size_ = static_cast(buffer_size); - - // get the allocator object - gxf_uid_t allocator_cid; - HOLOSCAN_GXF_CALL_FATAL( - GxfParameterGetHandle(gxf_context_, gxf_cid_, "allocator", &allocator_cid)); - gxf_tid_t allocator_tid{}; - HOLOSCAN_GXF_CALL_FATAL( - 
GxfComponentTypeId(gxf_context_, "nvidia::gxf::Allocator", &allocator_tid)); - nvidia::gxf::Allocator* allocator_ptr; - HOLOSCAN_GXF_CALL_FATAL(GxfComponentPointer( - gxf_context_, gxf_cid_, allocator_tid, reinterpret_cast(&allocator_ptr))); - allocator_ = std::make_shared(std::string{allocator_ptr->name()}, allocator_ptr); + auto maybe_allocator = + component->getParameter>("allocator"); + if (!maybe_allocator) { throw std::runtime_error("Failed to get allocator"); } + auto allocator_handle = maybe_allocator.value(); + allocator_ = + std::make_shared(std::string{allocator_handle->name()}, allocator_handle.get()); } void SerializationBuffer::setup(ComponentSpec& spec) { @@ -58,6 +51,10 @@ void SerializationBuffer::setup(ComponentSpec& spec) { kDefaultSerializationBufferSize); } +nvidia::gxf::SerializationBuffer* SerializationBuffer::get() const { + return static_cast(gxf_cptr_); +} + void SerializationBuffer::initialize() { HOLOSCAN_LOG_DEBUG("SerializationBuffer::initialize"); // Set up prerequisite parameters before calling GXFOperator::initialize() @@ -68,7 +65,9 @@ void SerializationBuffer::initialize() { args().begin(), args().end(), [](const auto& arg) { return (arg.name() == "allocator"); }); // Create an UnboundedAllocator if no allocator was provided if (has_allocator == args().end()) { - auto allocator = frag->make_resource("allocator"); + auto allocator = frag->make_resource("serialization_buffer_allocator"); + allocator->gxf_cname(allocator->name().c_str()); + if (gxf_eid_ != 0) { allocator->gxf_eid(gxf_eid_); } add_arg(Arg("allocator") = allocator); } GXFResource::initialize(); diff --git a/src/core/resources/gxf/std_component_serializer.cpp b/src/core/resources/gxf/std_component_serializer.cpp index 35b2a416..c399f1a4 100644 --- a/src/core/resources/gxf/std_component_serializer.cpp +++ b/src/core/resources/gxf/std_component_serializer.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. 
All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -26,11 +26,16 @@ void StdComponentSerializer::setup(ComponentSpec& spec) { spec.param(allocator_, "allocator", "Memory allocator", "Memory allocator for tensor components"); } +// nvidia::gxf::StdComponentSerializer* StdComponentSerializer::get() const { +// return static_cast(gxf_cptr_); +// } + void StdComponentSerializer::initialize() { // Set up prerequisite parameters before calling GXFOperator::initialize() auto frag = fragment(); - auto allocator = frag->make_resource("allocator"); - + auto allocator = frag->make_resource("std_component_serializer_allocator"); + allocator->gxf_cname(allocator->name().c_str()); + if (gxf_eid_ != 0) { allocator->gxf_eid(gxf_eid_); } add_arg(Arg("allocator") = allocator); GXFResource::initialize(); diff --git a/src/core/resources/gxf/video_stream_serializer.cpp b/src/core/resources/gxf/std_entity_serializer.cpp similarity index 64% rename from src/core/resources/gxf/video_stream_serializer.cpp rename to src/core/resources/gxf/std_entity_serializer.cpp index 4b53074b..dd71257b 100644 --- a/src/core/resources/gxf/video_stream_serializer.cpp +++ b/src/core/resources/gxf/std_entity_serializer.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,7 +15,7 @@ * limitations under the License. 
*/ -#include "holoscan/core/resources/gxf/video_stream_serializer.hpp" +#include "holoscan/core/resources/gxf/std_entity_serializer.hpp" #include #include @@ -26,19 +26,29 @@ namespace holoscan { -void VideoStreamSerializer::setup(ComponentSpec& spec) { +void StdEntitySerializer::setup(ComponentSpec& spec) { spec.param(component_serializers_, "component_serializers", "Component serializers", "List of serializers for serializing and deserializing components"); + spec.param(verbose_warning_, + "verbose_warning", + "Verbose Warning", + "Whether or not to print verbose warning", + false); } -void VideoStreamSerializer::initialize() { +nvidia::gxf::StdEntitySerializer* StdEntitySerializer::get() const { + return static_cast(gxf_cptr_); +} + +void StdEntitySerializer::initialize() { // Set up prerequisite parameters before calling GXFOperator::initialize() auto frag = fragment(); auto component_serializer = - frag->make_resource("component_serializer"); - + frag->make_resource("std_component_serializer"); + component_serializer->gxf_cname(component_serializer->name().c_str()); + if (gxf_eid_ != 0) { component_serializer->gxf_eid(gxf_eid_); } add_arg(Arg("component_serializers") = std::vector>{component_serializer}); diff --git a/src/core/resources/gxf/transmitter.cpp b/src/core/resources/gxf/transmitter.cpp index a70da663..b8e5e687 100644 --- a/src/core/resources/gxf/transmitter.cpp +++ b/src/core/resources/gxf/transmitter.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -24,4 +24,8 @@ namespace holoscan { Transmitter::Transmitter(const std::string& name, nvidia::gxf::Transmitter* component) : GXFResource(name, component) {} +nvidia::gxf::Transmitter* Transmitter::get() const { + return static_cast(gxf_cptr_); +} + } // namespace holoscan diff --git a/src/core/resources/gxf/ucx_component_serializer.cpp b/src/core/resources/gxf/ucx_component_serializer.cpp index 91dc2a7f..c2c71101 100644 --- a/src/core/resources/gxf/ucx_component_serializer.cpp +++ b/src/core/resources/gxf/ucx_component_serializer.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -28,6 +28,10 @@ void UcxComponentSerializer::setup(ComponentSpec& spec) { spec.param(allocator_, "allocator", "Memory allocator", "Memory allocator for tensor components"); } +nvidia::gxf::UcxComponentSerializer* UcxComponentSerializer::get() const { + return static_cast(gxf_cptr_); +} + void UcxComponentSerializer::initialize() { HOLOSCAN_LOG_DEBUG("UcxComponentSerializer::initialize"); // Set up prerequisite parameters before calling GXFOperator::initialize() @@ -38,8 +42,10 @@ void UcxComponentSerializer::initialize() { args().begin(), args().end(), [](const auto& arg) { return (arg.name() == "allocator"); }); // Create an UnboundedAllocator if no allocator was provided if (has_allocator == args().end()) { - auto allocator = frag->make_resource("allocator"); + auto allocator = frag->make_resource("ucx_component_allocator"); add_arg(Arg("allocator") = allocator); + allocator->gxf_cname(allocator->name().c_str()); + if (gxf_eid_ != 0) { allocator->gxf_eid(gxf_eid_); } } GXFResource::initialize(); } 
diff --git a/src/core/resources/gxf/ucx_entity_serializer.cpp b/src/core/resources/gxf/ucx_entity_serializer.cpp index fa3919e1..081deee4 100644 --- a/src/core/resources/gxf/ucx_entity_serializer.cpp +++ b/src/core/resources/gxf/ucx_entity_serializer.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -37,7 +37,11 @@ void UcxEntitySerializer::setup(ComponentSpec& spec) { "verbose_warning", "Verbose Warning", "Whether or not to print verbose warning", - true); + false); +} + +nvidia::gxf::UcxEntitySerializer* UcxEntitySerializer::get() const { + return static_cast(gxf_cptr_); } void UcxEntitySerializer::initialize() { @@ -53,14 +57,16 @@ void UcxEntitySerializer::initialize() { if (has_component_serializers == args().end()) { std::vector> component_serializers; component_serializers.reserve(2); - // UcxHoloscanComponentSerializer handles Holoscan SDK types such as holoscan::gxf::GXFTensor + // UcxHoloscanComponentSerializer handles Holoscan SDK types such as holoscan::Message auto ucx_holoscan_component_serializer = frag->make_resource( "ucx_holoscan_component_serializer"); + ucx_holoscan_component_serializer->gxf_cname(ucx_holoscan_component_serializer->name().c_str()); component_serializers.push_back(ucx_holoscan_component_serializer); // UcxComponentSerializer handles nvidia::gxf::Tensor, nvidia::gxf::VideoBuffer, etc. 
auto ucx_component_serializer = frag->make_resource("ucx_component_serializer"); + ucx_component_serializer->gxf_cname(ucx_component_serializer->name().c_str()); component_serializers.push_back(ucx_component_serializer); // Note: Activation sequence of entities in GXF: diff --git a/src/core/resources/gxf/ucx_holoscan_component_serializer.cpp b/src/core/resources/gxf/ucx_holoscan_component_serializer.cpp index d37cf1c1..81016272 100644 --- a/src/core/resources/gxf/ucx_holoscan_component_serializer.cpp +++ b/src/core/resources/gxf/ucx_holoscan_component_serializer.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -38,8 +38,10 @@ void UcxHoloscanComponentSerializer::initialize() { args().begin(), args().end(), [](const auto& arg) { return (arg.name() == "allocator"); }); // Create an UnboundedAllocator if no allocator was provided if (has_allocator == args().end()) { - auto allocator = frag->make_resource("allocator"); + auto allocator = frag->make_resource("ucx_holoscan_component_allocator"); add_arg(Arg("allocator") = allocator); + allocator->gxf_cname(allocator->name().c_str()); + if (gxf_eid_ != 0) { allocator->gxf_eid(gxf_eid_); } } GXFResource::initialize(); } diff --git a/src/core/resources/gxf/ucx_receiver.cpp b/src/core/resources/gxf/ucx_receiver.cpp index 210112a1..aab6b14e 100644 --- a/src/core/resources/gxf/ucx_receiver.cpp +++ b/src/core/resources/gxf/ucx_receiver.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -20,6 +20,7 @@ #include #include +#include "gxf/ucx/ucx_serialization_buffer.hpp" #include "holoscan/core/component_spec.hpp" #include "holoscan/core/fragment.hpp" #include "holoscan/core/gxf/gxf_resource.hpp" @@ -29,29 +30,29 @@ namespace holoscan { UcxReceiver::UcxReceiver(const std::string& name, nvidia::gxf::Receiver* component) : Receiver(name, component) { - uint64_t capacity = 0; - HOLOSCAN_GXF_CALL_FATAL(GxfParameterGetUInt64(gxf_context_, gxf_cid_, "capacity", &capacity)); - capacity_ = capacity; - uint64_t policy = 0; - HOLOSCAN_GXF_CALL_FATAL(GxfParameterGetUInt64(gxf_context_, gxf_cid_, "policy", &policy)); - policy_ = policy; - const char* address; - HOLOSCAN_GXF_CALL_FATAL(GxfParameterGetStr(gxf_context_, gxf_cid_, "address", &address)); - address_ = std::string(address); - uint32_t port = 0; - HOLOSCAN_GXF_CALL_FATAL(GxfParameterGetUInt32(gxf_context_, gxf_cid_, "port", &port)); - port_ = port; + auto maybe_capacity = component->getParameter("capacity"); + if (!maybe_capacity) { throw std::runtime_error("Failed to get capacity"); } + capacity_ = maybe_capacity.value(); + + auto maybe_policy = component->getParameter("policy"); + if (!maybe_policy) { throw std::runtime_error("Failed to get policy"); } + policy_ = maybe_policy.value(); + + auto maybe_address = component->getParameter("address"); + if (!maybe_address) { throw std::runtime_error("Failed to get address"); } + address_ = maybe_address.value(); + + auto maybe_port = component->getParameter("port"); + if (!maybe_port) { throw std::runtime_error("Failed to get port"); } + port_ = maybe_port.value(); // get the serialization buffer object - gxf_uid_t buffer_cid; - HOLOSCAN_GXF_CALL_FATAL(GxfParameterGetHandle(gxf_context_, gxf_cid_, "buffer", &buffer_cid)); - gxf_tid_t ucx_serialization_buffer_tid{}; - HOLOSCAN_GXF_CALL_FATAL(GxfComponentTypeId( - gxf_context_, 
"nvidia::gxf::UcxSerializationBuffer", &ucx_serialization_buffer_tid)); - nvidia::gxf::SerializationBuffer* buffer_ptr; - HOLOSCAN_GXF_CALL_FATAL(GxfComponentPointer( - gxf_context_, gxf_cid_, ucx_serialization_buffer_tid, reinterpret_cast(&buffer_ptr))); - buffer_ = std::make_shared(std::string{buffer_ptr->name()}, buffer_ptr); + auto maybe_buffer = + component->getParameter>("buffer"); + if (!maybe_buffer) { throw std::runtime_error("Failed to get buffer"); } + auto buffer_handle = maybe_buffer.value(); + buffer_ = std::make_shared(std::string{buffer_handle->name()}, + buffer_handle.get()); } void UcxReceiver::setup(ComponentSpec& spec) { @@ -66,6 +67,10 @@ void UcxReceiver::setup(ComponentSpec& spec) { // spec.resource(gpu_device_, "Optional GPU device resource"); } +nvidia::gxf::UcxReceiver* UcxReceiver::get() const { + return static_cast(gxf_cptr_); +} + void UcxReceiver::initialize() { HOLOSCAN_LOG_DEBUG("UcxReceiver::initialize"); // Set up prerequisite parameters before calling GXFOperator::initialize() @@ -79,6 +84,8 @@ void UcxReceiver::initialize() { auto buffer = frag->make_resource("ucx_rx_serialization_buffer"); add_arg(Arg("buffer") = buffer); + buffer->gxf_cname(buffer->name().c_str()); + if (gxf_eid_ != 0) { buffer->gxf_eid(gxf_eid_); } } GXFResource::initialize(); } diff --git a/src/core/resources/gxf/ucx_serialization_buffer.cpp b/src/core/resources/gxf/ucx_serialization_buffer.cpp index b7005aed..9c1b20a4 100644 --- a/src/core/resources/gxf/ucx_serialization_buffer.cpp +++ b/src/core/resources/gxf/ucx_serialization_buffer.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -19,17 +19,29 @@ #include // setenv +#include #include #include "holoscan/core/component_spec.hpp" #include "holoscan/core/fragment.hpp" +#include "holoscan/core/resources/gxf/allocator.hpp" namespace holoscan { +// Note: UcxSerializationBuffer does not inherit from SerializationBuffer UcxSerializationBuffer::UcxSerializationBuffer(const std::string& name, - nvidia::gxf::SerializationBuffer* component) - : SerializationBuffer(name, component) { - // no additional parameters to set here + nvidia::gxf::UcxSerializationBuffer* component) + : gxf::GXFResource(name, component) { + auto maybe_buffer_size = component->getParameter("buffer_size"); + if (!maybe_buffer_size) { throw std::runtime_error("Failed to get maybe_buffer_size"); } + buffer_size_ = maybe_buffer_size.value(); + + auto maybe_allocator = + component->getParameter>("allocator"); + if (!maybe_allocator) { throw std::runtime_error("Failed to get allocator"); } + auto allocator_handle = maybe_allocator.value(); + allocator_ = + std::make_shared(std::string{allocator_handle->name()}, allocator_handle.get()); } void UcxSerializationBuffer::setup(ComponentSpec& spec) { @@ -63,6 +75,10 @@ void UcxSerializationBuffer::setup(ComponentSpec& spec) { default_buffer_size); } +nvidia::gxf::UcxSerializationBuffer* UcxSerializationBuffer::get() const { + return static_cast(gxf_cptr_); +} + void UcxSerializationBuffer::initialize() { // Set up prerequisite parameters before calling GXFOperator::initialize() auto frag = fragment(); @@ -72,7 +88,9 @@ void UcxSerializationBuffer::initialize() { args().begin(), args().end(), [](const auto& arg) { return (arg.name() == "allocator"); }); // Create an UnboundedAllocator if no allocator was provided if (has_allocator == args().end()) { - auto allocator = frag->make_resource("allocator"); + auto allocator = frag->make_resource("ucx_serialization_buffer_allocator"); + 
allocator->gxf_cname(allocator->name().c_str()); + if (gxf_eid_ != 0) { allocator->gxf_eid(gxf_eid_); } add_arg(Arg("allocator") = allocator); } GXFResource::initialize(); diff --git a/src/core/resources/gxf/ucx_transmitter.cpp b/src/core/resources/gxf/ucx_transmitter.cpp index 423667d9..6bda0994 100644 --- a/src/core/resources/gxf/ucx_transmitter.cpp +++ b/src/core/resources/gxf/ucx_transmitter.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -20,6 +20,7 @@ #include #include +#include "gxf/ucx/ucx_serialization_buffer.hpp" #include "holoscan/core/component_spec.hpp" #include "holoscan/core/fragment.hpp" #include "holoscan/core/gxf/gxf_resource.hpp" @@ -30,41 +31,41 @@ namespace holoscan { UcxTransmitter::UcxTransmitter(const std::string& name, nvidia::gxf::Transmitter* component) : Transmitter(name, component) { - uint64_t capacity = 0; - HOLOSCAN_GXF_CALL_FATAL(GxfParameterGetUInt64(gxf_context_, gxf_cid_, "capacity", &capacity)); - capacity_ = capacity; - uint64_t policy = 0; - HOLOSCAN_GXF_CALL_FATAL(GxfParameterGetUInt64(gxf_context_, gxf_cid_, "policy", &policy)); - policy_ = policy; - const char* receiver_address; - HOLOSCAN_GXF_CALL_FATAL( - GxfParameterGetStr(gxf_context_, gxf_cid_, "receiver_address", &receiver_address)); - receiver_address_ = std::string(receiver_address); - uint32_t port = 0; - HOLOSCAN_GXF_CALL_FATAL(GxfParameterGetUInt32(gxf_context_, gxf_cid_, "port", &port)); - port_ = port; - const char* local_address; - HOLOSCAN_GXF_CALL_FATAL( - GxfParameterGetStr(gxf_context_, gxf_cid_, "local_address", &local_address)); - local_address_ = std::string(receiver_address); - uint32_t local_port = 0; - HOLOSCAN_GXF_CALL_FATAL(GxfParameterGetUInt32(gxf_context_, 
gxf_cid_, "local_port", &local_port)); - local_port_ = local_port; - uint32_t maximum_connection_retries = 0; - HOLOSCAN_GXF_CALL_FATAL(GxfParameterGetUInt32( - gxf_context_, gxf_cid_, "maximum_connection_retries", &maximum_connection_retries)); - maximum_connection_retries_ = maximum_connection_retries; + auto maybe_capacity = component->getParameter("capacity"); + if (!maybe_capacity) { throw std::runtime_error("Failed to get capacity"); } + capacity_ = maybe_capacity.value(); + + auto maybe_policy = component->getParameter("policy"); + if (!maybe_policy) { throw std::runtime_error("Failed to get policy"); } + policy_ = maybe_policy.value(); + + auto maybe_receiver_address = component->getParameter("receiver_address"); + if (!maybe_receiver_address) { throw std::runtime_error("Failed to get receiver_address"); } + receiver_address_ = maybe_receiver_address.value(); + + auto maybe_port = component->getParameter("port"); + if (!maybe_port) { throw std::runtime_error("Failed to get port"); } + port_ = maybe_port.value(); + + auto maybe_local_address = component->getParameter("local_address"); + if (!maybe_local_address) { throw std::runtime_error("Failed to get local_address"); } + local_address_ = maybe_local_address.value(); + + auto maybe_local_port = component->getParameter("local_port"); + if (!maybe_local_port) { throw std::runtime_error("Failed to get local_port"); } + local_port_ = maybe_local_port.value(); + + auto maybe_max_retry = component->getParameter("max_retry"); + if (!maybe_max_retry) { throw std::runtime_error("Failed to get maximum_connection_retries"); } + maximum_connection_retries_ = maybe_max_retry.value(); // get the serialization buffer object - gxf_uid_t buffer_cid; - HOLOSCAN_GXF_CALL_FATAL(GxfParameterGetHandle(gxf_context_, gxf_cid_, "buffer", &buffer_cid)); - gxf_tid_t ucx_serialization_buffer_tid{}; - HOLOSCAN_GXF_CALL_FATAL(GxfComponentTypeId( - gxf_context_, "nvidia::gxf::UcxSerializationBuffer", &ucx_serialization_buffer_tid)); - 
nvidia::gxf::SerializationBuffer* buffer_ptr; - HOLOSCAN_GXF_CALL_FATAL(GxfComponentPointer( - gxf_context_, gxf_cid_, ucx_serialization_buffer_tid, reinterpret_cast(&buffer_ptr))); - buffer_ = std::make_shared(std::string{buffer_ptr->name()}, buffer_ptr); + auto maybe_buffer = + component->getParameter>("buffer"); + if (!maybe_buffer) { throw std::runtime_error("Failed to get buffer"); } + auto buffer_handle = maybe_buffer.value(); + buffer_ = std::make_shared(std::string{buffer_handle->name()}, + buffer_handle.get()); } void UcxTransmitter::setup(ComponentSpec& spec) { @@ -99,6 +100,10 @@ void UcxTransmitter::setup(ComponentSpec& spec) { // spec.resource(gpu_device_, "Optional GPU device resource"); } +nvidia::gxf::UcxTransmitter* UcxTransmitter::get() const { + return static_cast(gxf_cptr_); +} + void UcxTransmitter::initialize() { HOLOSCAN_LOG_DEBUG("UcxTransmitter::initialize"); // Set up prerequisite parameters before calling GXFOperator::initialize() @@ -112,6 +117,8 @@ void UcxTransmitter::initialize() { auto buffer = frag->make_resource("ucx_tx_serialization_buffer"); add_arg(Arg("buffer") = buffer); + buffer->gxf_cname(buffer->name().c_str()); + if (gxf_eid_ != 0) { buffer->gxf_eid(gxf_eid_); } } GXFResource::initialize(); } diff --git a/src/core/scheduler.cpp b/src/core/scheduler.cpp index abad6eda..ac4c4f80 100644 --- a/src/core/scheduler.cpp +++ b/src/core/scheduler.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -45,4 +45,16 @@ YAML::Node Scheduler::to_yaml_node() const { return node; } +void Scheduler::reset_graph_entities() { + HOLOSCAN_LOG_TRACE("Scheduler '{}'::reset_graph_entities", name_); + for (auto& [_, resource] : resources_) { + if (resource) { + auto gxf_resource = std::dynamic_pointer_cast(resource); + if (gxf_resource) { gxf_resource->reset_gxf_graph_entity(); } + resource->reset_graph_entities(); + } + } + Component::reset_graph_entities(); +} + } // namespace holoscan diff --git a/src/core/schedulers/gxf/event_based_scheduler.cpp b/src/core/schedulers/gxf/event_based_scheduler.cpp new file mode 100644 index 00000000..b6e5909e --- /dev/null +++ b/src/core/schedulers/gxf/event_based_scheduler.cpp @@ -0,0 +1,83 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "holoscan/core/schedulers/gxf/event_based_scheduler.hpp" + +#include + +#include "holoscan/core/component_spec.hpp" +#include "holoscan/core/fragment.hpp" + +namespace holoscan { + +void EventBasedScheduler::setup(ComponentSpec& spec) { + spec.param(clock_, + "clock", + "Clock", + "The clock used by the scheduler to define flow of time. 
Typically this " + "would be a std::shared_ptr."); + spec.param( + worker_thread_number_, "worker_thread_number", "Thread Number", "Number of threads", 1L); + spec.param(stop_on_deadlock_, + "stop_on_deadlock", + "Stop on dead end", + "If enabled the scheduler will stop when all entities are in a waiting state, but " + "no periodic entity exists to break the dead end. Should be disabled when " + "scheduling conditions can be changed by external actors, for example by clearing " + "queues manually.", + true); + spec.param(max_duration_ms_, + "max_duration_ms", + "Max Duration [ms]", + "The maximum duration for which the scheduler will execute (in ms). If not " + "specified the scheduler will run until all work is done. If periodic terms are " + "present this means the application will run indefinitely", + ParameterFlag::kOptional); + spec.param(stop_on_deadlock_timeout_, + "stop_on_deadlock_timeout", + "Delay (in ms) until stop_on_deadlock kicks in", + "Scheduler will wait this amount of time (in ms) before determining that it is in " + "deadlock and should stop. It will reset if a job comes in during the wait. A " + "negative value means not stop on deadlock. This parameter only applies when " + "stop_on_deadlock=true", + 0L); +} + +nvidia::gxf::EventBasedScheduler* EventBasedScheduler::get() const { + return static_cast(gxf_cptr_); +} + +void EventBasedScheduler::initialize() { + // Set up prerequisite parameters before calling Scheduler::initialize() + auto frag = fragment(); + + // Find if there is an argument for 'clock' + auto has_clock = std::find_if( + args().begin(), args().end(), [](const auto& arg) { return (arg.name() == "clock"); }); + // Create the clock if there was no argument provided. 
+ if (has_clock == args().end()) { + clock_ = frag->make_resource("event_based_scheduler__realtime_clock"); + clock_->gxf_cname(clock_->name().c_str()); + if (gxf_eid_ != 0) { clock_->gxf_eid(gxf_eid_); } + add_arg(clock_.get()); + } + + // parent class initialize() call must be after the argument additions above + Scheduler::initialize(); +} + +} // namespace holoscan diff --git a/src/core/schedulers/gxf/greedy_scheduler.cpp b/src/core/schedulers/gxf/greedy_scheduler.cpp index c282018b..49c85e46 100644 --- a/src/core/schedulers/gxf/greedy_scheduler.cpp +++ b/src/core/schedulers/gxf/greedy_scheduler.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -63,6 +63,10 @@ void GreedyScheduler::setup(ComponentSpec& spec) { 0L); } +nvidia::gxf::GreedyScheduler* GreedyScheduler::get() const { + return static_cast(gxf_cptr_); +} + void GreedyScheduler::initialize() { // Set up prerequisite parameters before calling Scheduler::initialize() auto frag = fragment(); @@ -72,7 +76,9 @@ void GreedyScheduler::initialize() { args().begin(), args().end(), [](const auto& arg) { return (arg.name() == "clock"); }); // Create the BooleanCondition if there is no argument provided. 
if (has_clock == args().end()) { - clock_ = frag->make_resource("realtime_clock"); + clock_ = frag->make_resource("greedy_scheduler__realtime_clock"); + clock_->gxf_cname(clock_->name().c_str()); + if (gxf_eid_ != 0) { clock_->gxf_eid(gxf_eid_); } add_arg(clock_.get()); } diff --git a/src/core/schedulers/gxf/multithread_scheduler.cpp b/src/core/schedulers/gxf/multithread_scheduler.cpp index c2007691..ca8b647d 100644 --- a/src/core/schedulers/gxf/multithread_scheduler.cpp +++ b/src/core/schedulers/gxf/multithread_scheduler.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -63,6 +63,10 @@ void MultiThreadScheduler::setup(ComponentSpec& spec) { 0L); } +nvidia::gxf::MultiThreadScheduler* MultiThreadScheduler::get() const { + return static_cast(gxf_cptr_); +} + void MultiThreadScheduler::initialize() { // Set up prerequisite parameters before calling Scheduler::initialize() auto frag = fragment(); @@ -72,7 +76,9 @@ void MultiThreadScheduler::initialize() { args().begin(), args().end(), [](const auto& arg) { return (arg.name() == "clock"); }); // Create the clock if there was no argument provided. if (has_clock == args().end()) { - clock_ = frag->make_resource("realtime_clock"); + clock_ = frag->make_resource("multithread_scheduler__realtime_clock"); + clock_->gxf_cname(clock_->name().c_str()); + if (gxf_eid_ != 0) { clock_->gxf_eid(gxf_eid_); } add_arg(clock_.get()); } diff --git a/src/core/system/network_utils.cpp b/src/core/system/network_utils.cpp index ec51b4c0..7d70c50d 100644 --- a/src/core/system/network_utils.cpp +++ b/src/core/system/network_utils.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -24,7 +24,7 @@ #include // for rand() #include // for memset() #include -#include // for unique_ptr +#include // for unique_ptr #include // for istringstream #include #include diff --git a/src/logger/logger.cpp b/src/logger/logger.cpp index 3e0a9530..757e17bb 100644 --- a/src/logger/logger.cpp +++ b/src/logger/logger.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,20 +17,34 @@ #include "holoscan/logger/logger.hpp" -#include -#include -#include - #include #include #include #include +#include "common/logger/spdlog_logger.hpp" + namespace holoscan { +using nvidia::logger::SpdlogLogger; + bool Logger::log_pattern_set_by_user = false; bool Logger::log_level_set_by_user = false; +class HoloscanLogger : public SpdlogLogger { + public: + static HoloscanLogger& instance() { + static HoloscanLogger logger("holoscan"); + return logger; + } + + HoloscanLogger(const HoloscanLogger&) = delete; + HoloscanLogger& operator=(const HoloscanLogger&) = delete; + + private: + using SpdlogLogger::SpdlogLogger; +}; + static std::string get_concrete_log_pattern(std::string pattern) { // Convert to uppercase std::string log_pattern = pattern; @@ -53,20 +67,8 @@ static std::string get_concrete_log_pattern(std::string pattern) { } std::string& Logger::pattern() { - static std::string log_pattern = "[%^%l%$] [%s:%#] %v"; - return log_pattern; -} - -static std::shared_ptr& get_logger(const std::string& name = "holoscan") { - static auto logger = [&name] { - auto tmp_logger = 
spdlog::stderr_color_mt(name); - // Set default log level and pattern - tmp_logger->set_level(spdlog::level::info); - tmp_logger->set_pattern(Logger::pattern()); - return tmp_logger; - }(); - - return logger; + HoloscanLogger& logger = HoloscanLogger::instance(); + return logger.pattern_string(); } void set_log_level(LogLevel level) { @@ -128,13 +130,15 @@ void Logger::set_level(LogLevel level, bool* is_overridden_by_env) { level = env_level; } - get_logger()->set_level(static_cast(level)); + HoloscanLogger& logger = HoloscanLogger::instance(); + logger.level(static_cast(level)); Logger::log_level_set_by_user = true; } LogLevel Logger::level() { - return static_cast(get_logger()->level()); + HoloscanLogger& logger = HoloscanLogger::instance(); + return static_cast(logger.level()); } void Logger::set_pattern(std::string pattern, bool* is_overridden_by_env) { @@ -173,54 +177,27 @@ void Logger::set_pattern(std::string pattern, bool* is_overridden_by_env) { case LogLevel::DEBUG: case LogLevel::TRACE: // Display info for [time] [thread] [tool] [level] [filename:line_number] message - pattern = "[%Y-%m-%d %H:%M:%S.%e][%t][%n][%^%l%$][%s:%#] %v"; + pattern = "[%Y-%m-%d %H:%M:%S.%e] [%t] [%n] [%^%l%$] [%s:%#] %v"; } } } if (!pattern.empty()) { Logger::pattern() = pattern; - get_logger()->set_pattern(pattern); + HoloscanLogger& logger = HoloscanLogger::instance(); + logger.pattern(pattern.c_str()); } } -bool Logger::should_backtrace() { - return get_logger()->should_backtrace(); -} - -void Logger::disable_backtrace() { - return get_logger()->disable_backtrace(); -} - -void Logger::enable_backtrace(size_t n_messages) { - return get_logger()->enable_backtrace(n_messages); -} - -void Logger::dump_backtrace() { - return get_logger()->dump_backtrace(); -} - -void Logger::flush() { - return get_logger()->flush(); -} - -LogLevel Logger::flush_level() { - return static_cast(get_logger()->flush_level()); -} - -void Logger::flush_on(LogLevel level) { - 
get_logger()->flush_on(static_cast(level)); -} - -void Logger::log_message(const char* file, int line, const char* function_name, LogLevel level, +void Logger::log_message(const char* file, int line, const char* name, LogLevel level, fmt::string_view format, fmt::format_args args) { - get_logger()->log(spdlog::source_loc{file, line, function_name}, - static_cast(level), - fmt::vformat(format, args)); + HoloscanLogger& logger = HoloscanLogger::instance(); + logger.log(file, line, name, static_cast(level), fmt::vformat(format, args).c_str()); } void Logger::log_message(LogLevel level, fmt::string_view format, fmt::format_args args) { - get_logger()->log(static_cast(level), fmt::vformat(format, args)); + HoloscanLogger& logger = HoloscanLogger::instance(); + logger.log(nullptr, 0, nullptr, static_cast(level), fmt::vformat(format, args).c_str()); } } // namespace holoscan diff --git a/src/operators/async_ping_rx/async_ping_rx.cpp b/src/operators/async_ping_rx/async_ping_rx.cpp index 10ac62ac..3ac83bf2 100644 --- a/src/operators/async_ping_rx/async_ping_rx.cpp +++ b/src/operators/async_ping_rx/async_ping_rx.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -34,8 +34,7 @@ void AsyncPingRxOp::async_ping() { std::this_thread::sleep_for(std::chrono::milliseconds(delay_.get())); - if (async_condition_->event_state() == - AsynchronousEventState::EVENT_WAITING) { + if (async_condition_->event_state() == AsynchronousEventState::EVENT_WAITING) { async_condition_->event_state(AsynchronousEventState::EVENT_DONE); } } @@ -55,15 +54,13 @@ void AsyncPingRxOp::initialize() { auto frag = fragment(); // Find if there is an argument for 'async_condition_' - auto has_async_condition = - std::find_if(args().begin(), args().end(), [](const auto& arg) { - return (arg.name() == "async_condition"); - }); + auto has_async_condition = std::find_if(args().begin(), args().end(), [](const auto& arg) { + return (arg.name() == "async_condition"); + }); // Create the AsynchronousCondition if there is no argument provided. if (has_async_condition == args().end()) { - async_condition_ = - frag->make_condition("async_condition"); + async_condition_ = frag->make_condition("async_condition"); add_arg(async_condition_.get()); } diff --git a/src/operators/async_ping_tx/async_ping_tx.cpp b/src/operators/async_ping_tx/async_ping_tx.cpp index 72c91362..05314ad3 100644 --- a/src/operators/async_ping_tx/async_ping_tx.cpp +++ b/src/operators/async_ping_tx/async_ping_tx.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -34,8 +34,7 @@ void AsyncPingTxOp::async_ping() { std::this_thread::sleep_for(std::chrono::milliseconds(delay_.get())); - if (async_condition_->event_state() == - AsynchronousEventState::EVENT_WAITING) { + if (async_condition_->event_state() == AsynchronousEventState::EVENT_WAITING) { async_condition_->event_state(AsynchronousEventState::EVENT_DONE); } } @@ -57,15 +56,13 @@ void AsyncPingTxOp::initialize() { auto frag = fragment(); // Find if there is an argument for 'async_condition_' - auto has_async_condition = - std::find_if(args().begin(), args().end(), [](const auto& arg) { - return (arg.name() == "async_condition"); - }); + auto has_async_condition = std::find_if(args().begin(), args().end(), [](const auto& arg) { + return (arg.name() == "async_condition"); + }); // Create the BooleanCondition if there is no argument provided. if (has_async_condition == args().end()) { - async_condition_ = - frag->make_condition("async_condition"); + async_condition_ = frag->make_condition("async_condition"); add_arg(async_condition_.get()); } diff --git a/src/operators/bayer_demosaic/bayer_demosaic.cpp b/src/operators/bayer_demosaic/bayer_demosaic.cpp index 86980223..e3816cf5 100644 --- a/src/operators/bayer_demosaic/bayer_demosaic.cpp +++ b/src/operators/bayer_demosaic/bayer_demosaic.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -23,11 +23,11 @@ #include #include +#include "gxf/std/tensor.hpp" #include "holoscan/core/execution_context.hpp" #include "holoscan/core/executor.hpp" #include "holoscan/core/fragment.hpp" #include "holoscan/core/gxf/entity.hpp" -#include "holoscan/core/gxf/gxf_tensor.hpp" #include "holoscan/core/io_context.hpp" #include "holoscan/core/io_spec.hpp" #include "holoscan/core/operator_spec.hpp" @@ -68,20 +68,21 @@ void BayerDemosaicOp::setup(OperatorSpec& spec) { "Name of the output tensor", std::string("")); spec.param(pool_, "pool", "Pool", "Pool to allocate the output message."); - spec.param(bayer_interp_mode_, - "interpolation_mode", - "Interpolation used for demosaicing", - "The interpolation model to be used for demosaicing (default UNDEFINED). Values " - "available at: " - "https://docs.nvidia.com/cuda/npp/" - "group__typedefs__npp.html#ga2b58ebd329141d560aa4367f1708f191", - 0); + spec.param( + bayer_interp_mode_, + "interpolation_mode", + "Interpolation used for demosaicing", + "The interpolation model to be used for demosaicing (default: NPPI_INTER_UNDEFINED). Values " + "available at: " + "https://docs.nvidia.com/cuda/npp/nppdefs.html?highlight=" + "Two%20parameter%20cubic%20filter#c.NppiInterpolationMode", + 0); spec.param(bayer_grid_pos_, "bayer_grid_pos", "Bayer grid position", - "The Bayer grid position (default GBRG). Values available at: " - "https://docs.nvidia.com/cuda/npp/" - "group__typedefs__npp.html#ga5597309d6766fb2dffe155990d915ecb", + "The Bayer grid position (default: NPPI_BAYER_GBRG). 
Values available at: " + "https://docs.nvidia.com/cuda/npp/nppdefs.html?highlight=" + "Two%20parameter%20cubic%20filter#c.NppiBayerGridPosition", 2); spec.param(generate_alpha_, "generate_alpha", @@ -210,8 +211,8 @@ void BayerDemosaicOp::compute(InputContext& op_input, OutputContext& op_output, auto in_tensor = maybe_tensor; // Get needed information from the tensor - // cast Holoscan::Tensor to GXFTensor so attribute access code can remain as-is - holoscan::gxf::GXFTensor in_tensor_gxf{in_tensor->dl_ctx()}; + // cast Holoscan::Tensor to nvidia::gxf::Tensor so attribute access code can remain as-is + nvidia::gxf::Tensor in_tensor_gxf{in_tensor->dl_ctx()}; input_data_ptr = in_tensor_gxf.pointer(); if (input_data_ptr == nullptr) { @@ -249,7 +250,10 @@ void BayerDemosaicOp::compute(InputContext& op_input, OutputContext& op_output, nvidia::gxf::Shape{rows, columns, out_channels}, element_size)}}, false); - if (!out_message) { throw std::runtime_error("Failed to allocate tensors in output message"); } + if (!out_message) { + throw std::runtime_error(fmt::format("Failed to allocate tensors in output message: {}", + GxfResultStr(out_message.error()))); + } // get the tensor of interest const auto maybe_output_tensor = diff --git a/src/operators/format_converter/format_converter.cpp b/src/operators/format_converter/format_converter.cpp index 5b769dba..d3933589 100644 --- a/src/operators/format_converter/format_converter.cpp +++ b/src/operators/format_converter/format_converter.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -22,11 +22,11 @@ #include #include +#include "gxf/std/tensor.hpp" #include "holoscan/core/execution_context.hpp" #include "holoscan/core/executor.hpp" #include "holoscan/core/fragment.hpp" #include "holoscan/core/gxf/entity.hpp" -#include "holoscan/core/gxf/gxf_tensor.hpp" #include "holoscan/core/io_context.hpp" #include "holoscan/core/io_spec.hpp" #include "holoscan/core/operator_spec.hpp" @@ -345,8 +345,8 @@ void FormatConverterOp::compute(InputContext& op_input, OutputContext& op_output auto in_tensor = maybe_tensor; // Get needed information from the tensor - // cast Holoscan::Tensor to GXFTensor so attribute access code can remain as-is - holoscan::gxf::GXFTensor in_tensor_gxf{in_tensor->dl_ctx()}; + // cast Holoscan::Tensor to nvidia::gxf::Tensor to use it's APIs directly + nvidia::gxf::Tensor in_tensor_gxf{in_tensor->dl_ctx()}; out_shape = in_tensor_gxf.shape(); in_tensor_data = in_tensor_gxf.pointer(); if (in_tensor_data == nullptr) { @@ -470,9 +470,9 @@ void FormatConverterOp::compute(InputContext& op_input, OutputContext& op_output nvidia::gxf::ComputeTrivialStrides(out_shape, dst_typesize)}}, false); - if (!out_message) { std::runtime_error("failed to create out_message"); } + if (!out_message) { throw std::runtime_error("failed to create out_message"); } const auto out_tensor = out_message.value().get(); - if (!out_tensor) { std::runtime_error("failed to create out_tensor"); } + if (!out_tensor) { throw std::runtime_error("failed to create out_tensor"); } // Set tensor to constant using NPP if (in_channels == 2 || in_channels == 3 || in_channels == 4) { diff --git a/src/operators/holoviz/holoviz.cpp b/src/operators/holoviz/holoviz.cpp index bda1bad9..591b4a5e 100644 --- a/src/operators/holoviz/holoviz.cpp +++ b/src/operators/holoviz/holoviz.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. 
All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -811,6 +811,7 @@ void HolovizOp::compute(InputContext& op_input, OutputContext& op_output, // information for (auto&& message : messages) { const auto tensors = message.findAll(); + HOLOSCAN_LOG_DEBUG("tensors.size()={}", tensors.value().size()); for (auto&& tensor : tensors.value()) { // check if an input spec with the same tensor name already exist const std::string tensor_name(tensor->name()); @@ -832,6 +833,8 @@ void HolovizOp::compute(InputContext& op_input, OutputContext& op_output, } } const auto video_buffers = message.findAll(); + HOLOSCAN_LOG_DEBUG("video_buffers.size()={}", video_buffers.value().size()); + for (auto&& video_buffer : video_buffers.value()) { // check if an input spec with the same tensor name already exist const std::string tensor_name(video_buffer->name()); @@ -886,6 +889,7 @@ void HolovizOp::compute(InputContext& op_input, OutputContext& op_output, // pick the first one with that name break; } + // check for video if no tensor found maybe_input_video = message->get(input_spec.tensor_name_.c_str()); if (maybe_input_video) { // pick the first one with that name diff --git a/src/operators/segmentation_postprocessor/segmentation_postprocessor.cpp b/src/operators/segmentation_postprocessor/segmentation_postprocessor.cpp index 75a4bbb3..ca074f92 100644 --- a/src/operators/segmentation_postprocessor/segmentation_postprocessor.cpp +++ b/src/operators/segmentation_postprocessor/segmentation_postprocessor.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -133,8 +133,8 @@ void SegmentationPostprocessorOp::compute(InputContext& op_input, OutputContext& nvidia::gxf::Shape output_shape{shape.height, shape.width, 1}; // get Handle to underlying nvidia::gxf::Allocator from std::shared_ptr - auto allocator = nvidia::gxf::Handle::Create(context.context(), - allocator_->gxf_cid()); + auto allocator = + nvidia::gxf::Handle::Create(context.context(), allocator_->gxf_cid()); out_tensor.value()->reshape( output_shape, nvidia::gxf::MemoryStorageType::kDevice, allocator.value()); if (!out_tensor.value()->pointer()) { diff --git a/src/operators/v4l2_video_capture/v4l2_video_capture.cpp b/src/operators/v4l2_video_capture/v4l2_video_capture.cpp index c818cc30..e25e09ee 100644 --- a/src/operators/v4l2_video_capture/v4l2_video_capture.cpp +++ b/src/operators/v4l2_video_capture/v4l2_video_capture.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -18,17 +18,20 @@ #include "holoscan/operators/v4l2_video_capture/v4l2_video_capture.hpp" #include +#include #include #include #include -#include -#include + #include +#include #include #include +#include +#include -#include "holoscan/core/resources/gxf/allocator.hpp" #include "holoscan/core/execution_context.hpp" +#include "holoscan/core/resources/gxf/allocator.hpp" #define CLEAR(x) memset(&(x), 0, sizeof(x)) @@ -76,6 +79,17 @@ void V4L2VideoCaptureOp::setup(OperatorSpec& spec) { "Pixel Format", "Pixel format of capture stream (little endian four character code (fourcc))", std::string(kDefaultPixelFormat)); + spec.param(exposure_time_, + "exposure_time", + "Exposure Time", + "Exposure time of the camera sensor in multiples of 100 μs (e.g. setting " + "exposure_time to 100 is 10 ms). See V4L2_CID_EXPOSURE_ABSOLUTE.", + ParameterFlag::kOptional); + spec.param(gain_, + "gain", + "Gain", + "Gain of the camera sensor. 
See V4L2_CID_GAIN.", + ParameterFlag::kOptional); } void V4L2VideoCaptureOp::initialize() { @@ -87,6 +101,7 @@ void V4L2VideoCaptureOp::start() { v4l2_set_mode(); v4l2_check_formats(); v4l2_set_formats(); + v4l2_set_camera_settings(); v4l2_requestbuffers(); v4l2_start(); } @@ -103,13 +118,9 @@ void V4L2VideoCaptureOp::compute(InputContext& op_input, OutputContext& op_outpu // Create video buffer auto out_message = nvidia::gxf::Entity::New(context.context()); - if (!out_message) { - throw std::runtime_error("Failed to allocate video output; terminating."); - } + if (!out_message) { throw std::runtime_error("Failed to allocate video output; terminating."); } auto video_buffer = out_message.value().add(); - if (!video_buffer) { - throw std::runtime_error("Failed to allocate video buffer; terminating."); - } + if (!video_buffer) { throw std::runtime_error("Failed to allocate video buffer; terminating."); } // Get Handle to underlying nvidia::gxf::Allocator from std::shared_ptr auto allocator = @@ -137,7 +148,6 @@ void V4L2VideoCaptureOp::compute(InputContext& op_input, OutputContext& op_outpu throw std::runtime_error( fmt::format("Failed to queue buffer {} on {}", buf.index, device_.get().c_str())); } - } else { // Wrap memory into output buffer video_buffer.value()->wrapMemory( @@ -173,9 +183,7 @@ void V4L2VideoCaptureOp::stop() { free(buffers_); // close FD - if (-1 == v4l2_close(fd_)) { - throw std::runtime_error("Close failed"); - } + if (-1 == v4l2_close(fd_)) { throw std::runtime_error("Close failed"); } fd_ = -1; } @@ -210,18 +218,16 @@ void V4L2VideoCaptureOp::v4l2_requestbuffers() { if (-1 == ioctl(fd_, VIDIOC_REQBUFS, &req)) { if (errno == EINVAL) throw std::runtime_error(fmt::format( - "Video capturing or DMABUF streaming is not supported type {} memory {} count {}", - req.type, - req.memory, - req.count)); + "Video capturing or DMABUF streaming is not supported type {} memory {} count {}", + req.type, + req.memory, + req.count)); else throw 
std::runtime_error("Request buffers Ioctl failed"); } buffers_ = (Buffer*)calloc(req.count, sizeof(*buffers_)); - if (!buffers_) { - throw std::runtime_error("Allocate buffers failed"); - } + if (!buffers_) { throw std::runtime_error("Allocate buffers failed"); } for (uint32_t i = 0; i < req.count; ++i) { struct v4l2_buffer buf; @@ -236,9 +242,7 @@ void V4L2VideoCaptureOp::v4l2_requestbuffers() { buffers_[i].length = buf.length; buffers_[i].ptr = mmap(NULL, buf.length, PROT_READ | PROT_WRITE, MAP_SHARED, fd_, buf.m.offset); - if (MAP_FAILED == buffers_[i].ptr) { - throw std::runtime_error("MMAP failed"); - } + if (MAP_FAILED == buffers_[i].ptr) { throw std::runtime_error("MMAP failed"); } } } @@ -344,6 +348,112 @@ void V4L2VideoCaptureOp::v4l2_set_formats() { } } +bool V4L2VideoCaptureOp::v4l2_camera_supports_control(int cid, const char* control_name) { + struct v4l2_queryctrl queryctrl; + + memset(&queryctrl, 0, sizeof(queryctrl)); + queryctrl.id = cid; + + if (ioctl(fd_, VIDIOC_QUERYCTRL, &queryctrl) == -1) { + // EINVAL indicates that the control is not supported + if (errno == EINVAL) { + return false; + } else { + throw std::runtime_error(fmt::format("Querying {} failed", control_name)); + } + } + + return true; +} + +// Set the device's control settings if supported +void V4L2VideoCaptureOp::v4l2_set_camera_control(v4l2_control control, const char* control_name, + bool warn) { + HOLOSCAN_LOG_DEBUG(fmt::format("Setting {} to {}", control_name, control.value)); + if (v4l2_camera_supports_control(control.id, control_name)) { + if (ioctl(fd_, VIDIOC_S_CTRL, &control) == -1) { + HOLOSCAN_LOG_DEBUG(fmt::format("Setting {} to {} failed", control_name, control.value)); + throw std::runtime_error(fmt::format("Setting {} to {} failed", control_name, control.value)); + } + } else { + auto msg = fmt::format("Device does not support {}", control_name); + if (warn) { + HOLOSCAN_LOG_WARN(msg); + } else { + HOLOSCAN_LOG_DEBUG(msg); + } + } +} + +void 
V4L2VideoCaptureOp::v4l2_set_camera_settings() { + struct v4l2_capability caps; + CLEAR(caps); + + // To check if the dev is v4l2loopback + if (ioctl(fd_, VIDIOC_QUERYCAP, &caps) == -1) { + throw std::runtime_error("Querying video capabilities failed"); + } + std::string busInfo = reinterpret_cast(caps.bus_info); + if (busInfo.find("v4l2loopback") != std::string::npos) { + // Return before setting the camera parameters as loopback option + // does not have camera settings to run. + HOLOSCAN_LOG_DEBUG("Found a v4l2loopback device"); + return; + } + + struct v4l2_control control; + CLEAR(control); + + // Set Exposure + if (exposure_time_.try_get().has_value()) { + // Manual exposure: try EXPOSURE_SHUTTER_PRIORITY first (manual exposure, auto iris) + control.id = V4L2_CID_EXPOSURE_AUTO; + control.value = V4L2_EXPOSURE_SHUTTER_PRIORITY; + try { + v4l2_set_camera_control(control, "V4L2_CID_EXPOSURE_AUTO", false); + } catch (std::exception& e) { + // If fails, try setting to full manual mode + control.value = V4L2_EXPOSURE_MANUAL; + v4l2_set_camera_control(control, "V4L2_CID_EXPOSURE_AUTO", true); + } + // Then set the value + CLEAR(control); + control.id = V4L2_CID_EXPOSURE_ABSOLUTE; + control.value = exposure_time_; + v4l2_set_camera_control(control, "V4L2_CID_EXPOSURE_ABSOLUTE", true); + } else { + // Auto exposure: try fully auto first (auto exposure, auto iris) + control.id = V4L2_CID_EXPOSURE_AUTO; + control.value = V4L2_EXPOSURE_AUTO; + try { + v4l2_set_camera_control(control, "V4L2_CID_EXPOSURE_AUTO", false); + } catch (std::exception& e) { + // If fails, try setting to APERTURE_PRIORITY (auto exposure, manual iris) + control.value = V4L2_EXPOSURE_APERTURE_PRIORITY; + v4l2_set_camera_control(control, "V4L2_CID_EXPOSURE_AUTO", false); + } + } + + // Set Gain + if (gain_.try_get().has_value()) { + // Manual: turn auto gain off + control.id = V4L2_CID_AUTOGAIN; + control.value = 0; + v4l2_set_camera_control(control, "V4L2_CID_AUTOGAIN", false); + + // Then set value 
+ CLEAR(control); + control.id = V4L2_CID_GAIN; + control.value = gain_; + v4l2_set_camera_control(control, "V4L2_CID_GAIN", true); + } else { + // Auto gain + control.id = V4L2_CID_AUTOGAIN; + control.value = 1; + v4l2_set_camera_control(control, "V4L2_CID_AUTOGAIN", false); + } +} + void V4L2VideoCaptureOp::v4l2_start() { // Start streaming on V4L2 device // queue capture plane into device @@ -379,22 +489,16 @@ void V4L2VideoCaptureOp::v4l2_read_buffer(v4l2_buffer& buf) { int r; r = select(fd_ + 1, &fds, NULL, NULL, &tv); - if (-1 == r) { - throw std::runtime_error("Error in querying file descriptor"); - } - if (0 == r) { - throw std::runtime_error("Querying file descriptor timed out"); - } + if (-1 == r) { throw std::runtime_error("Error in querying file descriptor"); } + if (0 == r) { throw std::runtime_error("Querying file descriptor timed out"); } buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; buf.memory = V4L2_MEMORY_MMAP; - if (-1 == ioctl(fd_, VIDIOC_DQBUF, &buf)) { - throw std::runtime_error("Failed to deque buffer"); - } + if (-1 == ioctl(fd_, VIDIOC_DQBUF, &buf)) { throw std::runtime_error("Failed to deque buffer"); } if (buf.index >= num_buffers_.get()) { - throw std::runtime_error(fmt::format( - "Buf index is {} more than the queue size {}", buf.index, num_buffers_.get())); + throw std::runtime_error( + fmt::format("Buf index is {} more than the queue size {}", buf.index, num_buffers_.get())); } } diff --git a/src/operators/video_stream_recorder/CMakeLists.txt b/src/operators/video_stream_recorder/CMakeLists.txt index f429f40a..7f6a9b94 100644 --- a/src/operators/video_stream_recorder/CMakeLists.txt +++ b/src/operators/video_stream_recorder/CMakeLists.txt @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
# SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -20,6 +20,3 @@ target_link_libraries(op_video_stream_recorder holoscan::core GXF::serialization ) - -# Uses the VideoStreamSerializer from the GXF extension -add_dependencies(op_video_stream_recorder gxf_stream_playback) diff --git a/src/operators/video_stream_recorder/video_stream_recorder.cpp b/src/operators/video_stream_recorder/video_stream_recorder.cpp index d22b2330..3808473f 100644 --- a/src/operators/video_stream_recorder/video_stream_recorder.cpp +++ b/src/operators/video_stream_recorder/video_stream_recorder.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -31,7 +31,7 @@ #include "holoscan/core/conditions/gxf/boolean.hpp" #include "holoscan/core/conditions/gxf/message_available.hpp" -#include "holoscan/core/resources/gxf/video_stream_serializer.hpp" +#include "holoscan/core/resources/gxf/std_entity_serializer.hpp" namespace holoscan::ops { @@ -56,7 +56,12 @@ void VideoStreamRecorderOp::initialize() { // Set up prerequisite parameters before calling GXFOperator::initialize() auto frag = fragment(); auto entity_serializer = - frag->make_resource("entity_serializer"); + frag->make_resource("recorder__std_entity_serializer"); + entity_serializer->gxf_cname(entity_serializer->name().c_str()); + if (graph_entity_) { + entity_serializer->gxf_eid(graph_entity_->eid()); + entity_serializer->gxf_graph_entity(graph_entity_); + } add_arg(Arg("entity_serializer") = entity_serializer); // Operator::initialize must occur after all arguments have been added @@ -142,9 +147,9 @@ void VideoStreamRecorderOp::compute(InputContext& op_input, OutputContext& op_ou auto entity = 
op_input.receive("input").value(); - // dynamic cast from holoscan::Resource to holoscan::VideoStreamSerializer + // dynamic cast from holoscan::Resource to holoscan::StdEntitySerializer auto vs_serializer = - std::dynamic_pointer_cast(entity_serializer_.get()); + std::dynamic_pointer_cast(entity_serializer_.get()); // get the Handle to the underlying GXF EntitySerializer auto entity_serializer = nvidia::gxf::Handle::Create( context.context(), vs_serializer->gxf_cid()); diff --git a/src/operators/video_stream_replayer/CMakeLists.txt b/src/operators/video_stream_replayer/CMakeLists.txt index b657dccc..650defc5 100644 --- a/src/operators/video_stream_replayer/CMakeLists.txt +++ b/src/operators/video_stream_replayer/CMakeLists.txt @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -20,6 +20,3 @@ target_link_libraries(op_video_stream_replayer holoscan::core GXF::serialization ) - -# Uses the VideoStreamSerializer from the GXF extension -add_dependencies(op_video_stream_replayer gxf_stream_playback) diff --git a/src/operators/video_stream_replayer/video_stream_replayer.cpp b/src/operators/video_stream_replayer/video_stream_replayer.cpp index 51afa1b0..3cead809 100644 --- a/src/operators/video_stream_replayer/video_stream_replayer.cpp +++ b/src/operators/video_stream_replayer/video_stream_replayer.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -32,8 +32,7 @@ #include "holoscan/core/fragment.hpp" #include "holoscan/core/gxf/entity.hpp" #include "holoscan/core/operator_spec.hpp" -#include "holoscan/core/resources/gxf/video_stream_serializer.hpp" - +#include "holoscan/core/resources/gxf/std_entity_serializer.hpp" namespace holoscan::ops { @@ -93,7 +92,11 @@ void VideoStreamReplayerOp::initialize() { // Set up prerequisite parameters before calling GXFOperator::initialize() auto frag = fragment(); auto entity_serializer = - frag->make_resource("entity_serializer"); + frag->make_resource("replayer__std_entity_serializer"); + if (graph_entity_) { + entity_serializer->gxf_eid(graph_entity_->eid()); + entity_serializer->gxf_graph_entity(graph_entity_); + } add_arg(Arg("entity_serializer") = entity_serializer); // Find if there is an argument for 'boolean_scheduling_term' @@ -209,9 +212,9 @@ void VideoStreamReplayerOp::compute(InputContext& op_input, OutputContext& op_ou break; } - // dynamic cast from holoscan::Resource to holoscan::VideoStreamSerializer + // dynamic cast from holoscan::Resource to holoscan::StdEntitySerializer auto vs_serializer = - std::dynamic_pointer_cast(entity_serializer_.get()); + std::dynamic_pointer_cast(entity_serializer_.get()); // get underlying GXF EntitySerializer auto entity_serializer = nvidia::gxf::Handle::Create( context.context(), vs_serializer->gxf_cid()); @@ -259,7 +262,7 @@ void VideoStreamReplayerOp::compute(InputContext& op_input, OutputContext& op_ou } if (time_to_delay < 0 && (playback_count_ % index_frame_count_ != 0)) { HOLOSCAN_LOG_INFO( - fmt::format("Playing video stream is lagging behind (count: % {} , delay: {} ns)", + fmt::format("Playing video stream is lagging behind (count: {} , delay: {} ns)", playback_count_, time_to_delay)); } diff --git a/src/utils/cuda_stream_handler.cpp b/src/utils/cuda_stream_handler.cpp new file mode 100644 index 
00000000..92070aec --- /dev/null +++ b/src/utils/cuda_stream_handler.cpp @@ -0,0 +1,262 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "holoscan/utils/cuda_stream_handler.hpp" + +#include +#include +#include + +#include "gxf/cuda/cuda_stream.hpp" + "gxf/cuda/cuda_stream_id.hpp" is wrong; keep original includes: +#include "gxf/cuda/cuda_stream.hpp" +#include "gxf/cuda/cuda_stream_id.hpp" +#include "gxf/cuda/cuda_stream_pool.hpp" +#include "holoscan/core/operator_spec.hpp" +#include "holoscan/core/parameter.hpp" +#include "holoscan/core/resources/gxf/cuda_stream_pool.hpp" + +namespace holoscan { + +CudaStreamHandler::~CudaStreamHandler() { + for (auto&& event : cuda_events_) { + const cudaError_t result = cudaEventDestroy(event); + if (cudaSuccess != result) { + HOLOSCAN_LOG_ERROR("Failed to destroy CUDA event: {}", cudaGetErrorString(result)); + } + } + cuda_events_.clear(); +} + +void CudaStreamHandler::define_params(OperatorSpec& spec, bool required) { + spec.param(cuda_stream_pool_, + "cuda_stream_pool", + "CUDA Stream Pool", + "Instance of gxf::CudaStreamPool."); + cuda_stream_pool_required_ = required; +} + +void CudaStreamHandler::defineParams(OperatorSpec& spec, bool required) { + static bool warned = false; + if (!warned) { + warned = true; + HOLOSCAN_LOG_WARN( + "CudaStreamHandler's `defineParams` method has been renamed to `define_params`. 
" + "The old name is deprecated and may be removed in a future release."); + } + return define_params(spec, required); +} + +gxf_result_t CudaStreamHandler::from_message( + gxf_context_t context, const nvidia::gxf::Expected& message) { + // if the message contains a stream use this + const auto maybe_cuda_stream_id = message.value().get(); + if (maybe_cuda_stream_id) { + const auto maybe_cuda_stream_handle = nvidia::gxf::Handle::Create( + context, maybe_cuda_stream_id.value()->stream_cid); + if (maybe_cuda_stream_handle) { + message_cuda_stream_handle_ = maybe_cuda_stream_handle.value(); + } + } else { + // if no stream had been found, allocate a stream and use that + gxf_result_t result = allocate_internal_stream(context); + if (result != GXF_SUCCESS) { return result; } + message_cuda_stream_handle_ = cuda_stream_handle_; + } + return GXF_SUCCESS; +} + +gxf_result_t CudaStreamHandler::fromMessage( + gxf_context_t context, const nvidia::gxf::Expected& message) { + static bool warned = false; + if (!warned) { + warned = true; + HOLOSCAN_LOG_WARN( + "CudaStreamHandler's `fromMessage` method has been renamed to `from_message`. " + "The old name is deprecated and may be removed in a future release."); + } + return from_message(context, message); +} + +gxf_result_t CudaStreamHandler::from_messages(gxf_context_t context, + const std::vector& messages) { + const gxf_result_t result = allocate_internal_stream(context); + if (result != GXF_SUCCESS) { return result; } + + if (!cuda_stream_handle_) { + // if no CUDA stream can be allocated because no stream pool is set, then don't sync + // with incoming streams. CUDA operations of this operator will use the default stream + // which sync with all other streams by default. 
+ return GXF_SUCCESS; + } + + // iterate through all messages and use events to chain incoming streams with the internal + // stream + auto event_it = cuda_events_.begin(); + for (auto& msg : messages) { + const auto maybe_cuda_stream_id = msg.get(); + if (maybe_cuda_stream_id) { + const auto maybe_cuda_stream_handle = nvidia::gxf::Handle::Create( + context, maybe_cuda_stream_id.value()->stream_cid); + if (maybe_cuda_stream_handle) { + const cudaStream_t cuda_stream = maybe_cuda_stream_handle.value()->stream().value(); + cudaError_t result; + + // allocate a new event if needed + if (event_it == cuda_events_.end()) { + cudaEvent_t cuda_event; + result = cudaEventCreateWithFlags(&cuda_event, cudaEventDisableTiming); + if (cudaSuccess != result) { + HOLOSCAN_LOG_ERROR("Failed to create input CUDA event: %s", cudaGetErrorString(result)); + return GXF_FAILURE; + } + cuda_events_.push_back(cuda_event); + event_it = cuda_events_.end(); + --event_it; + } + + result = cudaEventRecord(*event_it, cuda_stream); + if (cudaSuccess != result) { + HOLOSCAN_LOG_ERROR("Failed to record event for message stream: %s", + cudaGetErrorString(result)); + return GXF_FAILURE; + } + result = cudaStreamWaitEvent(cuda_stream_handle_->stream().value(), *event_it); + if (cudaSuccess != result) { + HOLOSCAN_LOG_ERROR("Failed to record wait on message event: %s", + cudaGetErrorString(result)); + return GXF_FAILURE; + } + ++event_it; + } + } + } + message_cuda_stream_handle_ = cuda_stream_handle_; + return GXF_SUCCESS; +} + +gxf_result_t CudaStreamHandler::fromMessages(gxf_context_t context, + const std::vector& messages) { + static bool warned = false; + if (!warned) { + warned = true; + HOLOSCAN_LOG_WARN( + "CudaStreamHandler's `fromMessages` method has been renamed to `from_messages`. 
" + "The old name is deprecated and may be removed in a future release."); + } + return from_messages(context, messages); +} + +gxf_result_t CudaStreamHandler::to_message(nvidia::gxf::Expected& message) { + if (message_cuda_stream_handle_) { + const auto maybe_stream_id = message.value().add("cuda_stream_id_"); + if (!maybe_stream_id) { + HOLOSCAN_LOG_ERROR("Failed to add CUDA stream id to output message."); + return nvidia::gxf::ToResultCode(maybe_stream_id); + } + maybe_stream_id.value()->stream_cid = message_cuda_stream_handle_.cid(); + } + return GXF_SUCCESS; +} + +gxf_result_t CudaStreamHandler::toMessage(nvidia::gxf::Expected& message) { + static bool warned = false; + if (!warned) { + warned = true; + HOLOSCAN_LOG_WARN( + "CudaStreamHandler's `toMessage` method has been renamed to `to_message`. " + "The old name is deprecated and may be removed in a future release."); + } + return to_message(message); +} + +nvidia::gxf::Handle CudaStreamHandler::get_stream_handle( + gxf_context_t context) { + // If there is a message stream handle, return this + if (message_cuda_stream_handle_) { return message_cuda_stream_handle_; } + + // else allocate an internal CUDA stream and return it + allocate_internal_stream(context); + return cuda_stream_handle_; +} + +nvidia::gxf::Handle CudaStreamHandler::getStreamHandle( + gxf_context_t context) { + static bool warned = false; + if (!warned) { + warned = true; + HOLOSCAN_LOG_WARN( + "CudaStreamHandler's `getStreamHandle` method has been renamed to `get_stream_handle`. 
" + "The old name is deprecated and may be removed in a future release."); + } + return get_stream_handle(context); +} + +cudaStream_t CudaStreamHandler::get_cuda_stream(gxf_context_t context) { + const nvidia::gxf::Handle cuda_stream_handle = + get_stream_handle(context); + if (cuda_stream_handle) { return cuda_stream_handle->stream().value(); } + if (!default_stream_warning_) { + default_stream_warning_ = true; + HOLOSCAN_LOG_WARN( + "Parameter `cuda_stream_pool` is not set, using the default CUDA stream for CUDA " + "operations."); + } + return cudaStreamDefault; +} + +cudaStream_t CudaStreamHandler::getCudaStream(gxf_context_t context) { + static bool warned = false; + if (!warned) { + warned = true; + HOLOSCAN_LOG_WARN( + "CudaStreamHandler's `getCudaStream` method has been renamed to `get_cuda_stream`. " + "The old name is deprecated and may be removed in a future release."); + } + return get_cuda_stream(context); +} + +gxf_result_t CudaStreamHandler::allocate_internal_stream(gxf_context_t context) { + // Create the CUDA stream if it does not yet exist. + if (!cuda_stream_handle_) { + // Check if a cuda stream pool is given. 
+ const bool has_cuda_stream_pool_ = cuda_stream_pool_.has_value() && cuda_stream_pool_.get(); + if (!has_cuda_stream_pool_) { + // If the cuda stream pool is required return an error + if (cuda_stream_pool_required_) { + HOLOSCAN_LOG_ERROR("'cuda_stream_pool' is required but not set."); + return GXF_FAILURE; + } + return GXF_SUCCESS; + } + + // get Handle to underlying nvidia::gxf::CudaStreamPool from + // std::shared_ptr + const auto cuda_stream_pool = nvidia::gxf::Handle::Create( + context, cuda_stream_pool_.get()->gxf_cid()); + if (cuda_stream_pool) { + // allocate a stream + auto maybe_stream = cuda_stream_pool.value()->allocateStream(); + if (!maybe_stream) { + HOLOSCAN_LOG_ERROR("Failed to allocate CUDA stream"); + return nvidia::gxf::ToResultCode(maybe_stream); + } + cuda_stream_handle_ = std::move(maybe_stream.value()); + } + } + return GXF_SUCCESS; +} + +} // namespace holoscan diff --git a/src/utils/holoinfer_utils.cpp b/src/utils/holoinfer_utils.cpp index 1d834c1a..fffa56d4 100644 --- a/src/utils/holoinfer_utils.cpp +++ b/src/utils/holoinfer_utils.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -22,6 +22,7 @@ #include #include +#include "gxf/std/tensor.hpp" #include #include #include @@ -142,9 +143,9 @@ gxf_result_t get_data_per_model(InputContext& op_input, const std::vectordl_ctx()}; + nvidia::gxf::Tensor in_tensor_gxf{in_tensor->dl_ctx()}; void* in_tensor_data = in_tensor_gxf.pointer(); auto element_type = in_tensor_gxf.element_type(); diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index 44553792..c771c71d 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -101,12 +101,17 @@ ConfigureTest(OPERATORS_CLASSES_TEST target_link_libraries(OPERATORS_CLASSES_TEST PRIVATE holoscan::ops::aja + holoscan::ops::async_ping_rx + holoscan::ops::async_ping_tx holoscan::ops::bayer_demosaic holoscan::ops::format_converter holoscan::ops::holoviz holoscan::ops::inference holoscan::ops::inference_processor + holoscan::ops::ping_rx + holoscan::ops::ping_tx holoscan::ops::segmentation_postprocessor + holoscan::ops::v4l2 holoscan::ops::video_stream_recorder holoscan::ops::video_stream_replayer ) @@ -169,13 +174,41 @@ target_link_libraries(SYSTEM_DISTRIBUTED_TEST set(CMAKE_SYSTEM_DISTRIBUTED_TEST_FLAGS "\ HOLOSCAN_STOP_ON_DEADLOCK_TIMEOUT=2500;\ -HOLOSCAN_MAX_DURATION_MS=2500\ +HOLOSCAN_MAX_DURATION_MS=2500;\ +HOLOSCAN_DISTRIBUTED_APP_SCHEDULER=multi_thread\ " ) set_tests_properties( SYSTEM_DISTRIBUTED_TEST PROPERTIES ENVIRONMENT "${CMAKE_SYSTEM_DISTRIBUTED_TEST_FLAGS}" ) +# Repeat distributed tests, but setting environment variables to use the event-based scheduler +# (omitting UcxMessageSerialization tests in this case 
to reduce overall test time) +ConfigureTest( + SYSTEM_DISTRIBUTED_EBS_TEST + system/distributed/distributed_app.cpp + system/distributed/distributed_demosaic_op_app.cpp + system/distributed/holoscan_ucx_ports_env.cpp + system/env_wrapper.cpp + system/ping_tensor_rx_op.cpp + system/ping_tensor_tx_op.cpp +) +target_link_libraries(SYSTEM_DISTRIBUTED_EBS_TEST + PRIVATE + holoscan::ops::bayer_demosaic + holoscan::ops::holoviz +) + + set(CMAKE_SYSTEM_DISTRIBUTED_EBS_TEST_FLAGS +"\ +HOLOSCAN_STOP_ON_DEADLOCK_TIMEOUT=2500;\ +HOLOSCAN_MAX_DURATION_MS=2500;\ +HOLOSCAN_DISTRIBUTED_APP_SCHEDULER=event_based\ +" + ) +set_tests_properties( + SYSTEM_DISTRIBUTED_EBS_TEST PROPERTIES ENVIRONMENT "${CMAKE_SYSTEM_DISTRIBUTED_EBS_TEST_FLAGS}") + # ################################################################################################## # * stress tests ---------------------------------------------------------------------------------- diff --git a/tests/codecs/codecs.cpp b/tests/codecs/codecs.cpp index 97437894..6f6a5271 100644 --- a/tests/codecs/codecs.cpp +++ b/tests/codecs/codecs.cpp @@ -301,10 +301,25 @@ TEST(Codecs, TestComplexDoubleShared) { } TEST(Codecs, TestVectorBool) { - std::vector value{0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0}; - bool omit_size_check = true; // due to ContiguousDataHeader, size won't match assumptions - bool omit_values_check = false; // make sure values match - codec_vector_compare>(value, 4096, omit_size_check, omit_values_check); + // choose a length here that is not a multiple of 8 + std::vector value{0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1}; + + // serialize + auto endpoint = std::make_shared( + 128, holoscan::Endpoint::MemoryStorageType::kSystem); + auto maybe_size = codec>::serialize(value, endpoint.get()); + + // Check that serialization used bit packing as expected. 
+ // Stores a size_t for number of unit8_t elements after bit packing in addition to the packed bits + size_t expected_size = sizeof(size_t) + (value.size() + 7) / 8; + EXPECT_EQ(maybe_size.value(), expected_size); + + // deserialize and verify roundtrip result + auto maybe_deserialized = codec>::deserialize(endpoint.get()); + auto result = maybe_deserialized.value(); + EXPECT_EQ(typeid(result), typeid(value)); + EXPECT_EQ(result.size(), value.size()); + for (size_t i = 0; i < value.size(); i++) { EXPECT_EQ(result[i], value[i]); } } TEST(Codecs, TestVectorInt32) { @@ -363,6 +378,40 @@ TEST(Codecs, TestVectorVectorString) { codec_vector_vector_compare>>(value, 4096); } +TEST(Codecs, TestVectorVectorBool) { + std::vector> bvecs; + + std::vector v1{0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1}; + std::vector v2{1, 1, 0, 1, 1, 0, 1}; + std::vector v3(1001, true); + bvecs.push_back(v1); + bvecs.push_back(v2); + bvecs.push_back(v3); + + size_t expected_size = sizeof(size_t); // number of vectors + // add bit-packed serialization size of each vector + for (auto& v : bvecs) { expected_size += sizeof(size_t) + (v.size() + 7) / 8; } + + // serialize to buffer of exactly expected_size (exception thrown if expected_size is too small) + auto endpoint = std::make_shared( + expected_size, holoscan::Endpoint::MemoryStorageType::kSystem); + auto maybe_size = codec>>::serialize(bvecs, endpoint.get()); + EXPECT_EQ(maybe_size.value(), expected_size); + + // deserialize and verify roundtrip result + auto maybe_deserialized = codec>>::deserialize(endpoint.get()); + auto result = maybe_deserialized.value(); + EXPECT_EQ(typeid(result), typeid(bvecs)); + EXPECT_EQ(result.size(), bvecs.size()); + for (size_t j = 0; j < bvecs.size(); j++) { + auto vec = bvecs[j]; + auto res = result[j]; + EXPECT_EQ(typeid(vec), typeid(res)); + EXPECT_EQ(vec.size(), res.size()); + for (size_t i = 0; i < vec.size(); i++) { EXPECT_EQ(res[i], vec[i]); } + } +} + TEST(Codecs, 
TestCustomTrivialSerializer) { // codecs.hpp defines a custom serializer for a Coordinate type // We verify proper roundtrip serialization and deserialization of that type diff --git a/tests/codecs/codecs.hpp b/tests/codecs/codecs.hpp index ea8d9da4..565179cc 100644 --- a/tests/codecs/codecs.hpp +++ b/tests/codecs/codecs.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -31,7 +31,6 @@ struct Coordinate { // note: don't have to explicitly define codec for this POD type - // Intentionally place members of different size in non-optimal order to result in a struct // that is not tightly packed. On my system this struct occupies 24 bytes. // Automatically inserted to align to 8-byte boundaries. diff --git a/tests/codecs/memory_buffer.hpp b/tests/codecs/memory_buffer.hpp index e7cfe049..7d2ec417 100644 --- a/tests/codecs/memory_buffer.hpp +++ b/tests/codecs/memory_buffer.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,8 +21,8 @@ #include "./mock_allocator.hpp" -#include "common/logger.hpp" // GXF_LOG* -#include "gxf/core/gxf.h" // GxfResultStr +#include "common/logger.hpp" // GXF_LOG* +#include "gxf/core/gxf.h" // GxfResultStr #include "gxf/core/expected.hpp" // nvidia::gxf::Expected, nvidia::gxf::ForwardError, nvidia::gxf::Success #ifndef HOLOSCAN_TESTS_CODECS_MEMORY_BUFFER_HPP diff --git a/tests/config.hpp b/tests/config.hpp index 160d1c64..2fdfc034 100644 --- a/tests/config.hpp +++ b/tests/config.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -25,29 +25,29 @@ #define STR(x) #x struct HoloscanTestConfig { - std::string test_folder; - std::string test_file; - std::string temp_folder = "/tmp"; - std::string get_test_data_file(const std::string & default_value = "app_config.yaml") const { - // If `test_file` is absolute path - if (!test_folder.empty() && test_file.substr(0, 1) == "/") { - return test_file; + std::string test_folder; + std::string test_file; + std::string temp_folder = "/tmp"; + std::string get_test_data_file(const std::string& default_value = "app_config.yaml") const { + // If `test_file` is absolute path + if (!test_folder.empty() && test_file.substr(0, 1) == "/") { + return test_file; + } else { + std::string test_data_folder = test_folder; + if (test_data_folder.empty()) { + if (const char* env_p = std::getenv("HOLOSCAN_TESTS_DATA_PATH")) { + test_data_folder = env_p; } else { - std::string test_data_folder = test_folder; - if (test_data_folder.empty()) { - if (const char* env_p = std::getenv("HOLOSCAN_TESTS_DATA_PATH")) { - test_data_folder = env_p; - } else { - 
test_data_folder = "tests/data"; - } - } - if (test_file.empty()) { - return test_data_folder + "/" + default_value; - } else { - return test_data_folder + "/" + test_file; - } + test_data_folder = "tests/data"; } + } + if (test_file.empty()) { + return test_data_folder + "/" + default_value; + } else { + return test_data_folder + "/" + test_file; + } } + } }; extern HoloscanTestConfig holoscan_test_config; diff --git a/tests/core/condition_classes.cpp b/tests/core/condition_classes.cpp index 77461dae..e8420c78 100644 --- a/tests/core/condition_classes.cpp +++ b/tests/core/condition_classes.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -162,7 +162,7 @@ TEST_F(ConditionClassesWithGXFContext, TestCountConditionInitializeWithUnrecogni std::string log_output = testing::internal::GetCapturedStderr(); EXPECT_TRUE(log_output.find("warning") != std::string::npos); - EXPECT_TRUE(log_output.find("'undefined_arg' not found in spec.params") != std::string::npos); + EXPECT_TRUE(log_output.find("'undefined_arg' not found in spec_.params") != std::string::npos); } TEST(ConditionClasses, TestDownstreamMessageAffordableCondition) { @@ -382,9 +382,8 @@ TEST_F(ConditionClassesWithGXFContext, TestPeriodicConditionInitializeWithArg) { } TEST_F(ConditionClassesWithGXFContext, TestPeriodicConditionInitializeWithUnrecognizedArg) { - auto condition = - F.make_condition(Arg{"recess_period", std::string("1000000")}, - Arg("undefined_arg", 5.0)); + auto condition = F.make_condition(Arg{"recess_period", std::string("1000000")}, + Arg("undefined_arg", 5.0)); // test that an warning is logged if an unknown argument is provided testing::internal::CaptureStderr(); @@ -392,7 +391,7 @@ 
TEST_F(ConditionClassesWithGXFContext, TestPeriodicConditionInitializeWithUnreco std::string log_output = testing::internal::GetCapturedStderr(); EXPECT_TRUE(log_output.find("warning") != std::string::npos); - EXPECT_TRUE(log_output.find("'undefined_arg' not found in spec.params") != std::string::npos); + EXPECT_TRUE(log_output.find("'undefined_arg' not found in spec_.params") != std::string::npos); } } // namespace holoscan diff --git a/tests/core/io_spec.cpp b/tests/core/io_spec.cpp index 4f29fb11..c1f33ca2 100644 --- a/tests/core/io_spec.cpp +++ b/tests/core/io_spec.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -305,7 +305,7 @@ TEST_F(IOSpecWithGXFContext, TestIOSpecDescription) { std::string entity_typename = typeid(holoscan::gxf::Entity).name(); // Condition added in this way will not yet have had a fragment or component spec assigned - spec.condition(ConditionType::kMessageAvailable, Arg("min_size", static_cast(5))); + spec.condition(ConditionType::kMessageAvailable, Arg("min_size", static_cast(5))); auto cond = spec.conditions()[0].second; // manually set name and fragment for the condition cond->name("message_available"); diff --git a/tests/core/logger.cpp b/tests/core/logger.cpp index d6ac8155..79122b52 100644 --- a/tests/core/logger.cpp +++ b/tests/core/logger.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,6 +21,7 @@ #include #include +#include #include #include "../config.hpp" @@ -47,9 +48,6 @@ TEST(Logger, TestLoggingPattern) { testing::internal::CaptureStderr(); HOLOSCAN_LOG_INFO("my_message"); - // can explicitly flush the log (not required for this test case) - Logger::flush(); - // test that the specified pattern includes the thread, but omits the message std::string log_output = testing::internal::GetCapturedStderr(); EXPECT_TRUE(log_output.find("[thread") != std::string::npos); @@ -147,33 +145,6 @@ TEST(Logger, TestDefaultLogPattern) { } } -TEST(Logger, TestLoggingFlushLevel) { - auto default_level = Logger::flush_level(); - - Logger::flush_on(LogLevel::WARN); - EXPECT_EQ(Logger::flush_level(), LogLevel::WARN); - - Logger::flush_on(default_level); -} - -TEST(Logger, TestLoggingBacktrace) { - bool default_backtrace = Logger::should_backtrace(); - - Logger::enable_backtrace(32); - EXPECT_TRUE(Logger::should_backtrace()); - - Logger::disable_backtrace(); - EXPECT_FALSE(Logger::should_backtrace()); - - Logger::dump_backtrace(); - - if (default_backtrace) { - Logger::enable_backtrace(32); - } else { - Logger::disable_backtrace(); - } -} - TEST_P(LevelParameterizedTestFixture, TestLoggingGetSet) { const char* env_orig = std::getenv("HOLOSCAN_LOG_LEVEL"); @@ -420,4 +391,133 @@ TEST(Logger, TestDefaultLogLevel) { } } +//////////////////////////////////////////////////////////////////////////////// +// Test cases for SpdlogLogger +//////////////////////////////////////////////////////////////////////////////// + +namespace { +constexpr const char* kLogTestString = "Test log message"; +constexpr const char* kLogFilePath = "/tmp/test_log_file"; +} // namespace + +class MockSpdlogLogger : public nvidia::logger::SpdlogLogger { + public: + explicit MockSpdlogLogger(const char* name) : SpdlogLogger(name) {} + + static std::shared_ptr create() { + return 
std::make_shared("mock_logger"); + } +}; + +class RedirectLogTest : public ::testing::Test { + protected: + void SetUp() override { file_ = fopen(kLogFilePath, "wr"); } + + void TearDown() override { + // Close the file + fclose(file_); + } + + std::string get_file_content() { + // Read the file content from kLogFilePath + std::string file_content; + std::ifstream file; + file.open(kLogFilePath); + getline(file, file_content); + file.close(); + return file_content; + } + + std::FILE* file_ = nullptr; +}; + +TEST_F(RedirectLogTest, Trace) { + auto logger = MockSpdlogLogger::create(); + + logger->redirect(HOLOSCAN_LOG_LEVEL_TRACE, file_); + logger->level(HOLOSCAN_LOG_LEVEL_TRACE); + logger->log(__FILE__, __LINE__, "test", HOLOSCAN_LOG_LEVEL_TRACE, kLogTestString); + std::string file_content = get_file_content(); + + EXPECT_NE(file_content.find(std::string(kLogTestString)), std::string::npos) << file_content; + EXPECT_NE(file_content.find(std::string("trace")), std::string::npos) << file_content; +} + +TEST_F(RedirectLogTest, Debug) { + auto logger = MockSpdlogLogger::create(); + + logger->redirect(HOLOSCAN_LOG_LEVEL_DEBUG, file_); + logger->level(HOLOSCAN_LOG_LEVEL_DEBUG); + logger->log(__FILE__, __LINE__, "test", HOLOSCAN_LOG_LEVEL_DEBUG, kLogTestString); + // Log info message + logger->log(__FILE__, __LINE__, "test", HOLOSCAN_LOG_LEVEL_INFO, kLogTestString); + std::string file_content = get_file_content(); + + EXPECT_NE(file_content.find(std::string(kLogTestString)), std::string::npos) << file_content; + EXPECT_NE(file_content.find(std::string("debug")), std::string::npos) << file_content; + // Info log message should not be written to the file + EXPECT_EQ(file_content.find(std::string("info")), std::string::npos) << file_content; +} + +TEST_F(RedirectLogTest, Info) { + auto logger = MockSpdlogLogger::create(); + + logger->redirect(HOLOSCAN_LOG_LEVEL_INFO, file_); + logger->level(HOLOSCAN_LOG_LEVEL_INFO); + logger->log(__FILE__, __LINE__, "test", 
HOLOSCAN_LOG_LEVEL_INFO, kLogTestString); + std::string file_content = get_file_content(); + + EXPECT_NE(file_content.find(std::string(kLogTestString)), std::string::npos) << file_content; + EXPECT_NE(file_content.find(std::string("info")), std::string::npos) << file_content; +} + +TEST_F(RedirectLogTest, Warn) { + auto logger = MockSpdlogLogger::create(); + + logger->redirect(HOLOSCAN_LOG_LEVEL_WARN, file_); + logger->level(HOLOSCAN_LOG_LEVEL_WARN); + logger->log(__FILE__, __LINE__, "test", HOLOSCAN_LOG_LEVEL_WARN, kLogTestString); + std::string file_content = get_file_content(); + + EXPECT_NE(file_content.find(std::string(kLogTestString)), std::string::npos) << file_content; + EXPECT_NE(file_content.find(std::string("warn")), std::string::npos) << file_content; +} + +TEST_F(RedirectLogTest, Error) { + auto logger = MockSpdlogLogger::create(); + + logger->redirect(HOLOSCAN_LOG_LEVEL_ERROR, file_); + logger->level(HOLOSCAN_LOG_LEVEL_ERROR); + logger->log(__FILE__, __LINE__, "test", HOLOSCAN_LOG_LEVEL_ERROR, kLogTestString); + std::string file_content = get_file_content(); + + EXPECT_NE(file_content.find(std::string(kLogTestString)), std::string::npos) << file_content; + EXPECT_NE(file_content.find(std::string("error")), std::string::npos) << file_content; +} + +TEST_F(RedirectLogTest, Critical) { + auto logger = MockSpdlogLogger::create(); + + logger->redirect(HOLOSCAN_LOG_LEVEL_CRITICAL, file_); + logger->level(HOLOSCAN_LOG_LEVEL_CRITICAL); + logger->log(__FILE__, __LINE__, "test", HOLOSCAN_LOG_LEVEL_CRITICAL, kLogTestString); + std::string file_content = get_file_content(); + + EXPECT_NE(file_content.find(std::string(kLogTestString)), std::string::npos) << file_content; + EXPECT_NE(file_content.find(std::string("critical")), std::string::npos) << file_content; +} + +TEST_F(RedirectLogTest, NullPointer) { + auto logger = MockSpdlogLogger::create(); + + logger->redirect(HOLOSCAN_LOG_LEVEL_CRITICAL, nullptr); + logger->level(HOLOSCAN_LOG_LEVEL_CRITICAL); + 
logger->log(__FILE__, __LINE__, "test", HOLOSCAN_LOG_LEVEL_CRITICAL, kLogTestString); + std::string file_content = get_file_content(); + + // No log message should be written to the file + EXPECT_EQ(file_content.find(std::string(kLogTestString)), std::string::npos) << file_content; + EXPECT_EQ(file_content.find(std::string("critical")), std::string::npos) << file_content; +} + } // namespace holoscan diff --git a/tests/core/native_operator.cpp b/tests/core/native_operator.cpp index 35b562c6..5b1cc474 100644 --- a/tests/core/native_operator.cpp +++ b/tests/core/native_operator.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -79,7 +79,6 @@ class PingRxOp : public Operator { }; } // namespace ops - class NativeOpApp : public holoscan::Application { public: void compose() override { diff --git a/tests/core/operator_spec.cpp b/tests/core/operator_spec.cpp index 70665c90..d221a987 100644 --- a/tests/core/operator_spec.cpp +++ b/tests/core/operator_spec.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -28,6 +28,7 @@ #include #include "holoscan/core/arg.hpp" +#include "holoscan/core/domain/tensor.hpp" #include "holoscan/core/parameter.hpp" #include "holoscan/core/gxf/entity.hpp" #include "holoscan/core/operator_spec.hpp" // must be before argument_setter import diff --git a/tests/core/resource_classes.cpp b/tests/core/resource_classes.cpp index 6a498246..d5c0ab30 100644 --- a/tests/core/resource_classes.cpp +++ b/tests/core/resource_classes.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -39,6 +39,7 @@ #include "holoscan/core/resources/gxf/realtime_clock.hpp" #include "holoscan/core/resources/gxf/serialization_buffer.hpp" #include "holoscan/core/resources/gxf/std_component_serializer.hpp" +#include "holoscan/core/resources/gxf/std_entity_serializer.hpp" #include "holoscan/core/resources/gxf/ucx_component_serializer.hpp" #include "holoscan/core/resources/gxf/ucx_entity_serializer.hpp" #include "holoscan/core/resources/gxf/ucx_holoscan_component_serializer.hpp" @@ -46,7 +47,6 @@ #include "holoscan/core/resources/gxf/ucx_serialization_buffer.hpp" #include "holoscan/core/resources/gxf/ucx_transmitter.hpp" #include "holoscan/core/resources/gxf/unbounded_allocator.hpp" -#include "holoscan/core/resources/gxf/video_stream_serializer.hpp" #include "common/assert.hpp" using namespace std::string_literals; @@ -179,18 +179,17 @@ TEST_F(ResourceClassesWithGXFContext, TestUnboundedAllocatorDefaultConstructor) auto resource = F.make_resource(); } -TEST_F(ResourceClassesWithGXFContext, TestVideoStreamSerializer) { +TEST_F(ResourceClassesWithGXFContext, TestStdEntitySerializer) { 
const std::string name{"video-stream-serializer"}; - auto resource = F.make_resource(name); + auto resource = F.make_resource(name); EXPECT_EQ(resource->name(), name); - EXPECT_EQ(typeid(resource), typeid(std::make_shared())); - EXPECT_EQ(std::string(resource->gxf_typename()), - "nvidia::holoscan::stream_playback::VideoStreamSerializer"s); + EXPECT_EQ(typeid(resource), typeid(std::make_shared())); + EXPECT_EQ(std::string(resource->gxf_typename()), "nvidia::gxf::StdEntitySerializer"s); EXPECT_TRUE(resource->description().find("name: " + name) != std::string::npos); } -TEST_F(ResourceClassesWithGXFContext, TestVideoStreamSerializerDefaultConstructor) { - auto resource = F.make_resource(); +TEST_F(ResourceClassesWithGXFContext, TestStdEntitySerializerDefaultConstructor) { + auto resource = F.make_resource(); } TEST_F(ResourceClassesWithGXFContext, TestReceiver) { diff --git a/tests/core/scheduler_classes.cpp b/tests/core/scheduler_classes.cpp index 41afc82a..ddb1a8d6 100644 --- a/tests/core/scheduler_classes.cpp +++ b/tests/core/scheduler_classes.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -31,6 +31,7 @@ #include "holoscan/core/gxf/gxf_scheduler.hpp" #include "holoscan/core/resources/gxf/manual_clock.hpp" #include "holoscan/core/resources/gxf/realtime_clock.hpp" +#include "holoscan/core/schedulers/gxf/event_based_scheduler.hpp" #include "holoscan/core/schedulers/gxf/greedy_scheduler.hpp" #include "holoscan/core/schedulers/gxf/multithread_scheduler.hpp" #include "../utils.hpp" @@ -110,4 +111,38 @@ TEST_F(SchedulerClassesWithGXFContext, TestMultiThreadSchedulerWithRealtimeClock auto scheduler = F.make_scheduler(name, arglist); } +TEST(SchedulerClasses, TestEventBasedScheduler) { + Fragment F; + const std::string name{"event-based-scheduler"}; + auto scheduler = F.make_scheduler(name); + EXPECT_EQ(scheduler->name(), name); + EXPECT_EQ(typeid(scheduler), typeid(std::make_shared())); + EXPECT_EQ(std::string(scheduler->gxf_typename()), "nvidia::gxf::EventBasedScheduler"s); +} + +TEST_F(SchedulerClassesWithGXFContext, TestEventBasedSchedulerWithArgs) { + const std::string name{"event-based-scheduler"}; + ArgList arglist{ + Arg{"name", name}, + Arg{"worker_thread_number", 4L}, + Arg{"stop_on_deadlock", false}, + Arg{"max_duration_ms", 10000L}, + Arg{"stop_on_deadlock_timeout", 100LL}, + }; + auto scheduler = F.make_scheduler(name, arglist); + EXPECT_TRUE(scheduler->description().find("name: " + name) != std::string::npos); +} + +TEST_F(SchedulerClassesWithGXFContext, TestEventBasedSchedulerWithManualClock) { + const std::string name{"event-based-scheduler"}; + ArgList arglist{Arg{"clock", F.make_resource()}}; + auto scheduler = F.make_scheduler(name, arglist); +} + +TEST_F(SchedulerClassesWithGXFContext, TestEventBasedSchedulerWithRealtimeClock) { + const std::string name{"event-based-scheduler"}; + ArgList arglist{Arg{"clock", F.make_resource()}}; + auto scheduler = F.make_scheduler(name, arglist); +} + } // namespace holoscan diff --git 
a/tests/data/app_config.yaml b/tests/data/app_config.yaml index 61734d7d..95ac7f7c 100644 --- a/tests/data/app_config.yaml +++ b/tests/data/app_config.yaml @@ -1,5 +1,5 @@ %YAML 1.2 -# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -23,7 +23,6 @@ # - libgxf_cuda.so # - libgxf_multimedia.so # - libgxf_serialization.so -# - libgxf_stream_playback.so source: "replayer" # or "aja" do_record: false # or 'true' if you want to record input video stream. @@ -187,3 +186,11 @@ demosaic: interpolation_mode: 0 # this is the only interpolation mode supported by NPP currently video_composer: + +v4l2_video_capture: + width: 320 + height: 240 + pixel_format: "auto" + device: "/dev/video0" + exposure_time: 500 + gain: 100 diff --git a/tests/data/validation_frames/holoviz_geometry/cpp_holoviz_geometry.patch b/tests/data/validation_frames/holoviz_geometry/cpp_holoviz_geometry.patch index af765103..0f0ee81a 100644 --- a/tests/data/validation_frames/holoviz_geometry/cpp_holoviz_geometry.patch +++ b/tests/data/validation_frames/holoviz_geometry/cpp_holoviz_geometry.patch @@ -9,7 +9,7 @@ index 2ac25f3b8..449ae0b68 100644 +#ifdef RECORD_OUTPUT + #include + #include -+ #include <../tests/recorder.hpp> ++ #include +#endif + namespace holoscan::ops { diff --git a/tests/data/validation_frames/video_replayer/cpp_video_replayer.patch b/tests/data/validation_frames/video_replayer/cpp_video_replayer.patch index 9f7eb816..2285eef6 100644 --- a/tests/data/validation_frames/video_replayer/cpp_video_replayer.patch +++ b/tests/data/validation_frames/video_replayer/cpp_video_replayer.patch @@ -7,7 +7,7 @@ +#ifdef RECORD_OUTPUT + #include + #include -+ #include <../tests/recorder.hpp> ++ #include +#endif + class VideoReplayerApp : public 
holoscan::Application { diff --git a/tests/data/validation_frames/video_replayer/cpp_video_replayer_distributed.patch b/tests/data/validation_frames/video_replayer/cpp_video_replayer_distributed.patch index 4a417ca1..3997df29 100644 --- a/tests/data/validation_frames/video_replayer/cpp_video_replayer_distributed.patch +++ b/tests/data/validation_frames/video_replayer/cpp_video_replayer_distributed.patch @@ -5,7 +5,7 @@ #include +#ifdef RECORD_OUTPUT -+ #include <../tests/recorder.hpp> ++ #include + #include + #include +#endif diff --git a/tests/holoinfer/inference/test_core.cpp b/tests/holoinfer/inference/test_core.cpp index 87eb56d3..93a0899e 100644 --- a/tests/holoinfer/inference/test_core.cpp +++ b/tests/holoinfer/inference/test_core.cpp @@ -161,6 +161,7 @@ HoloInfer::InferStatus HoloInferTests::do_inference() { HoloInfer::InferStatus status = HoloInfer::InferStatus(HoloInfer::holoinfer_code::H_ERROR); try { + if (!holoscan_infer_context_) { return status; } return holoscan_infer_context_->execute_inference(inference_specs_->data_per_tensor_, inference_specs_->output_per_model_); } catch (...) { diff --git a/tests/operators/operator_classes.cpp b/tests/operators/operator_classes.cpp index 8693a64d..b8f78621 100644 --- a/tests/operators/operator_classes.cpp +++ b/tests/operators/operator_classes.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -42,6 +42,8 @@ #include "common/assert.hpp" #include "holoscan/operators/aja_source/aja_source.hpp" +#include "holoscan/operators/async_ping_rx/async_ping_rx.hpp" +#include "holoscan/operators/async_ping_tx/async_ping_tx.hpp" #include "holoscan/operators/bayer_demosaic/bayer_demosaic.hpp" #include "holoscan/operators/format_converter/format_converter.hpp" #include "holoscan/operators/holoviz/holoviz.hpp" @@ -50,6 +52,7 @@ #include "holoscan/operators/ping_rx/ping_rx.hpp" #include "holoscan/operators/ping_tx/ping_tx.hpp" #include "holoscan/operators/segmentation_postprocessor/segmentation_postprocessor.hpp" +#include "holoscan/operators/v4l2_video_capture/v4l2_video_capture.hpp" #include "holoscan/operators/video_stream_recorder/video_stream_recorder.hpp" #include "holoscan/operators/video_stream_replayer/video_stream_replayer.hpp" @@ -78,17 +81,7 @@ TEST_F(OperatorClassesWithGXFContext, TestAJASourceOpChannelFromYAML) { EXPECT_TRUE(op->description().find("name: " + name) != std::string::npos); std::string log_output = testing::internal::GetCapturedStderr(); - auto error_pos = log_output.find("error"); - if (error_pos != std::string::npos) { - // Initializing a native operator outside the context of app.run() will result in the - // following error being logged because the GXFWrapper will not yet have been created for - // the operator: - // [error] [gxf_executor.cpp:452] Unable to get GXFWrapper for Operator 'aja-source' - - // GXFWrapper was mentioned and no additional error was logged - EXPECT_TRUE(log_output.find("GXFWrapper", error_pos + 1) != std::string::npos); - EXPECT_TRUE(log_output.find("error", error_pos + 1) == std::string::npos); - } + EXPECT_TRUE(log_output.find("error") == std::string::npos); } TEST_F(TestWithGXFContext, TestAJASourceOpChannelFromEnum) { @@ -110,17 +103,8 @@ TEST_F(TestWithGXFContext, TestAJASourceOpChannelFromEnum) { 
EXPECT_TRUE(op->description().find("name: " + name) != std::string::npos); std::string log_output = testing::internal::GetCapturedStderr(); - auto error_pos = log_output.find("error"); - if (error_pos != std::string::npos) { - // Initializing a native operator outside the context of app.run() will result in the - // following error being logged because the GXFWrapper will not yet have been created for - // the operator: - // [error] [gxf_executor.cpp:452] Unable to get GXFWrapper for Operator 'aja-source' - - // GXFWrapper was mentioned and no additional error was logged - EXPECT_TRUE(log_output.find("GXFWrapper", error_pos + 1) != std::string::npos); - EXPECT_TRUE(log_output.find("error", error_pos + 1) == std::string::npos); - } + + EXPECT_TRUE(log_output.find("error") == std::string::npos); } TEST_F(OperatorClassesWithGXFContext, TestFormatConverterOp) { @@ -156,17 +140,7 @@ TEST_F(OperatorClassesWithGXFContext, TestFormatConverterOp) { EXPECT_TRUE(op->description().find("name: " + name) != std::string::npos); std::string log_output = testing::internal::GetCapturedStderr(); - auto error_pos = log_output.find("error"); - if (error_pos != std::string::npos) { - // Initializing a native operator outside the context of app.run() will result in the - // following error being logged because the GXFWrapper will not yet have been created for - // the operator: - // [error] [gxf_executor.cpp:452] Unable to get GXFWrapper for Operator 'format_converter' - - // GXFWrapper was mentioned and no additional error was logged - EXPECT_TRUE(log_output.find("GXFWrapper", error_pos + 1) != std::string::npos); - EXPECT_TRUE(log_output.find("error", error_pos + 1) == std::string::npos); - } + EXPECT_TRUE(log_output.find("error") == std::string::npos); } TEST_F(OperatorClassesWithGXFContext, TestVideoStreamRecorderOp) { @@ -183,17 +157,7 @@ TEST_F(OperatorClassesWithGXFContext, TestVideoStreamRecorderOp) { EXPECT_TRUE(op->description().find("name: " + name) != std::string::npos); 
std::string log_output = testing::internal::GetCapturedStderr(); - auto error_pos = log_output.find("error"); - if (error_pos != std::string::npos) { - // Initializing a native operator outside the context of app.run() will result in the - // following error being logged because the GXFWrapper will not yet have been created for - // the operator: - // [error] [gxf_executor.cpp:452] Unable to get GXFWrapper for Operator 'recorder' - - // GXFWrapper was mentioned and no additional error was logged - EXPECT_TRUE(log_output.find("GXFWrapper", error_pos + 1) != std::string::npos); - EXPECT_TRUE(log_output.find("error", error_pos + 1) == std::string::npos); - } + EXPECT_TRUE(log_output.find("error") == std::string::npos); } TEST_F(OperatorClassesWithGXFContext, TestVideoStreamReplayerOp) { @@ -216,17 +180,7 @@ TEST_F(OperatorClassesWithGXFContext, TestVideoStreamReplayerOp) { EXPECT_TRUE(op->description().find("name: " + name) != std::string::npos); std::string log_output = testing::internal::GetCapturedStderr(); - auto error_pos = log_output.find("error"); - if (error_pos != std::string::npos) { - // Initializing a native operator outside the context of app.run() will result in the - // following error being logged because the GXFWrapper will not yet have been created for - // the operator: - // [error] [gxf_executor.cpp:452] Unable to get GXFWrapper for Operator 'replayer' - - // GXFWrapper was mentioned and no additional error was logged - EXPECT_TRUE(log_output.find("GXFWrapper", error_pos + 1) != std::string::npos); - EXPECT_TRUE(log_output.find("error", error_pos + 1) == std::string::npos); - } + EXPECT_TRUE(log_output.find("error") == std::string::npos); } TEST_F(OperatorClassesWithGXFContext, TestSegmentationPostprocessorOp) { @@ -246,18 +200,7 @@ TEST_F(OperatorClassesWithGXFContext, TestSegmentationPostprocessorOp) { EXPECT_TRUE(op->description().find("name: " + name) != std::string::npos); std::string log_output = testing::internal::GetCapturedStderr(); - auto 
error_pos = log_output.find("error"); - if (error_pos != std::string::npos) { - // Initializing a native operator outside the context of app.run() will result in the - // following error being logged because the GXFWrapper will not yet have been created for - // the operator: - // [error] [gxf_executor.cpp:452] Unable to get GXFWrapper for Operator - // 'segmentation_postprocessor' - - // GXFWrapper was mentioned and no additional error was logged - EXPECT_TRUE(log_output.find("GXFWrapper", error_pos + 1) != std::string::npos); - EXPECT_TRUE(log_output.find("error", error_pos + 1) == std::string::npos); - } + EXPECT_TRUE(log_output.find("error") == std::string::npos); } TEST_F(OperatorClassesWithGXFContext, TestHolovizOp) { @@ -284,17 +227,7 @@ TEST_F(OperatorClassesWithGXFContext, TestHolovizOp) { EXPECT_TRUE(op->description().find("name: " + name) != std::string::npos); std::string log_output = testing::internal::GetCapturedStderr(); - auto error_pos = log_output.find("error"); - if (error_pos != std::string::npos) { - // Initializing a native operator outside the context of app.run() will result in the - // following error being logged because the GXFWrapper will not yet have been created for - // the operator: - // [error] [gxf_executor.cpp:452] Unable to get GXFWrapper for Operator 'holoviz' - - // GXFWrapper was mentioned and no additional error was logged - EXPECT_TRUE(log_output.find("GXFWrapper", error_pos + 1) != std::string::npos); - EXPECT_TRUE(log_output.find("error", error_pos + 1) == std::string::npos); - } + EXPECT_TRUE(log_output.find("error") == std::string::npos); } TEST_F(OperatorClassesWithGXFContext, TestHolovizOpInputSpec) { @@ -325,17 +258,7 @@ TEST_F(OperatorClassesWithGXFContext, TestInferenceOp) { EXPECT_TRUE(op->description().find("name: " + name) != std::string::npos); std::string log_output = testing::internal::GetCapturedStderr(); - auto error_pos = log_output.find("error"); - if (error_pos != std::string::npos) { - // Initializing a 
native operator outside the context of app.run() will result in the - // following error being logged because the GXFWrapper will not yet have been created for - // the operator: - // [error] [gxf_executor.cpp:452] Unable to get GXFWrapper for Operator 'inference' - - // GXFWrapper was mentioned and no additional error was logged - EXPECT_TRUE(log_output.find("GXFWrapper", error_pos + 1) != std::string::npos); - EXPECT_TRUE(log_output.find("error", error_pos + 1) == std::string::npos); - } + EXPECT_TRUE(log_output.find("error") == std::string::npos); } TEST_F(OperatorClassesWithGXFContext, TestInferenceProcessorOp) { @@ -353,18 +276,7 @@ TEST_F(OperatorClassesWithGXFContext, TestInferenceProcessorOp) { EXPECT_TRUE(op->description().find("name: " + name) != std::string::npos); std::string log_output = testing::internal::GetCapturedStderr(); - auto error_pos = log_output.find("error"); - if (error_pos != std::string::npos) { - // Initializing a native operator outside the context of app.run() will result in the - // following error being logged because the GXFWrapper will not yet have been created for - // the operator: - // [error] [gxf_executor.cpp:452] Unable to get GXFWrapper for Operator - // 'processor' - - // GXFWrapper was mentioned and no additional error was logged - EXPECT_TRUE(log_output.find("GXFWrapper", error_pos + 1) != std::string::npos); - EXPECT_TRUE(log_output.find("error", error_pos + 1) == std::string::npos); - } + EXPECT_TRUE(log_output.find("error") == std::string::npos); } TEST_F(OperatorClassesWithGXFContext, TestBayerDemosaicOp) { @@ -384,17 +296,7 @@ TEST_F(OperatorClassesWithGXFContext, TestBayerDemosaicOp) { EXPECT_TRUE(op->description().find("name: " + name) != std::string::npos); std::string log_output = testing::internal::GetCapturedStderr(); - auto error_pos = log_output.find("error"); - if (error_pos != std::string::npos) { - // Initializing a native operator outside the context of app.run() will result in the - // following error 
being logged because the GXFWrapper will not yet have been created for - // the operator: - // [error] [gxf_executor.cpp:452] Unable to get GXFWrapper for Operator 'bayer_demosaic' - - // GXFWrapper was mentioned and no additional error was logged - EXPECT_TRUE(log_output.find("GXFWrapper", error_pos + 1) != std::string::npos); - EXPECT_TRUE(log_output.find("error", error_pos + 1) == std::string::npos); - } + EXPECT_TRUE(log_output.find("error") == std::string::npos); } TEST_F(OperatorClassesWithGXFContext, TestBayerDemosaicOpDefaultConstructor) { @@ -403,17 +305,7 @@ TEST_F(OperatorClassesWithGXFContext, TestBayerDemosaicOpDefaultConstructor) { auto op = F.make_operator(); std::string log_output = testing::internal::GetCapturedStderr(); - auto error_pos = log_output.find("error"); - if (error_pos != std::string::npos) { - // Initializing a native operator outside the context of app.run() will result in the - // following error being logged because the GXFWrapper will not yet have been created for - // the operator: - // [error] [gxf_executor.cpp:452] Unable to get GXFWrapper for Operator 'bayer_demosaic' - - // GXFWrapper was mentioned and no additional error was logged - EXPECT_TRUE(log_output.find("GXFWrapper", error_pos + 1) != std::string::npos); - EXPECT_TRUE(log_output.find("error", error_pos + 1) == std::string::npos); - } + EXPECT_TRUE(log_output.find("error") == std::string::npos); } TEST(Operator, TestNativeOperatorWithoutFragment) { @@ -454,6 +346,102 @@ TEST_F(OperatorClassesWithGXFContext, TestPingTxOpOp) { EXPECT_TRUE(log_output.find("error") == std::string::npos); } +TEST_F(OperatorClassesWithGXFContext, TestPingTxWithStringName) { + std::string name{"tx"}; // string is not a const + + testing::internal::CaptureStderr(); + + auto op = F.make_operator(name); + EXPECT_EQ(op->name(), name); + + std::string log_output = testing::internal::GetCapturedStderr(); + EXPECT_TRUE(log_output.find("error") == std::string::npos); +} + 
+TEST_F(OperatorClassesWithGXFContext, TestAsyncPingRxOpOp) { + const std::string name{"async_rx"}; + + testing::internal::CaptureStderr(); + + auto op = F.make_operator(name, Arg("delay", 10L), Arg("count", 10UL)); + EXPECT_EQ(op->name(), name); + + std::string log_output = testing::internal::GetCapturedStderr(); + EXPECT_TRUE(log_output.find("error") == std::string::npos); +} + +TEST_F(OperatorClassesWithGXFContext, TestAsyncPingTxOpOp) { + const std::string name{"async_tx"}; + + testing::internal::CaptureStderr(); + + auto op = F.make_operator(name, Arg("delay", 10L), Arg("count", 10UL)); + EXPECT_EQ(op->name(), name); + + std::string log_output = testing::internal::GetCapturedStderr(); + EXPECT_TRUE(log_output.find("error") == std::string::npos); +} + +TEST_F(OperatorClassesWithGXFContext, TestV4L2VideoCaptureOp) { + const std::string name{"video_capture"}; + uint32_t width = 1024; + uint32_t height = 768; + uint32_t exposure_time = 500; + uint32_t gain = 100; + + ArgList kwargs{Arg{"device", std::string("/dev/video0")}, + Arg{"pixel_format", std::string("auto")}, + Arg{"width", width}, + Arg{"height", height}, + Arg{"allocator", F.make_resource("pool")}, + Arg{"exposure_time", exposure_time}, + Arg{"gain", gain}}; + + testing::internal::CaptureStderr(); + + auto op = F.make_operator(name, kwargs); + EXPECT_EQ(op->name(), name); + EXPECT_EQ(typeid(op), typeid(std::make_shared(kwargs))); + EXPECT_TRUE(op->description().find("name: " + name) != std::string::npos); + + std::string log_output = testing::internal::GetCapturedStderr(); + EXPECT_TRUE(log_output.find("error") == std::string::npos); +} + +TEST_F(OperatorClassesWithGXFContext, TestV4L2VideoCaptureOpYAMLConfig) { + const std::string name{"video_capture"}; + + ArgList kwargs = F.from_config("v4l2_video_capture"); + kwargs.add(Arg{"allocator", F.make_resource("pool")}); + testing::internal::CaptureStderr(); + + auto op = F.make_operator(name, kwargs); + EXPECT_EQ(op->name(), name); + EXPECT_EQ(typeid(op), 
typeid(std::make_shared(kwargs))); + EXPECT_TRUE(op->description().find("name: " + name) != std::string::npos); + + std::string log_output = testing::internal::GetCapturedStderr(); + EXPECT_TRUE(log_output.find("error") == std::string::npos); +} + +TEST_F(OperatorClassesWithGXFContext, TestV4L2VideoCaptureOpDefaults) { + const std::string name{"video_capture"}; + + // load most arguments from the YAML file + ArgList kwargs = F.from_config("demosaic"); + kwargs.add(Arg{"allocator", F.make_resource("pool")}); + + testing::internal::CaptureStderr(); + + auto op = F.make_operator(name, kwargs); + EXPECT_EQ(op->name(), name); + EXPECT_EQ(typeid(op), typeid(std::make_shared(kwargs))); + EXPECT_TRUE(op->description().find("name: " + name) != std::string::npos); + + std::string log_output = testing::internal::GetCapturedStderr(); + EXPECT_TRUE(log_output.find("error") == std::string::npos); +} + TEST_F(OperatorClassesWithGXFContext, TestInvalidOperatorName) { EXPECT_THROW( { diff --git a/tests/system/demosaic_op_app.cpp b/tests/system/demosaic_op_app.cpp index 538c0477..9a0c659a 100644 --- a/tests/system/demosaic_op_app.cpp +++ b/tests/system/demosaic_op_app.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -83,9 +83,15 @@ TEST(DemosaicOpApp, TestDummyDemosaicApp) { std::string log_output = testing::internal::GetCapturedStderr(); EXPECT_TRUE(log_output.find("Graph activation failed") == std::string::npos); + // verify that there are now no warnings about GPUDevice not being found std::string resource_warning = "cannot find Resource of type: nvidia::gxf::GPUDevice"; EXPECT_TRUE(log_output.find(resource_warning) == std::string::npos); + + // Verify that BlockMemoryPool and CudaStreamPool did not get initialized on a separate entity + // from DummyDemosaicApp. (check for absence of warning from GXFResource::initialize). + std::string graph_entity_warning = "initialized independent of a parent entity"; + EXPECT_TRUE(log_output.find(graph_entity_warning) == std::string::npos); } TEST(DemosaicOpApp, TestDummyDemosaicAppWithExplicitInit) { @@ -102,7 +108,13 @@ TEST(DemosaicOpApp, TestDummyDemosaicAppWithExplicitInit) { std::string log_output = testing::internal::GetCapturedStderr(); EXPECT_TRUE(log_output.find("Graph activation failed") == std::string::npos); + // verify that there are now no warnings about GPUDevice not being found std::string resource_warning = "cannot find Resource of type: nvidia::gxf::GPUDevice"; EXPECT_TRUE(log_output.find(resource_warning) == std::string::npos); + + // Due to `set_explicit_stream_pool_init = true` we expect to see a warning from + // GXFResource::initialize due to explicit initialization of a resource to its own entity. 
+ std::string graph_entity_warning = "initialized independent of a parent entity"; + EXPECT_TRUE(log_output.find(graph_entity_warning) != std::string::npos); } diff --git a/tests/system/distributed/distributed_app.cpp b/tests/system/distributed/distributed_app.cpp index 68be77be..f68a6775 100644 --- a/tests/system/distributed/distributed_app.cpp +++ b/tests/system/distributed/distributed_app.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -19,10 +19,12 @@ #include #include +#include #include #include +#include "../env_wrapper.hpp" #include "utility_apps.hpp" namespace holoscan { @@ -130,13 +132,12 @@ TEST(DistributedApp, TestUCXConnectionApp2) { TEST(DistributedApp, TestUCXLinearPipelineApp) { auto app = make_application(); - // capture output so that we can check that the expected value is present testing::internal::CaptureStderr(); app->run(); std::string log_output = testing::internal::GetCapturedStderr(); - EXPECT_TRUE(log_output.find("received count: 10") != std::string::npos); + EXPECT_TRUE(log_output.find("received count: 20") != std::string::npos); } TEST(DistributedApp, TestUCXBroadcastApp) { diff --git a/tests/system/distributed/distributed_demosaic_op_app.cpp b/tests/system/distributed/distributed_demosaic_op_app.cpp index 33d1ce6d..567f0a21 100644 --- a/tests/system/distributed/distributed_demosaic_op_app.cpp +++ b/tests/system/distributed/distributed_demosaic_op_app.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -27,7 +27,6 @@ #include "holoscan/holoscan.hpp" #include "holoscan/operators/bayer_demosaic/bayer_demosaic.hpp" - static HoloscanTestConfig test_config; class GenerateAndDemosaicFragment : public holoscan::Fragment { @@ -53,7 +52,10 @@ class GenerateAndDemosaicFragment : public holoscan::Fragment { Arg("generate_alpha", false), Arg("bayer_grid_pos", 2), Arg("interpolation_mode", 0), - Arg("pool", make_resource("pool", 1, rows * columns * channels, 2)), + // The pool size is set to 10 to prevent memory allocation errors during testing. + // Additional memory pool may be required as UCXTransmitter sends data asynchronously + // without checking the receiver's queue. + Arg("pool", make_resource("pool", 1, rows * columns * channels, 10)), Arg("cuda_stream_pool", cuda_stream_pool)}; std::shared_ptr bayer_demosaic; diff --git a/tests/system/distributed/ucx_message_serialization_ping_app.cpp b/tests/system/distributed/ucx_message_serialization_ping_app.cpp index 98c0af85..d926a165 100644 --- a/tests/system/distributed/ucx_message_serialization_ping_app.cpp +++ b/tests/system/distributed/ucx_message_serialization_ping_app.cpp @@ -30,6 +30,7 @@ #include "ping_message_rx_op.hpp" #include "ping_message_tx_op.hpp" +#include "utils.hpp" using namespace std::string_literals; @@ -165,6 +166,8 @@ TEST_P(UcxMessageTypeParmeterizedTestFixture, TestUcxMessageSerializationApp) { EXPECT_TRUE(log_output.find("Found expected value in deserialized message.") != std::string::npos); + EXPECT_TRUE(remove_ignored_errors(log_output).find("error") == std::string::npos); + // restore the original log level if (message_type == MessageType::VEC_DOUBLE_LARGE) { if (env_orig) { diff --git a/tests/system/distributed/utility_apps.hpp b/tests/system/distributed/utility_apps.hpp index 8b89ab49..6bfa188c 100644 --- a/tests/system/distributed/utility_apps.hpp +++ 
b/tests/system/distributed/utility_apps.hpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -19,6 +19,7 @@ #define SYSTEM_UTILITY_APPS_HPP #include +#include #include #include @@ -215,12 +216,17 @@ class SingleOpFragment : public holoscan::Fragment { class OneTxFragment : public holoscan::Fragment { public: + explicit OneTxFragment(int64_t count = 10) : count_(count) {} + void compose() override { using namespace holoscan; - auto tx = make_operator("tx", make_condition(10)); + auto tx = make_operator("tx", make_condition(count_)); add_operator(tx); } + + private: + int64_t count_ = 10; }; class OneTwoOutputsTxFragment : public holoscan::Fragment { @@ -261,7 +267,7 @@ class OneMxFragment : public holoscan::Fragment { public: void compose() override { using namespace holoscan; - auto mx = make_operator("mx", make_condition(10)); + auto mx = make_operator("mx"); add_operator(mx); } @@ -271,7 +277,7 @@ class BroadcastFragment : public holoscan::Fragment { public: void compose() override { using namespace holoscan; - auto broadcast = make_operator("broadcast", make_condition(10)); + auto broadcast = make_operator("broadcast"); add_operator(broadcast); } @@ -281,7 +287,7 @@ class OneRxFragment : public holoscan::Fragment { public: void compose() override { using namespace holoscan; - auto rx = make_operator("rx", make_condition(10)); + auto rx = make_operator("rx"); add_operator(rx); } @@ -669,7 +675,8 @@ class UCXLinearPipelineApp : public holoscan::Application { void compose() override { using namespace holoscan; - auto fragment1 = make_fragment("fragment1"); + int64_t count = 20; + auto fragment1 = make_fragment("fragment1", count); auto fragment2 = make_fragment("fragment2"); auto 
fragment3 = make_fragment("fragment3"); diff --git a/tests/system/distributed/utils.hpp b/tests/system/distributed/utils.hpp new file mode 100644 index 00000000..8fef450a --- /dev/null +++ b/tests/system/distributed/utils.hpp @@ -0,0 +1,55 @@ +/* + * SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include +#include +#include + +namespace { + +std::string remove_ignored_errors(const std::string& captured_error) { + HOLOSCAN_LOG_INFO("original_capure: {}", captured_error); + std::vector err_lines; + std::string error_string = captured_error; + size_t pos = 0; + std::string delimiter = "\n"; + + while ((pos = error_string.find(delimiter)) != std::string::npos) { + std::string line = error_string.substr(0, pos); + err_lines.push_back(line); + error_string.erase(0, pos + delimiter.length()); + } + + std::vector errors_to_ignore = { + // some versions of the UCX extension print this error during application shutdown + "Connection dropped with status -25"}; + + for (const std::string& err : errors_to_ignore) { + err_lines.erase(std::remove_if(err_lines.begin(), + err_lines.end(), + [&](const std::string& line) { + return line.find(err) != std::string::npos; + }), + err_lines.end()); + } + + std::string result; + for (const std::string& line : err_lines) { result += line + "\n"; } + + return result; +} + +} // namespace 
diff --git a/tests/system/exception_handling.cpp b/tests/system/exception_handling.cpp index 580eddc3..fc6b4cf8 100644 --- a/tests/system/exception_handling.cpp +++ b/tests/system/exception_handling.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,7 +17,9 @@ #include #include +#include +#include #include #include @@ -26,6 +28,34 @@ static HoloscanTestConfig test_config; +namespace { + +enum class ThrowMethod : uint8_t { kStart, kStop, kCompute, kInitialize, kNone }; + +class MethodParmeterizedTestFixture : public ::testing::TestWithParam {}; + +} // namespace + +// need convert for ThrowMethod to be able to use it as a Parameter +template <> +struct YAML::convert { + static Node encode(const ThrowMethod& rhs) { + Node node; + node = static_cast(rhs); + return node; + } + + static bool decode(const Node& node, ThrowMethod& rhs) { + if (!node.IsScalar()) return false; + uint8_t throw_method = node.as(); + if (throw_method <= static_cast(ThrowMethod::kNone)) { + rhs = static_cast(throw_method); + return true; + } + return false; + } +}; + namespace holoscan { namespace ops { @@ -36,29 +66,91 @@ class MinimalThrowOp : public Operator { MinimalThrowOp() = default; + void initialize() override { + register_converter(); + + Operator::initialize(); + + if (throw_type_.get() == ThrowMethod::kInitialize) { + throw std::runtime_error("Exception occurred in MinimalThrowOp::initialize"); + } + }; + + void start() override { + if (throw_type_.get() == ThrowMethod::kStart) { + throw std::runtime_error("Exception occurred in MinimalThrowOp::start"); + } + }; + void compute(InputContext& op_input, OutputContext&, ExecutionContext&) override { - throw std::runtime_error("Exception occurred in 
MinimalThrowOp::compute"); + if (throw_type_.get() == ThrowMethod::kCompute) { + throw std::runtime_error("Exception occurred in MinimalThrowOp::compute"); + } + }; + + void stop() override { + if (throw_type_.get() == ThrowMethod::kStop) { + throw std::runtime_error("Exception occurred in MinimalThrowOp::stop"); + } }; + + void setup(OperatorSpec& spec) override { + spec.param( + throw_type_, "throw_type", "Throw Type", "Specifies which method throws the exception"); + } + + private: + Parameter throw_type_; }; } // namespace ops class MinimalThrowApp : public holoscan::Application { public: + /** + * @brief Construct a new MinimalThrowApp object + * + * @param throw_type enum controlling which method (if any) throws an exception + */ + explicit MinimalThrowApp(ThrowMethod throw_type) : throw_type_(throw_type) {} + void compose() override { using namespace holoscan; - auto op = make_operator("min_op", make_condition(3)); + auto op = make_operator( + "min_op", make_condition(3), Arg("throw_type", throw_type_)); add_operator(op); } + + private: + ThrowMethod throw_type_ = ThrowMethod::kNone; }; -TEST(MinimalNativeOperatorApp, TestComputeMethodExceptionHandling) { - auto app = make_application(); +INSTANTIATE_TEST_CASE_P(MinimalNativeOperatorAppTests, MethodParmeterizedTestFixture, + ::testing::Values(ThrowMethod::kStart, ThrowMethod::kStop, + ThrowMethod::kCompute, ThrowMethod::kInitialize, + ThrowMethod::kNone)); + +TEST_P(MethodParmeterizedTestFixture, TestMethodExceptionHandling) { + ThrowMethod throw_method = GetParam(); + auto app = make_application(throw_method); - const std::string config_file = test_config.get_test_data_file("minimal.yaml"); - app->config(config_file); + // capture output so that we can check that the expected value is present + testing::internal::CaptureStderr(); - EXPECT_THROW({ app->run(); }, std::runtime_error); + if (throw_method == ThrowMethod::kNone) { + EXPECT_NO_THROW({ app->run(); }); + } else { + EXPECT_THROW({ app->run(); }, 
std::runtime_error); + } + + std::string log_output = testing::internal::GetCapturedStderr(); + if ((throw_method != ThrowMethod::kNone)) { + EXPECT_TRUE(log_output.find("Exception occurred in MinimalThrowOp") != std::string::npos); + if (throw_method != ThrowMethod::kInitialize) { + // exception in initialize is before graph start, so this would not be printed + EXPECT_TRUE(log_output.find("Graph execution error: ") != std::string::npos); + } + } } } // namespace holoscan diff --git a/tests/system/multithreaded_app.cpp b/tests/system/multithreaded_app.cpp index fd7bd1e2..28bfbade 100644 --- a/tests/system/multithreaded_app.cpp +++ b/tests/system/multithreaded_app.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -51,6 +51,7 @@ TEST(MultithreadedApp, TestSendingTensorToMultipleOperators) { EnvVarWrapper wrapper({ std::make_pair("HOLOSCAN_LOG_LEVEL", "DEBUG"), + std::make_pair("HOLOSCAN_EXECUTOR_LOG_LEVEL", "INFO"), // quiet multi_thread_scheduler.cpp }); auto app = make_application(); diff --git a/tests/system/native_async_operator_ping_app.cpp b/tests/system/native_async_operator_ping_app.cpp index 7b9b1b1d..adbcf4d8 100644 --- a/tests/system/native_async_operator_ping_app.cpp +++ b/tests/system/native_async_operator_ping_app.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
* SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -80,7 +80,8 @@ TEST_P(ParameterizedAsyncPingTestFixture, TestAsyncRxApp) { auto multithreaded = GetParam(); if (multithreaded) { app->scheduler(app->make_scheduler( - "multithread-scheduler", holoscan::Arg("stop_on_deadlock", false), + "multithread-scheduler", + holoscan::Arg("stop_on_deadlock", false), holoscan::Arg("max_duration_ms", 1000L))); } diff --git a/tests/system/native_operator_ping_app.cpp b/tests/system/native_operator_ping_app.cpp index cd7a264b..788dff83 100644 --- a/tests/system/native_operator_ping_app.cpp +++ b/tests/system/native_operator_ping_app.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -205,7 +205,10 @@ TEST(NativeOperatorPingApp, TestNativeForwardOpAppDanglingOutput) { std::string log_output = testing::internal::GetCapturedStderr(); // string tested here is from GXF itself, so may have to update it as GXF is updated - EXPECT_TRUE(log_output.find("Connection not found for Tx data2") != std::string::npos); + EXPECT_TRUE(log_output.find( + "No receiver connected to transmitter of DownstreamReceptiveSchedulingTerm") != + std::string::npos); + EXPECT_TRUE(log_output.find("The entity will never tick") != std::string::npos); } TEST(NativeOperatorPingApp, TestNativeForwardOpAppDanglingInput) { diff --git a/tests/system/ping_tensor_tx_op.cpp b/tests/system/ping_tensor_tx_op.cpp index c2c55807..55b09855 100644 --- a/tests/system/ping_tensor_tx_op.cpp +++ b/tests/system/ping_tensor_tx_op.cpp @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -92,9 +92,7 @@ void PingTensorTxOp::compute(InputContext&, OutputContext& op_output, ExecutionC // Allocate and initialize the CUDA memory. CUDA_TRY(cudaMalloc(pointer.get(), nbytes)); std::vector data(nbytes); - for (size_t index = 0; index < data.size(); ++index) { - data[index] = (index_ + index) % 256; - } + for (size_t index = 0; index < data.size(); ++index) { data[index] = (index_ + index) % 256; } CUDA_TRY(cudaMemcpy(*pointer, data.data(), nbytes, cudaMemcpyKind::cudaMemcpyHostToDevice)); // Holoscan Tensor doesn't support direct memory allocation.