diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json
index 55a74bd8..72e26f87 100644
--- a/.devcontainer/devcontainer.json
+++ b/.devcontainer/devcontainer.json
@@ -34,10 +34,10 @@
"NVIDIA_DRIVER_CAPABILITIES": "graphics,video,compute,utility,display",
// Set the following environment variables to use the same folder name as the host machine.
// This is needed to launch container from the workspace folder that is not same as the SDK source root folder.
- "HOLOSCAN_PUBLIC_FOLDER": "${env:HOLOSCAN_PUBLIC_FOLDER}",
+ "HOLOSCAN_PUBLIC_FOLDER": "${localEnv:HOLOSCAN_PUBLIC_FOLDER}",
// This is necessary to prevent memory overuse during the SDK build process.
// The `CMAKE_BUILD_PARALLEL_LEVEL` environment variable is set by the `run vscode` command.
- "CMAKE_BUILD_PARALLEL_LEVEL": "${env:CMAKE_BUILD_PARALLEL_LEVEL}",
+ "CMAKE_BUILD_PARALLEL_LEVEL": "${localEnv:CMAKE_BUILD_PARALLEL_LEVEL}",
},
"mounts": [
"source=/tmp/.X11-unix,target=/tmp/.X11-unix,type=bind,consistency=cached",
@@ -64,7 +64,7 @@
"shd101wyy.markdown-preview-enhanced",
"cschlosser.doxdocgen",
"mine.cpplint",
- "benjamin-simmonds.pythoncpp-debug" , // Python/C++ debugging
+ "benjamin-simmonds.pythoncpp-debug", // Python/C++ debugging
]
}
},
diff --git a/scripts/debug_python b/.vscode/debug_python
similarity index 96%
rename from scripts/debug_python
rename to .vscode/debug_python
index 8aab2e2d..02faf1d3 100755
--- a/scripts/debug_python
+++ b/.vscode/debug_python
@@ -19,7 +19,7 @@
SCRIPT_DIR=$(dirname "$(readlink -f "$0")")
if [ -e ${SCRIPT_DIR}/debug_env.sh ]; then
- # User can place debug_env.sh in the same directory as this script (scripts/debug_env.sh would be ignored in git repo)
+ # User can place debug_env.sh in the same directory as this script (.vscode/debug_env.sh would be ignored in git repo)
. ${SCRIPT_DIR}/debug_env.sh
fi
diff --git a/.vscode/launch.json b/.vscode/launch.json
index 0e8231cc..ee2d522d 100644
--- a/.vscode/launch.json
+++ b/.vscode/launch.json
@@ -36,7 +36,7 @@
"request": "launch",
"program": "/usr/bin/bash",
"args": [
- "${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/scripts/debug_python",
+ "${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/.vscode/debug_python",
"${file}",
],
"stopAtEntry": false,
@@ -98,7 +98,7 @@
"program": "/usr/bin/bash",
// https://github.com/catchorg/Catch2/blob/devel/docs/command-line.md#specifying-which-tests-to-run
"args": [
- "${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/scripts/debug_python",
+ "${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/.vscode/debug_python",
"${command:cmake.buildDirectory}/examples/aja_capture/python/aja_capture.py",
],
"stopAtEntry": false,
@@ -128,7 +128,7 @@
"request": "launch",
"program": "/usr/bin/bash",
"args": [
- "${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/scripts/debug_python",
+ "${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/.vscode/debug_python",
"${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/examples/bring_your_own_model/python/byom.py",
],
"stopAtEntry": false,
@@ -212,7 +212,7 @@
"request": "launch",
"program": "/usr/bin/bash",
"args": [
- "${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/scripts/debug_python",
+ "${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/.vscode/debug_python",
"${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/examples/conditions/periodic/python/ping_periodic.py",
],
"stopAtEntry": false,
@@ -242,7 +242,7 @@
"request": "launch",
"program": "/usr/bin/bash",
"args": [
- "${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/scripts/debug_python",
+ "${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/.vscode/debug_python",
"${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/examples/cupy_native/matmul.py",
],
"stopAtEntry": false,
@@ -295,7 +295,7 @@
"request": "launch",
"program": "/usr/bin/bash",
"args": [
- "${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/scripts/debug_python",
+ "${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/.vscode/debug_python",
"${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/examples/flow_tracker/python/flow_tracker.py",
],
"stopAtEntry": false,
@@ -348,7 +348,7 @@
"request": "launch",
"program": "/usr/bin/bash",
"args": [
- "${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/scripts/debug_python",
+ "${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/.vscode/debug_python",
"${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/examples/hello_world/python/hello_world.py",
],
"stopAtEntry": false,
@@ -401,7 +401,7 @@
"request": "launch",
"program": "/usr/bin/bash",
"args": [
- "${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/scripts/debug_python",
+ "${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/.vscode/debug_python",
"${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/examples/holoviz/python/holoviz_geometry.py",
],
"stopAtEntry": false,
@@ -431,7 +431,7 @@
"request": "launch",
"program": "/usr/bin/bash",
"args": [
- "${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/scripts/debug_python",
+ "${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/.vscode/debug_python",
"${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/examples/holoviz/python/holoviz_geometry_3d.py",
],
"stopAtEntry": false,
@@ -461,7 +461,7 @@
"request": "launch",
"program": "/usr/bin/bash",
"args": [
- "${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/scripts/debug_python",
+ "${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/.vscode/debug_python",
"${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/examples/holoviz/python/holoviz_views.py",
],
"stopAtEntry": false,
@@ -518,7 +518,7 @@
"request": "launch",
"program": "/usr/bin/bash",
"args": [
- "${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/scripts/debug_python",
+ "${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/.vscode/debug_python",
"${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/examples/multithread/python/multithread.py",
],
"stopAtEntry": false,
@@ -552,7 +552,7 @@
"request": "launch",
"program": "/usr/bin/bash",
"args": [
- "${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/scripts/debug_python",
+ "${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/.vscode/debug_python",
"${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/examples/numpy_native/convolve.py",
],
"stopAtEntry": false,
@@ -651,7 +651,7 @@
"request": "launch",
"program": "/usr/bin/bash",
"args": [
- "${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/scripts/debug_python",
+ "${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/.vscode/debug_python",
"${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/examples/ping_custom_op/python/ping_custom_op.py",
],
"stopAtEntry": false,
@@ -733,7 +733,7 @@
"request": "launch",
"program": "/usr/bin/bash",
"args": [
- "${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/scripts/debug_python",
+ "${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/.vscode/debug_python",
"${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/examples/ping_distributed/python/ping_distributed.py",
"--driver",
"--worker",
@@ -793,7 +793,7 @@
"request": "launch",
"program": "/usr/bin/bash",
"args": [
- "${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/scripts/debug_python",
+ "${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/.vscode/debug_python",
"${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/examples/ping_multi_port/python/ping_multi_port.py",
],
"stopAtEntry": false,
@@ -846,7 +846,7 @@
"request": "launch",
"program": "/usr/bin/bash",
"args": [
- "${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/scripts/debug_python",
+ "${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/.vscode/debug_python",
"${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/examples/ping_simple/python/ping_simple.py",
],
"stopAtEntry": false,
@@ -899,7 +899,7 @@
"request": "launch",
"program": "/usr/bin/bash",
"args": [
- "${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/scripts/debug_python",
+ "${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/.vscode/debug_python",
"${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/examples/ping_simple_run_async/python/ping_simple_run_async.py",
],
"stopAtEntry": false,
@@ -952,7 +952,7 @@
"request": "launch",
"program": "/usr/bin/bash",
"args": [
- "${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/scripts/debug_python",
+ "${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/.vscode/debug_python",
"${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/examples/ping_vector/python/ping_vector.py",
],
"stopAtEntry": false,
@@ -1005,7 +1005,7 @@
"request": "launch",
"program": "/usr/bin/bash",
"args": [
- "${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/scripts/debug_python",
+ "${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/.vscode/debug_python",
"${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/examples/resources/clock/python/ping_clock.py",
],
"stopAtEntry": false,
@@ -1058,7 +1058,7 @@
"request": "launch",
"program": "/usr/bin/bash",
"args": [
- "${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/scripts/debug_python",
+ "${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/.vscode/debug_python",
"${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/examples/tensor_interop/python/tensor_interop.py",
],
"stopAtEntry": false,
@@ -1127,7 +1127,7 @@
"request": "launch",
"program": "/usr/bin/bash",
"args": [
- "${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/scripts/debug_python",
+ "${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/.vscode/debug_python",
"${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/examples/v4l2_camera/python/v4l2_camera.py",
],
"stopAtEntry": false,
@@ -1188,7 +1188,7 @@
"request": "launch",
"program": "/usr/bin/bash",
"args": [
- "${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/scripts/debug_python",
+ "${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/.vscode/debug_python",
"${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/examples/video_replayer/python/video_replayer.py",
],
"stopAtEntry": false,
@@ -1249,7 +1249,7 @@
"request": "launch",
"program": "/usr/bin/bash",
"args": [
- "${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/scripts/debug_python",
+ "${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/.vscode/debug_python",
"${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/examples/video_replayer_distributed/python/video_replayer_distributed.py",
],
"stopAtEntry": false,
@@ -1317,7 +1317,7 @@
"request": "launch",
"program": "/usr/bin/bash",
"args": [
- "${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/scripts/debug_python",
+ "${workspaceFolder}/${env:HOLOSCAN_PUBLIC_FOLDER}/.vscode/debug_python",
"-m",
"pytest",
"-v",
diff --git a/.vscode/settings.json b/.vscode/settings.json
index 5dba339e..6174db62 100644
--- a/.vscode/settings.json
+++ b/.vscode/settings.json
@@ -118,7 +118,10 @@
"charconv": "cpp",
"cuchar": "cpp",
"propagate_const": "cpp",
- "ranges": "cpp"
+ "ranges": "cpp",
+ "barrier": "cpp",
+ "latch": "cpp",
+ "syncstream": "cpp"
},
"git.alwaysSignOff": true,
"git.untrackedChanges": "separate",
diff --git a/.vscode/tasks.json b/.vscode/tasks.json
index 0fdf1b92..50cac519 100644
--- a/.vscode/tasks.json
+++ b/.vscode/tasks.json
@@ -10,6 +10,7 @@
"env": {
"PATH": "${env:HOME}/.local/bin:${env:PATH}",
"CUDACXX": "/usr/local/cuda/bin/nvcc",
+ "CMAKE_BUILD_PARALLEL_LEVEL": "${env:CMAKE_BUILD_PARALLEL_LEVEL}",
}
},
"presentation": {
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 47bd06f8..2ced8e43 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -159,6 +159,11 @@ if(HOLOSCAN_BUILD_GXF_EXTENSIONS)
add_subdirectory(gxf_extensions)
endif()
+# ##############################################################################
+# # Configure scripts
+# ##############################################################################
+add_subdirectory(scripts)
+
# ##############################################################################
# # Package project
# ##############################################################################
@@ -192,6 +197,7 @@ list(APPEND HOLOSCAN_INSTALL_TARGETS
op_video_stream_recorder
op_video_stream_replayer
op_v4l2
+ spdlog_logger
)
if(HOLOSCAN_BUILD_LIBTORCH)
@@ -250,17 +256,37 @@ install(FILES ${${HOLOSCAN_PACKAGE_NAME}_BINARY_DIR}/include/holoscan/version_co
# Install GXF
install(DIRECTORY
- ${GXF_core_INCLUDE_DIR}/common
- ${GXF_core_INCLUDE_DIR}/gxf/core
- ${GXF_core_INCLUDE_DIR}/gxf/cuda
- ${GXF_core_INCLUDE_DIR}/gxf/multimedia
- ${GXF_core_INCLUDE_DIR}/gxf/network
- ${GXF_core_INCLUDE_DIR}/gxf/npp
- ${GXF_core_INCLUDE_DIR}/gxf/serialization
- ${GXF_core_INCLUDE_DIR}/gxf/std
+ ${GXF_INCLUDE_DIR}/common
+ ${GXF_INCLUDE_DIR}/gxf/app
+ ${GXF_INCLUDE_DIR}/gxf/core
+ ${GXF_INCLUDE_DIR}/gxf/cuda
+ ${GXF_INCLUDE_DIR}/gxf/logger
+ ${GXF_INCLUDE_DIR}/gxf/multimedia
+ ${GXF_INCLUDE_DIR}/gxf/serialization
+ ${GXF_INCLUDE_DIR}/gxf/std
+ ${GXF_INCLUDE_DIR}/gxf/ucx
DESTINATION "include/gxf"
COMPONENT "holoscan-gxf_libs"
)
+foreach(_component ${HOLOSCAN_GXF_COMPONENTS})
+ string(TOUPPER "${CMAKE_BUILD_TYPE}" _build_type)
+ get_target_property(GXF_${_component}_LOCATION GXF::${_component} IMPORTED_LOCATION_${_build_type})
+ if(NOT GXF_${_component}_LOCATION)
+ get_target_property(GXF_${_component}_LOCATION GXF::${_component} IMPORTED_LOCATION)
+ endif()
+ if("${_component}" STREQUAL "gxe")
+ install(FILES ${HOLOSCAN_GXE_LOCATION}
+ DESTINATION "bin"
+ PERMISSIONS OWNER_READ OWNER_WRITE OWNER_EXECUTE GROUP_READ GROUP_EXECUTE WORLD_READ WORLD_EXECUTE
+ COMPONENT "holoscan-gxf_libs"
+ )
+ else()
+ install(FILES "${GXF_${_component}_LOCATION}"
+ DESTINATION ${HOLOSCAN_INSTALL_LIB_DIR}
+ COMPONENT "holoscan-gxf_libs"
+ )
+ endif()
+endforeach()
# Install CMake script to build GXE applications
install(FILES "${CMAKE_SOURCE_DIR}/cmake/modules/GenerateGXEAppInstall.cmake"
@@ -286,21 +312,6 @@ DESTINATION "${HOLOSCAN_INSTALL_LIB_DIR}/cmake/holoscan"
COMPONENT "holoscan-core"
)
-install(FILES "${CMAKE_SOURCE_DIR}/scripts/download_ngc_data"
- "${CMAKE_SOURCE_DIR}/scripts/convert_video_to_gxf_entities.py"
- "${CMAKE_SOURCE_DIR}/scripts/gxf_entity_codec.py"
-DESTINATION "${HOLOSCAN_INSTALL_LIB_DIR}/cmake/holoscan"
-PERMISSIONS OWNER_EXECUTE OWNER_WRITE OWNER_READ
-COMPONENT "holoscan-core"
-)
-
-# Install CMake script to download example data from NGC
-install(FILES "${CMAKE_SOURCE_DIR}/scripts/download_example_data"
-DESTINATION "examples"
-PERMISSIONS OWNER_EXECUTE OWNER_WRITE OWNER_READ
-COMPONENT "holoscan-core"
-)
-
# Define docs and hooks
set(holoscan_doc_string [=[
libholoscan: Holoscan SDK C++ API
@@ -316,19 +327,30 @@ if(NOT TARGET fmt::fmt-header-only)
add_library(fmt::fmt-header-only INTERFACE IMPORTED)
endif()
-set(_GXFlibs core std multimedia cuda network npp serialization behavior_tree)
+set(_GXF_components @HOLOSCAN_GXF_COMPONENTS@)
-foreach(gxflib IN LISTS _GXFlibs)
- if(NOT TARGET GXF::${gxflib})
- add_library(GXF::${gxflib} SHARED IMPORTED)
- set_target_properties(GXF::${gxflib} PROPERTIES
- IMPORTED_LOCATION "${PACKAGE_PREFIX_DIR}/lib/libgxf_${gxflib}.so"
+foreach(gxf_component IN LISTS _GXF_components)
+ if(NOT TARGET GXF::${gxf_component} AND NOT (${gxf_component} STREQUAL "gxe"))
+ add_library(GXF::${gxf_component} SHARED IMPORTED)
+ set_target_properties(GXF::${gxf_component} PROPERTIES
+ IMPORTED_LOCATION "${PACKAGE_PREFIX_DIR}/lib/libgxf_${gxf_component}.so"
IMPORTED_NO_SONAME ON
INTERFACE_INCLUDE_DIRECTORIES "${PACKAGE_PREFIX_DIR}/include;${PACKAGE_PREFIX_DIR}/include/gxf"
)
endif()
endforeach()
+if(TARGET GXF::ucx)
+ # GXF UCX classes publicly depend on UCX headers.
+ # Workaround to include those headers without explicitly providing UCX targets.
+ # http://cdash.nvidia.com/viewBuildError.php?buildid=4461
+ set_property(
+ TARGET GXF::ucx
+ APPEND PROPERTY
+ INTERFACE_INCLUDE_DIRECTORIES "${PACKAGE_PREFIX_DIR}/include/3rdparty/ucx"
+ )
+endif()
+
if(NOT TARGET GXF::gxe)
add_executable(GXF::gxe IMPORTED)
set_target_properties(GXF::gxe PROPERTIES
@@ -342,6 +364,7 @@ set(GXF_EXTENSIONS_DIR "${PACKAGE_PREFIX_DIR}/lib/gxf_extensions")
list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_LIST_DIR}")
]=])
+string(CONFIGURE "${holoscan_install_hook_code_string}" holoscan_install_hook_code_string @ONLY)
set(holoscan_build_hook_code_string [=[
]=])
diff --git a/DEVELOP.md b/DEVELOP.md
new file mode 100644
index 00000000..df13cc4d
--- /dev/null
+++ b/DEVELOP.md
@@ -0,0 +1,217 @@
+# Developer Resources
+
+This document guides users through recommended and advanced workflows for building and using the Holoscan SDK. This is generally not the simplest way to use the SDK, so make sure to review the [project README](./README.md) before getting started.
+
+> **⚠️ Disclaimer**: we only recommend building the SDK from source if you are a developer of the SDK, or need to build the SDK with debug symbols or other options not used as part of the published packages.
+> - If you want to write your own operator or application, you can use the SDK as a dependency (and contribute to [HoloHub](https://github.com/nvidia-holoscan/holohub)).
+> - If you need to make other modifications to the SDK, [file a feature or bug request](https://forums.developer.nvidia.com/c/healthcare/holoscan-sdk/320/all).
+> - Refer to the [Holoscan SDK User Guide installation instructions](https://docs.nvidia.com/holoscan/sdk-user-guide/sdk_installation.html#install-the-sdk) for guidance on installing Holoscan SDK from published packages.
+
+## Table of Contents
+
+- [Building the SDK from source](#building-the-sdk-from-source)
+ - [Prerequisites](#prerequisites)
+ - [(Recommended) using the `run` script](#recommended-using-the-run-script)
+ - [Cross-compilation](#cross-compilation)
+ - [(Advanced) Docker + CMake](#advanced-docker--cmake)
+ - [(Advanced) Local environment + CMake](#advanced-local-environment--cmake)
+- [Runtime Container](#runtime-container)
+- [Utilities](#utilities)
+ - [Testing](#testing)
+ - [Linting](#linting)
+ - [VSCode](#vscode)
+
+## Building the SDK from source
+
+### Prerequisites
+
+- Prerequisites for each supported platform are documented in [the user guide](https://docs.nvidia.com/holoscan/sdk-user-guide/sdk_installation.html#prerequisites).
+- To build and run the SDK in a containerized environment (recommended) you'll need:
+ - the [NVIDIA Container Toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html) v1.12.2+
+ - [Docker](https://docs.docker.com/engine/install/ubuntu/#install-using-the-repository), including the buildx plugin (`docker-buildx-plugin`)
+
+### (Recommended) Using the `run` script
+
+Call **`./run build`** within the repository to build the build container and the CMake project.
+
+- *If you encounter errors during the CMake build, you can execute `./run clear_cache` to remove cache/build/install folders*
+- *Execute `./run build --help` for more information*
+- *Execute `./run build --dryrun` to see the commands that will be executed*
+- *That command can also be broken up into more granular commands:*
+
+ ```sh
+ ./run check_system_deps # ensure the system is properly configured for building
+ ./run build_image # create the build Docker container
+ ./run build # run the CMake configuration, build, and install steps
+ ```
+
+Call the **`./run launch`** command to start and enter the build container.
+
+- *You can run from the `install` or `build` tree by passing the working directory as an argument (ex: `./run launch install`)*
+- *Execute `./run launch --help` for more information*
+- *Execute `./run launch --dryrun` to see the commands that will be executed*
+- *Execute `./run launch --run-cmd "..."` to execute a bash command directly in the container*
+
+Run the [**examples**](./examples#readme) inside the container by running their respective commands listed within each directory README file.
+
+### Cross-compilation
+
+While the Dockerfile to build the SDK does not currently support true cross-compilation, you can compile the Holoscan SDK for the developer kits (arm64) from an x86_64 host using an emulation environment.
+
+1. [Install qemu](https://github.com/multiarch/qemu-user-static)
+2. Clear your build cache: `./run clear_cache`
+3. Rebuild for `linux/arm64` using `--arch|-a` or `HOLOSCAN_BUILD_ARCH`:
+ - `./run build --arch arm64`
+ - `HOLOSCAN_BUILD_ARCH=arm64 ./run build`
+
+You can then copy the `install` folder generated by CMake to a developer kit with a configured environment, or into a container, and use it for running and developing applications.
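+
+A possible end-to-end sequence is sketched below. The qemu registration command follows the `multiarch/qemu-user-static` project documentation, and the target host and path in the copy step are hypothetical.
+
+```sh
+# Register qemu binfmt handlers so arm64 binaries can run on the x86_64 host
+docker run --rm --privileged multiarch/qemu-user-static --reset -p yes
+
+# Clear the build cache and rebuild the SDK for arm64 under emulation
+./run clear_cache
+./run build --arch arm64
+
+# Copy the resulting install tree to a developer kit (hypothetical host and path)
+scp -r install/ nvidia@my-devkit:/opt/holoscan-sdk
+```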
+
+### (Advanced) Docker + CMake
+
+The [`run`](./run) script mentioned above is helpful to understand how Docker and CMake are configured and run, as commands will be printed when running it or using `--dryrun`.
+We recommend looking at those commands if you want to use Docker and CMake manually, and reading the comments inside the script for details about each parameter (specifically the `build()` and `launch()` methods).
+
+### (Advanced) Local environment + CMake
+
+> **⚠️ Disclaimer**: this method of building the SDK is not actively tested or maintained. Instructions below might go out of date.
+
+#### Software Requirements
+
+To build the Holoscan SDK on a local environment, the dev dependencies below are needed at the listed (minimum or tested) versions. The last column refers to the stage (`FROM`) in the [Dockerfile](./Dockerfile) where the respective commands to build/install these dependencies can be found.
+
+| Dependency | Min version | Needed by | Dockerfile stage |
+|---|---|---|---|
+| CUDA | 12.2 | Core SDK | base |
+| gRPC | 1.54.2 | Core SDK | grpc-builder |
+| UCX | 1.15.0 | Core SDK | ucx-builder |
+| GXF | 3.1 | Core SDK | gxf-downloader |
+| MOFED | 23.07 | ConnectX | mofed-installer |
+| TensorRT | 8.6.1 | Inference operator | base |
+| ONNX Runtime | 1.15.1 | Inference operator | onnxruntime-downloader |
+| LibTorch | 2.1.0 | Inference operator<br>(torch plugin) | torch-downloader-[x86_64\|arm64] |
+| TorchVision | 0.16.0 | Inference operator<br>(torch plugin) | torchvision-downloader-[x86_64\|arm64] |
+| Vulkan SDK | 1.3.216 | Holoviz operator | vulkansdk-builder |
+| Vulkan loader and<br>validation layers | 1.3.204 | Holoviz operator | dev |
+| spirv-tools | 2022.1 | Holoviz operator | dev |
+| V4L2 | 1.22.1 | V4L2 operator | dev |
+| CMake | 3.24.0 | Build process | build-tools |
+| Patchelf | N/A | Build process | build-tools |
+
+Note: refer to the [Dockerfile](./Dockerfile) for other dependencies which are not needed to build, but might be needed for:
+
+- runtime (openblas/mkl for torch, egl for headless rendering, cloudpickle for distributed python apps, cupy for some examples...)
+- testing (valgrind, pytest, xvfb...)
+- utilities (v4l-utils, ...)
+
+For CMake to find these dependencies, install them in default system paths, or pass `CMAKE_PREFIX_PATH`, `CMAKE_LIBRARY_PATH`, and/or `CMAKE_INCLUDE_PATH` during configuration.
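+
+For instance, a configure step pointing CMake at dependencies installed outside the default system paths could look like the sketch below (the `/opt/...` locations are hypothetical):
+
+```sh
+cmake -S $source_dir -B $build_dir \
+  -G Ninja \
+  -D CMAKE_BUILD_TYPE=Release \
+  -D CUDAToolkit_ROOT:PATH="/usr/local/cuda" \
+  -D CMAKE_PREFIX_PATH="/opt/grpc;/opt/ucx;/opt/gxf"
+```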
+
+#### Build example
+
+```sh
+# Configure
+cmake -S $source_dir -B $build_dir \
+ -G Ninja \
+ -D CMAKE_BUILD_TYPE=Release \
+ -D CUDAToolkit_ROOT:PATH="/usr/local/cuda"
+
+# Build
+cmake --build $build_dir -j
+
+# Install
+cmake --install $build_dir --prefix $install_dir
+```
+
+The commands to run the [**examples**](./examples#readme) are then the same as in the dockerized environment, and can be found in the respective source directory READMEs.
+
+## Runtime Container
+
+There are multiple containers associated with Holoscan:
+
+- The **build** container generated by the [top-level Dockerfile](./Dockerfile) is designed to pull dependencies to build and test the SDK itself. The image does not contain the SDK itself; the source tree is mounted during `docker run` to run the CMake build or the tests.
+- The **development** container, available at [NGC | Holoscan Container](https://catalog.ngc.nvidia.com/orgs/nvidia/teams/clara-holoscan/containers/holoscan/tags), includes all the development tools and libraries needed to *build* Holoscan applications.
+ - This image is ~13 GB when uncompressed. However, once a Holoscan application is created, it does not need all those same development tools just to *run* an application.
+- To address this, a **runtime** container can now be generated with the [runtime_docker/Dockerfile](./runtime_docker/Dockerfile) which contains only the runtime dependencies of the Holoscan SDK.
+ - This Dockerfile is based on the [CUDA-base](https://catalog.ngc.nvidia.com/orgs/nvidia/containers/cuda) image, which begins with Ubuntu:22.04 and installs the CUDA runtime and Compat package.
+ - This image is ~8.7 GB on x86_64, and can be further reduced based on use cases (see below).
+
+> ⚠️ Disclaimer: Currently iGPU is not supported by the runtime container
+
+### Generate the runtime container
+
+The [`run`](./run) script contains the command `build_run_image` to build the runtime Holoscan SDK image:
+
+```bash
+./run build_run_image
+```
+
+Once this image is built, it can be run exactly like the Holoscan development container on NGC. Simply follow the 'Running the container' instructions beginning at step #3 at [NGC | Holoscan Container](https://catalog.ngc.nvidia.com/orgs/nvidia/teams/clara-holoscan/containers/holoscan), but replace `${NGC_CONTAINER_IMAGE_PATH}` with `holoscan-sdk-run-[-]` in step #4 (the image name is printed at the end of the above command).
+
+### Further Reducing Runtime Size
+
+If you have a specific application you wish to deploy, you can further reduce this runtime image size in two ways:
+
+1. **Targeting different stages of the [runtime Dockerfile](./runtime_docker/Dockerfile)** (see the example after this list):
+   1. add `--cpp` to the command above to avoid pulling in the Python dependencies.
+   2. add `--cpp-no-mkl` to additionally avoid pulling in MKL (an x86_64-only libtorch dependency).
+
+2. **Modifying the Dockerfile**
+
+The [runtime Dockerfile](./runtime_docker/Dockerfile) is thoroughly documented to indicate which dependency is used by which component of the Holoscan SDK. If you do not use some of these components (e.g., Torch inference backend, ONNX Runtime inference backend, TensorRT inference backend, Python/CuPy, the format_converter operator), comment out the appropriate lines in the Dockerfile and run the build command above.
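+
+For example, using the `--cpp` flag from option 1 above, a C++-only runtime image without the Python dependencies could be generated with:
+
+```sh
+./run build_run_image --cpp
+```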
+
+## Utilities
+
+Some utilities are available in the [`scripts`](./scripts) folder; others, closer to the build process, are listed below:
+
+### Testing
+
+Existing tests use GTest for C++ and pytest for Python, and can be found under [tests](./tests/) and [python/tests](./python/tests/) respectively. The Holoscan SDK uses CTest as the framework to build and execute these tests.
+
+Run the tests using the following command:
+
+```sh
+./run test
+```
+
+> Note: Run `run test --help` to see additional options.
+
+### Linting
+
+Run the following command to run various linting tools on the repository:
+
+```sh
+./run lint # optional: specify directories
+```
+
+> Note: Run `run lint --help` to see the list of tools that are used. If a lint command fails due to a missing module or executable on your system, you can install it using `python3 -m pip install <module>`.
+
+### Building the User Guide
+
+The source of the user guide hosted at <https://docs.nvidia.com/holoscan/sdk-user-guide/> is located in [docs](./docs/). It can be built with the following commands:
+
+- PDF: `./run build_pdf`
+- HTML: `./run build_html` (auto-reload: `./run live_html`)
+
+Run `./run help` for more commands related to the user guide documentation.
+
+### VSCode
+
+Visual Studio Code can be utilized to develop the Holoscan SDK. The `.devcontainer` folder holds the configuration for setting up a [development container](https://code.visualstudio.com/docs/remote/containers) with all necessary tools and libraries installed.
+
+The `./run` script contains `vscode` and `vscode_remote` commands for launching Visual Studio Code in a container or from a remote machine, respectively.
+
+- To launch Visual Studio Code in a dev container, use `./run vscode`.
+- To attach to an existing dev container from a remote machine, use `./run vscode_remote`. For more information, refer to the instructions from `./run vscode_remote -h`.
+
+Once Visual Studio Code is launched, the development container is built, the recommended extensions are installed, and CMake is configured automatically.
+
+#### Configuring CMake in the Development Container
+
+For manual configuration of CMake, open the command palette (`Ctrl + Shift + P`) and run the `CMake: Configure` command.
+
+#### Building the Source Code in the Development Container
+
+The source code in the development container can be built by either pressing `Ctrl + Shift + B` or executing `Tasks: Run Build Task` from the command palette (`Ctrl + Shift + P`).
+
+#### Debugging the Source Code in the Development Container
+
+To debug the source code in the development container, open the `Run and Debug` view (`Ctrl + Shift + D`), select a debug configuration from the dropdown list, and press `F5` to initiate debugging.
diff --git a/Dockerfile b/Dockerfile
index c007ace2..df8f1971 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -18,14 +18,16 @@
############################################################
# Versions
############################################################
-ARG ONNX_RUNTIME_VERSION=1.15.1
+# Dependencies ending in _YY.MM are built or extracted from
+# the TensorRT or PyTorch NGC containers of that same version
+ARG ONNX_RUNTIME_VERSION=1.15.1_23.08
ARG LIBTORCH_VERSION=2.1.0_23.08
ARG TORCHVISION_VERSION=0.16.0_23.08
ARG VULKAN_SDK_VERSION=1.3.216.0
ARG GRPC_VERSION=1.54.2
ARG UCX_VERSION=1.15.0
-ARG GXF_VERSION=3.1_20240103_6bf4fcd2
-ARG MOFED_VERSION=23.07-0.5.1.2
+ARG GXF_VERSION=4.0_20240409_bc03d9d
+ARG MOFED_VERSION=23.10-2.1.3.1
############################################################
# Base image
@@ -82,8 +84,9 @@ ARG ONNX_RUNTIME_VERSION
# note: built with CUDA and TensorRT providers
WORKDIR /opt/onnxruntime
RUN curl -S -L -# -o ort.tgz \
- https://edge.urm.nvidia.com/artifactory/sw-holoscan-thirdparty-generic-local/onnxruntime/onnxruntime-${ONNX_RUNTIME_VERSION}-cuda-12.1-$(uname -m).tar.gz
-RUN tar -xf ort.tgz --strip-components 1
+ https://edge.urm.nvidia.com/artifactory/sw-holoscan-thirdparty-generic-local/onnxruntime/onnxruntime-${ONNX_RUNTIME_VERSION}-cuda-12.2-$(uname -m).tar.gz
+RUN mkdir -p ${ONNX_RUNTIME_VERSION}
+RUN tar -xf ort.tgz -C ${ONNX_RUNTIME_VERSION} --strip-components 2
############################################################
# Libtorch
@@ -172,7 +175,7 @@ ARG MOFED_VERSION
# only dependencies in the `MOFED_DEPS` variable (parsing the output of `--check-deps-only`) to
# remove them in that same layer, to ensure they are not propagated in the final image.
WORKDIR /opt/nvidia/mofed
-ARG MOFED_INSTALL_FLAGS="--upstream-libs --dpdk --with-mft --user-space-only --force --without-fw-update"
+ARG MOFED_INSTALL_FLAGS="--dpdk --with-mft --user-space-only --force --without-fw-update"
RUN UBUNTU_VERSION=$(cat /etc/lsb-release | grep DISTRIB_RELEASE | cut -d= -f2) \
&& OFED_PACKAGE="MLNX_OFED_LINUX-${MOFED_VERSION}-ubuntu${UBUNTU_VERSION}-$(uname -m)" \
&& curl -S -# -o ${OFED_PACKAGE}.tgz -L \
@@ -227,7 +230,7 @@ RUN patchelf --set-rpath '$ORIGIN/../lib' bin/*
############################################################
# GXF
############################################################
-FROM base as gxf-downloader
+FROM base as gxf-builder
ARG GXF_VERSION
WORKDIR /opt/nvidia/gxf
@@ -235,7 +238,7 @@ RUN if [ $(uname -m) = "aarch64" ]; then ARCH=arm64; else ARCH=x86_64; fi \
&& curl -S -# -L -o gxf.tgz \
https://edge.urm.nvidia.com/artifactory/sw-holoscan-thirdparty-generic-local/gxf/gxf_${GXF_VERSION}_holoscan-sdk_${ARCH}.tar.gz
RUN mkdir -p ${GXF_VERSION}
-RUN tar -xzf gxf.tgz -C ${GXF_VERSION} --strip-components 1
+RUN tar xzf gxf.tgz -C ${GXF_VERSION} --strip-components 1
############################################################
# Build image (final)
@@ -304,9 +307,21 @@ ENV LD_LIBRARY_PATH="${LD_LIBRARY_PATH}:${UCX}/lib"
# Copy GXF
ARG GXF_VERSION
ENV GXF=/opt/nvidia/gxf/${GXF_VERSION}
-COPY --from=gxf-downloader ${GXF} ${GXF}
+COPY --from=gxf-builder ${GXF} ${GXF}
ENV CMAKE_PREFIX_PATH="${CMAKE_PREFIX_PATH}:${GXF}"
+# Setup Docker & NVIDIA Container Toolkit's apt repositories to enable DooD
+# for packaging & running applications with the CLI
+# Ref: Docker installation: https://docs.docker.com/engine/install/ubuntu/
+# DooD (Docker-out-of-Docker): use the Docker (or Moby) CLI in your dev container to connect to
+# your host's Docker daemon by bind mounting the Docker Unix socket.
+RUN install -m 0755 -d /etc/apt/keyrings \
+ && curl -fsSL https://download.docker.com/linux/ubuntu/gpg | gpg --dearmor -o /etc/apt/keyrings/docker.gpg \
+ && chmod a+r /etc/apt/keyrings/docker.gpg \
+ && echo "deb [arch="$(dpkg --print-architecture)" signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu \
+ "$(. /etc/os-release && echo "$VERSION_CODENAME")" stable" | \
+ tee /etc/apt/sources.list.d/docker.list > /dev/null
+
# APT INSTALLS
# valgrind - static analysis
# xvfb - testing on headless systems
@@ -319,6 +334,8 @@ ENV CMAKE_PREFIX_PATH="${CMAKE_PREFIX_PATH}:${GXF}"
# v4l-utils - V4L2 operator utility
# libpng-dev - torchvision dependency
# libjpeg-dev - torchvision dependency
+# docker-ce-cli - enable Docker DooD for CLI
+# docker-buildx-plugin - enable Docker DooD for CLI
RUN apt-get update \
&& apt-get install --no-install-recommends -y \
valgrind="1:3.18.1-*" \
@@ -338,6 +355,8 @@ RUN apt-get update \
v4l-utils="1.22.1-*" \
libpng-dev="1.6.37-*" \
libjpeg-turbo8-dev="2.1.2-*" \
+ docker-ce-cli="5:25.0.3-*" \
+ docker-buildx-plugin="0.12.1-*" \
&& rm -rf /var/lib/apt/lists/*
# PIP INSTALLS
@@ -345,14 +364,16 @@ RUN apt-get update \
# requirements.dev.txt
# coverage - test coverage of python tests
# pytest* - testing
-# requirements.txt
+# requirements
+# pip - 20.3+ needed for PEP 600
+# cupy-cuda - dependency for holoscan python + examples
# cloudpickle - dependency for distributed apps
# python-on-whales - dependency for holoscan CLI
# Jinja2 - dependency for holoscan CLI
# packaging - dependency for holoscan CLI
# pyyaml - dependency for holoscan CLI
# requests - dependency for holoscan CLI
-# cupy-cuda - dependency for holoscan python + examples
+# psutil - dependency for holoscan CLI
RUN if [ $(uname -m) = "x86_64" ]; then \
python3 -m pip install --no-cache-dir \
mkl==2021.1.1 \
@@ -362,6 +383,35 @@ RUN if [ $(uname -m) = "x86_64" ]; then \
# This can be removed once upgrading to an MKL pip wheel that fixes the symlinks
find /usr/local/lib -maxdepth 1 -type f -regex '.*\/lib\(tbb\|mkl\).*\.so\(\.[0-9]+\.[0-9]+\)?' -exec rm -v {} +; \
fi
-COPY python/requirements.dev.txt /tmp
-COPY python/requirements.txt /tmp
+COPY python/requirements.dev.txt /tmp/requirements.dev.txt
+COPY python/requirements.txt /tmp/requirements.txt
RUN python3 -m pip install --no-cache-dir -r /tmp/requirements.dev.txt -r /tmp/requirements.txt
+
+# Creates a home directory for docker-in-docker to store files temporarily in the container,
+# necessary when running the holoscan CLI packager
+ENV HOME=/home/holoscan
+RUN mkdir -p $HOME && chmod 777 $HOME
+
+############################################################################################
+# Extra stage: igpu build image
+# The iGPU CMake build depends on libnvcudla.so as well as libnvdla_compiler.so, which are
+# part of the L4T BSP. As such, they should not be in the container, but mounted at runtime
+# (which the nvidia container runtime handles). However, we need the symbols at build time
+# for the TensorRT libraries to resolve. Since there is no stub library (unlike libcuda.so),
+# we need to include them in our builder. We use a separate stage so that `run build` can
+# use it if needed, but `run launch` (used to run apps in the container) doesn't need to.
+############################################################################################
+FROM build as build-igpu
+ARG GPU_TYPE
+RUN if [ ${GPU_TYPE} = "igpu" ]; then \
+ tmp_dir=$(mktemp -d) \
+ && curl -S -# -L -o $tmp_dir/l4t_core.deb \
+ https://repo.download.nvidia.com/jetson/t234/pool/main/n/nvidia-l4t-core/nvidia-l4t-core_36.1.0-20231206095146_arm64.deb \
+ && curl -S -# -L -o $tmp_dir/l4t_cuda.deb \
+ https://repo.download.nvidia.com/jetson/t234/pool/main/n/nvidia-l4t-cuda/nvidia-l4t-cuda_36.1.0-20231206095146_arm64.deb \
+ && curl -S -# -L -o $tmp_dir/l4t_dla.deb \
+ https://repo.download.nvidia.com/jetson/common/pool/main/n/nvidia-l4t-dla-compiler/nvidia-l4t-dla-compiler_36.1.0-20231206095146_arm64.deb \
+ && dpkg -x $tmp_dir/l4t_core.deb / \
+ && dpkg -x $tmp_dir/l4t_cuda.deb / \
+ && dpkg -x $tmp_dir/l4t_dla.deb /; \
+ fi
diff --git a/FAQ.md b/FAQ.md
new file mode 100644
index 00000000..00f7ef0e
--- /dev/null
+++ b/FAQ.md
@@ -0,0 +1,74 @@
+## Troubleshooting the SDK
+
+### X11: Failed to open display :0 [...] Failed to initialize GLFW
+
+Enable permissions to your X server from Docker, either:
+
+- Passing `-u $(id -u):$(id -g)` to `docker run`, or
+- Running `xhost +local:docker` on your host
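+
+For example, a `docker run` invocation granting X11 access could look like the sketch below (the image name and tag are placeholders):
+
+```sh
+xhost +local:docker
+docker run --rm -it --gpus all \
+  -u $(id -u):$(id -g) \
+  -e DISPLAY \
+  -v /tmp/.X11-unix:/tmp/.X11-unix \
+  <holoscan-image>:<tag>
+```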
+
+### GLX: Failed to create context: GLXBadFBConfig
+
+You may encounter this error message if the Holoscan application runs on a virtual machine (e.g., from a cloud service provider) or without a physical display attached. If you want to run applications that use the GPU over X11 (e.g., VNC or NoMachine), set the following environment variables before executing the application to offload rendering to the GPU.
+
+```sh
+export __NV_PRIME_RENDER_OFFLOAD=1
+export __GLX_VENDOR_LIBRARY_NAME=nvidia
+```
+
+### `GXF_ENTITY_COMPONENT_NOT_FOUND` or `GXF_ENTITY_NOT_FOUND`
+
+Ensure that all the connections (`nvidia::gxf::Connection`) in your application's YAML file refer to entities or components defined in that file. This error can occur when a component is removed without cleaning up the stale connections.
+
+### No receiver connected to transmitter of `<tx>` of entity `<name>`. The entity will never tick
+
+Ensure your entity or component is not an orphan, but is connected to a `nvidia::gxf::Connection`.
+
+### AJA device errors
+
+These errors indicate that you don't have AJA support in your environment.
+
+```sh
+2022-06-09 18:45:13.826 ERROR gxf_extensions/aja/aja_source.cpp@80: Device 0 not found.
+2022-06-09 18:45:13.826 ERROR gxf_extensions/aja/aja_source.cpp@251: Failed to open device 0
+```
+
+Double check that you have installed the AJA ntv2 driver, loaded the driver after every reboot, and that you have specified `--device /dev/ajantv20:/dev/ajantv20` in the `docker run` command if you’re running a docker container.
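+
+For reference, a container launch passing the AJA device through could look like the sketch below (the image name and tag are placeholders):
+
+```sh
+docker run --rm -it --gpus all \
+  --device /dev/ajantv20:/dev/ajantv20 \
+  <holoscan-image>:<tag>
+```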
+
+### GXF format converter errors
+
+These errors may indicate that you need to reconfigure your format converter's `num_block` parameter.
+
+```sh
+2022-06-09 18:47:30.769 ERROR gxf_extensions/format_converter/format_converter.cpp@521: Failed to allocate memory for the channel conversion
+2022-06-09 18:47:30.769 ERROR gxf_extensions/format_converter/format_converter.cpp@359: Failed to convert tensor format (conversion type:6)
+```
+
+Try increasing the current `num_block` value by 1 in the YAML file for all format converter entities. This may happen if your YAML file was configured for running with RDMA and you have since decided to disable RDMA.
+
+### Video device error
+
+Some of those errors may occur when running the V4L2 codelet:
+
+```
+Failed to open device, OPEN: No such file or directory
+```
+
+Ensure you have a video device connected (ex: USB webcam) and listed when running `ls -l /dev/video*`.
+
+```
+Failed to open device, OPEN: Permission denied
+```
+
+This means the `/dev/video*` device is not accessible to the user from within the Docker container. Add `--group-add video` to the `docker run` command.
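+
+A minimal sketch of passing a V4L2 device through to the container (the device path, image name, and tag are placeholders):
+
+```sh
+docker run --rm -it --gpus all \
+  --device /dev/video0 \
+  --group-add video \
+  <holoscan-image>:<tag>
+```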
+
+### HolovizOp fails on hybrid GPU systems with non-NVIDIA integrated GPU and NVIDIA discrete GPU
+
+You may encounter an error when trying to run the Holoviz operator on a laptop equipped with an integrated and a discrete GPU. By default these systems will be using the integrated GPU when running an application. The integrated GPU does not provide the capabilities the Holoviz operator needs and the operator will fail.
+
+The following environment variables need to be set before executing the application to offload the rendering to the discrete GPU. See [PRIME Render Offload](https://download.nvidia.com/XFree86/Linux-x86_64/535.54.03/README/primerenderoffload.html) for more information.
+
+```sh
+export __NV_PRIME_RENDER_OFFLOAD=1
+export __GLX_VENDOR_LIBRARY_NAME=nvidia
+```
\ No newline at end of file
diff --git a/NOTICE.txt b/NOTICE.txt
index c17f2040..7d02d28c 100644
--- a/NOTICE.txt
+++ b/NOTICE.txt
@@ -30,6 +30,14 @@ Licensed under MIT (https://github.com/ocornut/imgui/blob/master/LICENSE.txt)
DLPack (https://github.com/dmlc/dlpack)
Licensed under Apache-2.0 (https://github.com/dmlc/dlpack/blob/v0.7/LICENSE)
+docker-buildx-plugin (https://github.com/docker/buildx)
+Copyright 2013-2017 Docker, Inc.
+Licensed under Apache-2.0 (https://github.com/docker/buildx/blob/master/LICENSE)
+
+docker-ce-cli (https://github.com/docker/cli/)
+Copyright 2013-2017 Docker, Inc.
+Licensed under Apache-2.0 (https://github.com/docker/cli/blob/master/LICENSE)
+
expected (https://github.com/TartanLlama/expected)
Licensed under CC0-1.0 (https://github.com/TartanLlama/expected/blob/v1.1.0/COPYING)
@@ -96,6 +104,10 @@ Copyright (c) 1991-2020 Guido Vollbeding. All Rights Reserved.
Copyright (c) 2010 Nokia Corporation
Licensed under JPEG (https://github.com/libjpeg-turbo/libjpeg-turbo/blob/main/LICENSE.md)
+libnuma (https://github.com/numactl/numactl)
+Copyright (C) 1991, 1999 Free Software Foundation, Inc.
+Licensed under LGPLv2.1 (https://github.com/numactl/numactl/blob/master/LICENSE.LGPL2.1)
+
libpng (https://packages.ubuntu.com/jammy/libpng-dev)
Copyright (c) 1995-2023 The PNG Reference Library Authors.
Copyright (c) 2018-2023 Cosmin Truta.
@@ -132,6 +144,10 @@ libvulkan1 (https://packages.ubuntu.com/jammy/libvulkan1)
2015-2016 LunarG, Inc
Licensed under Apache-2.0 (http://changelogs.ubuntu.com/changelogs/pool/main/v/vulkan-loader/vulkan-loader_1.3.204.1-2/copyright)
+magic_enum (https://github.com/Neargye/magic_enum)
+Copyright (c) 2019 - 2023 Daniil Goncharov
+Licensed under MIT (https://github.com/Neargye/magic_enum/blob/v0.9.3/LICENSE)
+
Intel® oneAPI Math Kernel Library (https://pypi.org/project/mkl/2021.1.1/)
Copyright (c) Intel Corporation
Licensed under ISSL (https://www.intel.com/content/www/us/en/developer/articles/license/end-user-license-agreement.html#intel-simplified-software-license)
diff --git a/README.md b/README.md
index 5ed4e787..32136618 100644
--- a/README.md
+++ b/README.md
@@ -2,302 +2,55 @@
The **Holoscan SDK** is part of [NVIDIA Holoscan](https://developer.nvidia.com/holoscan-sdk), the AI sensor processing platform that combines hardware systems for low-latency sensor and network connectivity, optimized libraries for data processing and AI, and core microservices to run streaming, imaging, and other applications, from embedded to edge to cloud. It can be used to build streaming AI pipelines for a variety of domains, including Medical Devices, High Performance Computing at the Edge, Industrial Inspection and more.
-> In previous releases, the prefix [`Clara`](https://developer.nvidia.com/industries/healthcare) was used to define Holoscan as a platform designed initially for [medical devices](https://www.nvidia.com/en-us/clara/developer-kits/). As Holoscan has grown, its potential to serve other areas has become apparent. With version 0.4.0, we're proud to announce that the Holoscan SDK is now officially built to be domain-agnostic and can be used to build sensor AI applications in multiple domains. Note that some of the content of the SDK (sample applications) or the documentation might still appear to be healthcare-specific pending additional updates. Going forward, domain specific content will be hosted on the [HoloHub](https://nvidia-holoscan.github.io/holohub) repository.
-
## Table of Contents
- [Getting Started](#getting-started)
-- [Building the SDK from source](#building-the-sdk-from-source)
- - [Prerequisites](#prerequisites)
- - [(Recommended) using the `run` script](#recommended-using-the-run-script)
- - [Cross-compilation](#cross-compilation)
- - [(Advanced) Docker + CMake](#advanced-docker--cmake)
- - [(Advanced) Local environment + CMake](#advanced-local-environment--cmake)
-- [Runtime Container](#runtime-container)
-- [Utilities](#utilities)
- - [Testing](#testing)
- - [Linting](#linting)
- - [VSCode](#vscode)
-- [Troubleshooting](#troubleshooting)
-- [Repository structure](#repository-structure)
+- [Obtaining the Holoscan SDK](#obtaining-the-holoscan-sdk)
+- [Troubleshooting and Feedback](#troubleshooting-and-feedback)
+- [Additional Notes](#additional-notes)
## Getting Started
Visit the Holoscan User Guide to get started with the Holoscan SDK: <https://docs.nvidia.com/holoscan/sdk-user-guide/>
-## Building the SDK from source
-
-> **⚠️ Disclaimer**: we only recommend building the SDK from source if you are a developer of the SDK, or need to build the SDK with debug symbols or other options not used as part of the published packages. If you want to write your own operator or application, you can use the SDK as a dependency (and contribute to [HoloHub](https://github.com/nvidia-holoscan/holohub)). If you need to make other modifications to the SDK, [file a feature or bug request](https://forums.developer.nvidia.com/c/healthcare/holoscan-sdk/320/all). If that's not the case, prefer installing the SDK from [published packages](https://docs.nvidia.com/holoscan/sdk-user-guide/sdk_installation.html#install-the-sdk).
-
-### Prerequisites
-
-- Prerequisites for each supported platform are documented in [the user guide](https://docs.nvidia.com/holoscan/sdk-user-guide/sdk_installation.html#prerequisites).
-- To build and run the SDK in a containerized environment (recommended) you'll need:
- - the [NVIDIA Container Toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html) v1.12.2+
- - [Docker](https://docs.docker.com/engine/install/ubuntu/#install-using-the-repository), including the buildx plugin (`docker-buildx-plugin`)
-
-### (Recommended) Using the `run` script
-
-Call **`./run build`** within the repository to build the build container and the CMake project.
-
-- *If you encounter errors during the CMake build, you can execute `./run clear_cache` to remove cache/build/install folders*
-- *Execute `./run build --help` for more information*
-- *Execute `./run build --dryrun` to see the commands that will be executed*
-- *That command can be broken-up in more granular commands also:*
-
- ```sh
- ./run check_system_deps # ensure the system is properly configured for building
- ./run build_image # create the build Docker container
- ./run build # run the CMake configuration, build, and install steps
- ```
-
-Call the **`./run launch`** command to start and enter the build container.
-
-- *You can run from the `install` or `build` tree by passing the working directory as an argument (ex: `./run launch install`)*
-- *Execute `./run launch --help` for more information*
-- *Execute `./run launch --dryrun` to see the commands that will be executed*
-- *Execute `./run launch --run-cmd "..."` to execute a bash command directly in the container*
-
-Run the [**examples**](./examples#readme) inside the container by running their respective commands listed within each directory README file.
-
-### Cross-compilation
-
-While the Dockerfile to build the SDK does not currently support true cross-compilation, you can compile the Holoscan SDK for the developer kits (arm64) from a x86_64 host using an emulation environment.
-
-1. [Install qemu](https://github.com/multiarch/qemu-user-static)
-2. Clear your build cache: `./run clear_cache`
-3. Rebuild for `linux/arm64` using `--arch|-a` or `HOLOSCAN_BUILD_ARCH`:
- - `./run build --arch arm64`
- - `HOLOSCAN_BUILD_ARCH=arm64 ./run build`
-
-You can then copy the `install` folder generated by CMake to a developer kit with a configured environment or within a container to use for running and developing applications.
-
-### (Advanced) Docker + CMake
-
-The [`run`](./run) script mentioned above is helpful to understand how Docker and CMake are configured and run, as commands will be printed when running it or using `--dryrun`.
-We recommend looking at those commands if you want to use Docker and CMake manually, and reading the comments inside the script for details about each parameter (specifically the `build()` and `launch()` methods).
-
-### (Advanced) Local environment + CMake
-
-> **⚠️ Disclaimer**: this method of building the SDK is not actively tested or maintained. Instructions below might go out of date.
-
-#### Dependencies
-
-To build the Holoscan SDK on a local environment, the following versions of dev dependencies are needed (or tested). The last column refers to the stage (`FROM`) in the [Dockerfile](./Dockerfile) where respective commands can be found to build/install these dependencies.
-
-| Dependency | Min version | Needed by | Dockerfile stage |
-|---|---|---|---|
-| CUDA | 12.2 | Core SDK | base |
-| gRPC | 1.54.2 | Core SDK | grpc-builder |
-| UCX | 1.15.0 | Core SDK | ucx-builder |
-| GXF | 3.1 | Core SDK | gxf-downloader |
-| MOFED | 23.07 | ConnectX | mofed-installer |
-| TensorRT | 8.6.1 | Inference operator | base |
-| ONNX Runtime | 1.15.1 | Inference operator | onnxruntime-downloader |
-| LibTorch | 2.1.0 | Inference operator<br>(torch plugin) | torch-downloader-[x86_64\|arm64] |
-| TorchVision | 0.16.0 | Inference operator<br>(torch plugin) | torchvision-downloader-[x86_64\|arm64] |
-| Vulkan SDK | 1.3.216 | Holoviz operator | vulkansdk-builder |
-| Vulkan loader and<br>validation layers | 1.3.204 | Holoviz operator | dev |
-| spirv-tools | 2022.1 | Holoviz operator | dev |
-| V4L2 | 1.22.1 | V4L2 operator | dev |
-| CMake | 3.24.0 | Build process | build-tools |
-| Patchelf | N/A | Build process | build-tools |
-
-Note: refer to the [Dockerfile](./Dockerfile) for other dependencies which are not needed to build, but might be needed for:
-
-- runtime (openblas/mkl for torch, egl for headless rendering, cloudpickle for distributed python apps, cupy for some examples...)
-- testing (valgrind, pytest, xvfb...)
-- utilities (v4l-utils, ...)
-
-For CMake to find these dependencies, install them in default system paths, or pass `CMAKE_PREFIX_PATH`, `CMAKE_LIBRARY_PATH`, and/or `CMAKE_INCLUDE_PATH` during configuration.
-
-#### Build example
-
-```sh
-# Configure
-cmake -S $source_dir -B $build_dir \
- -G Ninja \
- -D CMAKE_BUILD_TYPE=Release \
- -D CUDAToolkit_ROOT:PATH="/usr/local/cuda"
-
-# Build
-cmake --build $build_dir -j
-
-# Install
-cmake --install $build_dir --prefix $install_dir
-```
-
-The commands to run the [**examples**](./examples#readme) are then the same as in the dockerized environment, and can be found in the respective source directory READMEs.
-
-## Runtime Container
-
-There are multiple containers associated with Holoscan:
-
-- The **build** container generated by the [top-level Dockerfile](./Dockerfile) is designed to pull dependencies to build and test the SDK itself. The image does not contain the SDK itself, as it is mounted with during `docker run` to run the cmake build or run tests.
-- The **development** container available at [NGC | Holoscan Container](https://catalog.ngc.nvidia.com/orgs/nvidia/teams/clara-holoscan/containers/holoscan/tags) which includes all the development tools and libraries needed to *build* Holoscan applications.
- - This image is ~13 GB when uncompressed. However, once a Holoscan application is created, it does not need all those same development tools just to *run* an application.
-- To address this, a **runtime** container can now be generated with the [runtime_docker/Dockerfile](./runtime_docker/Dockerfile) which contains only the runtime dependencies of the Holoscan SDK.
- - This Dockerfile is based on the [CUDA-base](https://catalog.ngc.nvidia.com/orgs/nvidia/containers/cuda) image, which begins with Ubuntu:22.04 and installs the CUDA runtime and Compat package.
- - This image is ~8.7 GB on x86_64, and can be further reduced based on use cases (see below).
-
-> ⚠️ Disclaimer: Currently iGPU is not supported by the runtime container
-
-### Generate the runtime container
-
-The [`run`](./run) script contains the command `build_run_image` to build the runtime Holoscan SDK image:
-
-```bash
-./run build_run_image
-```
-
-Once this image is built, it can be run exactly as the Holoscan development container on NGC is. Simply follow the 'Running the container' instructions beginning at step #3 at [NGC | Holoscan Container](https://catalog.ngc.nvidia.com/orgs/nvidia/teams/clara-holoscan/containers/holoscan), but replace `${NGC_CONTAINER_IMAGE_PATH}` by `holoscan-sdk-run-[-]` in step #4 (name outputted at the end of the above command).
-
-### Further Reducing Runtime Size
-
-If you have a specific application you wish to deploy, you can further reduce this runtime image size in two ways:
-
-1. **Targeting different stages of the [runtime Dockerfile](./runtime_docker/Dockerfile)**.
- 1. add `--cpp` to the command above to not pull in python dependencies.
- 2. add `--cpp-no-mkl` to the command above to not pull in MKL (x86_64-only libtorch dependency) in addition to the above.
-
-2. **Modifying the Dockerfile**
-
-The [runtime Dockerfile](./runtime_docker/Dockerfile) is thoroughly documented to indicate which dependency is used by which component of the Holoscan SDK. If you do not use some of these components (ex: Torch inference backend, ONNX Runtime inference backend, TensorRT inference backend, Python/Cupy, format_converter operator, etc...), comment out the appropriate line in the Dockerfile and run the build command above.
-
-## Utilities
-
-Some utilities are available in the [`scripts`](./scripts) folder, others closer to the built process are listed below:
-
-### Testing
-
-Existing tests are using GTest for C++ and pytest for Python, and can be found under [tests](./tests/) and [python/tests](./python/tests/) respectively. The Holoscan SDK uses CTest as a framework to build and execute these tests.
-
-Run the tests using the following command:
-
-```sh
-./run test
-```
-
-> Note: Run `run test --help` to see additional options.
-
-### Linting
-
-Run the following command to run various linting tools on the repository:
-
-```sh
-./run lint # optional: specify directories
-```
-
-> Note: Run `run lint --help` to see the list of tools that are used. If a lint command fails due to a missing module or executable on your system, you can install it using `python3 -m pip install `.
-
-### Building the User Guide
-
-The source of the user guide hosted at is located in [docs](./docs/). It can be built with the following commands:
-
-- PDF: `./run build_pdf`
-- HTML: `./run build_html` (auto-reload: `./run live_html`)
-
-Run `./run help` for more commands related to the user guide documentation.
-
-### VSCode
-
-Visual Studio Code can be utilized to develop the Holoscan SDK. The `.devcontainer` folder holds the configuration for setting up a [development container](https://code.visualstudio.com/docs/remote/containers) with all necessary tools and libraries installed.
-
-The `./run` script contains `vscode` and `vscode_remote` commands for launching Visual Studio Code in a container or from a remote machine, respectively.
-
-- To launch Visual Studio Code in a dev container, use `./run vscode`.
-- To attach to an existing dev container from a remote machine, use `./run vscode_remote`. For more information, refer to the instructions from `./run vscode_remote -h`.
-
-Once Visual Studio Code is launched, the development container will be built and the recommended extensions will be installed automatically, along with CMake being configured.
-
-#### Configuring CMake in the Development Container
-
-For manual configuration of CMake, open the command palette (`Ctrl + Shift + P`) and run the `CMake: Configure` command.
-
-#### Building the Source Code in the Development Container
-
-The source code in the development container can be built by either pressing `Ctrl + Shift + B` or executing `Tasks: Run Build Task` from the command palette (`Ctrl + Shift + P`).
-
-#### Debugging the Source Code in the Development Container
-
-To debug the source code in the development container, open the `Run and Debug` view (`Ctrl + Shift + D`), select a debug configuration from the dropdown list, and press `F5` to initiate debugging.
-
-## Troubleshooting
-
-### X11: Failed to open display :0 [...] Failed to initialize GLFW
-
-Enable permissions to your X server from Docker, either:
-
-- Passing `-u $(id -u):$(id -g)` to `docker run`, or
-- Running `xhost +local:docker` on your host
-
-### GLX: Failed to create context: GLXBadFBConfig
-
-You may encounter the error message if the Holoscan Application runs on a Virtual Machine (by a Cloud Service Provider) or without a physical display attached. If you want to run applications that use GPU on x11 (e.g., VNC or NoMachine), the following environment variables need to be set before executing the application to offload the rendering to GPU.
-
-```sh
-export __NV_PRIME_RENDER_OFFLOAD=1
-export __GLX_VENDOR_LIBRARY_NAME=nvidia
-```
-
-### `GXF_ENTITY_COMPONENT_NOT_FOUND` or `GXF_ENTITY_NOT_FOUND`
-
-Ensure all your application connections in the YAML file (`nvidia::gxf::Connection`) refer to entities or components defined within that file. This error can occur when a component is removed without cleaning up its stale connections.
-
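-As a hedged illustration (the entity and port names below are hypothetical), each `nvidia::gxf::Connection` should reference a transmitter and receiver that actually exist in the same file:
-
-```yaml
-name: connection
-components:
-  - type: nvidia::gxf::Connection
-    parameters:
-      source: tx_entity/signal   # must match an existing entity/transmitter
-      target: rx_entity/signal   # must match an existing entity/receiver
-```
-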
-### No receiver connected to transmitter of `<port>` of entity `<entity>`. The entity will never tick
-
-Ensure your entity or component is not an orphan, but is connected to a `nvidia::gxf::Connection`.
-
-### AJA device errors
-
-These errors indicate that you don't have AJA support in your environment.
-
-```sh
-2022-06-09 18:45:13.826 ERROR gxf_extensions/aja/aja_source.cpp@80: Device 0 not found.
-2022-06-09 18:45:13.826 ERROR gxf_extensions/aja/aja_source.cpp@251: Failed to open device 0
-```
-
-Double-check that you have installed the AJA NTV2 driver, that the driver is loaded after every reboot, and that you have specified `--device /dev/ajantv20:/dev/ajantv20` in the `docker run` command if you're running a Docker container.
-
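-For reference, a hedged sketch of such a `docker run` invocation (the image name is a placeholder):
-
-```sh
-docker run -it --rm \
-  --device /dev/ajantv20:/dev/ajantv20 \
-  my-holoscan-image
-```
-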
-### GXF format converter errors
-
-These errors may indicate that you need to reconfigure your format converter's num_block number.
+The Holoscan User Guide includes:
+- An introduction to the NVIDIA Holoscan platform, including the Holoscan C++/Python SDK;
+- Requirements and setup steps;
+- Detailed SDK documentation, including a developer introduction, examples, and API details.
-```sh
-2022-06-09 18:47:30.769 ERROR gxf_extensions/format_converter/format_converter.cpp@521: Failed to allocate memory for the channel conversion
-2022-06-09 18:47:30.769 ERROR gxf_extensions/format_converter/format_converter.cpp@359: Failed to convert tensor format (conversion type:6)
-```
+We also recommend visiting [NVIDIA HoloHub](https://nvidia-holoscan.github.io/holohub/) to view
+community projects and reusable components available for your Holoscan project.
-Try increasing the current `num_block` value by 1 in the YAML file for all format converter entities. This may happen if your YAML file was configured for running with RDMA and you have since disabled RDMA.
+## Obtaining the Holoscan SDK
-### Video device error
+The Holoscan User Guide documents several options to install and run the Holoscan SDK:
-Some of those errors may occur when running the V4L2 codelet:
+- As an [NGC Container 🐋](https://docs.nvidia.com/holoscan/sdk-user-guide/sdk_installation.html#sd-tab-item-2)
+- As a [Debian Package 📦️](https://docs.nvidia.com/holoscan/sdk-user-guide/sdk_installation.html#sd-tab-item-3)
+- As a [Python Wheel 🐍](https://docs.nvidia.com/holoscan/sdk-user-guide/sdk_installation.html#sd-tab-item-4)
-```
-Failed to open device, OPEN: No such file or directory
-```
+Visit the [Holoscan User Guide](https://docs.nvidia.com/holoscan/sdk-user-guide/sdk_installation.html#not-sure-what-to-choose) for
+guidance to help choose which installation option may be right for your use case.
-Ensure you have a video device connected (e.g., a USB webcam) and that it is listed when running `ls -l /dev/video*`.
+If the options above do not support your use case, you may prefer to [build the SDK from source](./DEVELOP.md).
-```
-Failed to open device, OPEN: Permission denied
-```
+Please review [Holoscan SDK prerequisites](https://docs.nvidia.com/holoscan/sdk-user-guide/sdk_installation.html#prerequisites)
+before getting started.
-This means the `/dev/video*` device is not accessible to the user from within the Docker container. Add `--group-add video` to the `docker run` command.
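-
-For example, a hedged sketch of such a command (the image name and device index are placeholders):
-
-```sh
-docker run -it --rm \
-  --group-add video \
-  --device /dev/video0:/dev/video0 \
-  my-holoscan-image
-```
-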
+## Troubleshooting and Feedback
-### HolovizOp fails on hybrid GPU systems with non-NVIDIA integrated GPU and NVIDIA discrete GPU
+We appreciate community discussion and feedback in support of Holoscan platform users and developers. We ask that users:
+- Review the [Holoscan SDK Frequently Asked Questions](FAQ.md) document for common solutions and workarounds.
+- Direct questions to the [NVIDIA Support Forum](https://forums.developer.nvidia.com/c/healthcare/holoscan-sdk/320/all).
+- Enter SDK issues on the [SDK GitHub Issues board](https://github.com/nvidia-holoscan/holoscan-sdk/issues).
-You may encounter this error when trying to run the Holoviz operator on a laptop equipped with both an integrated and a discrete GPU. By default, these systems use the integrated GPU when running an application. The integrated GPU does not provide the capabilities the Holoviz operator needs, and the operator will fail.
+## Additional Notes
-The following environment variables need to be set before executing the application to offload the rendering to the discrete GPU. See [PRIME Render Offload](https://download.nvidia.com/XFree86/Linux-x86_64/535.54.03/README/primerenderoffload.html) for more information.
+### Relation to NVIDIA Clara
-```sh
-export __NV_PRIME_RENDER_OFFLOAD=1
-export __GLX_VENDOR_LIBRARY_NAME=nvidia
-```
+In previous releases, the prefix [`Clara`](https://developer.nvidia.com/industries/healthcare) was used to define Holoscan as a platform designed initially for [medical devices](https://www.nvidia.com/en-us/clara/developer-kits/). As Holoscan has grown, its potential to serve other areas has become apparent. With version 0.4.0, we're proud to announce that the Holoscan SDK is now officially built to be domain-agnostic and can be used to build sensor AI applications in multiple domains. Note that some of the content of the SDK (sample applications) or the documentation might still appear to be healthcare-specific pending additional updates. Going forward, domain specific content will be hosted on the [HoloHub](https://nvidia-holoscan.github.io/holohub) repository.
-## Repository structure
+### Repository structure
The repository is organized as such:
diff --git a/VERSION b/VERSION
index 21e8796a..359a5b95 100644
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
-1.0.3
+2.0.0
\ No newline at end of file
diff --git a/cmake/deps/glfw_rapids.cmake b/cmake/deps/glfw_rapids.cmake
index 0051d3fc..ab2861a4 100644
--- a/cmake/deps/glfw_rapids.cmake
+++ b/cmake/deps/glfw_rapids.cmake
@@ -1,4 +1,4 @@
-# SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+# SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -55,6 +55,7 @@ rapids_cpm_find(GLFW 3.3.7
GITHUB_REPOSITORY glfw/glfw
GIT_TAG 3.3.7
OPTIONS
+ "BUILD_SHARED_LIBS OFF"
"CXXOPTS_BUILD_EXAMPLES OFF"
"CXXOPTS_BUILD_TESTS OFF"
"GLFW_BUILD_TESTS OFF"
@@ -63,10 +64,3 @@ rapids_cpm_find(GLFW 3.3.7
"GLFW_INSTALL OFF"
EXCLUDE_FROM_ALL
)
-
-if(GLFW_ADDED)
- install(TARGETS glfw
- DESTINATION "${HOLOSCAN_INSTALL_LIB_DIR}"
- COMPONENT "holoscan-dependencies"
- )
-endif()
diff --git a/cmake/deps/gxf.cmake b/cmake/deps/gxf.cmake
index 9ab649f4..33b9a7ca 100644
--- a/cmake/deps/gxf.cmake
+++ b/cmake/deps/gxf.cmake
@@ -1,4 +1,4 @@
-# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+# SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -13,13 +13,106 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-find_package(GXF 2.4 MODULE REQUIRED
- COMPONENTS
+set(HOLOSCAN_GXF_COMPONENTS
+ # For Holoscan to use and distribute
+ app
core
cuda
gxe
+ logger
multimedia
+ sample # dependency of GXF::app
serialization
std
ucx
)
+
+find_package(GXF 4.0 CONFIG REQUIRED
+ COMPONENTS ${HOLOSCAN_GXF_COMPONENTS}
+)
+message(STATUS "Found GXF: ${GXF_DIR}")
+
+# Workaround: If the GXF distribution implicitly includes an HTTP target dependency
+# for other libraries, add it to the list of imports.
+# https://jirasw.nvidia.com/browse/NVG-3245
+if(TARGET GXF::http)
+ list(APPEND HOLOSCAN_GXF_COMPONENTS http)
+endif()
+
+# Copy shared libraries and their headers to the GXF build folder
+# to be found alongside Holoscan GXF extensions.
+
+if(NOT HOLOSCAN_INSTALL_LIB_DIR)
+ if(DEFINED HOLOSCAN_SDK_PATH)
+ # Find library directory from HOLOSCAN_SDK_PATH
+ find_path(HOLOSCAN_INSTALL_LIB_DIR
+ NAMES libholoscan.so
+ PATHS ${HOLOSCAN_SDK_PATH}/lib ${HOLOSCAN_SDK_PATH}/lib64
+ NO_DEFAULT_PATH
+ REQUIRED
+ )
+
+ # Take only file name from path
+ get_filename_component(HOLOSCAN_INSTALL_LIB_DIR "${HOLOSCAN_INSTALL_LIB_DIR}" NAME)
+ else()
+ message(FATAL_ERROR "Unable to guess HOLOSCAN_INSTALL_LIB_DIR from HOLOSCAN_SDK_PATH")
+ endif()
+endif()
+
+set(HOLOSCAN_GXF_LIB_DIR "${CMAKE_BINARY_DIR}/${HOLOSCAN_INSTALL_LIB_DIR}")
+set(HOLOSCAN_GXF_BIN_DIR "${CMAKE_BINARY_DIR}/bin")
+foreach(component ${HOLOSCAN_GXF_COMPONENTS})
+ # Copy the GXF library to the build folder so that executables can find shared libraries
+ get_target_property(GXF_${component}_LOCATION GXF::${component} IMPORTED_LOCATION)
+ if(NOT GXF_${component}_LOCATION)
+ string(TOUPPER "${CMAKE_BUILD_TYPE}" _build_type)
+ get_target_property(GXF_${component}_LOCATION GXF::${component} IMPORTED_LOCATION_${_build_type})
+ endif()
+ if(GXF_${component}_LOCATION)
+ if(NOT "${component}" STREQUAL "gxe")
+ file(COPY "${GXF_${component}_LOCATION}"
+ DESTINATION "${HOLOSCAN_GXF_LIB_DIR}"
+ FILE_PERMISSIONS OWNER_READ OWNER_WRITE GROUP_READ WORLD_READ
+ )
+ get_filename_component(${component}_filename ${GXF_${component}_LOCATION} NAME)
+ set(HOLOSCAN_GXF_${component}_LOCATION "${HOLOSCAN_GXF_LIB_DIR}/${${component}_filename}")
+ set_target_properties(GXF::${component} PROPERTIES
+ IMPORTED_LOCATION_${_build_type} ${HOLOSCAN_GXF_${component}_LOCATION}
+ IMPORTED_LOCATION ${HOLOSCAN_GXF_${component}_LOCATION}
+ )
+ else()
+ file(COPY "${GXF_${component}_LOCATION}"
+ DESTINATION "${HOLOSCAN_GXF_BIN_DIR}"
+ FILE_PERMISSIONS OWNER_READ OWNER_WRITE OWNER_EXECUTE GROUP_READ GROUP_EXECUTE WORLD_READ WORLD_EXECUTE
+ )
+ set(HOLOSCAN_GXE_LOCATION "${HOLOSCAN_GXF_BIN_DIR}/gxe")
+ set_target_properties(GXF::gxe PROPERTIES
+      IMPORTED_LOCATION_${_build_type} ${HOLOSCAN_GXE_LOCATION}
+ IMPORTED_LOCATION ${HOLOSCAN_GXE_LOCATION}
+ )
+
+      # Patch the `gxe` executable RUNPATH so it can locate the required GXF libraries in the self-contained HSDK installation.
+      # The GXF 4.0 libraries themselves are self-contained and do not require RPATH updates.
+ find_program(PATCHELF_EXECUTABLE patchelf)
+ if(PATCHELF_EXECUTABLE)
+ execute_process(
+ COMMAND "${PATCHELF_EXECUTABLE}"
+ "--set-rpath"
+ "\$ORIGIN:\$ORIGIN/../${HOLOSCAN_INSTALL_LIB_DIR}"
+ "${HOLOSCAN_GXE_LOCATION}"
+ )
+ else()
+      message(WARNING "patchelf not found: could not patch the GXE executable RUNPATH. Set LD_LIBRARY_PATH to use the executable.")
+ endif()
+ endif()
+ else()
+ message(FATAL_ERROR "No imported location found for GXF::${component}")
+ endif()
+endforeach()
+
+# Set variables in parent scope for use throughout the Holoscan project
+set(GXF_INCLUDE_DIR ${GXF_INCLUDE_DIR} PARENT_SCOPE)
+set(HOLOSCAN_GXF_LIB_DIR ${HOLOSCAN_GXF_LIB_DIR} PARENT_SCOPE)
+set(HOLOSCAN_GXF_BIN_DIR ${HOLOSCAN_GXF_BIN_DIR} PARENT_SCOPE)
+set(HOLOSCAN_GXE_LOCATION ${HOLOSCAN_GXE_LOCATION} PARENT_SCOPE)
+set(HOLOSCAN_GXF_COMPONENTS ${HOLOSCAN_GXF_COMPONENTS} PARENT_SCOPE)
diff --git a/cmake/deps/magic_enum.cmake b/cmake/deps/magic_enum.cmake
new file mode 100644
index 00000000..3f8203b7
--- /dev/null
+++ b/cmake/deps/magic_enum.cmake
@@ -0,0 +1,43 @@
+# SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+# SPDX-License-Identifier: Apache-2.0
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# https://docs.rapids.ai/api/rapids-cmake/stable/command/rapids_cpm_find.html
+include(${rapids-cmake-dir}/cpm/find.cmake)
+
+# GXF 4.0 added a dependency on magic_enum
+
+rapids_cpm_find(magic_enum 0.9.3
+ GLOBAL_TARGETS magic_enum
+ BUILD_EXPORT_SET ${HOLOSCAN_PACKAGE_NAME}-exports
+ CPM_ARGS
+
+ GITHUB_REPOSITORY Neargye/magic_enum
+ GIT_TAG v0.9.3
+ GIT_SHALLOW TRUE
+
+ EXCLUDE_FROM_ALL
+)
+
+# Set 'magic_enum_SOURCE_DIR' with PARENT_SCOPE so that
+# root project can use it to include headers
+set(magic_enum_SOURCE_DIR ${magic_enum_SOURCE_DIR} PARENT_SCOPE)
+
+if(magic_enum_ADDED)
+ # Install the headers needed for development with the SDK
+ install(FILES ${magic_enum_SOURCE_DIR}/include/magic_enum.hpp
+ DESTINATION "include"
+ COMPONENT "holoscan-dependencies"
+ )
+endif()
diff --git a/cmake/deps/ucx.cmake b/cmake/deps/ucx.cmake
index 21bd3244..20fa4f2f 100644
--- a/cmake/deps/ucx.cmake
+++ b/cmake/deps/ucx.cmake
@@ -1,4 +1,4 @@
-# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+# SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -15,8 +15,17 @@
find_package(ucx 1.14.0 REQUIRED)
-install(DIRECTORY ${UCX_LIBRARIES}
+install(
+ DIRECTORY ${UCX_LIBRARIES}
DESTINATION "${HOLOSCAN_INSTALL_LIB_DIR}/.."
COMPONENT "holoscan-dependencies"
FILES_MATCHING PATTERN "*.so*"
)
+
+foreach(ucx_target ucm ucp ucs uct)
+ install(
+ DIRECTORY ${UCX_INCLUDE_DIRS}/${ucx_target}
+ DESTINATION "${CMAKE_INSTALL_INCLUDEDIR}/3rdparty/ucx"
+ COMPONENT "holoscan-dependencies"
+ )
+endforeach()
diff --git a/cmake/modules/FindGXF.cmake b/cmake/modules/FindGXF.cmake
deleted file mode 100644
index f07c391b..00000000
--- a/cmake/modules/FindGXF.cmake
+++ /dev/null
@@ -1,273 +0,0 @@
-# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
-# SPDX-License-Identifier: Apache-2.0
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Create GXF imported cmake targets
-#
-# This module defines GXF_FOUND if all GXF libraries are found or
-# if the required libraries (COMPONENTS property in find_package)
-# are found.
-#
-# A new imported target is created for each component (library)
-# under the GXF namespace (GXF::${component_name})
-#
-# Note: this leverages the find-module paradigm [1]. The config-file paradigm [2]
-# is recommended instead in CMake.
-# [1] https://cmake.org/cmake/help/latest/manual/cmake-packages.7.html#config-file-packages
-# [2] https://cmake.org/cmake/help/latest/manual/cmake-packages.7.html#find-module-packages
-
-# Define environment
-if(CMAKE_SYSTEM_PROCESSOR STREQUAL x86_64 OR CMAKE_SYSTEM_PROCESSOR STREQUAL amd64)
- set(_internal_GXF_recipe "gxf_x86_64")
- set(_public_GXF_recipe "x86_64")
-elseif(CMAKE_SYSTEM_PROCESSOR STREQUAL aarch64 OR CMAKE_SYSTEM_PROCESSOR STREQUAL arm64)
- set(_internal_GXF_recipe "gxf_jetpack50")
- set(_public_GXF_recipe "arm64")
-else()
- message(FATAL_ERROR "CMAKE_SYSTEM_PROCESSOR=${CMAKE_SYSTEM_PROCESSOR} is not an architecture supported by GXF")
-endif()
-
-if(NOT HOLOSCAN_INSTALL_LIB_DIR)
- if(DEFINED HOLOSCAN_SDK_PATH)
- # Find library directory from HOLOSCAN_SDK_PATH
- find_path(HOLOSCAN_INSTALL_LIB_DIR
- NAMES libholoscan.so
- PATHS ${HOLOSCAN_SDK_PATH}/lib ${HOLOSCAN_SDK_PATH}/lib64
- NO_DEFAULT_PATH
- REQUIRED
- )
-
- # Take only file name from path
- get_filename_component(HOLOSCAN_INSTALL_LIB_DIR "${HOLOSCAN_INSTALL_LIB_DIR}" NAME)
- else()
- message(FATAL_ERROR "Unable to guess HOLOSCAN_INSTALL_LIB_DIR from HOLOSCAN_SDK_PATH")
- endif()
-endif()
-
-# Need PatchELF to update the RPATH of the libs
-find_program(PATCHELF_EXECUTABLE patchelf)
-if(NOT PATCHELF_EXECUTABLE)
- message(FATAL_ERROR "Please specify the PATCHELF executable")
-endif()
-
-# Library names
-list(APPEND _GXF_EXTENSIONS
- behavior_tree
- cuda
- multimedia
- network
- npp
- python_codelet
- sample
- serialization
- std
- stream
- ucx
-)
-
-# Common headers
-find_path(GXF_common_INCLUDE_DIR
- NAMES common/
- REQUIRED
-)
-mark_as_advanced(GXF_common_INCLUDE_DIR)
-list(APPEND GXF_INCLUDE_DIR_VARS GXF_common_INCLUDE_DIR)
-
-# Libraries and their headers
-list(APPEND _GXF_LIBRARIES ${_GXF_EXTENSIONS} core)
-
-foreach(component IN LISTS _GXF_LIBRARIES)
- # headers
- find_path(GXF_${component}_INCLUDE_DIR
- NAMES "gxf/${component}/"
- )
- mark_as_advanced(GXF_${component}_INCLUDE_DIR)
- list(APPEND GXF_INCLUDE_DIR_VARS GXF_${component}_INCLUDE_DIR)
-
- # library
- find_library(GXF_${component}_LIBRARY
- NAMES "gxf_${component}"
- PATH_SUFFIXES
- "${_internal_GXF_recipe}/${component}"
- "${_public_GXF_recipe}/${component}"
- )
- mark_as_advanced(GXF_${component}_LIBRARY)
- list(APPEND GXF_LIBRARY_VARS GXF_${component}_LIBRARY)
-
- # create imported target
- if(GXF_${component}_LIBRARY)
- if(NOT TARGET GXF::${component})
- # Assume SHARED, though technically UNKNOWN since we don't enforce .so
- add_library(GXF::${component} SHARED IMPORTED)
-
- endif()
-
- ##############################################################################
- # TODO: config/patching/install should not be in this file, only target import
-
- # Set the internal location to the binary directory
- get_filename_component(gxf_component_filename "${GXF_${component}_LIBRARY}" NAME)
- set(gxf_component_build_dir "${CMAKE_BINARY_DIR}/${HOLOSCAN_INSTALL_LIB_DIR}")
- set(gxf_component_build_path "${gxf_component_build_dir}/${gxf_component_filename}")
-
- # Copy the GXF library to the build folder
- # Needed for permissions to run patchelf for RUNPATH
- file(COPY "${GXF_${component}_LIBRARY}"
- DESTINATION "${gxf_component_build_dir}"
- FILE_PERMISSIONS OWNER_READ OWNER_WRITE GROUP_READ WORLD_READ
- )
-
- # Patch RUNPATH
- list(APPEND _GXF_LIB_RPATH "\$ORIGIN" "\$ORIGIN/gxf_extensions")
- if(CMAKE_SYSTEM_PROCESSOR STREQUAL aarch64 OR CMAKE_SYSTEM_PROCESSOR STREQUAL arm64)
- # The video encoder/decoder libraries need an extra path for aarch64
- # To find the right l4t libraries
- if(component STREQUAL videoencoderio
- OR component STREQUAL videoencoder
- OR component STREQUAL videodecoderio
- OR component STREQUAL videodecoder)
- list(APPEND _GXF_LIB_RPATH "/usr/lib/aarch64-linux-gnu/tegra/")
- endif()
- endif()
- list(JOIN _GXF_LIB_RPATH ":" _GXF_LIB_RPATH)
- execute_process(COMMAND
- "${PATCHELF_EXECUTABLE}"
- "--set-rpath"
- "${_GXF_LIB_RPATH}"
- "${gxf_component_build_path}"
- )
- unset(_GXF_LIB_RPATH)
-
- # Install the GXF library
- # Use the build location since RUNPATH has changed
- install(FILES "${gxf_component_build_path}"
- DESTINATION "${HOLOSCAN_INSTALL_LIB_DIR}"
- COMPONENT "holoscan-gxf_libs"
- )
- ##############################################################################
-
- # Include dirs
- list(APPEND GXF_${component}_INCLUDE_DIRS ${GXF_common_INCLUDE_DIR})
- if(GXF_${component}_INCLUDE_DIR)
- list(APPEND GXF_${component}_INCLUDE_DIRS ${GXF_${component}_INCLUDE_DIR})
- endif()
-
- set_target_properties(GXF::${component} PROPERTIES
- IMPORTED_LOCATION "${gxf_component_build_path}"
-
- # Without this, make and ninja's behavior is different.
- # GXF's shared libraries doesn't seem to set soname.
- # (https://gitlab.kitware.com/cmake/cmake/-/issues/22307)
- IMPORTED_NO_SONAME ON
- INTERFACE_INCLUDE_DIRECTORIES "${GXF_${component}_INCLUDE_DIRS}"
- )
-
- set(GXF_${component}_FOUND TRUE)
- else()
- set(GXF_${component}_FOUND FALSE)
- endif()
-endforeach()
-
-unset(_GXF_EXTENSIONS)
-unset(_GXF_LIBRARIES)
-
-# Find version
-if(GXF_core_INCLUDE_DIR)
- # Note: "kGxfCoreVersion \"(.*)\"$" does not work with a simple string
- # REGEX (doesn't stop and EOL, neither $ nor \n), so we first extract
- # the line with file(STRINGS), then the version with string(REGEX)
- file(STRINGS "${GXF_core_INCLUDE_DIR}/gxf/core/gxf.h" _GXF_VERSION_LINE
- REGEX "kGxfCoreVersion"
- )
- string(REGEX MATCH "kGxfCoreVersion \"(.*)\"" _ ${_GXF_VERSION_LINE})
- set(GXF_VERSION ${CMAKE_MATCH_1})
- unset(_GXF_VERSION_LINE)
-endif()
-
-# GXE
-find_program(GXF_gxe_PATH
- NAMES gxe
- PATH_SUFFIXES
- "${_internal_GXF_recipe}/gxe"
- "${_public_GXF_recipe}/gxe"
-)
-
-if(GXF_gxe_PATH)
- if(NOT TARGET GXF::gxe)
- add_executable(GXF::gxe IMPORTED)
- endif()
-
- ##############################################################################
- # TODO: config/patching/install should not be in this file, only target import
-
- # Set the internal location to the binary directory
- # This is need for RPATH to work
- set(GXE_BUILD_DIR "${CMAKE_BINARY_DIR}/bin")
- set(GXE_BUILD_PATH "${GXE_BUILD_DIR}/gxe")
-
- # Copy gxe binary to the build folder
- # Needed for permissions to run patchelf for RUNPATH
- file(COPY "${GXF_gxe_PATH}"
- DESTINATION "${GXE_BUILD_DIR}"
- FILE_PERMISSIONS OWNER_READ OWNER_WRITE OWNER_EXECUTE GROUP_READ GROUP_EXECUTE WORLD_READ WORLD_EXECUTE
- )
-
- # Patch RUNPATH so that it can find libgxf_core.so library.
- execute_process(COMMAND
- "${PATCHELF_EXECUTABLE}"
- "--set-rpath"
- "\$ORIGIN:\$ORIGIN/../${HOLOSCAN_INSTALL_LIB_DIR}"
- "${GXE_BUILD_PATH}"
- )
-
- # Install GXE
- # Use the build location since RUNPATH has changed
- install(FILES "${GXE_BUILD_PATH}"
- DESTINATION "bin"
- PERMISSIONS OWNER_READ OWNER_WRITE OWNER_EXECUTE GROUP_READ GROUP_EXECUTE WORLD_READ WORLD_EXECUTE
- COMPONENT "holoscan-gxf_bins"
- )
- ##############################################################################
-
- set_target_properties(GXF::gxe PROPERTIES
- IMPORTED_LOCATION "${GXE_BUILD_PATH}"
- )
-
- set(GXF_gxe_FOUND TRUE)
-else()
- set(GXF_gxe_FOUND FALSE)
-endif()
-
-# Generate GXF_FOUND
-include(FindPackageHandleStandardArgs)
-
-if(GXF_FIND_COMPONENTS)
- # ... based on requested components/libraries
- find_package_handle_standard_args(GXF
- FOUND_VAR GXF_FOUND
- VERSION_VAR GXF_VERSION
- HANDLE_COMPONENTS # Looks for GXF_${component}_FOUND
- )
-else()
- # ... need all the libraries
- find_package_handle_standard_args(GXF
- FOUND_VAR GXF_FOUND
- VERSION_VAR GXF_VERSION
- REQUIRED_VARS ${GXF_INCLUDE_DIR_VARS} ${GXF_LIBRARY_VARS} GXF_gxe_PATH
- )
-endif()
-
-# Clean
-unset(_internal_GXF_recipe)
-unset(_public_GXF_recipe)
diff --git a/cmake/modules/GenerateGXEApp.cmake b/cmake/modules/GenerateGXEApp.cmake
index 1a62edbc..1c0e1328 100644
--- a/cmake/modules/GenerateGXEApp.cmake
+++ b/cmake/modules/GenerateGXEApp.cmake
@@ -1,4 +1,4 @@
-# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+# SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -32,6 +32,10 @@ function(_get_lib_file_path location target)
if(imported)
get_target_property(lib ${target} IMPORTED_LOCATION)
+ if(NOT lib)
+ string(TOUPPER "${CMAKE_BUILD_TYPE}" _BUILD_TYPE)
+ get_target_property(lib ${target} IMPORTED_LOCATION_${_BUILD_TYPE})
+ endif()
else()
    set(lib $<TARGET_FILE:${target}>)
endif()
@@ -170,6 +174,10 @@ function(create_gxe_application)
COMPONENT "${GXE_APP_COMPONENT}"
)
+ # GXE apps are expected to be run from the top of the build/install directory
+ # to find `gxe_executable`.
+ file(RELATIVE_PATH gxe_executable ${CMAKE_BINARY_DIR} ${HOLOSCAN_GXE_LOCATION})
+
# Create bash script
set(GXE_APP_EXECUTABLE "${CMAKE_CURRENT_BINARY_DIR}/${GXE_APP_NAME}")
file(GENERATE
@@ -177,7 +185,7 @@ function(create_gxe_application)
CONTENT
"#!/usr/bin/env bash
export LD_LIBRARY_PATH=$(pwd):$(pwd)/${HOLOSCAN_INSTALL_LIB_DIR}:\${LD_LIBRARY_PATH}
-./bin/gxe --app ${GXE_APP_YAML_RELATIVE_PATH} --manifest ${GXE_APP_MANIFEST_RELATIVE_PATH} $@
+${gxe_executable} --app ${GXE_APP_YAML_RELATIVE_PATH} --manifest ${GXE_APP_MANIFEST_RELATIVE_PATH} $@
"
FILE_PERMISSIONS OWNER_READ OWNER_WRITE OWNER_EXECUTE GROUP_READ GROUP_EXECUTE WORLD_READ WORLD_EXECUTE
)
diff --git a/cmake/modules/cpack/NOTICE.txt b/cmake/modules/cpack/NOTICE.txt
index b3fba64c..38355b6b 100644
--- a/cmake/modules/cpack/NOTICE.txt
+++ b/cmake/modules/cpack/NOTICE.txt
@@ -69,6 +69,10 @@ jq (https://github.com/jqlang/jq)
Copyright (C) 2012 Stephen Dolan authors.
Licensed under MIT (https://github.com/jqlang/jq/raw/master/COPYING)
+magic_enum (https://github.com/Neargye/magic_enum)
+Copyright (c) 2019 - 2023 Daniil Goncharov
+Licensed under MIT (https://github.com/Neargye/magic_enum/blob/v0.9.3/LICENSE)
+
AJA NTV2 SDK (https://github.com/ibstewart/ntv2)
Copyright (c) 2021 AJA Video Systems
Licensed under MIT (https://github.com/ibstewart/ntv2/blob/holoscan-v0.2.0/LICENSE)
diff --git a/cmake/setup_dependencies.cmake b/cmake/setup_dependencies.cmake
index d3658c51..7ccb6131 100644
--- a/cmake/setup_dependencies.cmake
+++ b/cmake/setup_dependencies.cmake
@@ -1,4 +1,4 @@
-# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+# SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -39,14 +39,15 @@ superbuild_depend(expected_rapids)
superbuild_depend(fmt_rapids)
superbuild_depend(glfw_rapids)
superbuild_depend(grpc)
-superbuild_depend(gxf)
superbuild_depend(hwloc)
+superbuild_depend(magic_enum)
superbuild_depend(spdlog_rapids)
superbuild_depend(tensorrt)
superbuild_depend(threads)
superbuild_depend(ucx)
superbuild_depend(v4l2)
superbuild_depend(yaml-cpp_rapids)
+superbuild_depend(gxf)
# Testing dependencies
if(HOLOSCAN_BUILD_TESTS)
diff --git a/docs/Dockerfile b/docs/Dockerfile
index 55d68f34..da82c18c 100644
--- a/docs/Dockerfile
+++ b/docs/Dockerfile
@@ -28,7 +28,7 @@ ARG DEBIAN_FRONTEND=noninteractive
# Deadsnakes repo is added then package index files are updated
# software-properties-common - Needed to use `add-apt-repository`
# build-essential - Adds GNU/g++ compiler collection
-# curl - Used to download Doxygen and Node.js
+# curl - Used to download Doxygen
# python3-pip - Needed for pip installs
RUN apt-get update \
&& apt-get install -y --no-install-recommends \
@@ -36,6 +36,7 @@ RUN apt-get update \
curl \
python3-pip \
gnupg \
+ graphviz \
&& rm -rf /var/lib/apt/lists/*
# Install up to date doxygen for better C++ parsing with a few cases like
@@ -48,21 +49,6 @@ RUN cd /tmp/ \
&& cd .. \
&& rm -rf doxygen*
-# Install Node.js 20 using DEB packages
-# https://github.com/nodesource/distributions#debian-and-ubuntu-based-distributions
-RUN mkdir -p /etc/apt/keyrings \
- && curl -fsSL https://deb.nodesource.com/gpgkey/nodesource-repo.gpg.key \
- | gpg --dearmor -o /etc/apt/keyrings/nodesource.gpg \
- && echo "deb [signed-by=/etc/apt/keyrings/nodesource.gpg] https://deb.nodesource.com/node_20.x nodistro main" \
- | tee /etc/apt/sources.list.d/nodesource.list \
- && apt-get update \
- && apt-get install -y --no-install-recommends \
- nodejs \
- && rm -rf /var/lib/apt/lists/*
-
-# Install Mermaid CLI
-RUN npm install --production -g @mermaid-js/mermaid-cli
-
# Install Python dependencies
# Pinned additional sphinxcontrib-* extensions to specific versions to avoid following error:
# "The sphinxcontrib.* extension used by this project needs at least Sphinx v5.0;"
@@ -88,37 +74,14 @@ RUN python3 -m pip install --no-cache-dir \
FROM $BASE_IMAGE as docs-html
# Copy over installed dependencies from docs-base
-COPY --from=docs-base /usr/bin/curl /usr/bin/curl
-COPY --from=docs-base /usr/bin/node /usr/bin/node
-COPY --from=docs-base /usr/bin/npx /usr/bin/npx
-COPY --from=docs-base /usr/bin/npm /usr/bin/npm
-COPY --from=docs-base /usr/bin/mmdc /usr/bin/mmdc
-COPY --from=docs-base /usr/bin/corepack /usr/bin/corepack
+COPY --from=docs-base /usr/bin/dot /usr/bin/dot
COPY --from=docs-base /usr/local/bin/doxygen /usr/local/bin/doxygen
COPY --from=docs-base /usr/local/lib/python3.10/dist-packages /usr/local/lib/python3.10/dist-packages
COPY --from=docs-base /usr/local/bin/sphinx-build /usr/local/bin/sphinx-build
COPY --from=docs-base /usr/local/bin/sphinx-autobuild /usr/local/bin/sphinx-autobuild
-COPY --from=docs-base /usr/lib/node_modules /usr/lib/node_modules
COPY --from=docs-base /usr/lib/python3/dist-packages /usr/lib/python3/dist-packages
-COPY --from=docs-base /usr/include/node /usr/include/node
-COPY --from=docs-base /usr/share/doc/nodejs /usr/share/doc/nodejs
-
-# Below logic needed due to copy cmds being used instead of local installs
-# ------------------------------------------------------------------------
-# Update npm executable to invoke the npm module's CLI script and pass the
-# current Node.js process as an argument
-RUN echo '#!/usr/bin/env node' > /usr/bin/npm \
- && echo "require('/usr/lib/node_modules/npm/lib/cli.js')(process)" >> /usr/bin/npm && \
- # Update mmdc (mermaid-cli) executable to set the current process title to 'mmdc',
- # invoke the mermaid module's CLI function, and print any error if one is encountered
- echo '#!/usr/bin/env node' > /usr/bin/mmdc \
- && echo "process.title = 'mmdc'; \
- import('/usr/lib/node_modules/@mermaid-js/mermaid-cli/src/index.js') \
- .then(({ cli, error }) => cli().catch((exception) => error(exception instanceof Error ? exception.stack : exception))) \
- .catch((err) => { \
- console.error(err); \
- process.exit(1); \
- });" >> /usr/bin/mmdc
+COPY --from=docs-base /usr/share/fonts /usr/share/fonts
+COPY --from=docs-base /lib/x86_64-linux-gnu/ /lib/x86_64-linux-gnu/
#################################################################
# PDF docs image that installs pdf/latex dependencies to the base
@@ -137,6 +100,3 @@ RUN apt-get update \
libgbm1 \
libasound2 \
&& rm -rf /var/lib/apt/lists/*
-
-# Add configuration for puppeteer
-RUN echo '{"args": ["--no-sandbox"]}' >> /usr/bin/puppeteer-config.json
diff --git a/docs/aja_setup.rst b/docs/aja_setup.rst
index 35ff5112..c16184f7 100644
--- a/docs/aja_setup.rst
+++ b/docs/aja_setup.rst
@@ -14,14 +14,14 @@ applications as sysmem to GPU copies are eliminated from the processing
pipeline.
The following instructions describe the steps required to setup and use an AJA
-device with RDMA support on Holoscan Developer Kits. Note that the AJA NTV2
+device with RDMA support on NVIDIA Developer Kits with a PCIe slot. Note that the AJA NTV2
SDK support for Holoscan includes all of the `AJA Developer Products`_,
though the following instructions have only been verified for the `Corvid 44
12G BNC`_ and `KONA HDMI`_ products, specifically.
.. Note::
- The addition of an AJA device to a Holoscan Developer Kit is
+ The addition of an AJA device to an NVIDIA Developer Kit is
optional. The Holoscan SDK has elements that can be run with an AJA device
with the additional features mentioned above, but those elements can also
run without AJA. For example, there are Holoscan sample applications that have
diff --git a/docs/api/holoscan_cpp_api.md b/docs/api/holoscan_cpp_api.md
index 17cdfe64..86f08ee7 100644
--- a/docs/api/holoscan_cpp_api.md
+++ b/docs/api/holoscan_cpp_api.md
@@ -149,6 +149,7 @@
- {ref}`exhale_class_classholoscan_1_1Receiver`
- {ref}`exhale_class_classholoscan_1_1SerializationBuffer`
- {ref}`exhale_class_classholoscan_1_1StdComponentSerializer`
+- {ref}`exhale_class_classholoscan_1_1StdEntitySerializer`
- {ref}`exhale_class_classholoscan_1_1Transmitter`
- {ref}`exhale_class_classholoscan_1_1UcxComponentSerializer`
- {ref}`exhale_class_classholoscan_1_1UcxEntitySerializer`
@@ -156,10 +157,10 @@
- {ref}`exhale_class_classholoscan_1_1UcxSerializationBuffer`
- {ref}`exhale_class_classholoscan_1_1UcxTransmitter`
- {ref}`exhale_class_classholoscan_1_1UnboundedAllocator`
-- {ref}`exhale_class_classholoscan_1_1VideoStreamSerializer`
#### Schedulers
+- {ref}`exhale_class_classholoscan_1_1EventBasedScheduler`
- {ref}`exhale_class_classholoscan_1_1GreedyScheduler`
- {ref}`exhale_class_classholoscan_1_1MultiThreadScheduler`
diff --git a/docs/cli/cli.md b/docs/cli/cli.md
index 43f3766d..6304bd5f 100644
--- a/docs/cli/cli.md
+++ b/docs/cli/cli.md
@@ -6,7 +6,7 @@
## Synopsis
-`holoscan` [](#cli-help) [](#cli-log-level) {[package](./package.md),[run](./run.md),[version](./version.md)}
+`holoscan` [](#cli-help) [](#cli-log-level) {[package](./package.md),[run](./run.md),[version](./version.md),[nics](./nics.md)}
## Positional Arguments
diff --git a/docs/cli/package.md b/docs/cli/package.md
index 5faea3dd..07d626ff 100755
--- a/docs/cli/package.md
+++ b/docs/cli/package.md
@@ -6,7 +6,7 @@
## Synopsis
-`holoscan package` [](#cli-help) [](#cli-log-level) [](#cli-package-config) [](#cli-package-docs) [](#cli-package-models) [](#cli-package-platform) [](#cli-package-platform-config) [](#cli-package-timeout) [](#cli-package-version) [](#cli-package-base-image) [](#cli-package-build-image) [](#cli-package-build-cache) [](#cli-package-cmake-args) [](#cli-package-no-cache) [](#cli-package-sdk) [](#cli-package-sdk-version) [](#cli-package-holoscan-sdk-file) [](#cli-package-monai-deploy-sdk-file) [](#cli-package-output) [](#cli-package-tag) [](#cli-package-username) [](#cli-package-uid) [](#cli-package-gid) [](#cli-package-application)
+`holoscan package` [](#cli-help) [](#cli-log-level) [](#cli-package-config) [](#cli-package-docs) [](#cli-package-models) [](#cli-package-platform) [](#cli-package-platform-config) [](#cli-package-timeout) [](#cli-package-version) [](#cli-package-base-image) [](#cli-package-build-image) [](#cli-package-build-cache) [](#cli-package-cmake-args) [](#cli-package-no-cache) [](#cli-package-sdk) [](#cli-package-source) [](#cli-package-sdk-version) [](#cli-package-holoscan-sdk-file) [](#cli-package-monai-deploy-sdk-file) [](#cli-package-output) [](#cli-package-tag) [](#cli-package-username) [](#cli-package-uid) [](#cli-package-gid) [](#cli-package-application)
## Examples
@@ -120,7 +120,6 @@ A comma-separated list of platform types to generate. Each platform value specif
`PLATFORM` must be one of: `clara-agx-devkit`, `igx-orin-devkit`, `jetson-agx-orin-devkit`, `x64-workstation`.
-- `clara-agx-devkit`: Clara AGX DevKit
- `igx-orin-devkit`: IGX Orin DevKit
- `jetson-agx-orin-devkit`: Orin AGX DevKit
- `x64-workstation`: systems with a [x86-64](https://en.wikipedia.org/wiki/X86-64) processor(s)
@@ -168,7 +167,7 @@ Optionally specifies the build container image for building C++ applications. It
### `[--build-cache BUILD_CACHE]`
-Specifies a directory path for storing Docker cache. Defaults to `~/.holoscan_build_cache`.
+Specifies a directory path for storing Docker cache. Defaults to `~/.holoscan_build_cache`. If the `$HOME` directory is inaccessible, the CLI uses the `/tmp` directory.
(#cli-package-cmake-args)=
@@ -194,6 +193,14 @@ Do not use cache when building image.
SDK for building the application: Holoscan or MONAI-Deploy. `SDK` must be one of: holoscan, monai-deploy.
+(#cli-package-source)=
+
+### `[--source URL|FILE]`
+
+Overrides the artifact manifest source with a securely hosted file or a file on the local file system.
+
+For example: `https://my.domain.com/my-file.json`
+
(#cli-package-sdk-version)=
### `[--sdk-version SDK_VERSION]`
@@ -259,3 +266,9 @@ It is recommended to use the default value of `1000` when packaging an applicati
### `[--gid GID]`
Optional *group ID* to be associated with the user created with `--username` with default of `1000`.
+
+(#cli-package-source)=
+
+### `[--source PATH|URL]`
+
+Overrides the default manifest file source. This value can be a local file path or an HTTPS URL.
\ No newline at end of file
diff --git a/docs/cli/run.md b/docs/cli/run.md
index 24b2a010..5233bff6 100755
--- a/docs/cli/run.md
+++ b/docs/cli/run.md
@@ -21,7 +21,7 @@ spec:
## Synopsis
-`holoscan run` [](#cli-help) [](#cli-log-level) [](#cli-run-address) [](#cli-run-driver) [](#cli-run-input) [](#cli-run-output) [](#cli-run-fragments) [](#cli-run-worker) [](#cli-run-worker-address) [](#cli-run-config) [](#cli-run-network) [](#cli-run-nic) [](#cli-run-use-all-nics) [](#cli-run-render) [](#cli-run-quiet) [](#cli-run-shm-size)[](#cli-run-terminal) [](#cli-run-device) [](#cli-run-uid) [](#cli-run-gid)[](#cli-run-image-tag)
+`holoscan run` [](#cli-help) [](#cli-log-level) [](#cli-run-address) [](#cli-run-driver) [](#cli-run-input) [](#cli-run-output) [](#cli-run-fragments) [](#cli-run-worker) [](#cli-run-worker-address) [](#cli-run-config) [](#cli-run-network) [](#cli-run-nic) [](#cli-run-use-all-nics) [](#cli-run-render) [](#cli-run-quiet) [](#cli-run-shm-size)[](#cli-run-terminal) [](#cli-run-device) [](#cli-run-gpu) [](#cli-run-uid) [](#cli-run-gid)[](#cli-run-image-tag)
## Examples
@@ -75,6 +75,11 @@ When specified, a directory mount is set up to the value defined in the environm
Ensure that the directory on the host is accessible by the current user or the user specified with [--uid](#cli-run-uid).
:::
+:::{note}
+Use the host system path when running applications inside Docker (DooD).
+:::
+
+
(#cli-run-output)=
### `[--output|-o OUTPUT]`
@@ -212,6 +217,24 @@ holoscan run --render --device ajantv0 video1 -- my-application-image:1.0
:::
+(#cli-run-gpu)=
+
+### `[--gpu]`
+
+Overrides the value of the `NVIDIA_VISIBLE_DEVICES` environment variable. The default is the value
+defined in the [package manifest file](./hap.md#package-manifest), or `all` if undefined.
+
+Refer to the [GPU Enumeration](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/docker-specialized.html#gpu-enumeration)
+page for all available options.
+
+:::{note}
+The default value is `nvidia.com/igpu=0` when running a HAP built for iGPU on a system with both iGPU and dGPU.
+:::
+
+:::{note}
+A single integer value translates to the device index, not the number of GPUs.
+:::
+
(#cli-run-uid)=
### `[--uid UID]`
diff --git a/docs/components/schedulers.md b/docs/components/schedulers.md
index fc8adad1..1a8b43e5 100644
--- a/docs/components/schedulers.md
+++ b/docs/components/schedulers.md
@@ -5,9 +5,10 @@ The Scheduler component is a critical part of the system responsible for governi
The Holoscan SDK offers multiple schedulers that can cater to various use cases. These schedulers are:
1. [Greedy Scheduler](#greedy-scheduler): This basic single-threaded scheduler tests conditions in a greedy manner. It is suitable for simple use cases and provides predictable execution. However, it may not be ideal for large-scale applications as it may incur significant overhead in condition execution.
-2. [MultiThread Scheduler](#multithreadscheduler): The MultiThread Scheduler is designed to handle complex execution patterns in large-scale applications. This scheduler consists of a dispatcher thread that monitors the status of each operator and dispatches it to a thread pool of worker threads responsible for executing them. Once execution is complete, worker threads enqueue the operator back on the dispatch queue. The MultiThread Scheduler offers superior performance and scalability over the Greedy Scheduler.
+2. [MultiThread Scheduler](#multithreadscheduler): The multithread scheduler is designed to handle complex execution patterns in large-scale applications. This scheduler consists of a dispatcher thread that monitors the status of each operator and dispatches it to a thread pool of worker threads responsible for executing them. Once execution is complete, worker threads enqueue the operator back on the dispatch queue. The multithread scheduler offers superior performance and scalability over the greedy scheduler.
+3. [Event-Based Scheduler](#eventbasedscheduler): The event-based scheduler is also a multi-thread scheduler, but as the name indicates it is event-based rather than polling based. Instead of having a thread that constantly polls for the execution readiness of each operator, it instead waits for an event to be received which indicates that an operator is ready to execute. The event-based scheduler will have a lower latency than using the multi-thread scheduler with a long polling interval (`check_recession_period_ms`), but without the high CPU usage seen for a multi-thread scheduler with a very short polling interval.
-It is essential to select the appropriate scheduler for the use case at hand to ensure optimal performance and efficient resource utilization.
+It is essential to select the appropriate scheduler for the use case at hand to ensure optimal performance and efficient resource utilization. Since most parameters of the schedulers overlap, it is easy to switch between them to test which may be most performant for a given application.
:::{note}
Detailed APIs can be found here: {ref}`C++ `/{py:mod}`Python `).
@@ -24,9 +25,16 @@ The greedy scheduler has a few parameters that the user can configure.
- This scheduler also has a boolean parameter, `stop_on_deadlock` that controls whether the application will terminate if a deadlock occurs. A deadlock occurs when all operators are in a `WAIT` state, but there is no periodic condition pending to break out of this state. This parameter is `true` by default.
- When setting the `stop_on_deadlock_timeout` parameter, the scheduler will wait this amount of time (in ms) before determining that it is in deadlock and should stop. It will reset if a job comes in during the wait. A negative value means no stop on deadlock. This parameter only applies when `stop_on_deadlock=true`.
-## MultiThreadScheduler
+## Multithread Scheduler
-The multithread scheduler has several parameters that the user can configure. These are a superset of the parameters available for the GreedyScheduler (described in the section above). Only the parameters unique to the multithread scheduler are described here.
+The multithread scheduler has several parameters that the user can configure. These are a superset of the parameters available for the `GreedyScheduler` (described in the section above). Only the parameters unique to the multithread scheduler are described here. The multi-thread scheduler uses a dedicated thread to poll the status of operators and schedule any that are ready to execute. This will lead to high CPU usage by this polling thread when `check_recession_period_ms` is close to 0.
- The number of worker threads used by the scheduler can be set via `worker_thread_number`, which defaults to `1`. This should be set based on a consideration of both the workflow and the available hardware. For example, the topology of the computation graph will determine how many operators it may be possible to run in parallel. Some operators may potentially launch multiple threads internally, so some amount of performance profiling may be required to determine optimal parameters for a given workflow.
- The value of `check_recession_period_ms` controls how long the scheduler will sleep before checking a given condition again. In other words, this is the polling interval for operators that are in a `WAIT` state. The default value for this parameter is `5` ms.
+
+
+## Event-Based Scheduler
+
+The event-based scheduler is also a multi-thread scheduler, but it is event-based rather than polling based. As such, there is no `check_recession_period_ms` parameter, and this scheduler will not have the high CPU usage that can occur when polling at a short interval. Instead, the scheduler only wakes up when an event is received indicating that an operator is ready to execute. The parameters of this scheduler are a superset of the parameters available for the `GreedyScheduler` (described above). Only the parameters unique to the event-based scheduler are described here.
+
+- The number of worker threads used by the scheduler can be set via `worker_thread_number`, which defaults to `1`. This should be set based on a consideration of both the workflow and the available hardware. For example, the topology of the computation graph will determine how many operators it may be possible to run in parallel. Some operators may potentially launch multiple threads internally, so some amount of performance profiling may be required to determine optimal parameters for a given workflow.
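+
+As a minimal sketch of how an application might opt into this scheduler (assuming the Python API exposes it as `holoscan.schedulers.EventBasedScheduler` with the parameter names described above):
+
+```python
+from holoscan.core import Application
+from holoscan.schedulers import EventBasedScheduler
+
+
+class MyApp(Application):
+    def compose(self):
+        # Operators and add_flow() calls would go here.
+        pass
+
+
+def main():
+    app = MyApp()
+    # Replace the default greedy scheduler with the event-based scheduler.
+    app.scheduler(EventBasedScheduler(
+        app,
+        name="event-based-scheduler",
+        worker_thread_number=4,  # size the worker pool to the graph's available parallelism
+        stop_on_deadlock=True,
+    ))
+    app.run()
+
+
+if __name__ == "__main__":
+    main()
+```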
diff --git a/docs/conf.py b/docs/conf.py
index 9e976a2f..c684f39c 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -34,7 +34,7 @@
# -- Project information -----------------------------------------------------
project = "Holoscan SDK"
-copyright = "2022-2023, NVIDIA" # noqa: A001
+copyright = "2022-2024, NVIDIA" # noqa: A001
author = "NVIDIA"
# The full version, including alpha/beta/rc tags
@@ -57,13 +57,14 @@
"exhale",
"myst_parser",
"numpydoc",
+ "sphinx.ext.graphviz",
"sphinx.ext.autosectionlabel", # https://docs.readthedocs.io/en/stable/guides/cross-referencing-with-sphinx.html#automatically-label-sections # noqa: E501
"sphinx.ext.autodoc", # needed for Python API docs (provides automodule)
"sphinx.ext.autosummary", # needed for Python API docs (provides autosummary)
- "sphinxcontrib.mermaid", # https://sphinxcontrib-mermaid-demo.readthedocs.io/en/latest/
"sphinx_design", # https://sphinx-design.readthedocs.io/en/latest/
]
+
# Make sure the target is unique
autosectionlabel_prefix_document = True
# Set the maximum depth of the section label
@@ -80,6 +81,9 @@
# Enabling to be consistent with prior documentation
numfig = True
+# -- Options for graphviz output ---------------------------------------------
+graphviz_output_format = "svg"
+
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
@@ -211,21 +215,6 @@
autosummary_generate = False
-# -- Options for sphinx-mermaid ------------------------------------------------
-# Reference: https://github.com/mgaitan/sphinxcontrib-mermaid/issues/44
-# (we eventually use mmdc's PDF generation capability instead)
-
-mermaid_version = "9.2.2"
-
-# Use PDF diagram for latex PDF generation (with cropping the generated PDF)
-if tags.has("noapi"): # noqa: F821
- mermaid_cmd = "/usr/bin/mmdc"
- # 'docker/docs-builder/Dockerfile' creates 'puppeteer-config.json'
- mermaid_params = ["-p", "/usr/bin/puppeteer-config.json"]
- # pdfcrop is installed in the docker image (by 'texlive-extra-utils')
- mermaid_pdfcrop = "pdfcrop"
- mermaid_output_format = "pdf"
-
# -- Options for Sphinx --------------------------------------------------------
# Tell sphinx what the primary language being documented is.
diff --git a/docs/deployment_stack.md b/docs/deployment_stack.md
index f6a93149..335d52f8 100644
--- a/docs/deployment_stack.md
+++ b/docs/deployment_stack.md
@@ -3,14 +3,14 @@
NVIDIA Holoscan accelerates deployment of production-quality applications
by providing a set of **OpenEmbedded** build recipes and reference configurations
that can be leveraged to customize and build Holoscan-compatible Linux4Tegra (L4T)
-embedded board support packages (BSP) on Holoscan Developer Kits.
+embedded board support packages (BSP) on the NVIDIA IGX Developer Kits.
[Holoscan OpenEmbedded/Yocto recipes](https://github.com/nvidia-holoscan/meta-tegra-holoscan) add
-OpenEmbedded recipes and sample build configurations to build BSPs for NVIDIA Holoscan Developer Kits
+OpenEmbedded recipes and sample build configurations to build BSPs for the NVIDIA IGX Developer Kit
that feature support for discrete GPUs (dGPU), AJA Video Systems I/O boards, and the Holoscan
SDK.
-These BSPs are built on a developer's host machine and are then flashed onto a Holoscan Developer Kit
-using provided scripts.
+These BSPs are built on a developer's host machine and are then flashed onto the NVIDIA IGX
+Developer Kit using provided scripts.
There are two options available to set up a build environment and start
building Holoscan BSP images using OpenEmbedded.
diff --git a/docs/emergent_setup.md b/docs/emergent_setup.md
index 02cfac6b..585e27c9 100644
--- a/docs/emergent_setup.md
+++ b/docs/emergent_setup.md
@@ -1,18 +1,12 @@
(emergent-vision-tech)=
# Emergent Vision Technologies (EVT)
-Thanks to a collaboration with [Emergent Vision Technologies](https://emergentvisiontec.com/), the Holoscan SDK now supports EVT high-speed cameras.
-
-:::{note}
-The addition of an EVT camera to the Holoscan Developer Kits
-is optional. The Holoscan SDK has an application that can be run with the EVT camera,
-but there are other applications that can be run without EVT camera.
-:::
+Thanks to a collaboration with [Emergent Vision Technologies](https://emergentvisiontec.com/), the Holoscan SDK now supports EVT high-speed cameras on NVIDIA Developer Kits equipped with a [ConnectX NIC](https://www.nvidia.com/en-us/networking/ethernet-adapters/) using the [Rivermax SDK](https://developer.nvidia.com/networking/rivermax).
(emergent-hw-install)=
## Installing EVT Hardware
-The EVT cameras can be connected to Holoscan Developer Kits though [Mellanox ConnectX SmartNIC](https://www.nvidia.com/en-us/networking/ethernet-adapters/), with the most simple connection method being a single cable between a camera and the devkit.
+The EVT cameras can be connected to NVIDIA Developer Kits through a [Mellanox ConnectX SmartNIC](https://www.nvidia.com/en-us/networking/ethernet-adapters/), with the simplest connection method being a single cable between a camera and the devkit.
For 25 GigE cameras that use the SFP28 interface, this can be achieved by using [SFP28](https://store.nvidia.com/en-us/networking/store/product/MCP2M00-A001E30N/NVIDIAMCP2M00A001E30NDACCableEthernet25GbESFP281m/) cable with [QSFP28 to SFP28 adaptor](https://store.nvidia.com/en-us/networking/store/product/MAM1Q00A-QSA28/NVIDIAMAM1Q00AQSA28CableAdapter100Gbsto25GbsQSFP28toSFP28/).
:::{note}
diff --git a/docs/examples/byom.md b/docs/examples/byom.md
index b6fc7072..0f3ac04f 100644
--- a/docs/examples/byom.md
+++ b/docs/examples/byom.md
@@ -61,15 +61,14 @@ You can also follow along using your own dataset by adjusting the operator param
The video stream replayer supports reading video files that are encoded as gxf entities. These files are provided with the ultrasound dataset as the `ultrasound_256x256.gxf_entities` and `ultrasound_256x256.gxf_index` files.
:::{note}
-To use your own video data, you can use the `convert_video_to_gxf_entities.py` script from [here](https://github.com/nvidia-holoscan/holoscan-sdk/tree/main/scripts#convert_video_to_gxf_entitiespy) to encode your video.
-
+To use your own video data, you can use the `convert_video_to_gxf_entities.py` script (installed in `/opt/nvidia/holoscan/bin` or [on GitHub](https://github.com/nvidia-holoscan/holoscan-sdk/tree/main/scripts#convert_video_to_gxf_entitiespy)) to encode your video. Note that, when using this script, the metadata in the generated GXF tensor files will indicate that the data should be copied to the GPU on read.
:::
### Input model
Currently, the inference operators in Holoscan are able to load [ONNX models](https://onnx.ai/), or [TensorRT](https://developer.nvidia.com/tensorrt) engine files built for the GPU architecture on which you will be running the model. TensorRT engines are automatically generated from ONNX by the operators when the applications run.
-If you are converting your model from PyTorch to ONNX, chances are your input is NCHW and will need to be converted to NHWC. We provide an example [transformation script on Github](https://github.com/nvidia-holoscan/holoscan-sdk/tree/main/scripts#graph_surgeonpy) named `graph_surgeon.py`. You may need to modify the dimensions as needed before modifying your model.
+If you are converting your model from PyTorch to ONNX, chances are your input is NCHW and will need to be converted to NHWC. We provide an example transformation script named `graph_surgeon.py`, installed in `/opt/nvidia/holoscan/bin` or available [on GitHub](https://github.com/nvidia-holoscan/holoscan-sdk/tree/main/scripts#graph_surgeonpy). You may need to adjust the dimensions in the script as needed for your model.
:::{tip}
To get a better understanding of your model, and if this step is necessary, websites such as [netron.app](https://netron.app/) can be used.
diff --git a/docs/examples/ping_custom_op.md b/docs/examples/ping_custom_op.md
index 083bc40f..f750d55c 100644
--- a/docs/examples/ping_custom_op.md
+++ b/docs/examples/ping_custom_op.md
@@ -19,28 +19,18 @@ The example source code and run instructions can be found in the [examples](http
Here is the diagram of the operators and workflow used in this example.
-```{mermaid}
+```{digraph} custom_op
:align: center
:caption: A linear workflow with new custom operator
-%%{init: {"theme": "base", "themeVariables": { "fontSize": "16px"}} }%%
+ rankdir="LR"
+ node [shape=record];
-classDiagram
- direction LR
-
- PingTxOp --|> PingMxOp : out...in
- PingMxOp --|> PingRxOp : out...in
-
- class PingTxOp {
- out(out) int
- }
- class PingMxOp {
- [in]in: int
- out(out) int
- }
- class PingRxOp {
- [in]in: int
- }
+ tx [label="PingTxOp| |out(out) : int"];
+ mx [label="PingMxOp| [in]in : int | out(out) : int "];
+ rx [label="PingRxOp| [in]in : int | "];
+ tx -> mx [label="out...in"]
+ mx -> rx [label="out...in"]
```
Compared to the previous example, we are adding a new **PingMxOp** operator between the
diff --git a/docs/examples/ping_multi_port.md b/docs/examples/ping_multi_port.md
index 977c6f99..df56538c 100644
--- a/docs/examples/ping_multi_port.md
+++ b/docs/examples/ping_multi_port.md
@@ -17,33 +17,20 @@ The example source code and run instructions can be found in the [examples](http
Here is the diagram of the operators and workflow used in this example.
-```{mermaid}
+```{digraph} ping_multi_port
:align: center
:caption: A workflow with multiple inputs and outputs
-%%{init: {"theme": "base", "themeVariables": { "fontSize": "16px"}} }%%
-
-classDiagram
- direction LR
-
- PingTxOp --|> PingMxOp : out1...in1
- PingTxOp --|> PingMxOp : out2...in2
- PingMxOp --|> PingRxOp : out1...receivers
- PingMxOp --|> PingRxOp : out2...receivers
-
- class PingTxOp {
- out1(out) ValueData
- out2(out) ValueData
- }
- class PingMxOp {
- [in]in1 : ValueData
- [in]in2 : ValueData
- out1(out) ValueData
- out2(out) ValueData
- }
- class PingRxOp {
- [in]receivers : ValueData
- }
+ rankdir="LR"
+ node [shape=record];
+
+ tx [label="PingTxOp| |out1(out) : ValueData\nout2(out) : ValueData"];
+ mx [label="PingMxOp|[in]in1 : ValueData\n[in]in2 : ValueData|out1(out) : ValueData\nout2(out) : ValueData"];
+ rx [label="PingRxOp|[in]receivers : ValueData | "];
+ tx -> mx [label="out1...in1"]
+ tx -> mx [label="out2...in2"]
+ mx -> rx [label="out1...receivers"]
+ mx -> rx [label="out2...receivers"]
```
In this example, `PingTxOp` sends a stream of odd integers to the `out1` port, and even integers to the `out2` port. `PingMxOp` receives these values using `in1` and `in2` ports, multiplies them by a constant factor, then forwards them to a single port - `receivers` - on `PingRxOp`.
@@ -494,6 +481,6 @@ Running the application should give you output similar to the following in your
```
:::{note}
-Depending on your log level you may see more or fewer messages. The output above was generated using the default value of `INFO`.
+Depending on your log level you may see more or fewer messages. The output above was generated using the default value of `INFO`.
Refer to the {ref}`Logging` section for more details on how to set the log level.
:::
diff --git a/docs/examples/ping_simple.md b/docs/examples/ping_simple.md
index 8546ba9d..a8134b75 100644
--- a/docs/examples/ping_simple.md
+++ b/docs/examples/ping_simple.md
@@ -16,23 +16,17 @@ The example source code and run instructions can be found in the [examples](http
Here is an example workflow involving two operators that are connected linearly.
-```{mermaid}
+```{digraph} ping_simple
:align: center
:caption: A linear workflow
-%%{init: {"theme": "base", "themeVariables": { "fontSize": "16px"}} }%%
+ rankdir="LR"
-classDiagram
- direction LR
+ node [shape=record];
- PingTxOp --|> PingRxOp : out...in
-
- class PingTxOp {
- out(out) int
- }
- class PingRxOp {
- [in]in : int
- }
+ tx [label="PingTxOp| |out(out) : int"];
+ rx [label="PingRxOp|[in]in : int | "];
+ tx -> rx [label="out...in"]
```
In this example, the source operator **PingTxOp** produces integers from 1 to 10 and passes them to the sink operator **PingRxOp**, which prints the integers to standard output.
diff --git a/docs/examples/video_replayer.md b/docs/examples/video_replayer.md
index f1011042..46945749 100644
--- a/docs/examples/video_replayer.md
+++ b/docs/examples/video_replayer.md
@@ -19,23 +19,17 @@ The example source code and run instructions can be found in the [examples](http
Here is the diagram of the operators and workflow used in this example.
-```{mermaid}
+```{digraph} video_replayer
:align: center
:caption: Workflow to load and display video from a file
-%%{init: {"theme": "base", "themeVariables": { "fontSize": "16px"}} }%%
+ rankdir="LR"
+ node [shape=record];
-classDiagram
- direction LR
+ replayer [label="VideoStreamReplayerOp| |output(out) : Tensor"];
+ viz [label="HolovizOp|[in]receivers : Tensor | "];
- VideoStreamReplayerOp --|> HolovizOp : output...receivers
-
- class VideoStreamReplayerOp {
- output(out) Tensor
- }
- class HolovizOp {
- [in]receivers : Tensor
- }
+ replayer -> viz [label="output...receivers"]
```
We connect the "output" port of the replayer operator to the "receivers" port of the Holoviz
@@ -43,7 +37,7 @@ operator.
## Video Stream Replayer Operator
-The built-in video stream replayer operator can be used to replay a video stream that has been encoded as gxf entities. You can use the [convert_video_to_gxf_entities.py](https://github.com/nvidia-holoscan/holoscan-sdk/tree/main/scripts#convert_video_to_gxf_entitiespy) script to encode a video file as gxf entities for use by this operator.
+The built-in video stream replayer operator can be used to replay a video stream that has been encoded as gxf entities. You can use the `convert_video_to_gxf_entities.py` script (installed in `/opt/nvidia/holoscan/bin` or available [on GitHub](https://github.com/nvidia-holoscan/holoscan-sdk/tree/main/scripts#convert_video_to_gxf_entitiespy)) to encode a video file as gxf entities for use by this operator.
This operator processes the encoded file sequentially and supports realtime, faster than realtime, or slower than realtime playback of prerecorded data. The input data can optionally be repeated to loop forever or only for a specified count. For more details, see {ref}`operators-video-stream-replayer`.
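For orientation, a typical conversion pipes raw RGB frames from `ffmpeg` into the script. A minimal sketch, where the flag names are assumptions based on the script's README (check `--help` on your installation) and the input file and frame geometry are placeholders:

```bash
# Hypothetical conversion of my_video.mp4 into GXF entity files for VideoStreamReplayerOp.
# Flag names are assumptions based on the script's README; verify with --help.
ffmpeg -i my_video.mp4 -pix_fmt rgb24 -f rawvideo pipe:1 | \
  python3 /opt/nvidia/holoscan/bin/convert_video_to_gxf_entities.py \
    --width 854 --height 480 --channels 3 --framerate 30
```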
diff --git a/docs/examples/video_replayer_distributed.md b/docs/examples/video_replayer_distributed.md
index 4c0cff47..f77062f8 100644
--- a/docs/examples/video_replayer_distributed.md
+++ b/docs/examples/video_replayer_distributed.md
@@ -18,23 +18,17 @@ The example source code and run instructions can be found in the [examples](http
Here is the diagram of the operators and workflow used in this example.
-```{mermaid}
+```{digraph} video_replayer_distributed
:align: center
:caption: Workflow to load and display video from a file
-%%{init: {"theme": "base", "themeVariables": { "fontSize": "16px"}} }%%
+ rankdir="LR"
+ node [shape=record];
-classDiagram
- direction LR
+ replayer [label="VideoStreamReplayerOp| |output(out) : Tensor"];
+ viz [label="HolovizOp|[in]receivers : Tensor | "];
- VideoStreamReplayerOp --|> HolovizOp : output...receivers
-
- class VideoStreamReplayerOp {
- output(out) Tensor
- }
- class HolovizOp {
- [in]receivers : Tensor
- }
+ replayer -> viz [label="output...receivers"]
```
This is the same workflow as the [single fragment video replayer](./video_replayer.md), each operator is assigned to a separate fragment and there is now a network connection between the fragments.
diff --git a/docs/holoscan_create_app.md b/docs/holoscan_create_app.md
index 013fbd7a..61d8850e 100644
--- a/docs/holoscan_create_app.md
+++ b/docs/holoscan_create_app.md
@@ -86,21 +86,21 @@ It is also possible to instead launch the application asynchronously (i.e. non-b
`````{tab-set}
````{tab-item} C++
-This can be done simply by replacing the call to {cpp:func}`run()` with {cpp:func}`run_async()` which returns a `std::future`. Calling `future.wait()` will block until the application has finished running.
+This can be done simply by replacing the call to {cpp:func}`run()` with {cpp:func}`run_async()`, which returns a `std::future`. Calling `future.get()` will block until the application has finished running, and will throw an exception if a runtime error occurred during execution.
```{code-block} cpp
:emphasize-lines: 3-4
:name: holoscan-app-skeleton-cpp-async
int main() {
auto app = holoscan::make_application();
- future = app->run_async();
- future.wait();
+ auto future = app->run_async();
+ future.get();
return 0;
}
```
````
````{tab-item} Python
-This can be done simply by replacing the call to {py:func}`run()` with {py:func}`run_async()` which returns a Python `concurrent.futures.Future`. Calling `future.result()` will block until the application has finished running.
+This can be done simply by replacing the call to {py:func}`run()` with {py:func}`run_async()`, which returns a Python `concurrent.futures.Future`. Calling `future.result()` will block until the application has finished running, and will raise an exception if a runtime error occurred during execution.
```{code-block} python
:emphasize-lines: 3-4
:name: holoscan-app-skeleton-python-async
@@ -521,7 +521,7 @@ def compose(self):
````
:::{note}
-Python operators that wrap an underlying C++ operator currently do not accept resources as positional arguments. Instead one needs to call the {py:func}`add_arg()` method after the object has been constructed to add the resource.
+Python operators that wrap an underlying C++ operator currently do not accept resources as positional arguments. Instead one needs to call the {py:func}`add_arg()` method after the object has been constructed to add the resource.
:::
(configuring-app-scheduler)=
@@ -532,7 +532,7 @@ The [scheduler](./components/schedulers.md) controls how the application schedul
The default scheduler is a single-threaded [`GreedyScheduler`](./components/schedulers.md#greedy-scheduler). An application can be configured to use a different scheduler `Scheduler` ({cpp:class}`C++ `/{py:class}`Python `) or change the parameters from the default scheduler, using the `scheduler()` function ({cpp:func}`C++ `/{py:func}`Python `).
-For example, if an application needs to run multiple operators in parallel, a [`MultiThreadScheduler`](./components/schedulers.md#multithreadscheduler) can instead be used.
+For example, if an application needs to run multiple operators in parallel, the [`MultiThreadScheduler`](./components/schedulers.md#multithreadscheduler) or [`EventBasedScheduler`](./components/schedulers.md#eventbasedscheduler) can instead be used. The difference between the two is that the MultiThreadScheduler is based on actively polling operators to determine if they are ready to execute, while the EventBasedScheduler will instead wait for an event indicating that an operator is ready to execute.
The code snippet belows shows how to set and configure a non-default scheduler:
@@ -545,7 +545,7 @@ The code snippet belows shows how to set and configure a non-default scheduler:
:name: holoscan-config-scheduler-cpp
auto app = holoscan::make_application();
-auto scheduler = app->make_scheduler<holoscan::MultiThreadScheduler>(
+auto scheduler = app->make_scheduler<holoscan::EventBasedScheduler>(
"myscheduler",
Arg("worker_thread_number", 4),
Arg("stop_on_deadlock", true)
@@ -562,7 +562,7 @@ app->run();
:name: holoscan-config-scheduler-python
app = App()
-scheduler = holoscan.schedulers.MultiThreadScheduler(
+scheduler = holoscan.schedulers.EventBasedScheduler(
app,
name="myscheduler",
worker_thread_number=4,
@@ -600,17 +600,14 @@ instantiation and execution order of the operators.
The simplest form of a workflow would be a single operator.
-```{mermaid}
+```{digraph} myop
:align: center
:caption: A one-operator workflow
-%%{init: {"theme": "base", "themeVariables": { "fontSize": "16px"}} }%%
+ rankdir="LR"
+ node [shape=record];
-classDiagram
- direction LR
-
- class MyOp {
- }
+ myop [label="MyOp| | "];
```
The graph above shows an **Operator** ({cpp:class}`C++ `/{py:class}`Python `) (named `MyOp`) that has neither inputs nor output ports.
@@ -658,28 +655,18 @@ class App(Application):
Here is an example workflow where the operators are connected linearly:
-```{mermaid}
+```{digraph} linear_workflow
:align: center
:caption: A linear workflow
-%%{init: {"theme": "base", "themeVariables": { "fontSize": "16px"}} }%%
-
-classDiagram
- direction LR
+ rankdir="LR"
+ node [shape=record];
- SourceOp --|> ProcessOp : output...input
- ProcessOp --|> SinkOp : output...input
-
- class SourceOp {
- output(out) Tensor
- }
- class ProcessOp {
- [in]input : Tensor
- output(out) Tensor
- }
- class SinkOp {
- [in]input : Tensor
- }
+ sourceop [label="SourceOp| |output(out) : Tensor"];
+ processop [label="ProcessOp| [in]input : Tensor | output(out) : Tensor "];
+ sinkop [label="SinkOp| [in]input : Tensor | "];
+ sourceop -> processop [label="output...input"]
+ processop -> sinkop [label="output...input"]
```
In this example, **SourceOp** produces a message and passes it to **ProcessOp**. **ProcessOp** produces another message and passes it to **SinkOp**.
@@ -738,53 +725,27 @@ class App(Application):
You can design a complex workflow like below where some operators have multi-inputs and/or multi-outputs:
-```{mermaid}
+```{digraph} complex_workflow
:align: center
:caption: A complex workflow (multiple inputs and outputs)
-%%{init: {"theme": "base", "themeVariables": { "fontSize": "16px"}} }%%
-
-classDiagram
- direction TB
-
- Reader1 --|> Processor1 : image...{image1,image2}\nmetadata...metadata
- Reader2 --|> Processor2 : roi...roi
- Processor1 --|> Processor2 : image...image
- Processor2 --|> Processor3 : image...image
- Processor2 --|> Notifier : image...image
- Processor1 --|> Writer : image...image
- Processor3 --|> Writer : seg_image...seg_image
-
- class Reader1 {
- image(out)
- metadata(out)
- }
- class Reader2 {
- roi(out)
- }
- class Processor1 {
- [in]image1
- [in]image2
- [in]metadata
- image(out)
- }
- class Processor2 {
- [in]image
- [in]roi
- image(out)
- }
- class Processor3 {
- [in]image
- seg_image(out)
- }
- class Writer {
- [in]image
- [in]seg_image
- }
- class Notifier {
- [in]image
- }
-
+ node [shape=record];
+
+ reader1 [label="{Reader1| |image(out)\nmetadata(out)}"];
+ reader2 [label="{Reader2| |roi(out)}"];
+ processor1 [label="{Processor1|[in]image1\n[in]image2\n[in]metadata|image(out)}"];
+ processor2 [label="{Processor2|[in]image\n[in]roi|image(out)}"];
+ processor3 [label="{Processor3|[in]image|seg_image(out)}"];
+ writer [label="{Writer|[in]image\n[in]seg_image| }"];
+ notifier [label="{Notifier|[in]image| }"];
+
+ reader1->processor1 [label="image...{image1,image2}\nmetadata...metadata"]
+ reader2->processor2 [label="roi...roi"]
+ processor1->processor2 [label="image...image"]
+ processor1->writer [label="image...image"]
+ processor2->notifier [label="image...image"]
+ processor2->processor3 [label="image...image"]
+ processor3->writer [label="seg_image...seg_image"]
```
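For reference, each edge in this diagram maps to an `add_flow()` call with one or more port pairs; note in particular how a single output port (`image` on `Reader1`) can feed several input ports. A minimal Python sketch of the corresponding `compose()`, assuming hypothetical operator classes named after the diagram:

```python
from holoscan.core import Application


class ComplexApp(Application):
    def compose(self):
        # hypothetical operators named after the diagram above
        reader1 = Reader1(self, name="reader1")
        reader2 = Reader2(self, name="reader2")
        processor1 = Processor1(self, name="processor1")
        processor2 = Processor2(self, name="processor2")
        processor3 = Processor3(self, name="processor3")
        writer = Writer(self, name="writer")
        notifier = Notifier(self, name="notifier")

        # one output port may feed several input ports and several downstream operators
        self.add_flow(reader1, processor1,
                      {("image", "image1"), ("image", "image2"), ("metadata", "metadata")})
        self.add_flow(reader2, processor2, {("roi", "roi")})
        self.add_flow(processor1, processor2, {("image", "image")})
        self.add_flow(processor1, writer, {("image", "image")})
        self.add_flow(processor2, notifier, {("image", "image")})
        self.add_flow(processor2, processor3, {("image", "image")})
        self.add_flow(processor3, writer, {("seg_image", "seg_image")})
```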
diff --git a/docs/holoscan_create_distributed_app.md b/docs/holoscan_create_distributed_app.md
index f11e359f..2e698de0 100644
--- a/docs/holoscan_create_distributed_app.md
+++ b/docs/holoscan_create_distributed_app.md
@@ -238,7 +238,7 @@ You can set environment variables to modify the default actions of services and
- **HOLOSCAN_HEALTH_CHECK_PORT** : designates the port number on which the Health Checking Service is launched. It must be an integer value representing a valid port number. If unspecified, it defaults to `8777`.
-- **HOLOSCAN_DISTRIBUTED_APP_SCHEDULER** : controls which scheduler is used for distributed applications. It can be set to either `greedy` or `multithread`. If unspecified, the default scheduler is `multithread`.
+- **HOLOSCAN_DISTRIBUTED_APP_SCHEDULER** : controls which scheduler is used for distributed applications. It can be set to `greedy`, `multi_thread`, or `event_based`. `multithread` is also allowed as a synonym for `multi_thread` for backwards compatibility. If unspecified, the default scheduler is `multi_thread`.
- **HOLOSCAN_STOP_ON_DEADLOCK** : can be used in combination with `HOLOSCAN_DISTRIBUTED_APP_SCHEDULER` to control whether or not the application will automatically stop on deadlock. Values of "True", "1" or "ON" will be interpreted as true (enable stop on deadlock). It is true if unspecified. This environment variable is only used when `HOLOSCAN_DISTRIBUTED_APP_SCHEDULER` is explicitly set.
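For example, both variables can be exported before launching a distributed application to select the event-based scheduler and keep stop-on-deadlock enabled; the application name below is a placeholder:

```bash
# values taken from the descriptions above; "my_distributed_app" is a placeholder
export HOLOSCAN_DISTRIBUTED_APP_SCHEDULER=event_based
export HOLOSCAN_STOP_ON_DEADLOCK=true
./my_distributed_app   # launch the distributed application as usual
```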
@@ -294,6 +294,120 @@ A table of the types that have codecs pre-registered so that they can be seriali
| GXF-specific types | nvidia::gxf::TimeStamp, nvidia::gxf::EndOfStream |
+:::{warning}
+If an operator that transmits both CPU and GPU tensors is to be used in a distributed application, a single output port cannot carry a mix of GPU and CPU tensors; CPU and GPU tensor outputs should be placed on separate output ports. This is a limitation of the underlying UCX library used for zero-copy tensor serialization between operators.
+
+As a concrete example, assume an operator, `MyOperator`, with a single output port named "out" defined in its `setup` method. If the output port is only ever going to connect to other operators within a fragment, but never across fragments, then it is okay to have a `TensorMap` with a mixture of host and device arrays on that single port.
+
+`````{tab-set}
+````{tab-item} C++
+
+```cpp
+void MyOperator::setup(OperatorSpec& spec) {
+ spec.output("out");
+}
+
+void MyOperator::compute(InputContext& op_input, OutputContext& op_output,
+                         ExecutionContext& context) {
+
+ // omitted: some computation resulting in multiple holoscan::Tensors
+ // (two on CPU ("cpu_coords_tensor" and "cpu_metric_tensor") and one on device ("gpu_tensor")).
+
+ TensorMap out_message;
+
+ // insert all tensors in one TensorMap (mixing CPU and GPU tensors is okay when ports only connect within a Fragment)
+ out_message.insert({"coordinates", cpu_coords_tensor});
+ out_message.insert({"metrics", cpu_metric_tensor});
+ out_message.insert({"mask", gpu_tensor});
+
+ op_output.emit(out_message, "out");
+}
+
+```
+
+````
+````{tab-item} Python
+
+```python
+class MyOperator:
+
+ def setup(self, spec: OperatorSpec):
+ spec.output("out")
+
+
+ def compute(self, op_input, op_output, context):
+ # Omitted: assume some computation resulting in three holoscan.Tensor or tensor-like
+ # objects. Two on CPU ("cpu_coords_tensor" and "cpu_metric_tensor") and one on device
+ # ("gpu_tensor").
+
+ # mixing CPU and GPU tensors in a single dict is okay only for within-Fragment connections
+ op_output.emit(
+ dict(
+ coordinates=cpu_coords_tensor,
+ metrics=cpu_metric_tensor,
+ mask=gpu_tensor,
+ ),
+ "out"
+ )
+```
+````
+`````
+
+However, this mixing of CPU and GPU arrays on a single port will not work for distributed applications; instead, separate ports should be used if the operator needs to communicate across fragments.
+
+`````{tab-set}
+````{tab-item} C++
+
+```cpp
+void MyOperator::setup(OperatorSpec& spec) {
+ spec.output("out_host");
+ spec.output("out_device");
+}
+
+void MyOperator::compute(InputContext& op_input, OutputContext& op_output,
+                         ExecutionContext& context) {
+
+ // omitted: some computation resulting in holoscan::Tensors, two on CPU ("cpu_coordinates_tensor", "cpu_metrics_tensor") and one on device ("gpu_tensor").
+ TensorMap out_message_host;
+ TensorMap out_message_device;
+
+ // put all CPU tensors on one port
+ out_message_host.insert({"coordinates", cpu_coordinates_tensor});
+ out_message_host.insert({"metrics", cpu_metrics_tensor});
+ op_output.emit(out_message_host, "out_host");
+
+ // put all GPU tensors on another
+ out_message_device.insert({"mask", gpu_tensor});
+ op_output.emit(out_message_device, "out_device");
+}
+```
+
+````
+````{tab-item} Python
+
+```python
+class MyOperator:
+
+ def setup(self, spec: OperatorSpec):
+ spec.output("out_host")
+ spec.output("out_device")
+
+
+ def compute(self, op_input, op_output, context):
+ # Omitted: assume some computation resulting in three holoscan.Tensor or tensor-like
+ # objects. Two on CPU ("cpu_coords_tensor" and "cpu_metric_tensor") and one on device
+ # ("gpu_tensor").
+
+ # split CPU and GPU tensors across ports for compatibility with inter-fragment communication
+ op_output.emit(
+ dict(coordinates=cpu_coords_tensor, metrics=cpu_metric_tensor),
+ "out_host"
+ )
+ op_output.emit(dict(mask=gpu_tensor), "out_device")
+```
+
+````
+`````
+:::
+
+
### Python
For the Python API, any array-like object supporting the [DLPack](https://dmlc.github.io/dlpack/latest/) interface, [`__array_interface__`](https://numpy.org/doc/stable/reference/arrays.interface.html) or [`__cuda_array_interface__`](https://numba.readthedocs.io/en/stable/cuda/cuda_array_interface.html) will be transmitted using {py:class}`~holoscan.core.Tensor` serialization. This is done to avoid data copies for performance reasons. Objects of type `list[holoscan.HolovizOp.InputSpec]` will be sent using the underlying C++ serializer for `std::vector`. All other Python objects will be serialized to/from a `std::string` using the [cloudpickle](https://github.com/cloudpipe/cloudpickle) library.
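As a minimal sketch of what this means inside an operator's `compute()` method (the operator and port names below are hypothetical): array-like objects take the zero-copy tensor path, while other Python objects fall back to cloudpickle.

```python
import cupy as cp
from holoscan.core import Operator, OperatorSpec


class EmitExampleOp(Operator):
    """Hypothetical operator illustrating the two transmission paths described above."""

    def setup(self, spec: OperatorSpec):
        spec.output("image")     # will carry an array-like object
        spec.output("metadata")  # will carry an arbitrary Python object

    def compute(self, op_input, op_output, context):
        # a CuPy array exposes __cuda_array_interface__, so it is sent via Tensor serialization
        op_output.emit(cp.zeros((480, 640, 3), dtype=cp.uint8), "image")
        # a plain dict has no array interface, so it is serialized with cloudpickle
        op_output.emit({"frame_id": 1, "label": "demo"}, "metadata")
```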
diff --git a/docs/holoscan_create_operator.md b/docs/holoscan_create_operator.md
index 0d5c6598..f7a9f485 100644
--- a/docs/holoscan_create_operator.md
+++ b/docs/holoscan_create_operator.md
@@ -37,21 +37,20 @@ We will cover how to use {ref}`Conditions compute
- compute --> compute
- compute --> stop
+ start [label="start"]
+ compute [label="compute"]
+ stop [label="stop"]
+ start -> compute
+ compute -> compute
+ compute -> stop
```
We can override the default behavior of the operator by implementing the above methods. The following example shows how to implement a custom operator that overrides start, stop and compute methods.
@@ -689,7 +688,7 @@ components:
parameters:
allocator: allocator
- name: entity_serializer
- type: nvidia::holoscan::stream_playback::VideoStreamSerializer # inheriting from nvidia::gxf::EntitySerializer
+ type: nvidia::gxf::StdEntitySerializer
parameters:
component_serializers: [component_serializer]
- type: MyRecorder
@@ -738,7 +737,7 @@ void MyRecorderOp::initialize() {
// Set up prerequisite parameters before calling GXFOperator::initialize()
auto frag = fragment();
auto serializer =
- frag->make_resource<holoscan::VideoStreamSerializer>("serializer");
+ frag->make_resource<holoscan::StdEntitySerializer>("serializer");
add_arg(Arg("serializer") = serializer);
GXFOperator::initialize();
@@ -765,7 +764,7 @@ components:
parameters:
allocator: allocator
- name: entity_serializer
- type: nvidia::holoscan::stream_playback::VideoStreamSerializer # inheriting from nvidia::gxf::EntitySerializer
+ type: nvidia::gxf::StdEntitySerializer
parameters:
component_serializers: [component_serializer]
- type: MyRecorder
@@ -781,7 +780,7 @@ components:
```
:::{note}
-The Holoscan C++ API already provides the {cpp:class}`holoscan::VideoStreamSerializer` class which wraps the `nvidia::holoscan::stream_playback::VideoStreamSerializer` GXF component, used here as `serializer`.
+The Holoscan C++ API already provides the {cpp:class}`holoscan::StdEntitySerializer` class which wraps the `nvidia::gxf::StdEntitySerializer` GXF component, used here as `serializer`.
:::
#### Building your GXF operator
@@ -812,28 +811,19 @@ Supporting Tensor Interoperability
Consider the following example, where `GXFSendTensorOp` and `GXFReceiveTensorOp` are GXF operators, and where `ProcessTensorOp` is a C++ native operator:
-```{mermaid}
+```{digraph} interop
:align: center
:caption: The tensor interoperability between C++ native operator and GXF operator
-%%{init: {"theme": "base", "themeVariables": { "fontSize": "16px"}} }%%
-
-classDiagram
- direction LR
+ rankdir="LR"
+ node [shape=record];
- GXFSendTensorOp --|> ProcessTensorOp : signal...in
- ProcessTensorOp --|> GXFReceiveTensorOp : out...signal
+ source [label="GXFSendTensorOp| |signal(out) : Tensor"];
+ process [label="ProcessTensorOp| [in]in : TensorMap | out(out) : TensorMap "];
+ sink [label="GXFReceiveTensorOp| [in]signal : Tensor | "];
- class GXFSendTensorOp {
- signal(out) Tensor
- }
- class ProcessTensorOp {
- [in]in : TensorMap
- out(out) TensorMap
- }
- class GXFReceiveTensorOp {
- [in]signal : Tensor
- }
+ source->process [label="signal...in"]
+ process->sink [label="out...signal"]
```
The following code shows how to implement `ProcessTensorOp`'s `compute()` method as a C++ native operator communicating with GXF operators. Focus on the use of the `holoscan::gxf::Entity`:
@@ -919,21 +909,21 @@ We will cover how to use {py:mod}`Conditions ` in the {ref}
Typically, the `start()` and the `stop()` functions are only called once during the application's lifecycle. However, if the scheduling conditions are met again, the operator can be scheduled for execution, and the `start()` method will be called again.
-```{mermaid}
+```{digraph} lifecycle2
:align: center
:caption: The sequence of method calls in the lifecycle of a Holoscan Operator
-%%{init: {"theme": "base", "themeVariables": { "fontSize": "16px"}} }%%
+ rankdir="LR"
-flowchart LR
- start(start)
- stop(stop)
- compute(compute)
+ node [shape=Mrecord];
- start --> compute
- compute --> compute
- compute --> stop
+ start [label="start"]
+ compute [label="compute"]
+ stop [label="stop"]
+ start -> compute
+ compute -> compute
+ compute -> stop
```
We can override the default behavior of the operator by implementing the above methods. The following example shows how to implement a custom operator that overrides start, stop and compute methods.
@@ -1572,28 +1562,19 @@ As described in the {ref}`Interoperability between GXF and native C++ operators<
Consider the following example, where `VideoStreamReplayerOp` and `HolovizOp` are Python wrapped C++ operators, and where `ImageProcessingOp` is a Python native operator:
-```{mermaid}
+```{digraph} interop2
:align: center
:caption: The tensor interoperability between Python native operator and C++\-based Python GXF operator
-%%{init: {"theme": "base", "themeVariables": { "fontSize": "16px"}} }%%
+ rankdir="LR"
+ node [shape=record];
-classDiagram
- direction LR
+ video [label="VideoStreamReplayerOp| |output_tensor(out) : Tensor"];
+ processop [label="ImageProcessingOp| [in]input_tensor : dict[str,Tensor] | output_tensor(out) : dict[str,Tensor]"];
+ viz [label="HolovizOp| [in]receivers : Tensor | "];
- VideoStreamReplayerOp --|> ImageProcessingOp : output_tensor...input_tensor
- ImageProcessingOp --|> HolovizOp : output_tensor...receivers
-
- class VideoStreamReplayerOp {
- output_tensor(out) Tensor
- }
- class ImageProcessingOp {
- [in]input_tensor : dict[str,Tensor]
- output_tensor(out) dict[str,Tensor]
- }
- class HolovizOp {
- [in]receivers : Tensor
- }
+ video->processop [label="output_tensor...input_tensor"]
+ processop->viz [label="output_tensor...receivers"]
```
The following code shows how to implement `ImageProcessingOp`'s `compute()` method as a Python native operator communicating with C++ operators:
diff --git a/docs/holoscan_logging.md b/docs/holoscan_logging.md
index 0872ce42..0e5f6c26 100644
--- a/docs/holoscan_logging.md
+++ b/docs/holoscan_logging.md
@@ -69,11 +69,6 @@ Under the hood, Holoscan SDK uses GXF to execute the computation graph. By defau
For distributed applications, it can sometimes be useful to also enable additional logging for the UCX library used to transmit data between fragments. This can be done by setting the UCX environment variable `UCX_LOG_LEVEL` to one of: fatal, error, warn, info, debug, trace, req, data, async, func, poll. These have the behavior as described here: [UCX log levels](https://github.com/openucx/ucx/blob/v1.14.0/src/ucs/config/types.h#L16C1-L31).
:::
-#### Precedence
-If the `HOLOSCAN_LOG_LEVEL` environment variable is set, this setting is used to set the logging level. If the
-environment variable is not set, then the application setting is used if available. If not, the SDK default setting
-of INFO is used as the logging level.
-
## Logger Format
When a message is printed out, the default message format shows the message severity level, filename:linenumber, and
@@ -144,9 +139,15 @@ For more details on custom formatting and details of each flag, please see the [
Additionally, at runtime, the user can also set the `HOLOSCAN_LOG_FORMAT` environment variable to modify the logger format. The accepted string pattern is the same as the string pattern for
the `set_log_pattern()` api mentioned above.
-:::{note}
-If the `HOLOSCAN_LOG_FORMAT` environment variable is set, this setting is used to set the logger format. If the environment variable is not set, then the application setting is used if available. If not, the SDK default message format is used.
-:::
+#### Precedence of Logger Level and Logger Format
+
+The `HOLOSCAN_LOG_LEVEL` environment variable takes precedence and overrides the application settings, such as `Logger::set_log_level()` ({cpp:func}`C++ `/{py:func}`Python `).
+
+When `HOLOSCAN_LOG_LEVEL` is set, it determines the logging level. If this environment variable is unset, the application settings are used if they are available. Otherwise, the SDK's default logging level of INFO is applied.
+
+Similarly, the `HOLOSCAN_LOG_FORMAT` environment variable takes precedence and overrides the application settings, such as `Logger::set_log_pattern()` ({cpp:func}`C++ `/{py:func}`Python `).
+
+When `HOLOSCAN_LOG_FORMAT` is set, it determines the logging format. If this environment variable is unset, the application settings are used if they are available. Otherwise, the SDK's default logging format, which depends on the current log level (`FULL` format for the `DEBUG` and `TRACE` log levels, `DEFAULT` format for other log levels), is applied.
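As a quick illustration of this precedence, exporting the environment variables before launching overrides whatever the application configures in code; the pattern string below is just an example of an spdlog-style pattern accepted by `set_log_pattern()`, and `my_app.py` is a placeholder.

```bash
# overrides Logger::set_log_level() / set_log_pattern() calls made inside the application
export HOLOSCAN_LOG_LEVEL=DEBUG
export HOLOSCAN_LOG_FORMAT="[%H:%M:%S.%e] [%l] %v"   # example spdlog-style pattern
python3 my_app.py
```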
## Calling the Logger in Your Application
diff --git a/docs/holoscan_operators_extensions.md b/docs/holoscan_operators_extensions.md
index 2734e1ba..350829a8 100644
--- a/docs/holoscan_operators_extensions.md
+++ b/docs/holoscan_operators_extensions.md
@@ -47,9 +47,7 @@ ___
## Extensions
The Holoscan SDK also includes some GXF extensions with GXF codelets, which are typically wrapped as operators, or present for legacy reasons. In addition to the core GXF extensions (std, cuda, serialization, multimedia) listed [here](gxf/doc/index.md), the Holoscan SDK includes the following GXF extensions:
-- [bayer_demosaic](#bayer-demosaic)
- [gxf_holoscan_wrapper](#gxf-holoscan-wrapper)
-- [stream_playback](#stream-playback)
- [ucx_holoscan](#ucx-holoscan)
### GXF Holoscan Wrapper
@@ -58,16 +56,6 @@ The `gxf_holoscan_wrapper` extension includes the `holoscan::gxf::OperatorWrappe
Learn more about it in the [Using Holoscan Operators in GXF Applications](gxf/gxf_wrap_holoscan_op.md) section.
-### Stream Playback
-
-The `stream_playback` extension includes the `nvidia::holoscan::stream_playback::VideoStreamSerializer` entity serializer to/from a Tensor Object.
-This extension does not include any codelets: reading and writing video stream (gxf entity files) from the disk was implemented as native operators with `VideoStreamRecorderOp` and `VideoStreamReplayerOp`, though they leverage the `VideoStreamSerializer` from this extension.
-
-:::{note}
-The `VideoStreamSerializer` codelet is based on the `nvidia::gxf::StdEntitySerializer` with the addition of a `repeat` feature.
-(If the `repeat` parameter is `true` and the frame count is out of the maximum frame index, unnecessary warning messages are printed with `nvidia::gxf::StdEntitySerializer`.)
-:::
-
(ucx-holoscan)=
### UCX (Holoscan)
@@ -81,4 +69,4 @@ ___
### HoloHub
-Visit the HoloHub repository to find a collection of additional Holoscan operators and extensions.
+Visit the [HoloHub repository](https://github.com/nvidia-holoscan/holohub) to find a collection of additional Holoscan operators and extensions.
diff --git a/docs/holoscan_packager.md b/docs/holoscan_packager.md
index a48741ee..23ca55f1 100644
--- a/docs/holoscan_packager.md
+++ b/docs/holoscan_packager.md
@@ -52,34 +52,43 @@ Ensure the following are installed in the environment where you want to run the
The Holoscan CLI is installed as part of the Holoscan SDK and can be called with the following instructions depending on your installation:
-**If installed as a python wheel**
+
+`````{tab-set}
+````{tab-item} Python Wheel
- In a virtual environment: the `holoscan` CLI should already be in the PATH
-- Globally: ensure that `$HOME/.local/bin` is added to your `PATH`. Run the following command make it available across sessions:
+- System python: ensure that `$HOME/.local/bin` is added to your `PATH`. If using bash, the following command will make it persist across sessions:
```bash
echo 'export PATH=$HOME/.local/bin:$PATH' >> ~/.bashrc
```
-**If installed as a debian package**
+````
+````{tab-item} Debian Package
-Ensure that `/opt/nvidia/holoscan/` is added to your `PATH`. Run the following command make it available across sessions:
+Ensure that `/opt/nvidia/holoscan/` is added to your `PATH`. If using bash, the following command will make it persist across sessions:
```bash
echo 'alias holoscan=/opt/nvidia/holoscan/bin/holoscan' >> ~/.bashrc
```
-**If built or installed from source (local only)**
+````
+````{tab-item} From source
+
+If building the SDK from source and starting the build container with `run launch`, the `holoscan` CLI should already be in the PATH.
-Ensure that `${BUILD_OR_INSTALL_DIR}/bin` is added to your `PATH`. Run the following command make it available across sessions:
+If building bare-metal (advanced), ensure that `<BUILD_OR_INSTALL_DIR>/bin` is added to your `PATH`. If using bash, the following command will make it persist across sessions:
```bash
-echo 'alias holoscan=${BUILD_OR_INSTALL_DIR}/bin/holoscan' >> ~/.bashrc
+echo 'alias holoscan=<BUILD_OR_INSTALL_DIR>/bin/holoscan' >> ~/.bashrc
```
-:::{warning}
-The Holoscan CLI is not available inside the [NGC Container](https://catalog.ngc.nvidia.com/orgs/nvidia/teams/clara-holoscan/containers/holoscan) nor the development container (from source).
-:::
+````
+
+````{tab-item} NGC Container
+The NGC container already has the CLI installed; no additional steps are required.
+````
+`````
## Package an application
diff --git a/docs/latency_tool.rst b/docs/latency_tool.rst
index bf5c72f4..793cd02a 100644
--- a/docs/latency_tool.rst
+++ b/docs/latency_tool.rst
@@ -3,13 +3,13 @@
Video Pipeline Latency Tool
===========================
-The Holoscan Developer Kits excel as a high-performance computing platform
+The NVIDIA Developer Kits excel as a high-performance computing platform
by combining high-bandwidth video I/O components and the compute capabilities
of an NVIDIA GPU to meet the needs of the most demanding video processing and
inference applications.
For many video processing applications located at the edge--especially
-those designed to augment medical instruments and aid live medical
+those designed to augment medical instruments and aid live medical
procedures--minimizing the latency added between image capture and
display, often referred to as the end-to-end latency, is of the utmost
importance.
@@ -22,7 +22,7 @@ capture and display is incorporated as this usually involves external capture
hardware (e.g. cameras and other sensors) and displays.
In order to establish a baseline measurement of the minimal end-to-end latency
-that can be achieved with the Holoscan Developer Kits and various video I/O
+that can be achieved with the NVIDIA Developer Kits and various video I/O
hardware and software components, the Holoscan SDK includes a sample
latency measurement tool.
@@ -32,7 +32,7 @@ Requirements
Hardware
^^^^^^^^
-The latency measurement tool requires the use of a Holoscan Developer Kit in
+The latency measurement tool requires the use of an NVIDIA Developer Kit in
dGPU mode, and operates by having an output component generate a sequence of
known video frames that are then transferred back to an input component using a
physical loopback cable.
@@ -203,7 +203,7 @@ GPU To Onboard HDMI Capture Card
In this configuration, a DisplayPort to HDMI cable is connected from the GPU
to the onboard HDMI capture card. This configuration supports the
:ref:`OpenGL ` and :ref:`GStreamer ` producers, and
-the :ref:`V4L2 ` and :ref:`GStreamer ` consumers.
+the :ref:`V4L2 ` and :ref:`GStreamer ` consumers.
.. figure:: images/latency_setup_gpu_to_onboard_hdmi.jpg
:align: center
@@ -273,7 +273,7 @@ back to an input **consumer** component using a physical loopback cable.
Timestamps are compared throughout the life of the frame to measure the overall
latency that the frame sees during this process, and these results are
summarized when all of the frames have been received and the measurement
-completes. See `Producers`_, `Consumers`_, and `Example Configurations`_ for
+completes. See `Producers`_, `Consumers`_, and `Example Configurations`_ for
more details.
Frame Measurements
@@ -678,7 +678,7 @@ V4L2 (Onboard HDMI Capture Card)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
This consumer (:code:`v4l2`) uses the V4L2 API directly in order to capture
-frames using the HDMI capture card that is onboard the Holoscan Developer Kits.
+frames using the HDMI capture card that is onboard some of the NVIDIA Developer Kits.
V4L2 Consumer Notes:
diff --git a/docs/overview.md b/docs/overview.md
index 14675e6e..540451dc 100644
--- a/docs/overview.md
+++ b/docs/overview.md
@@ -36,7 +36,7 @@ Packaging and deploying applications is a complex problem that can require large
As highlighted in the relevant technologies section, the soul of the Holoscan project is to achieve peak performance by leveraging hardware and software developed at NVIDIA or provided by third-parties. To validate this, Holoscan provides performance tools to help users and developers track their application performance. They currently include:
-- a {ref}`Video Pipeline Latency Measurement Tool ` to measure and estimate the total end-to-end latency of a video streaming application including the video capture, processing, and output using various hardware and software components that are supported by the Holoscan Developer Kits.
+- a {ref}`Video Pipeline Latency Measurement Tool ` to measure and estimate the total end-to-end latency of a video streaming application including the video capture, processing, and output using various hardware and software components that are supported by the NVIDIA Developer Kits.
- the [Data Flow Tracking](./flow_tracking.md) feature to profile your application and analyze the data flow between operators in its graph.
8. **Documentation**
diff --git a/docs/relevant_technologies.md b/docs/relevant_technologies.md
index e0306c76..168ada47 100644
--- a/docs/relevant_technologies.md
+++ b/docs/relevant_technologies.md
@@ -13,7 +13,7 @@ The Holoscan SDK relies on multiple core technologies to achieve low latency and
(gpudirect_rdma)=
## Rivermax and GPUDirect RDMA
-The Holoscan Developer Kits can be used along with the [NVIDIA Rivermax SDK](https://developer.nvidia.com/networking/rivermax) to provide an extremely efficient network connection using the onboard [ConnectX](https://www.nvidia.com/en-us/networking/ethernet-adapters/) network adapter that is further optimized for GPU workloads by using [GPUDirect](https://developer.nvidia.com/gpudirect) for RDMA. This technology avoids unnecessary memory copies and CPU overhead by copying data directly to or from pinned GPU memory, and supports both the integrated GPU or the discrete GPU.
+The NVIDIA Developer Kits equipped with a [ConnectX network adapter](https://www.nvidia.com/en-us/networking/ethernet-adapters/) can be used along with the [NVIDIA Rivermax SDK](https://developer.nvidia.com/networking/rivermax) to provide an extremely efficient network connection that is further optimized for GPU workloads by using [GPUDirect](https://developer.nvidia.com/gpudirect) for RDMA. This technology avoids unnecessary memory copies and CPU overhead by copying data directly to or from pinned GPU memory, and supports both the integrated and the discrete GPU.
:::{note}
NVIDIA is also committed to supporting hardware vendors enable RDMA within their own drivers, an example of which is provided by the {ref}`aja_video_systems` as part of a partnership with
@@ -38,7 +38,7 @@ GXF will be mentioned in many places across this user guide, including a {ref}`d
(tensorrt)=
## TensorRT Optimized Inference
-[NVIDIA TensorRT](https://developer.nvidia.com/tensorrt) is a deep learning inference framework based on CUDA that provided the highest optimizations to run on NVIDIA GPUs, including the Holoscan Developer Kits.
+[NVIDIA TensorRT](https://developer.nvidia.com/tensorrt) is a deep learning inference framework based on CUDA that provides highly optimized inference on NVIDIA GPUs, including the NVIDIA Developer Kits.
The {ref}`inference module` leverages TensorRT among other backends, and provides the ability to execute multiple inferences in parallel.
diff --git a/docs/sdk_installation.md b/docs/sdk_installation.md
index 19639950..7205b595 100644
--- a/docs/sdk_installation.md
+++ b/docs/sdk_installation.md
@@ -11,14 +11,14 @@ An alternative for the [IGX Orin Developer Kit](https://www.nvidia.com/en-us/edg
## Prerequisites
`````{tab-set}
-````{tab-item} Holoscan Developer Kits (aarch64)
+````{tab-item} NVIDIA Developer Kits
Set up your developer kit:
Developer Kit | User Guide | OS | GPU Mode
------------- | ---------- | --- | ---
[NVIDIA IGX Orin][igx] | [Guide][igx-guide] | [IGX Software][igx-sw] 1.0 DP | iGPU **or*** dGPU
-[NVIDIA Jetson AGX Orin and Orin Nano][jetson-orin] | [Guide][jetson-guide] | [JetPack][jp] 6.0 | iGPU
+[NVIDIA Jetson AGX Orin and Orin Nano][jetson-orin] | [Guide][jetson-guide] | [JetPack][jp] 6.0 | iGPU
[NVIDIA Clara AGX][clara-agx]
_Only supporting the NGC container_ | [Guide][clara-guide] | [HoloPack][sdkm] 1.2 | iGPU **or*** dGPU
[clara-agx]: https://www.nvidia.com/en-gb/clara/intelligent-medical-instruments
@@ -35,14 +35,33 @@ Developer Kit | User Guide | OS | GPU Mode
_* iGPU and dGPU can be used concurrently on a single developer kit in dGPU mode. See [details here](./use_igpu_with_dgpu.md)._
````
-````{tab-item} x86_64
-
-You'll need the following to use the Holoscan SDK on x86_64:
-- OS: Ubuntu 22.04 (GLIBC >= 2.35)
-- NVIDIA discrete GPU (dGPU)
- - Ampere or above recommended for best performance
- - [Quadro/NVIDIA RTX](https://www.nvidia.com/en-gb/design-visualization/desktop-graphics/) necessary for RDMA support
- - Tested with [NVIDIA Quadro RTX 6000](https://www.nvidia.com/content/dam/en-zz/Solutions/design-visualization/quadro-product-literature/quadro-rtx-6000-us-nvidia-704093-r4-web.pdf) and [NVIDIA RTX A6000](https://www.nvidia.com/en-us/design-visualization/rtx-a6000/)
+````{tab-item} NVIDIA SuperChips
+
+This version of the Holoscan SDK was tested on the Grace-Hopper SuperChip (GH200) with Ubuntu 22.04. Follow setup instructions [here](https://docs.nvidia.com/grace-ubuntu-install-guide.pdf).
+
+:::{attention}
+Display is not supported on SBSA/superchips. You can, however, do headless rendering with [HoloViz](./visualization.md#holoviz-operator), for example.
+:::
+
+````
+````{tab-item} x86_64 Workstations
+
+Supported x86_64 distributions:
+
+OS | NGC Container | Debian/RPM package | Python wheel | Build from source
+-- | ------------- | -------------- | ------------ | -----------------
+**Ubuntu 22.04** | Yes | Yes | Yes | Yes
+**RHEL 9.x** | Yes | No | No | No¹
+**Other Linux distros** | No² | No | No³ | No¹
+
+¹ Not formally tested or supported, but expected to work if building bare metal with the adequate dependencies.
+² Not formally tested or supported, but expected to work if [supported by the NVIDIA container-toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/supported-platforms.html).
+³ Not formally tested or supported, but expected to work if the glibc version of the distribution is 2.35 or above.
+
+NVIDIA discrete GPU (dGPU) requirements:
+- Ampere or above recommended for best performance
+- [Quadro/NVIDIA RTX](https://www.nvidia.com/en-gb/design-visualization/desktop-graphics/) necessary for GPUDirect RDMA support
+- Tested with [NVIDIA Quadro RTX 6000](https://www.nvidia.com/content/dam/en-zz/Solutions/design-visualization/quadro-product-literature/quadro-rtx-6000-us-nvidia-704093-r4-web.pdf) and [NVIDIA RTX A6000](https://www.nvidia.com/en-us/design-visualization/rtx-a6000/)
- [NVIDIA dGPU drivers](https://docs.nvidia.com/datacenter/tesla/tesla-installation-notes): 535 or above
````
@@ -60,7 +79,7 @@ We provide multiple ways to install and run the Holoscan SDK:
`````{tab-set}
````{tab-item} NGC Container
-- **dGPU** (x86_64, IGX Orin dGPU, Clara AGX dGPU)
+- **dGPU** (x86_64, IGX Orin dGPU, Clara AGX dGPU, GH200)
```bash
docker pull nvcr.io/nvidia/clara-holoscan/holoscan:v1.0.3-dgpu
```
@@ -74,6 +93,7 @@ See details and usage instructions on [NGC][container].
- **IGX Orin**: Ensure the [compute stack is pre-installed](https://docs.nvidia.com/igx-orin/user-guide/latest/base-os.html#installing-the-compute-stack).
- **Jetson**: Install the latest [CUDA keyring package](https://docs.nvidia.com/cuda/cuda-installation-guide-linux/#network-repo-installation-for-ubuntu) for `ubuntu2204/arm64`.
+- **GH200**: Install the latest [CUDA keyring package](https://docs.nvidia.com/cuda/cuda-installation-guide-linux/#network-repo-installation-for-ubuntu) for `ubuntu2204/sbsa`.
- **x86_64**: Install the latest [CUDA keyring package](https://docs.nvidia.com/cuda/cuda-installation-guide-linux/#network-repo-installation-for-ubuntu) for `ubuntu2204/x86_64`.
Then, install the holoscan SDK:
@@ -97,7 +117,7 @@ pip install holoscan
See details and troubleshooting on [PyPI][pypi].
:::{note}
-For x86_64, ensure that the CUDA Runtime is installed, whether through [the CUDA Toolkit debian installation](https://developer.nvidia.com/cuda-12-2-2-download-archive?target_os=Linux&target_arch=x86_64&Distribution=Ubuntu&target_version=22.04) or with `python3 -m pip install nvidia-cuda-runtime-cu12`.
+For x86_64, ensure that the [CUDA Runtime is installed](https://developer.nvidia.com/cuda-12-2-2-download-archive?target_os=Linux&target_arch=x86_64&Distribution=Ubuntu&target_version=22.04).
:::
````
@@ -108,16 +128,14 @@ For x86_64, ensure that the CUDA Runtime is installed, whether through [the CUDA
### Not sure what to choose?
-- The [**Holoscan container image on NGC**][container] it the safest way to ensure all the dependencies are present with the expected versions (including Torch and ONNX Runtime). It is the simplest way to run the embedded examples, while still allowing you to create your own C++ and Python Holoscan application on top of it. These benefits come at a cost:
+- The [**Holoscan container image on NGC**][container] is the safest way to ensure all the dependencies are present with the expected versions (including Torch and ONNX Runtime), and should work on most Linux distributions. It is the simplest way to run the embedded examples, while still allowing you to create your own C++ and Python Holoscan application on top of it. These benefits come at a cost:
- large image size from the numerous (some of them optional) dependencies. If you need a lean runtime image, see {ref}`section below`.
- standard inconvenience that exist when using Docker, such as more complex run instructions for proper configuration.
- - supporting the CLI require more work than the other solutions at this time.
-- If you are confident in your ability to manage dependencies on your own in your host environment, the **Holoscan Debian package** should provide all the capabilities needed to use the Holoscan SDK.
-- If you are not interested in the C++ API but just need to work in Python, or want to use a different version than Python 3.10, you can use the [**Holoscan python wheels**][pypi] on PyPI. While they are the easiest solution to install the SDK, it might require the most work to setup your environment with extra dependencies based on your needs.
+- If you are confident in your ability to manage dependencies on your own in your host environment, the **Holoscan Debian package** should provide all the capabilities needed to use the Holoscan SDK, assuming you are on Ubuntu 22.04.
+- If you are not interested in the C++ API but just need to work in Python, or want to use a different version than Python 3.10, you can use the [**Holoscan python wheels**][pypi] on PyPI. While they are the easiest solution to install the SDK, they might require the most work to set up your environment with extra dependencies based on your needs. Finally, they are only formally supported on Ubuntu 22.04, though they should support other Linux distributions with glibc 2.35 or above.
| | NGC dev Container | Debian Package | Python Wheels |
|---|:---:|:---:|:---:|
-| | | | |
| Runtime libraries | **Included** | **Included** | **Included** |
| Python module | 3.10 | 3.10 | **3.8 to 3.11** |
| C++ headers and<br>CMake config | **Included** | **Included** | N/A |
@@ -131,7 +149,7 @@ For x86_64, ensure that the CUDA Runtime is installed, whether through [the CUDA
| [Torch][torch] support [^7] | **Included** | require manual [^8]<br>installation | require manual [^8]<br>installation |
| [ONNX Runtime][ort] support [^9] | **Included** | require manual [^10]<br>installation | require manual [^10]<br>installation |
| [MOFED][mofed] support [^11] | **User space included**<br>Install kernel drivers on the host | require manual<br>installation | require manual<br>installation |
-| [CLI] support | needs docker dind<br>with buildx plugin<br>on top of the image | needs docker w/<br>buildx plugin | needs docker w/<br>buildx plugin |
+| [CLI] support | **Included** | needs docker w/<br>buildx plugin | needs docker w/<br>buildx plugin |
[examples]: https://github.com/nvidia-holoscan/holoscan-sdk/blob/main/examples#readme
[data]: https://catalog.ngc.nvidia.com/orgs/nvidia/teams/clara-holoscan/collections/clara_holoscan
@@ -153,7 +171,7 @@ For x86_64, ensure that the CUDA Runtime is installed, whether through [the CUDA
[^8]: To install LibTorch and TorchVision, either build them from source, download our [pre-built packages](https://edge.urm.nvidia.com/artifactory/sw-holoscan-thirdparty-generic-local/), or copy them from the holoscan container (in `/opt`).
[^9]: ONNXRuntime 1.15.1+ needed for the Inference operator. Note that ONNX models are also supported through the TensoRT backend of the Inference Operator.
[^10]: To install ONNXRuntime, either build it from source, download our [pre-built package](https://edge.urm.nvidia.com/artifactory/sw-holoscan-thirdparty-generic-local/) with CUDA 12 and TensoRT execution provider support, or copy it from the holoscan container (in `/opt/onnxruntime`).
-[^11]: Tested with MOFED 23.07
+[^11]: Tested with MOFED 23.10
### Need more control over the SDK?
diff --git a/docs/set_up_gpudirect_rdma.md b/docs/set_up_gpudirect_rdma.md
index ca5a458f..55fbd039 100644
--- a/docs/set_up_gpudirect_rdma.md
+++ b/docs/set_up_gpudirect_rdma.md
@@ -19,7 +19,7 @@ The following steps are required to ensure your ConnectX can be used for RDMA ov
### 1. Install MOFED drivers
-Ensure the Mellanox OFED drivers version 23.07 or above are installed:
+Ensure the Mellanox OFED drivers version 23.10 or above are installed:
```bash
cat /sys/module/mlx5_core/version
@@ -30,7 +30,7 @@ If not installed, or an older version, install the appropriate version from the
```bash
# You can choose different versions/OS or download directly from the
# Download Center in the webpage linked above
-MOFED_VERSION="23.07-0.5.1.2"
+MOFED_VERSION="23.10-2.1.3.1"
OS="ubuntu22.04"
MOFED_PACKAGE="MLNX_OFED_LINUX-${MOFED_VERSION}-${OS}-$(uname -m)"
wget --progress=dot:giga https://www.mellanox.com/downloads/ofed/MLNX_OFED-${MOFED_VERSION}/${MOFED_PACKAGE}.tgz
diff --git a/docs/use_igpu_with_dgpu.md b/docs/use_igpu_with_dgpu.md
index 5062c6e5..21d4c3b9 100644
--- a/docs/use_igpu_with_dgpu.md
+++ b/docs/use_igpu_with_dgpu.md
@@ -1,6 +1,6 @@
-# Use both Integrated and Discrete GPUs on Holoscan developer kits
+# Use both Integrated and Discrete GPUs on NVIDIA Developer Kits
-Holoscan developer kits like the [NVIDIA IGX Orin](https://www.nvidia.com/en-us/edge-computing/products/igx/) or the [NVIDIA Clara AGX](https://www.nvidia.com/en-gb/clara/intelligent-medical-instruments/) have both a discrete GPU (dGPU - optional on IGX Orin) and an integrated GPU (iGPU - Tegra SoC).
+NVIDIA Developer Kits like the [NVIDIA IGX Orin](https://www.nvidia.com/en-us/edge-computing/products/igx/) or the [NVIDIA Clara AGX](https://www.nvidia.com/en-gb/clara/intelligent-medical-instruments/) have both a discrete GPU (dGPU - optional on IGX Orin) and an integrated GPU (iGPU - Tegra SoC).
As of this release, when these developer kits are flashed to leverage the dGPU, there are two limiting factors preventing the use of the iGPU:
diff --git a/examples/CMakeLists.min.txt.in b/examples/CMakeLists.min.txt.in
index 89a57a7d..d1e50e95 100644
--- a/examples/CMakeLists.min.txt.in
+++ b/examples/CMakeLists.min.txt.in
@@ -18,7 +18,11 @@ project(holoscan_examples)
# Finds the package holoscan
find_package(holoscan REQUIRED CONFIG
- PATHS "/opt/nvidia/holoscan" "/workspace/holoscan-sdk/install")
+ PATHS "/opt/nvidia/holoscan" "/workspace/holoscan-sdk/install"
+ "/workspace/holoscan-sdk/install-x86_64"
+ "/workspace/holoscan-sdk/install-aarch64-dgpu"
+ "/workspace/holoscan-sdk/install-aarch64-igpu"
+ )
# Enable testing
include(CTest)
diff --git a/examples/CMakeLists.txt b/examples/CMakeLists.txt
index 0d172fe4..2c1f7944 100644
--- a/examples/CMakeLists.txt
+++ b/examples/CMakeLists.txt
@@ -1,4 +1,4 @@
-# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+# SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -50,13 +50,38 @@ endforeach()
configure_file(CMakeLists.min.txt.in CMakeLists.min.txt @ONLY)
install(FILES "${CMAKE_CURRENT_BINARY_DIR}/CMakeLists.min.txt"
- RENAME "CMakeLists.txt"
- DESTINATION examples
- COMPONENT holoscan-examples
+ RENAME "CMakeLists.txt"
+ DESTINATION examples
+ COMPONENT holoscan-examples
)
install(FILES README.md
- DESTINATION examples
- COMPONENT holoscan-examples
+ DESTINATION examples
+ COMPONENT holoscan-examples
)
+
+# Install CMake script to download example data from NGC
+install(FILES ${CMAKE_SOURCE_DIR}/scripts/download_example_data
+ DESTINATION examples
+ PERMISSIONS OWNER_EXECUTE OWNER_WRITE OWNER_READ
+ COMPONENT holoscan-examples
+)
+
+# Files for testing
+install(FILES testing/run_example_tests
+ DESTINATION examples/testing
+ COMPONENT holoscan-examples
+ PERMISSIONS OWNER_EXECUTE OWNER_WRITE OWNER_READ
+)
+
+install(DIRECTORY ${CMAKE_SOURCE_DIR}/tests/data/validation_frames
+ DESTINATION examples/testing
+ COMPONENT holoscan-examples
+)
+
+install(FILES ${CMAKE_SOURCE_DIR}/tests/recorder.hpp
+ DESTINATION examples/testing
+ COMPONENT holoscan-examples
+)
+
endif()
diff --git a/examples/README.md b/examples/README.md
index e36c1a48..150bdd31 100644
--- a/examples/README.md
+++ b/examples/README.md
@@ -1,8 +1,45 @@
# Holoscan SDK Examples
-This directory contains examples to help users learn how to use the Holoscan SDK for development. See [HoloHub](https://nvidia-holoscan.github.io/holohub) to find additional reference applications.
+This directory contains examples to help users learn how to use the Holoscan SDK for development.
+See [HoloHub](https://nvidia-holoscan.github.io/holohub) to find additional reference applications.
-## Core
+## Build instructions
+
+- **From source**: See the [building guide](../DEVELOP.md)
+- **Python wheels**: Download the python examples from GitHub, no building necessary.
+- **NGC container or debian package**: the python examples and pre-built C++ examples are already included under `/opt/nvidia/holoscan/examples`. You can rebuild the C++ examples like so:
+
+ ```sh
+ export src_dir="/opt/nvidia/holoscan/examples/" # Add "/cpp" to build a specific example
+ export build_dir="/opt/nvidia/holoscan/examples/build" # Or the path of your choice
+ cmake -S $src_dir -B $build_dir
+ cmake --build $build_dir -j
+ ```
+
+## Run instructions
+
+See the README of each example for specific run instructions based on your installation type.
+
+## Test instructions
+
+- **From source**: See the [building guide](../DEVELOP.md#testing)
+- **Python wheels**: not available
+- **NGC container or debian package**:
+ - Running the following command will run the examples and compare the results with expected baselines.
+
+ ```sh
+ ctest --test-dir $build_dir
+ ```
+
+ - To group building and testing:
+
+ ```sh
+ /opt/nvidia/holoscan/examples/testing/run_example_tests
+ ```
+
+## Example list
+
+### Core
The following examples demonstrate the basics of the Holoscan core API, and are ideal for new users starting with the SDK:
@@ -40,22 +77,21 @@ The following examples illustrate the use of specific resource classes that can
## Inference
* [**Bring-Your-Own-Model**](bring_your_own_model): create a simple inference pipeline for ML applications
-
-## Working with third-party frameworks
+### Working with third-party frameworks
The following examples demonstrate how to seamlessly leverage third-party frameworks in holoscan applications:
* [**NumPy native**](numpy_native): signal processing on the CPU using numpy arrays
* [**CuPy native**](cupy_native): basic computation on the GPU using cupy arrays
-## Sensors
+### Sensors
The following examples demonstrate how sensors can be used as input streams to your holoscan applications:
* [**v4l2 camera**](v4l2_camera): for USB and HDMI input, such as USB cameras or HDMI output of laptop
* [**AJA capture**](aja_capture): for AJA capture cards
-## GXF and Holoscan
+### GXF and Holoscan
* [**Tensor interop**](tensor_interop): use the `Entity` message to pass tensors to/from Holoscan operators wrapping GXF codelets in Holoscan applications
* [**Wrap operator as GXF extension**](wrap_operator_as_gxf_extension): wrap Holoscan native operators as GXF codelets to use in GXF applications
diff --git a/examples/bring_your_own_model/README.md b/examples/bring_your_own_model/README.md
index 6211223c..e60cbe9d 100644
--- a/examples/bring_your_own_model/README.md
+++ b/examples/bring_your_own_model/README.md
@@ -27,7 +27,7 @@ through how to modify the python example code to run the application with an ult
```
* **using deb package install**:
```bash
- # [Prerequisite] Download NGC dataset above to `DATA_DIR`
+ /opt/nvidia/holoscan/examples/download_example_data
export HOLOSCAN_INPUT_PATH=
export PYTHONPATH=/opt/nvidia/holoscan/python/lib
# Need to enable write permission in the model directory to write the engine file (use with caution)
diff --git a/examples/conditions/asynchronous/README.md b/examples/conditions/asynchronous/README.md
index 7483df37..90415441 100644
--- a/examples/conditions/asynchronous/README.md
+++ b/examples/conditions/asynchronous/README.md
@@ -10,7 +10,8 @@ There are two operators involved in this example:
The transmit operator will be asynchronous if `async_transmit: true` in `ping_async.yaml`.
The receive operator will be asynchronous if `async_receive: true` in `ping_async.yaml`.
-The multi-threaded scheduler will be used if `multithreaded: true` in `ping_async.yaml`.
+
+The scheduler to be used can be set via the `scheduler` entry in `ping_async.yaml`. It defaults to `event_based` (an event-based multi-thread scheduler), but can also be set to either `multi_thread` (polling-based) or `greedy` (single thread).
*Visit the [SDK User Guide](https://docs.nvidia.com/holoscan/sdk-user-guide/components/conditions.html) to learn more about the Asynchronous Condition.*
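For instance, switching this example to the polling-based scheduler only requires editing that entry; a minimal excerpt of `ping_async.yaml` with the values described above:

```yaml
# ping_async.yaml (excerpt)
async_receive: true
async_transmit: false
scheduler: multi_thread   # multi_thread, event_based or greedy
```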
diff --git a/examples/conditions/asynchronous/cpp/CMakeLists.min.txt b/examples/conditions/asynchronous/cpp/CMakeLists.min.txt
index 7633569a..0d25e198 100644
--- a/examples/conditions/asynchronous/cpp/CMakeLists.min.txt
+++ b/examples/conditions/asynchronous/cpp/CMakeLists.min.txt
@@ -1,4 +1,4 @@
-# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+# SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the \"License\");
@@ -33,6 +33,14 @@ target_link_libraries(ping_async
holoscan::ops::ping_tx
)
+# Copy config file to the build tree
+add_custom_target(ping_async_yaml
+ COMMAND ${CMAKE_COMMAND} -E copy "${CMAKE_CURRENT_SOURCE_DIR}/ping_async.yaml" ${CMAKE_CURRENT_BINARY_DIR}
+ DEPENDS "ping_async.yaml"
+ BYPRODUCTS "ping_async.yaml"
+)
+add_dependencies(ping_async ping_async_yaml)
+
# Testing
if(BUILD_TESTING)
add_test(NAME EXAMPLE_CPP_PING_ASYNC_TEST
diff --git a/examples/conditions/asynchronous/cpp/ping_async.cpp b/examples/conditions/asynchronous/cpp/ping_async.cpp
index bb364352..0e9f1f65 100644
--- a/examples/conditions/asynchronous/cpp/ping_async.cpp
+++ b/examples/conditions/asynchronous/cpp/ping_async.cpp
@@ -1,5 +1,5 @@
/*
- * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
@@ -15,6 +15,8 @@
* limitations under the License.
*/
+#include
+
#include
#include
#include
@@ -72,13 +74,22 @@ int main(int argc, char** argv) {
app->set_async_receive(async_receive);
app->set_async_transmit(async_transmit);
-  bool multithreaded = app->from_config("multithreaded").as<bool>();
- if (multithreaded) {
+  std::string scheduler = app->from_config("scheduler").as<std::string>();
+ holoscan::ArgList scheduler_args{holoscan::Arg("stop_on_deadlock", true),
+ holoscan::Arg("stop_on_deadlock_timeout", 500L)};
+ if (scheduler == "multi_thread") {
// use MultiThreadScheduler instead of the default GreedyScheduler
-    app->scheduler(app->make_scheduler<holoscan::MultiThreadScheduler>(
- "multithread-scheduler",
- holoscan::Arg("stop_on_deadlock", true),
- holoscan::Arg("stop_on_deadlock_timeout", 500L)));
+ app->scheduler(app->make_scheduler("MTS", scheduler_args));
+ } else if (scheduler == "event_based") {
+ // use EventBasedScheduler instead of the default GreedyScheduler
+ app->scheduler(app->make_scheduler("EBS", scheduler_args));
+ } else if (scheduler == "greedy") {
+ app->scheduler(app->make_scheduler("GS", scheduler_args));
+ } else if (scheduler != "default") {
+ throw std::runtime_error(fmt::format(
+ "unrecognized scheduler option '{}', should be one of {'multi_thread', 'event_based', "
+ "'greedy', 'default'}",
+ scheduler));
}
// run the application
diff --git a/examples/conditions/asynchronous/cpp/ping_async.yaml b/examples/conditions/asynchronous/cpp/ping_async.yaml
index 7b7bd900..a5d6b1b9 100644
--- a/examples/conditions/asynchronous/cpp/ping_async.yaml
+++ b/examples/conditions/asynchronous/cpp/ping_async.yaml
@@ -1,5 +1,5 @@
%YAML 1.2
-# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+# SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -18,4 +18,4 @@
async_receive: true
async_transmit: false
-multithreaded: true
+scheduler: event_based # multi_thread, event_based or greedy
diff --git a/examples/hello_world/cpp/hello_world.cpp b/examples/hello_world/cpp/hello_world.cpp
index e8277316..38e943b9 100644
--- a/examples/hello_world/cpp/hello_world.cpp
+++ b/examples/hello_world/cpp/hello_world.cpp
@@ -1,5 +1,5 @@
/*
- * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
@@ -26,8 +26,7 @@ class HelloWorldOp : public Operator {
HelloWorldOp() = default;
- void setup(OperatorSpec& spec) override {
- }
+ void setup(OperatorSpec& spec) override {}
void compute(InputContext& op_input, OutputContext& op_output,
ExecutionContext& context) override {
@@ -39,7 +38,6 @@ class HelloWorldOp : public Operator {
} // namespace holoscan::ops
-
class HelloWorldApp : public holoscan::Application {
public:
void compose() override {
diff --git a/examples/holoviz/README.md b/examples/holoviz/README.md
index 01db36ad..ba35c76d 100644
--- a/examples/holoviz/README.md
+++ b/examples/holoviz/README.md
@@ -29,8 +29,8 @@ The following dataset is used by this example:
```
* **using deb package install**:
```bash
- # [Prerequisite] Download NGC dataset above to `DATA_DIR`
- export HOLOSCAN_INPUT_PATH=
+ /opt/nvidia/holoscan/examples/download_example_data
+ export HOLOSCAN_INPUT_PATH=/opt/nvidia/holoscan/data
python3 -m pip install numpy
python3 -m pip install cupy-cuda12x
export PYTHONPATH=/opt/nvidia/holoscan/python/lib
diff --git a/examples/holoviz/cpp/CMakeLists.min.txt b/examples/holoviz/cpp/CMakeLists.min.txt
index c0bb99d7..c49b345f 100644
--- a/examples/holoviz/cpp/CMakeLists.min.txt
+++ b/examples/holoviz/cpp/CMakeLists.min.txt
@@ -14,7 +14,7 @@
# limitations under the License.
cmake_minimum_required(VERSION 3.20)
-project(holoscan_hello_world CXX)
+project(holoviz_examples_cpp CXX)
# Finds the package holoscan
find_package(holoscan REQUIRED CONFIG
@@ -33,12 +33,63 @@ target_link_libraries(holoviz_geometry
# Testing
if(BUILD_TESTING)
+ set(RECORDING_DIR ${CMAKE_CURRENT_BINARY_DIR}/recording_output)
+ set(SOURCE_VIDEO_BASENAME holoviz_geometry_output)
+ set(VALIDATION_FRAMES_DIR ${CMAKE_SOURCE_DIR}/testing/validation_frames/holoviz_geometry/)
+
+ file(MAKE_DIRECTORY ${RECORDING_DIR})
+
+ # Patch the current example to enable recording the rendering window
+ add_custom_command(OUTPUT holoviz_geometry_test.cpp
+ PRE_LINK
+ COMMAND patch -u -o holoviz_geometry_test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/holoviz_geometry.cpp
+ ${CMAKE_SOURCE_DIR}/testing/validation_frames/holoviz_geometry/cpp_holoviz_geometry.patch
+ )
+
+ # Create the test executable
+ add_executable(holoviz_geometry_test
+ holoviz_geometry_test.cpp
+ )
+
+ target_include_directories(holoviz_geometry_test
+ PRIVATE ${CMAKE_SOURCE_DIR}/testing)
+
+ target_compile_definitions(holoviz_geometry_test
+ PRIVATE RECORD_OUTPUT RECORDING_DIR="${RECORDING_DIR}"
+ PRIVATE SOURCE_VIDEO_BASENAME="${SOURCE_VIDEO_BASENAME}"
+ )
+
+ target_link_libraries(holoviz_geometry_test
+ PRIVATE
+ holoscan::core
+ holoscan::ops::holoviz
+ holoscan::ops::video_stream_replayer
+ holoscan::ops::video_stream_recorder
+ holoscan::ops::format_converter
+ )
+
+ # Add the test and make sure it runs
add_test(NAME EXAMPLE_CPP_HOLOVIZ_GEOMETRY_TEST
- COMMAND ${CMAKE_CURRENT_BINARY_DIR}/holoviz_geometry --count 10
+ COMMAND ${CMAKE_CURRENT_BINARY_DIR}/holoviz_geometry_test --count 10
WORKING_DIRECTORY ${CMAKE_BINARY_DIR}
)
set_tests_properties(EXAMPLE_CPP_HOLOVIZ_GEOMETRY_TEST PROPERTIES
PASS_REGULAR_EXPRESSION "Received camera pose:"
PASS_REGULAR_EXPRESSION "Reach end of file or playback count reaches to the limit. Stop ticking."
)
+
+ # Add a test to check the validity of the frames
+ add_test(NAME EXAMPLE_CPP_HOLOVIZ_GEOMETRY_RENDER_TEST
+ COMMAND python3 ${CMAKE_SOURCE_DIR}/../bin/video_validation.py
+ --source_video_dir ${RECORDING_DIR}
+ --source_video_basename ${SOURCE_VIDEO_BASENAME}
+ --output_dir ${RECORDING_DIR}
+ --validation_frames_dir ${VALIDATION_FRAMES_DIR}
+ WORKING_DIRECTORY ${CMAKE_BINARY_DIR}
+ )
+
+ set_tests_properties(EXAMPLE_CPP_HOLOVIZ_GEOMETRY_RENDER_TEST PROPERTIES
+ DEPENDS EXAMPLE_CPP_HOLOVIZ_GEOMETRY_TEST
+ PASS_REGULAR_EXPRESSION "Valid video output!"
+ )
endif()
diff --git a/examples/holoviz/cpp/CMakeLists.txt b/examples/holoviz/cpp/CMakeLists.txt
index 4ee8d796..5af45cfb 100644
--- a/examples/holoviz/cpp/CMakeLists.txt
+++ b/examples/holoviz/cpp/CMakeLists.txt
@@ -75,6 +75,9 @@ if(HOLOSCAN_BUILD_TESTS)
holoviz_geometry_test.cpp
)
+ target_include_directories(holoviz_geometry_test
+ PRIVATE ${CMAKE_SOURCE_DIR}/tests)
+
target_compile_definitions(holoviz_geometry_test
PRIVATE RECORD_OUTPUT RECORDING_DIR="${RECORDING_DIR}"
PRIVATE SOURCE_VIDEO_BASENAME="${SOURCE_VIDEO_BASENAME}"
diff --git a/examples/holoviz/cpp/holoviz_geometry.cpp b/examples/holoviz/cpp/holoviz_geometry.cpp
index 5778b18e..d527f55a 100644
--- a/examples/holoviz/cpp/holoviz_geometry.cpp
+++ b/examples/holoviz/cpp/holoviz_geometry.cpp
@@ -240,6 +240,15 @@ class HolovizGeometryApp : public holoscan::Application {
void compose() override {
using namespace holoscan;
+ ArgList args;
+ auto data_directory = std::getenv("HOLOSCAN_INPUT_PATH");
+ if (data_directory != nullptr && data_directory[0] != '\0') {
+ auto video_directory = std::filesystem::path(data_directory);
+ video_directory /= "racerx";
+ args.add(Arg("directory", video_directory.string()));
+ HOLOSCAN_LOG_INFO("Using video from {}", video_directory.string());
+ }
+
// Define the replayer, geometry source and holoviz operators
auto replayer =
make_operator("replayer",
@@ -248,7 +257,8 @@ class HolovizGeometryApp : public holoscan::Application {
Arg("frame_rate", 0.f),
Arg("repeat", true),
Arg("realtime", true),
- Arg("count", count_));
+ Arg("count", count_),
+ args);
auto source = make_operator("source");
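
The Python variants of the holoviz examples follow the same pattern; below is a minimal sketch of the `HOLOSCAN_INPUT_PATH` lookup shown above, assuming the usual `racerx` data layout (the helper name is illustrative).

```python
import os
from pathlib import Path


def replayer_dir_kwargs(basename: str = "racerx") -> dict:
    """Return an extra 'directory' kwarg for VideoStreamReplayerOp when HOLOSCAN_INPUT_PATH is set."""
    kwargs = {}
    data_directory = os.environ.get("HOLOSCAN_INPUT_PATH", "")
    if data_directory:
        video_directory = Path(data_directory) / basename
        kwargs["directory"] = str(video_directory)
        print(f"Using video from {video_directory}")
    return kwargs
```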
diff --git a/examples/holoviz/python/CMakeLists.min.txt b/examples/holoviz/python/CMakeLists.min.txt
new file mode 100644
index 00000000..5831ef1b
--- /dev/null
+++ b/examples/holoviz/python/CMakeLists.min.txt
@@ -0,0 +1,149 @@
+# SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+# SPDX-License-Identifier: Apache-2.0
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Holoviz Geometry Testing
+if(BUILD_TESTING)
+
+ # Testing holoviz_geometry
+ set(RECORDING_DIR ${CMAKE_CURRENT_BINARY_DIR}/recording_output)
+ set(SOURCE_VIDEO_BASENAME python_holoviz_geometry_output)
+ set(VALIDATION_FRAMES_DIR ${CMAKE_SOURCE_DIR}/testing/validation_frames/holoviz_geometry/)
+
+ file(MAKE_DIRECTORY ${RECORDING_DIR})
+
+ # Patch the current example to enable recording the rendering window
+ add_custom_command(OUTPUT holoviz_geometry_test.py
+ PRE_LINK
+ COMMAND patch -u -o holoviz_geometry_test.py ${CMAKE_CURRENT_SOURCE_DIR}/holoviz_geometry.py
+ ${CMAKE_SOURCE_DIR}/testing/validation_frames/holoviz_geometry/python_holoviz_geometry.patch
+ COMMAND sed -i "s#RECORDING_DIR#${RECORDING_DIR}#g" holoviz_geometry_test.py
+ COMMAND sed -i "s#SOURCE_VIDEO_BASENAME#${SOURCE_VIDEO_BASENAME}#g" holoviz_geometry_test.py
+ )
+
+ add_custom_target(python_holoviz_geometry_test ALL
+ DEPENDS "holoviz_geometry_test.py"
+ )
+
+ add_test(NAME EXAMPLE_PYTHON_HOLOVIZ_GEOMETRY_TEST
+ COMMAND python3 holoviz_geometry_test.py --count 10
+ WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
+ )
+
+ set_tests_properties(EXAMPLE_PYTHON_HOLOVIZ_GEOMETRY_TEST PROPERTIES
+ PASS_REGULAR_EXPRESSION "Received camera pose:"
+ PASS_REGULAR_EXPRESSION "Reach end of file or playback count reaches to the limit. Stop ticking."
+ )
+
+ # Add a test to check the validity of the frames
+ add_test(NAME EXAMPLE_PYTHON_HOLOVIZ_GEOMETRY_RENDER_TEST
+ COMMAND python3 ${CMAKE_SOURCE_DIR}/../bin/video_validation.py
+ --source_video_dir ${RECORDING_DIR}
+ --source_video_basename ${SOURCE_VIDEO_BASENAME}
+ --output_dir ${RECORDING_DIR}
+ --validation_frames_dir ${VALIDATION_FRAMES_DIR}
+ WORKING_DIRECTORY ${CMAKE_BINARY_DIR}
+ )
+
+ set_tests_properties(EXAMPLE_PYTHON_HOLOVIZ_GEOMETRY_RENDER_TEST PROPERTIES
+ DEPENDS EXAMPLE_PYTHON_HOLOVIZ_GEOMETRY_TEST
+ PASS_REGULAR_EXPRESSION "Valid video output!"
+ )
+
+ # Testing holoviz_geometry_3d
+ set(SOURCE_VIDEO_BASENAME python_holoviz_geometry_3d_output)
+ set(VALIDATION_FRAMES_DIR ${CMAKE_SOURCE_DIR}/testing/validation_frames/holoviz_geometry_3d/)
+
+ file(MAKE_DIRECTORY ${RECORDING_DIR})
+
+ # Patch the current example to enable recording the rendering window
+ add_custom_command(OUTPUT holoviz_geometry_3d_test.py
+ PRE_LINK
+ COMMAND patch -u -o holoviz_geometry_3d_test.py ${CMAKE_CURRENT_SOURCE_DIR}/holoviz_geometry_3d.py
+ ${CMAKE_SOURCE_DIR}/testing/validation_frames/holoviz_geometry_3d/python_holoviz_geometry_3d.patch
+ COMMAND sed -i "s#RECORDING_DIR#${RECORDING_DIR}#g" holoviz_geometry_3d_test.py
+ COMMAND sed -i "s#SOURCE_VIDEO_BASENAME#${SOURCE_VIDEO_BASENAME}#g" holoviz_geometry_3d_test.py
+ )
+
+ add_custom_target(python_holoviz_geometry_3d_test ALL
+ DEPENDS "holoviz_geometry_3d_test.py"
+ )
+
+ add_test(NAME EXAMPLE_PYTHON_HOLOVIZ_GEOMETRY_3D_TEST
+ COMMAND python3 holoviz_geometry_3d_test.py --count 10
+ WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
+ )
+
+ set_tests_properties(EXAMPLE_PYTHON_HOLOVIZ_GEOMETRY_3D_TEST PROPERTIES
+ PASS_REGULAR_EXPRESSION "Scheduler finished."
+ )
+
+ # Add a test to check the validity of the frames
+ add_test(NAME EXAMPLE_PYTHON_HOLOVIZ_GEOMETRY_3D_RENDER_TEST
+ COMMAND python3 ${CMAKE_SOURCE_DIR}/../bin/video_validation.py
+ --source_video_dir ${RECORDING_DIR}
+ --source_video_basename ${SOURCE_VIDEO_BASENAME}
+ --output_dir ${RECORDING_DIR}
+ --validation_frames_dir ${VALIDATION_FRAMES_DIR}
+ WORKING_DIRECTORY ${CMAKE_BINARY_DIR}
+ )
+
+ set_tests_properties(EXAMPLE_PYTHON_HOLOVIZ_GEOMETRY_3D_RENDER_TEST PROPERTIES
+ DEPENDS EXAMPLE_PYTHON_HOLOVIZ_GEOMETRY_3D_TEST
+ PASS_REGULAR_EXPRESSION "Valid video output!"
+ )
+
+ # Testing holoviz_views
+ set(SOURCE_VIDEO_BASENAME python_holoviz_views_output)
+ set(VALIDATION_FRAMES_DIR ${CMAKE_SOURCE_DIR}/testing/validation_frames/holoviz_views/)
+
+ file(MAKE_DIRECTORY ${RECORDING_DIR})
+
+ # Patch the current example to enable recording the rendering window
+ add_custom_command(OUTPUT holoviz_views_test.py
+ PRE_LINK
+ COMMAND patch -u -o holoviz_views_test.py ${CMAKE_CURRENT_SOURCE_DIR}/holoviz_views.py
+ ${CMAKE_SOURCE_DIR}/testing/validation_frames/holoviz_views/python_holoviz_views.patch
+ COMMAND sed -i "s#RECORDING_DIR#${RECORDING_DIR}#g" holoviz_views_test.py
+ COMMAND sed -i "s#SOURCE_VIDEO_BASENAME#${SOURCE_VIDEO_BASENAME}#g" holoviz_views_test.py
+ )
+
+ add_custom_target(python_holoviz_views_test ALL
+ DEPENDS "holoviz_views_test.py"
+ )
+
+ add_test(NAME EXAMPLE_PYTHON_HOLOVIZ_VIEWS_TEST
+ COMMAND python3 holoviz_views_test.py --count 10
+ WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
+ )
+
+ set_tests_properties(EXAMPLE_PYTHON_HOLOVIZ_VIEWS_TEST PROPERTIES
+ PASS_REGULAR_EXPRESSION "Reach end of file or playback count reaches to the limit. Stop ticking."
+ )
+
+ # Add a test to check the validity of the frames
+ add_test(NAME EXAMPLE_PYTHON_HOLOVIZ_VIEWS_RENDER_TEST
+ COMMAND python3 ${CMAKE_SOURCE_DIR}/../bin/video_validation.py
+ --source_video_dir ${RECORDING_DIR}
+ --source_video_basename ${SOURCE_VIDEO_BASENAME}
+ --output_dir ${RECORDING_DIR}
+ --validation_frames_dir ${VALIDATION_FRAMES_DIR}
+ WORKING_DIRECTORY ${CMAKE_BINARY_DIR}
+ )
+
+ set_tests_properties(EXAMPLE_PYTHON_HOLOVIZ_VIEWS_RENDER_TEST PROPERTIES
+ DEPENDS EXAMPLE_PYTHON_HOLOVIZ_VIEWS_TEST
+ PASS_REGULAR_EXPRESSION "Valid video output!"
+ )
+endif()
diff --git a/examples/holoviz/python/CMakeLists.txt b/examples/holoviz/python/CMakeLists.txt
index 7597897b..ffb99c7f 100644
--- a/examples/holoviz/python/CMakeLists.txt
+++ b/examples/holoviz/python/CMakeLists.txt
@@ -95,6 +95,13 @@ install(FILES
COMPONENT "holoscan-examples"
)
+# Install the minimal CMakeLists.txt file
+install(FILES CMakeLists.min.txt
+ RENAME "CMakeLists.txt"
+ DESTINATION "${app_relative_dest_path}"
+ COMPONENT holoscan-examples
+)
+
# Holoviz Geometry 3D Testing
if(HOLOSCAN_BUILD_TESTS)
diff --git a/examples/multithread/README.md b/examples/multithread/README.md
index 29c2c742..88bd252d 100644
--- a/examples/multithread/README.md
+++ b/examples/multithread/README.md
@@ -33,11 +33,11 @@ Then, run:
./examples/multithread/cpp/multithread
```
-To run with the default, greedy single-threaded scheduler, set `multithread: false` in `app_config.yaml`.
+For the C++ application, the scheduler to be used can be set via the `scheduler` entry in `multithread.yaml`. It defaults to `event_based` (an event-based multithread scheduler), but can also be set to either `multi_thread` (polling-based) or `greedy` (single thread).
## Python API
-- `multithread.py`: This example demonstrates how to configure and use a multi-threaded scheduler instead of the default single-threaded one. It involves three operators as described for the C++ API example described above. The primary difference is that instead of using a YAML file for the configuration variables, all values are set via the command line. Call the script below with the `--help` option to get a full description of the command line parameters.
+- `multithread.py`: This example demonstrates how to configure and use a multi-threaded scheduler instead of the default single-threaded one. It involves three operators, as described for the C++ API example above. The primary difference is that instead of using a YAML file for the configuration variables, all values are set via the command line. Call the script below with the `--help` option to get a full description of the command line parameters. By default a polling-based multithread scheduler will be used, but if `--event_based` is specified, the event-based multithread scheduler will be used instead (see the sketch after the run instructions below).
### Build instructions
@@ -50,5 +50,5 @@ First, go in your `build` or `install` directory (automatically done by `./run l
Then, run the app with the options of your choice. For example, to use 8 worker threads to run 32 delay operators with delays ranging linearly from 0.2 to (0.2 + 0.05 * 31), one would set:
```bash
-python3 ./examples/multithread/python/multithread.py --threads 8 --num_delay_ops 32 --delay 0.2 --delay_step 0.05
+python3 ./examples/multithread/python/multithread.py --threads 8 --num_delay_ops 32 --delay 0.2 --delay_step 0.05 --event_based
```
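
For reference, a minimal sketch of what `--threads 8 --event_based` maps to in the Python API; the shipped `multithread.py` (diffed further below) does this inside `main()`, and the scheduler name here is illustrative.

```python
from holoscan.core import Application
from holoscan.schedulers import EventBasedScheduler, MultiThreadScheduler


def apply_worker_scheduler(app: Application, threads: int, event_based: bool):
    """Attach a multithreaded scheduler with the requested worker count (threads > 0)."""
    scheduler_class = EventBasedScheduler if event_based else MultiThreadScheduler
    app.scheduler(
        scheduler_class(
            app,
            worker_thread_number=threads,
            stop_on_deadlock=True,
            stop_on_deadlock_timeout=500,
            name="multithread_scheduler",
        )
    )
```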
diff --git a/examples/multithread/cpp/multithread.cpp b/examples/multithread/cpp/multithread.cpp
index 0c2d85fa..3abae8c9 100644
--- a/examples/multithread/cpp/multithread.cpp
+++ b/examples/multithread/cpp/multithread.cpp
@@ -1,5 +1,5 @@
/*
- * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
@@ -158,11 +158,23 @@ int main(int argc, char** argv) {
app->set_delay(delay);
app->set_delay_step(delay_step);
-  bool multithreaded = app->from_config("multithreaded").as<bool>();
- if (multithreaded) {
+  std::string scheduler = app->from_config("scheduler").as<std::string>();
+ if (scheduler == "multi_thread") {
// use MultiThreadScheduler instead of the default GreedyScheduler
    app->scheduler(app->make_scheduler<holoscan::MultiThreadScheduler>(
- "multithread-scheduler", app->from_config("scheduler")));
+ "multithread-scheduler", app->from_config("multi_thread_scheduler")));
+ } else if (scheduler == "event_based") {
+ // use EventBasedScheduler instead of the default GreedyScheduler
+    app->scheduler(app->make_scheduler<holoscan::EventBasedScheduler>(
+ "event-based-scheduler", app->from_config("event_based_scheduler")));
+ } else if (scheduler == "greedy") {
+    app->scheduler(app->make_scheduler<holoscan::GreedyScheduler>(
+ "greedy-scheduler", app->from_config("greedy_scheduler")));
+ } else if (scheduler != "default") {
+ throw std::runtime_error(fmt::format(
+ "unrecognized scheduler option '{}', should be one of {'multi_thread', 'event_based', "
+ "'greedy', 'default'}",
+ scheduler));
}
app->run();
diff --git a/examples/multithread/cpp/multithread.yaml b/examples/multithread/cpp/multithread.yaml
index b59c39af..76c6472c 100644
--- a/examples/multithread/cpp/multithread.yaml
+++ b/examples/multithread/cpp/multithread.yaml
@@ -1,5 +1,5 @@
%YAML 1.2
-# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+# SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -17,12 +17,21 @@
extensions:
- libgxf_std.so
-multithreaded: true
+scheduler: event_based # event_based, multi_thread or greedy
num_delay_ops: 32
delay: 0.1
delay_step: 0.01
-scheduler:
+greedy_scheduler:
+ stop_on_deadlock: true
+ stop_on_deadlock_timeout: 500
+
+multi_thread_scheduler:
+ worker_thread_number: 8
+ stop_on_deadlock: true
+ stop_on_deadlock_timeout: 500
+
+event_based_scheduler:
worker_thread_number: 8
stop_on_deadlock: true
stop_on_deadlock_timeout: 500
diff --git a/examples/multithread/python/CMakeLists.txt b/examples/multithread/python/CMakeLists.txt
index 0a1d3917..1d18fc5d 100644
--- a/examples/multithread/python/CMakeLists.txt
+++ b/examples/multithread/python/CMakeLists.txt
@@ -1,4 +1,4 @@
-# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+# SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -38,4 +38,11 @@ if(HOLOSCAN_BUILD_TESTS)
)
set_tests_properties(EXAMPLE_PYTHON_MULTITHREAD_TEST PROPERTIES
PASS_REGULAR_EXPRESSION "sum of received values: 496")
+
+ add_test(NAME EXAMPLE_PYTHON_EVENT_BASED_TEST
+ COMMAND python3 multithread.py --event_based
+ WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
+ )
+  set_tests_properties(EXAMPLE_PYTHON_EVENT_BASED_TEST PROPERTIES
+ PASS_REGULAR_EXPRESSION "sum of received values: 496")
endif()
diff --git a/examples/multithread/python/multithread.py b/examples/multithread/python/multithread.py
index 787051ab..4f6aaaf8 100644
--- a/examples/multithread/python/multithread.py
+++ b/examples/multithread/python/multithread.py
@@ -21,7 +21,7 @@
from holoscan.conditions import CountCondition
from holoscan.core import Application, Operator, OperatorSpec
-from holoscan.schedulers import GreedyScheduler, MultiThreadScheduler
+from holoscan.schedulers import EventBasedScheduler, GreedyScheduler, MultiThreadScheduler
class PingTxOp(Operator):
@@ -119,13 +119,14 @@ def compose(self):
self.add_flow(d, rx, {("out_val", "values"), ("out_name", "names")})
-def main(threads, num_delays, delay, delay_step):
+def main(threads, num_delays, delay, delay_step, event_based):
app = ParallelPingApp(num_delays=num_delays, delay=delay, delay_step=delay_step)
if threads == 0:
# Explicitly setting GreedyScheduler is not strictly required as it is the default.
scheduler = GreedyScheduler(app, name="greedy_scheduler")
else:
- scheduler = MultiThreadScheduler(
+ scheduler_class = EventBasedScheduler if event_based else MultiThreadScheduler
+ scheduler = scheduler_class(
app,
worker_thread_number=threads,
stop_on_deadlock=True,
@@ -148,9 +149,10 @@ def main(threads, num_delays, delay, delay_step):
type=int,
default=-1,
help=(
- "The number of threads to use for the multi-threaded scheduler. Set this to 0 to use "
+ "The number of threads to use for multi-threaded schedulers. Set this to 0 to use "
"the default greedy scheduler instead. If set to -1, multiprocessing.cpu_count() "
- "threads will be used."
+ "threads will be used. To use the event-based scheduler instead of the default "
+ "multi-thread scheduler, please specify --event_based."
),
)
parser.add_argument(
@@ -181,6 +183,15 @@ def main(threads, num_delays, delay, delay_step):
"0 to (num_delay_ops - 1)."
),
)
+ parser.add_argument(
+ "--event_based",
+ action="store_true",
+ help=(
+ "Sets the application to use the event-based scheduler instead of the default "
+ "multi-thread scheduler when threads > 0."
+ ),
+ )
+
args = parser.parse_args()
if args.delay < 0:
raise ValueError("delay must be non-negative")
@@ -199,4 +210,5 @@ def main(threads, num_delays, delay, delay_step):
num_delays=args.num_delay_ops,
delay=args.delay,
delay_step=args.delay_step,
+ event_based=args.event_based,
)
diff --git a/examples/ping_distributed/README.md b/examples/ping_distributed/README.md
index 84c3a429..5e1a62b4 100644
--- a/examples/ping_distributed/README.md
+++ b/examples/ping_distributed/README.md
@@ -3,7 +3,8 @@
This example demonstrates a distributed ping application with two operators connected using add_flow().
There are two operators involved in this example:
- 1. a transmitter in Fragment 1 (`fragment1`), set to transmit a tensor map containing a single tensor named 'out' on its 'out' port.
+
+ 1. a transmitter in Fragment 1 (`fragment1`), set to transmit a tensor map containing a single tensor named 'out' on its 'out' port.
2. a receiver in Fragment 2 (`fragment2`) that prints the received names and shapes of any received tensors to the terminal
The `--gpu` command line argument can be provided to indicate that the tensor should be on the GPU instead of the host (CPU). The user can also override the default tensor shape and data type. Run the application with `-h` or `--help` to see full details of the additional supported arguments.
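
For reference, a minimal Python sketch of the two-fragment layout described above; the operators are simplified placeholders rather than the shipped tensor transmitter/receiver.

```python
from holoscan.conditions import CountCondition
from holoscan.core import Application, Fragment, Operator, OperatorSpec


class TxOp(Operator):
    def setup(self, spec: OperatorSpec):
        spec.output("out")

    def compute(self, op_input, op_output, context):
        op_output.emit("ping", "out")


class RxOp(Operator):
    def setup(self, spec: OperatorSpec):
        spec.input("in")

    def compute(self, op_input, op_output, context):
        print(f"received: {op_input.receive('in')}")


class TxFragment(Fragment):
    def compose(self):
        # stop after 10 messages
        self.add_operator(TxOp(self, CountCondition(self, 10), name="tx"))


class RxFragment(Fragment):
    def compose(self):
        self.add_operator(RxOp(self, name="rx"))


class PingDistributedApp(Application):
    def compose(self):
        fragment1 = TxFragment(self, name="fragment1")
        fragment2 = RxFragment(self, name="fragment2")
        # connect the 'out' port of 'tx' in fragment1 to the 'in' port of 'rx' in fragment2
        self.add_flow(fragment1, fragment2, {("tx.out", "rx.in")})


if __name__ == "__main__":
    PingDistributedApp().run()
```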
@@ -16,25 +17,24 @@ Please refer to the [user guide](https://docs.nvidia.com/holoscan/sdk-user-guide
### Prerequisites
-* **using deb package install**:
- ```bash
- # Set the application folder
- APP_DIR=/opt/nvidia/holoscan/examples/ping_distributed/cpp
- ```
+* **using deb package install or NGC container**:
-* **from NGC container**:
```bash
# Set the application folder
APP_DIR=/opt/nvidia/holoscan/examples/ping_distributed/cpp
```
+
* **source (dev container)**:
+
```bash
./run launch # optional: append `install` for install tree (default: `build`)
# Set the application folder
APP_DIR=./examples/ping_distributed/cpp
```
+
* **source (local env)**:
+
```bash
# Set the application folder
APP_DIR=${BUILD_OR_INSTALL_DIR}/examples/ping_distributed/cpp
@@ -65,41 +65,34 @@ Please refer to the [user guide](https://docs.nvidia.com/holoscan/sdk-user-guide
### Prerequisites
* **using python wheel**:
+
```bash
- # [Prerequisite] Download NGC dataset above to `DATA_DIR`
- export HOLOSCAN_INPUT_PATH=
# [Prerequisite] Download example .py file below to `APP_DIR`
# [Optional] Start the virtualenv where holoscan is installed
# Set the application folder
APP_DIR=
```
-* **using deb package install**:
- ```bash
- # [Prerequisite] Download NGC dataset above to `DATA_DIR` (e.g., `/opt/nvidia/data`)
- export HOLOSCAN_INPUT_PATH=
- export PYTHONPATH=/opt/nvidia/holoscan/python/lib
- # Set the application folder
- APP_DIR=/opt/nvidia/holoscan/examples/ping_distributed/python
- ```
-* **from NGC container**:
- ```bash
- # HOLOSCAN_INPUT_PATH is set to /opt/nvidia/data by default
+* **using deb package or NGC container**:
+ ```bash
# Set the application folder
APP_DIR=/opt/nvidia/holoscan/examples/ping_distributed/python
```
+
* **source (dev container)**:
+
```bash
./run launch # optional: append `install` for install tree (default: `build`)
# Set the application folder
APP_DIR=./examples/ping_distributed/python
```
+
* **source (local env)**:
+
```bash
- export HOLOSCAN_INPUT_PATH=${SRC_DIR}/data
export PYTHONPATH=${BUILD_OR_INSTALL_DIR}/python/lib
# Set the application folder
diff --git a/examples/ping_distributed/cpp/CMakeLists.txt b/examples/ping_distributed/cpp/CMakeLists.txt
index d3c8ab62..3fd08853 100644
--- a/examples/ping_distributed/cpp/CMakeLists.txt
+++ b/examples/ping_distributed/cpp/CMakeLists.txt
@@ -1,4 +1,4 @@
-# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+# SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -68,6 +68,7 @@ if(HOLOSCAN_BUILD_TESTS)
)
set_tests_properties(EXAMPLE_CPP_PING_DISTRIBUTED_TEST PROPERTIES
PASS_REGULAR_EXPRESSION "message 10: Tensor key: 'out', shape: \\(32, 64\\)"
+ FAIL_REGULAR_EXPRESSION "initialized independent of a parent entity"
)
add_test(NAME EXAMPLE_CPP_PING_DISTRIBUTED_GPU_TEST
@@ -76,6 +77,7 @@ if(HOLOSCAN_BUILD_TESTS)
)
set_tests_properties(EXAMPLE_CPP_PING_DISTRIBUTED_GPU_TEST PROPERTIES
PASS_REGULAR_EXPRESSION "message 10: Tensor key: 'out', shape: \\(32, 64\\)"
+ FAIL_REGULAR_EXPRESSION "initialized independent of a parent entity"
)
add_test(NAME EXAMPLE_CPP_PING_DISTRIBUTED_HELP_STRING_TEST
@@ -84,5 +86,6 @@ if(HOLOSCAN_BUILD_TESTS)
)
set_tests_properties(EXAMPLE_CPP_PING_DISTRIBUTED_HELP_STRING_TEST PROPERTIES
PASS_REGULAR_EXPRESSION "Usage: ping_distributed \\[OPTIONS\\]"
+ FAIL_REGULAR_EXPRESSION "initialized independent of a parent entity"
)
endif()
diff --git a/examples/ping_distributed/cpp/ping_distributed_ops.cpp b/examples/ping_distributed/cpp/ping_distributed_ops.cpp
index b5534cb1..e4c92a06 100644
--- a/examples/ping_distributed/cpp/ping_distributed_ops.cpp
+++ b/examples/ping_distributed/cpp/ping_distributed_ops.cpp
@@ -1,5 +1,5 @@
/*
- * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
@@ -161,11 +161,13 @@ void PingTensorTxOp::compute(InputContext&, OutputContext& op_output, ExecutionC
tensor_shape, dtype, bytes_per_element, strides, storage_type, allocator.value());
if (!result) { HOLOSCAN_LOG_ERROR("failed to generate tensor"); }
- // Create Holoscan GXF tensor
- auto holoscan_gxf_tensor = holoscan::gxf::GXFTensor(*gxf_tensor);
-
// Create Holoscan tensor
- auto holoscan_tensor = holoscan_gxf_tensor.as_tensor();
+ auto maybe_dl_ctx = (*gxf_tensor).toDLManagedTensorContext();
+ if (!maybe_dl_ctx) {
+ HOLOSCAN_LOG_ERROR(
+ "failed to get std::shared_ptr from nvidia::gxf::Tensor");
+ }
+  std::shared_ptr<holoscan::Tensor> holoscan_tensor = std::make_shared<holoscan::Tensor>(maybe_dl_ctx.value());
// insert tensor into the TensorMap
out_message.insert({tensor_name_.get().c_str(), holoscan_tensor});
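
On the Python side no explicit `DLManagedTensorContext` handling is needed; emitting a dict of array-like objects already produces a tensor map whose entries arrive downstream as `holoscan.Tensor`. A minimal sketch, assuming CuPy is available (operator and port names are illustrative):

```python
import cupy as cp
from holoscan.core import Operator, OperatorSpec


class TensorTxOp(Operator):
    """Transmit a tensor map holding one 32x64 tensor named 'out'."""

    def setup(self, spec: OperatorSpec):
        spec.output("out")

    def compute(self, op_input, op_output, context):
        # Any DLPack/array-interface object (CuPy, NumPy, ...) is converted for the receiver.
        op_output.emit({"out": cp.zeros((32, 64), dtype=cp.uint8)}, "out")
```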
diff --git a/examples/ping_distributed/python/CMakeLists.txt b/examples/ping_distributed/python/CMakeLists.txt
index f357f008..f9efcd91 100644
--- a/examples/ping_distributed/python/CMakeLists.txt
+++ b/examples/ping_distributed/python/CMakeLists.txt
@@ -1,4 +1,4 @@
-# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+# SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -39,6 +39,7 @@ if(HOLOSCAN_BUILD_TESTS)
set_tests_properties(EXAMPLE_PYTHON_PING_DISTRIBUTED_TEST PROPERTIES
PASS_REGULAR_EXPRESSION "message 10: Tensor key: 'out', shape: \\(32, 64\\)"
FAIL_REGULAR_EXPRESSION "AssertionError:"
+ FAIL_REGULAR_EXPRESSION "initialized independent of a parent entity"
)
add_test(NAME EXAMPLE_PYTHON_PING_DISTRIBUTED_GPU_TEST
@@ -48,6 +49,7 @@ if(HOLOSCAN_BUILD_TESTS)
set_tests_properties(EXAMPLE_PYTHON_PING_DISTRIBUTED_GPU_TEST PROPERTIES
PASS_REGULAR_EXPRESSION "message 10: Tensor key: 'out', shape: \\(32, 64\\)"
FAIL_REGULAR_EXPRESSION "AssertionError:"
+ FAIL_REGULAR_EXPRESSION "initialized independent of a parent entity"
)
add_test(NAME EXAMPLE_PYTHON_PING_DISTRIBUTED_HELP_STRING_TEST
@@ -56,6 +58,7 @@ if(HOLOSCAN_BUILD_TESTS)
)
set_tests_properties(EXAMPLE_PYTHON_PING_DISTRIBUTED_HELP_STRING_TEST PROPERTIES
PASS_REGULAR_EXPRESSION "Usage: ping_distributed.py \\[OPTIONS\\]"
+ FAIL_REGULAR_EXPRESSION "initialized independent of a parent entity"
)
add_test(NAME EXAMPLE_PYTHON_PING_DISTRIBUTED_INVALID_DEVICE_TEST
@@ -67,5 +70,6 @@ if(HOLOSCAN_BUILD_TESTS)
ENVIRONMENT "HOLOSCAN_UCX_DEVICE_ID=-5"
PASS_REGULAR_EXPRESSION "GPUDevice value found and cached. dev_id: -5"
PASS_REGULAR_EXPRESSION "cudaSetDevice Failed - 101, device id -5"
+ FAIL_REGULAR_EXPRESSION "initialized independent of a parent entity"
)
endif()
diff --git a/examples/ping_simple/cpp/ping_simple.cpp b/examples/ping_simple/cpp/ping_simple.cpp
index 878f2415..beb7447c 100644
--- a/examples/ping_simple/cpp/ping_simple.cpp
+++ b/examples/ping_simple/cpp/ping_simple.cpp
@@ -1,5 +1,5 @@
/*
- * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
@@ -19,7 +19,6 @@
#include
#include
-
class MyPingApp : public holoscan::Application {
public:
void compose() override {
diff --git a/examples/ping_simple_run_async/cpp/ping_simple_run_async.cpp b/examples/ping_simple_run_async/cpp/ping_simple_run_async.cpp
index 1d2b7272..d3a3fcb4 100644
--- a/examples/ping_simple_run_async/cpp/ping_simple_run_async.cpp
+++ b/examples/ping_simple_run_async/cpp/ping_simple_run_async.cpp
@@ -1,5 +1,5 @@
/*
- * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
@@ -42,7 +42,6 @@ int main(int argc, char** argv) {
auto app = holoscan::make_application();
auto future = app->run_async();
HOLOSCAN_LOG_INFO("Application is running asynchronously.");
- // Executing `future.wait();` here would block the main thread until the application finishes
auto print_status = std::thread([&app, &future]() {
// Wait for the application to finish
@@ -65,7 +64,9 @@ int main(int argc, char** argv) {
});
print_status.join(); // print status while application is running
- future.wait();
+ // Block until application is done and throw any exceptions
+ future.get();
+
HOLOSCAN_LOG_INFO("Application has finished running.");
return 0;
}
diff --git a/examples/ping_simple_run_async/python/ping_simple_run_async.py b/examples/ping_simple_run_async/python/ping_simple_run_async.py
index 8b1d0cc6..c78fa512 100644
--- a/examples/ping_simple_run_async/python/ping_simple_run_async.py
+++ b/examples/ping_simple_run_async/python/ping_simple_run_async.py
@@ -62,5 +62,7 @@ def print_status():
print_status() # print status while application is running
+ # Block until application is done and raise any exceptions
future.result()
+
print("# Application has finished running.")
diff --git a/examples/tensor_interop/README.md b/examples/tensor_interop/README.md
index 53dfa355..a6a707e7 100644
--- a/examples/tensor_interop/README.md
+++ b/examples/tensor_interop/README.md
@@ -55,8 +55,8 @@ The following dataset is used by this example:
```
* **using deb package install**:
```bash
- # [Prerequisite] Download NGC dataset above to `DATA_DIR`
- export HOLOSCAN_INPUT_PATH=
+ /opt/nvidia/holoscan/examples/download_example_data
+ export HOLOSCAN_INPUT_PATH=/opt/nvidia/holoscan/data
python3 -m pip install cupy-cuda12x
export PYTHONPATH=/opt/nvidia/holoscan/python/lib
python3 /opt/nvidia/holoscan/examples/tensor_interop/python/tensor_interop.py
diff --git a/examples/tensor_interop/cpp/receive_tensor_gxf.hpp b/examples/tensor_interop/cpp/receive_tensor_gxf.hpp
index 991e760e..4a29b2fc 100644
--- a/examples/tensor_interop/cpp/receive_tensor_gxf.hpp
+++ b/examples/tensor_interop/cpp/receive_tensor_gxf.hpp
@@ -1,5 +1,5 @@
/*
- * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
@@ -26,7 +26,7 @@
#include "gxf/std/allocator.hpp"
#include "gxf/std/codelet.hpp"
-#include "gxf/std/parameter_parser_std.hpp"
+#include "gxf/core/parameter_parser_std.hpp"
#include "gxf/std/receiver.hpp"
#include "gxf/std/tensor.hpp"
diff --git a/examples/tensor_interop/cpp/send_tensor_gxf.hpp b/examples/tensor_interop/cpp/send_tensor_gxf.hpp
index 05864b12..34238480 100644
--- a/examples/tensor_interop/cpp/send_tensor_gxf.hpp
+++ b/examples/tensor_interop/cpp/send_tensor_gxf.hpp
@@ -1,5 +1,5 @@
/*
- * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
@@ -25,7 +25,7 @@
#include "gxf/std/allocator.hpp"
#include "gxf/std/codelet.hpp"
-#include "gxf/std/parameter_parser_std.hpp"
+#include "gxf/core/parameter_parser_std.hpp"
#include "gxf/std/tensor.hpp"
#include "gxf/std/transmitter.hpp"
@@ -83,8 +83,8 @@ class SendTensor : public Codelet {
}
void* output_data_ptr = maybe_output_tensor.value()->pointer();
- CUDA_TRY(cudaMemset(output_data_ptr, value_, tensor_shape.size() *
- gxf::PrimitiveTypeSize(element_type)));
+ CUDA_TRY(cudaMemset(
+ output_data_ptr, value_, tensor_shape.size() * gxf::PrimitiveTypeSize(element_type)));
value_ = (value_ + 1) % 255;
diff --git a/examples/tensor_interop/cpp/tensor_interop.cpp b/examples/tensor_interop/cpp/tensor_interop.cpp
index c506016b..7bddc9af 100644
--- a/examples/tensor_interop/cpp/tensor_interop.cpp
+++ b/examples/tensor_interop/cpp/tensor_interop.cpp
@@ -1,5 +1,5 @@
/*
- * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
@@ -27,7 +27,6 @@
#include
#include
#include
-#include
#include
#include "./receive_tensor_gxf.hpp"
diff --git a/examples/tensor_interop/python/CMakeLists.min.txt b/examples/tensor_interop/python/CMakeLists.min.txt
new file mode 100644
index 00000000..ca9410e8
--- /dev/null
+++ b/examples/tensor_interop/python/CMakeLists.min.txt
@@ -0,0 +1,65 @@
+# SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+# SPDX-License-Identifier: Apache-2.0
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Testing
+if(BUILD_TESTING)
+
+ set(RECORDING_DIR ${CMAKE_CURRENT_BINARY_DIR}/recording_output)
+ set(SOURCE_VIDEO_BASENAME python_tensor_interop_output)
+ set(VALIDATION_FRAMES_DIR ${CMAKE_SOURCE_DIR}/testing/validation_frames/tensor_interop/)
+
+ file(MAKE_DIRECTORY ${RECORDING_DIR})
+
+ file(READ ${CMAKE_CURRENT_SOURCE_DIR}/tensor_interop.yaml CONFIG_STRING)
+ string(REPLACE "count: 0" "count: 10" CONFIG_STRING ${CONFIG_STRING})
+ string(APPEND CONFIG_STRING " enable_render_buffer_output: true\n\nrecorder:\n directory: \"${RECORDING_DIR}\"\n basename: \"${SOURCE_VIDEO_BASENAME}\"")
+ set(CONFIG_FILE ${CMAKE_CURRENT_BINARY_DIR}/python_tensor_interop_testing_config.yaml)
+ file(WRITE ${CONFIG_FILE} ${CONFIG_STRING})
+
+ # Patch the current example to enable recording the rendering window
+ add_custom_command(OUTPUT tensor_interop_test.py
+ PRE_LINK
+ COMMAND patch -u -o tensor_interop_test.py ${CMAKE_CURRENT_SOURCE_DIR}/tensor_interop.py
+ ${CMAKE_SOURCE_DIR}/testing/validation_frames/tensor_interop/python_tensor_interop.patch
+ )
+
+ add_custom_target(python_tensor_interop_test ALL
+ DEPENDS "tensor_interop_test.py"
+ )
+
+ add_test(NAME EXAMPLE_PYTHON_TENSOR_INTEROP_TEST
+ COMMAND python3 tensor_interop_test.py --config ${CONFIG_FILE}
+ WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
+ )
+ set_tests_properties(EXAMPLE_PYTHON_TENSOR_INTEROP_TEST PROPERTIES
+ PASS_REGULAR_EXPRESSION "Reach end of file or playback count reaches to the limit. Stop ticking."
+ )
+
+ # Add a test to check the validity of the frames
+ add_test(NAME EXAMPLE_PYTHON_TENSOR_INTEROP_RENDER_TEST
+ COMMAND python3 ${CMAKE_SOURCE_DIR}/../bin/video_validation.py
+ --source_video_dir ${RECORDING_DIR}
+ --source_video_basename ${SOURCE_VIDEO_BASENAME}
+ --output_dir ${RECORDING_DIR}
+ --validation_frames_dir ${VALIDATION_FRAMES_DIR}
+ WORKING_DIRECTORY ${CMAKE_BINARY_DIR}
+ )
+
+ set_tests_properties(EXAMPLE_PYTHON_TENSOR_INTEROP_RENDER_TEST PROPERTIES
+ DEPENDS EXAMPLE_PYTHON_TENSOR_INTEROP_TEST
+ PASS_REGULAR_EXPRESSION "Valid video output!"
+ )
+
+endif()
diff --git a/examples/tensor_interop/python/CMakeLists.txt b/examples/tensor_interop/python/CMakeLists.txt
index 36d07f8b..62fd082c 100644
--- a/examples/tensor_interop/python/CMakeLists.txt
+++ b/examples/tensor_interop/python/CMakeLists.txt
@@ -1,4 +1,4 @@
-# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+# SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -39,6 +39,13 @@ install(FILES
COMPONENT "holoscan-examples"
)
+# Install the minimal CMakeLists.txt file
+install(FILES CMakeLists.min.txt
+ RENAME "CMakeLists.txt"
+ DESTINATION "${app_relative_dest_path}"
+ COMPONENT holoscan-examples
+)
+
# Testing
if(HOLOSCAN_BUILD_TESTS)
diff --git a/examples/testing/run_example_tests b/examples/testing/run_example_tests
new file mode 100644
index 00000000..1a59a745
--- /dev/null
+++ b/examples/testing/run_example_tests
@@ -0,0 +1,31 @@
+#!/bin/bash
+# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+# SPDX-License-Identifier: Apache-2.0
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This script builds and runs the examples to make sure that the Holoscan SDK is correctly
+# installed and examples can be built and run correctly
+# NOTE: This is meant to be called from an installation of the SDK, not from the git repository
+
+# Get path to the examples. This assumes the directory is where the script is located
+SCRIPT_DIR=$(dirname "$(readlink -f "$0")")
+source_dir=$(realpath "$SCRIPT_DIR/..")
+
+# Compile the examples
+build_dir=${source_dir}/examples-build
+cmake -S ${source_dir} -B ${build_dir}
+cmake --build ${build_dir} -j
+
+# Run CTest
+ctest --test-dir ${build_dir}
diff --git a/examples/v4l2_camera/CMakeLists.txt b/examples/v4l2_camera/CMakeLists.txt
index 939fe2bd..35338b09 100644
--- a/examples/v4l2_camera/CMakeLists.txt
+++ b/examples/v4l2_camera/CMakeLists.txt
@@ -1,4 +1,4 @@
-# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+# SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/examples/v4l2_camera/README.md b/examples/v4l2_camera/README.md
index b056a82e..d5d04f54 100644
--- a/examples/v4l2_camera/README.md
+++ b/examples/v4l2_camera/README.md
@@ -17,43 +17,48 @@ If using a container outside the `run` script, add `--group-add video` and `--de
### Local Development
Install the following dependency:
+
+```sh
+sudo apt-get install libv4l-dev
+```
+
+To use `v4l2-ctl` for debugging, also install `v4l-utils`:
+
```sh
-sudo apt-get install libv4l-dev=1.18.0-2build1
+sudo apt-get install v4l-utils
```
If you do not have permissions to open the video device, run:
+
```sh
- sudo usermod -aG video $USER
+sudo usermod -aG video $USER
```
### Updating HDMI IN Firmware
-Before using the HDMI IN device, please ensure that it has the latest firmware by following instructions from the [devkit guide](https://docs.nvidia.com/igx-orin/user-guide/latest/post-installation.html#updating-hdmi-in-input-firmware).
+Before using the HDMI IN device on NVIDIA IGX or Clara AGX developer kits, please ensure that it has the latest firmware by following instructions from the [devkit guide](https://docs.nvidia.com/igx-orin/user-guide/latest/post-installation.html#updating-hdmi-in-input-firmware).
## Parameters
There are a few parameters that can be specified:
-* `device`: The mount point of the device (default=`"/dev/video0"`).
-* `pixel_format`: The [V4L2 pixel format](https://docs.kernel.org/userspace-api/media/v4l/pixfmt-intro.html) of the device, as FourCC code (if not specified, app will auto select 'AR24' or 'YUYV' if supported by the device)
-* `width`: The frame size width (if not specified, uses device default). Currently, only `V4L2_FRMSIZE_TYPE_DISCRETE` are supported.
-* `height`: The frame size height (if not specified, uses device default). Currently, only `V4L2_FRMSIZE_TYPE_DISCRETE` are supported.
-
-**OBS:** Note that specifying both the `width` and `height` parameters will make the app use `BlockMemoryPool` rather than `UnboundedAllocator` which improves the latency (FPS), however
-please ensure that your device supports that combination of `width` and `height` (see `v4l2-ctl --list-formats-ext` below) otherwise the application will fail to start.
-
-The parameters of the available V4L2-supported devices can be found with:
-```sh
-v4l2-ctl --list-devices
-```
-followed by:
-```sh
-v4l2-ctl -d /dev/video0 --list-formats-ext
-```
-If you do not have the `v4l2-ctl` app, it can be installed with (if running via Holoscan Docker image, already available):
-```sh
-sudo apt-get install v4l-utils
-```
+* `device`: The mount point of the device
+ * Default: `"/dev/video0"`
+ * List available options with `v4l2-ctl --list-devices`
+* `pixel_format`: The [V4L2 pixel format](https://docs.kernel.org/userspace-api/media/v4l/pixfmt-intro.html) of the device, as FourCC code
+ * Default: auto selects `AB24` or `YUYV` based on device support
+  * List available options with `v4l2-ctl -d /dev/<device> --list-formats`
+* `width` and `height`: The frame dimensions
+ * Default: device default
+  * List available options with `v4l2-ctl -d /dev/<device> --list-formats-ext`
+* `exposure_time`: The exposure time of the camera sensor in multiples of 100 μs (e.g. setting exposure_time to 100 is 10 ms)
+ * Default: auto exposure, or device default if auto is not supported
+  * List supported range with `v4l2-ctl -d /dev/<device> -L`
+* `gain`: The gain of the camera sensor
+ * Default: auto gain, or device default if auto is not supported
+  * List supported range with `v4l2-ctl -d /dev/<device> -L`
+
+> Note that specifying both the `width` and `height` parameters to values supported by your device (see `v4l2-ctl --list-formats-ext`) will make the app use `BlockMemoryPool` rather than `UnboundedAllocator`, which optimizes memory use and should improve latency (FPS).
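
For reference, a minimal Python sketch showing where these parameters go when set in code rather than YAML; the values are placeholders, and `exposure_time`/`gain` only apply if the device (and SDK version) support manual controls.

```python
from holoscan.core import Application
from holoscan.operators import HolovizOp, V4L2VideoCaptureOp
from holoscan.resources import UnboundedAllocator


class V4L2CameraApp(Application):
    def compose(self):
        source = V4L2VideoCaptureOp(
            self,
            name="source",
            allocator=UnboundedAllocator(self, name="pool"),
            device="/dev/video0",
            # width=1920, height=1080,      # only if supported (see v4l2-ctl --list-formats-ext)
            # pixel_format="AB24",
            # exposure_time=500, gain=100,  # only if the device exposes manual controls
        )
        visualizer = HolovizOp(self, name="visualizer")
        self.add_flow(source, visualizer, {("signal", "receivers")})


if __name__ == "__main__":
    V4L2CameraApp().run()
```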
## Run Instructions
diff --git a/examples/v4l2_camera/cpp/CMakeLists.min.txt b/examples/v4l2_camera/cpp/CMakeLists.min.txt
index 7b759cd8..50cc80c9 100644
--- a/examples/v4l2_camera/cpp/CMakeLists.min.txt
+++ b/examples/v4l2_camera/cpp/CMakeLists.min.txt
@@ -1,4 +1,4 @@
-# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+# SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the \"License\");
diff --git a/examples/v4l2_camera/cpp/CMakeLists.txt b/examples/v4l2_camera/cpp/CMakeLists.txt
index dc56c002..b177f570 100644
--- a/examples/v4l2_camera/cpp/CMakeLists.txt
+++ b/examples/v4l2_camera/cpp/CMakeLists.txt
@@ -69,14 +69,8 @@ endif()
# Testing
option(HOLOSCAN_BUILD_V4L2_TESTS "Build tests for V4L2 loopback" OFF)
if(HOLOSCAN_BUILD_TESTS AND HOLOSCAN_BUILD_V4L2_TESTS)
- # Assumes that the v4l2 video loopback is mounted on /dev/video3. This allows us to create a
- # a virtual video device and stream data from an mp4 file without the need for a physical
- # video input device. To setup v4l2 video loopback, refer to the "Use with V4L2 Loopback Devices"
- # section of the README file for this example
- file(READ ${CMAKE_CURRENT_SOURCE_DIR}/v4l2_camera.yaml CONFIG_STRING)
- string(REPLACE "device: \"/dev/video0\"" "device: \"/dev/video3\"" CONFIG_STRING "${CONFIG_STRING}")
- set(CONFIG_FILE ${CMAKE_CURRENT_BINARY_DIR}/cpp_v4l2_camera_config.yaml)
- file(WRITE ${CONFIG_FILE} "${CONFIG_STRING}")
+ # Assumes that the v4l2 video loopback has already been mounted and the yaml files have been
+ # updated to use the virtual loopback device.
# Modify testcase to only run 10 frames
add_custom_command(OUTPUT v4l2_camera_test.cpp
@@ -100,9 +94,7 @@ if(HOLOSCAN_BUILD_TESTS AND HOLOSCAN_BUILD_V4L2_TESTS)
add_dependencies(v4l2_camera_test racerx_data)
add_test(NAME EXAMPLE_CPP_V4L2_CAMERA_TEST
- COMMAND bash -c "ffmpeg -stream_loop -1 -re -i ${CMAKE_SOURCE_DIR}/data/racerx/racerx-small.mp4 \
- -pix_fmt yuyv422 -f v4l2 /dev/video3 & sleep 5; \
- ${CMAKE_CURRENT_BINARY_DIR}/v4l2_camera_test ${CONFIG_FILE}; echo 'Done'; kill %1"
+ COMMAND "${CMAKE_CURRENT_BINARY_DIR}/v4l2_camera_test"
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
)
set_tests_properties(EXAMPLE_CPP_V4L2_CAMERA_TEST PROPERTIES
diff --git a/examples/v4l2_camera/cpp/v4l2_camera.cpp b/examples/v4l2_camera/cpp/v4l2_camera.cpp
index 7251ed94..e2773d6d 100644
--- a/examples/v4l2_camera/cpp/v4l2_camera.cpp
+++ b/examples/v4l2_camera/cpp/v4l2_camera.cpp
@@ -1,5 +1,5 @@
/*
- * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
@@ -50,9 +50,7 @@ class App : public holoscan::Application {
      auto allocator = make_resource<BlockMemoryPool>("pool", 0, block_size, 1);
      source = make_operator<ops::V4L2VideoCaptureOp>(
- "source",
- from_config("source"),
- Arg("allocator") = allocator);
+ "source", from_config("source"), Arg("allocator") = allocator);
// Set Holoviz width and height from source resolution
auto viz_args = from_config("visualizer");
@@ -61,8 +59,8 @@ class App : public holoscan::Application {
else if (arg.name() == "height")
viz_args.add(arg);
}
-    visualizer = make_operator<ops::HolovizOp>(
- "visualizer", viz_args, Arg("allocator") = allocator);
+ visualizer =
+ make_operator("visualizer", viz_args, Arg("allocator") = allocator);
} else {
// width and height not given, use UnboundedAllocator (worse latency)
      source = make_operator<ops::V4L2VideoCaptureOp>(
@@ -83,9 +81,7 @@ int main(int argc, char** argv) {
// Get the configuration
auto config_path = std::filesystem::canonical(argv[0]).parent_path();
config_path += "/v4l2_camera.yaml";
- if ( argc >= 2 ) {
- config_path = argv[1];
- }
+ if (argc >= 2) { config_path = argv[1]; }
app.config(config_path);
app.run();
@@ -94,4 +90,3 @@ int main(int argc, char** argv) {
return 0;
}
-
diff --git a/examples/v4l2_camera/cpp/v4l2_camera.yaml b/examples/v4l2_camera/cpp/v4l2_camera.yaml
index cbf9daec..9e2c2605 100644
--- a/examples/v4l2_camera/cpp/v4l2_camera.yaml
+++ b/examples/v4l2_camera/cpp/v4l2_camera.yaml
@@ -1,5 +1,5 @@
%YAML 1.2
-# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+# SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -15,7 +15,7 @@
# limitations under the License.
---
source: # V4L2VideoCaptureOp
- # | Input device. On devkit, /dev/video0 is generally HDMI in
+ # | Input device. On Clara AGX or NVIDIA IGX devkits, /dev/video0 is generally the HDMI IN
device: "/dev/video0"
# | App will auto-select default width and height if not provided
@@ -26,6 +26,13 @@ source: # V4L2VideoCaptureOp
# | App will auto-select the default "pixel_format" for your device if not provided.
# | See this app's readme file for details.
- # pixel_format: "AR24"
+ # pixel_format: "AB24"
+
+ # | These properties might not be supported for all v4l2 nodes.
+ # | The app will attempt to do auto exposure and gain if not provided. If auto is not supported,
+ # | it will use the defaults defined by your device.
+ # | See this app's readme file for details.
+ # exposure_time: 500
+ # gain: 100
visualizer: # Holoviz
diff --git a/examples/v4l2_camera/python/CMakeLists.txt b/examples/v4l2_camera/python/CMakeLists.txt
index fc7df49d..11016497 100644
--- a/examples/v4l2_camera/python/CMakeLists.txt
+++ b/examples/v4l2_camera/python/CMakeLists.txt
@@ -1,4 +1,4 @@
-# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+# SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -43,14 +43,8 @@ install(FILES
# Testing
option(HOLOSCAN_BUILD_V4L2_TESTS "Build tests for V4L2 loopback" OFF)
if(HOLOSCAN_BUILD_TESTS AND HOLOSCAN_BUILD_V4L2_TESTS)
- # Assumes that the v4l2 video loopback is mounted on /dev/video3. This allows us to create a
- # a virtual video device and stream data from an mp4 file without the need for a physical
- # video input device. To setup v4l2 video loopback, refer to the "Use with V4L2 Loopback Devices"
- # section of the README file for this example
- file(READ ${CMAKE_CURRENT_SOURCE_DIR}/v4l2_camera.yaml CONFIG_STRING)
- string(REPLACE "device: \"/dev/video0\"" "device: \"/dev/video3\"" CONFIG_STRING "${CONFIG_STRING}")
- set(CONFIG_FILE ${CMAKE_CURRENT_BINARY_DIR}/python_v4l2_camera_config.yaml)
- file(WRITE ${CONFIG_FILE} "${CONFIG_STRING}")
+ # Assumes that the v4l2 video loopback has already been mounted and the yaml files have been
+ # updated to use the virtual loopback device.
# Modify testcase to only run 10 frames
file(READ ${CMAKE_CURRENT_SOURCE_DIR}/v4l2_camera.py PYTHON_SOURCE_STRING)
@@ -61,9 +55,7 @@ if(HOLOSCAN_BUILD_TESTS AND HOLOSCAN_BUILD_V4L2_TESTS)
file(WRITE ${PYTHON_SOURCE_FILE} "${PYTHON_SOURCE_STRING}")
add_test(NAME EXAMPLE_PYTHON_V4L2_CAMERA_TEST
- COMMAND bash -c "ffmpeg -stream_loop -1 -re -i ${CMAKE_SOURCE_DIR}/data/racerx/racerx-small.mp4 \
- -pix_fmt yuyv422 -f v4l2 /dev/video3 & sleep 5; \
- python3 v4l2_camera_test.py --config ${CONFIG_FILE}; echo 'Done'; kill %1"
+ COMMAND python3 v4l2_camera_test.py
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
)
diff --git a/examples/v4l2_camera/python/v4l2_camera.yaml b/examples/v4l2_camera/python/v4l2_camera.yaml
index cbf9daec..9e2c2605 100644
--- a/examples/v4l2_camera/python/v4l2_camera.yaml
+++ b/examples/v4l2_camera/python/v4l2_camera.yaml
@@ -1,5 +1,5 @@
%YAML 1.2
-# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+# SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -15,7 +15,7 @@
# limitations under the License.
---
source: # V4L2VideoCaptureOp
- # | Input device. On devkit, /dev/video0 is generally HDMI in
+ # | Input device. On Clara AGX or NVIDIA IGX devkits, /dev/video0 is generally the HDMI IN
device: "/dev/video0"
# | App will auto-select default width and height if not provided
@@ -26,6 +26,13 @@ source: # V4L2VideoCaptureOp
# | App will auto-select the default "pixel_format" for your device if not provided.
# | See this app's readme file for details.
- # pixel_format: "AR24"
+ # pixel_format: "AB24"
+
+ # | These properties might not be supported for all v4l2 nodes.
+ # | The app will attempt to do auto exposure and gain if not provided. If auto is not supported,
+ # | it will use the defaults defined by your device.
+ # | See this app's readme file for details.
+ # exposure_time: 500
+ # gain: 100
visualizer: # Holoviz
diff --git a/examples/video_replayer/README.md b/examples/video_replayer/README.md
index 56a4afba..3becbb15 100644
--- a/examples/video_replayer/README.md
+++ b/examples/video_replayer/README.md
@@ -1,6 +1,8 @@
# Video Replayer
-Minimal example to demonstrate the use of the video stream replayer operator to load video from disk. The video frames need to have been converted to a gxf entity format, as shown [here](../../scripts/README.md#convert_video_to_gxf_entitiespy).
+Minimal example to demonstrate the use of the video stream replayer operator to load video from disk.
+
+The video frames need to have been converted to a gxf entity format to use as input. You can use the `convert_video_to_gxf_entities.py` script installed in `/opt/nvidia/holoscan/bin` or available [on GitHub](https://github.com/nvidia-holoscan/holoscan-sdk/tree/main/scripts#convert_video_to_gxf_entitiespy) (tensors will be loaded on the GPU).
> Note: Support for H264 stream support is in progress and can be found on [HoloHub](https://nvidia-holoscan.github.io/holohub)
@@ -15,13 +17,12 @@ The following dataset is used by this example:
* **using deb package install**:
```bash
- # [Prerequisite] Download NGC dataset above to `/opt/nvidia/data`
- cd /opt/nvidia/holoscan # to find dataset
+ /opt/nvidia/holoscan/examples/download_example_data
+ export HOLOSCAN_INPUT_PATH=/opt/nvidia/holoscan/data
./examples/video_replayer/cpp/video_replayer
```
* **from NGC container**:
```bash
- cd /opt/nvidia/holoscan # to find dataset
./examples/video_replayer/cpp/video_replayer
```
* **source (dev container)**:
@@ -47,8 +48,8 @@ The following dataset is used by this example:
```
* **using deb package install**:
```bash
- # [Prerequisite] Download NGC dataset above to `DATA_DIR`
- export HOLOSCAN_INPUT_PATH=
+ /opt/nvidia/holoscan/examples/download_example_data
+ export HOLOSCAN_INPUT_PATH=/opt/nvidia/holoscan/data
export PYTHONPATH=/opt/nvidia/holoscan/python/lib
python3 /opt/nvidia/holoscan/examples/video_replayer/python/video_replayer.py
```
diff --git a/examples/video_replayer/cpp/CMakeLists.min.txt b/examples/video_replayer/cpp/CMakeLists.min.txt
index 5064e855..75b7349c 100644
--- a/examples/video_replayer/cpp/CMakeLists.min.txt
+++ b/examples/video_replayer/cpp/CMakeLists.min.txt
@@ -42,16 +42,67 @@ add_dependencies(video_replayer video_replayer_yaml)
# Testing
if(BUILD_TESTING)
+ set(RECORDING_DIR ${CMAKE_CURRENT_BINARY_DIR}/recording_output)
+ set(SOURCE_VIDEO_BASENAME video_replayer_output)
+ set(VALIDATION_FRAMES_DIR ${CMAKE_SOURCE_DIR}/testing/validation_frames/video_replayer/)
+
+ file(MAKE_DIRECTORY ${RECORDING_DIR})
+
file(READ ${CMAKE_CURRENT_SOURCE_DIR}/video_replayer.yaml CONFIG_STRING)
- string(REPLACE "count: 0" "count: 10" CONFIG_STRING ${CONFIG_STRING})
+ string(REPLACE "count: 0" "count: 10" CONFIG_STRING "${CONFIG_STRING}")
set(CONFIG_FILE ${CMAKE_CURRENT_BINARY_DIR}/cpp_video_replayer_config.yaml)
- file(WRITE ${CONFIG_FILE} ${CONFIG_STRING})
+ file(WRITE ${CONFIG_FILE} "${CONFIG_STRING}")
+
+ # Patch the current example to enable recording the rendering window
+ add_custom_command(OUTPUT video_replayer_test.cpp
+ PRE_LINK
+ COMMAND patch -u -o video_replayer_test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/video_replayer.cpp
+ ${CMAKE_SOURCE_DIR}/testing/validation_frames/video_replayer/cpp_video_replayer.patch
+ )
+ # Create the test executable
+ add_executable(video_replayer_test
+ video_replayer_test.cpp
+ )
+
+ target_include_directories(video_replayer_test
+ PRIVATE ${CMAKE_SOURCE_DIR}/testing)
+
+ target_compile_definitions(video_replayer_test
+ PRIVATE RECORD_OUTPUT RECORDING_DIR="${RECORDING_DIR}"
+ PRIVATE SOURCE_VIDEO_BASENAME="${SOURCE_VIDEO_BASENAME}"
+ )
+
+ target_link_libraries(video_replayer_test
+ PRIVATE
+ holoscan::core
+ holoscan::ops::holoviz
+ holoscan::ops::video_stream_replayer
+ holoscan::ops::video_stream_recorder
+ holoscan::ops::format_converter
+ )
+
+ # Add the test and make sure it runs
add_test(NAME EXAMPLE_CPP_VIDEO_REPLAYER_TEST
- COMMAND ${CMAKE_CURRENT_BINARY_DIR}/video_replayer ${CONFIG_FILE}
+ COMMAND ${CMAKE_CURRENT_BINARY_DIR}/video_replayer_test ${CONFIG_FILE}
WORKING_DIRECTORY ${CMAKE_BINARY_DIR}
)
set_tests_properties(EXAMPLE_CPP_VIDEO_REPLAYER_TEST PROPERTIES
PASS_REGULAR_EXPRESSION "Reach end of file or playback count reaches to the limit. Stop ticking."
)
+
+ # Add a test to check the validity of the frames
+ add_test(NAME EXAMPLE_CPP_VIDEO_REPLAYER_RENDER_TEST
+ COMMAND python3 ${CMAKE_SOURCE_DIR}/../bin/video_validation.py
+ --source_video_dir ${RECORDING_DIR}
+ --source_video_basename ${SOURCE_VIDEO_BASENAME}
+ --output_dir ${RECORDING_DIR}
+ --validation_frames_dir ${VALIDATION_FRAMES_DIR}
+ WORKING_DIRECTORY ${CMAKE_BINARY_DIR}
+ )
+
+ set_tests_properties(EXAMPLE_CPP_VIDEO_REPLAYER_RENDER_TEST PROPERTIES
+ DEPENDS EXAMPLE_CPP_VIDEO_REPLAYER_TEST
+ PASS_REGULAR_EXPRESSION "Valid video output!"
+ )
endif()
diff --git a/examples/video_replayer/cpp/CMakeLists.txt b/examples/video_replayer/cpp/CMakeLists.txt
index 672f30f6..a5cd547f 100644
--- a/examples/video_replayer/cpp/CMakeLists.txt
+++ b/examples/video_replayer/cpp/CMakeLists.txt
@@ -1,4 +1,4 @@
-# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+# SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -93,6 +93,9 @@ if(HOLOSCAN_BUILD_TESTS)
video_replayer_test.cpp
)
+ target_include_directories(video_replayer_test
+ PRIVATE ${CMAKE_SOURCE_DIR}/tests)
+
target_compile_definitions(video_replayer_test
PRIVATE RECORD_OUTPUT RECORDING_DIR="${RECORDING_DIR}"
PRIVATE SOURCE_VIDEO_BASENAME="${SOURCE_VIDEO_BASENAME}"
diff --git a/examples/video_replayer/cpp/video_replayer.cpp b/examples/video_replayer/cpp/video_replayer.cpp
index 7f8ba5db..01eb52b7 100644
--- a/examples/video_replayer/cpp/video_replayer.cpp
+++ b/examples/video_replayer/cpp/video_replayer.cpp
@@ -1,5 +1,5 @@
/*
- * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
@@ -14,6 +14,7 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
+#include
#include
#include
@@ -24,8 +25,18 @@ class VideoReplayerApp : public holoscan::Application {
void compose() override {
using namespace holoscan;
+ // Sets the data directory to use from the environment variable if it is set
+ ArgList args;
+ auto data_directory = std::getenv("HOLOSCAN_INPUT_PATH");
+ if (data_directory != nullptr && data_directory[0] != '\0') {
+ auto video_directory = std::filesystem::path(data_directory);
+ video_directory /= "racerx";
+ args.add(Arg("directory", video_directory.string()));
+ }
+
// Define the replayer and holoviz operators and configure using yaml configuration
-    auto replayer = make_operator<ops::VideoStreamReplayerOp>("replayer", from_config("replayer"));
+    auto replayer =
+        make_operator<ops::VideoStreamReplayerOp>("replayer", from_config("replayer"), args);
     auto visualizer = make_operator<ops::HolovizOp>("holoviz", from_config("holoviz"));
// Define the workflow: replayer -> holoviz
@@ -37,9 +48,7 @@ int main(int argc, char** argv) {
// Get the yaml configuration file
auto config_path = std::filesystem::canonical(argv[0]).parent_path();
config_path /= std::filesystem::path("video_replayer.yaml");
- if ( argc >= 2 ) {
- config_path = argv[1];
- }
+ if (argc >= 2) { config_path = argv[1]; }
   auto app = holoscan::make_application<VideoReplayerApp>();
app->config(config_path);
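
The Python example can honor `HOLOSCAN_INPUT_PATH` with the same pattern. Below is a minimal sketch of the equivalent logic, assuming the `racerx` dataset layout used by the C++ change above; the shipped `video_replayer.py` may differ in its details:

```python
import os

from holoscan.core import Application
from holoscan.operators import HolovizOp, VideoStreamReplayerOp


class VideoReplayerApp(Application):
    def compose(self):
        # Start from the yaml-configured replayer parameters, then prefer the
        # HOLOSCAN_INPUT_PATH data directory when it is set (mirrors the C++ change).
        replayer_kwargs = self.kwargs("replayer")
        data_dir = os.environ.get("HOLOSCAN_INPUT_PATH", "")
        if data_dir:
            replayer_kwargs["directory"] = os.path.join(data_dir, "racerx")

        replayer = VideoStreamReplayerOp(self, name="replayer", **replayer_kwargs)
        visualizer = HolovizOp(self, name="holoviz", **self.kwargs("holoviz"))
        self.add_flow(replayer, visualizer, {("output", "receivers")})


if __name__ == "__main__":
    config_file = os.path.join(os.path.dirname(__file__), "video_replayer.yaml")
    app = VideoReplayerApp()
    app.config(config_file)
    app.run()
```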
diff --git a/examples/video_replayer/python/CMakeLists.min.txt b/examples/video_replayer/python/CMakeLists.min.txt
new file mode 100644
index 00000000..316b15a9
--- /dev/null
+++ b/examples/video_replayer/python/CMakeLists.min.txt
@@ -0,0 +1,67 @@
+# SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+# SPDX-License-Identifier: Apache-2.0
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Testing
+if(BUILD_TESTING)
+
+ set(RECORDING_DIR ${CMAKE_CURRENT_BINARY_DIR}/recording_output)
+ set(SOURCE_VIDEO_BASENAME python_video_replayer_output)
+ set(VALIDATION_FRAMES_DIR ${CMAKE_SOURCE_DIR}/testing/validation_frames/video_replayer/)
+
+ file(MAKE_DIRECTORY ${RECORDING_DIR})
+
+ file(READ ${CMAKE_CURRENT_SOURCE_DIR}/video_replayer.yaml CONFIG_STRING)
+ string(REPLACE "count: 0" "count: 10" CONFIG_STRING ${CONFIG_STRING})
+ string(APPEND CONFIG_STRING " enable_render_buffer_output: true\n\nrecorder:\n directory: \"${RECORDING_DIR}\"\n basename: \"${SOURCE_VIDEO_BASENAME}\"")
+ set(CONFIG_FILE ${CMAKE_CURRENT_BINARY_DIR}/python_video_replayer_config.yaml)
+ file(WRITE ${CONFIG_FILE} ${CONFIG_STRING})
+
+ # Patch the current example to enable recording the rendering window
+ add_custom_command(OUTPUT video_replayer_test.py
+ PRE_LINK
+ COMMAND patch -u -o video_replayer_test.py ${CMAKE_CURRENT_SOURCE_DIR}/video_replayer.py
+ ${CMAKE_SOURCE_DIR}/testing/validation_frames/video_replayer/python_video_replayer.patch
+ )
+
+ add_custom_target(python_video_replayer_test ALL
+ DEPENDS "video_replayer_test.py"
+ )
+
+ add_test(NAME EXAMPLE_PYTHON_VIDEO_REPLAYER_TEST
+ COMMAND python3 video_replayer_test.py --config python_video_replayer_config.yaml
+ WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
+ )
+
+ set_tests_properties(EXAMPLE_PYTHON_VIDEO_REPLAYER_TEST PROPERTIES
+ DEPENDS "video_replayer_test.py"
+ PASS_REGULAR_EXPRESSION "Reach end of file or playback count reaches to the limit. Stop ticking."
+ )
+
+ # Add a test to check the validity of the frames
+ add_test(NAME EXAMPLE_PYTHON_VIDEO_REPLAYER_RENDER_TEST
+ COMMAND python3 ${CMAKE_SOURCE_DIR}/../bin/video_validation.py
+ --source_video_dir ${RECORDING_DIR}
+ --source_video_basename ${SOURCE_VIDEO_BASENAME}
+ --output_dir ${RECORDING_DIR}
+ --validation_frames_dir ${VALIDATION_FRAMES_DIR}
+ WORKING_DIRECTORY ${CMAKE_BINARY_DIR}
+ )
+
+ set_tests_properties(EXAMPLE_PYTHON_VIDEO_REPLAYER_RENDER_TEST PROPERTIES
+ DEPENDS EXAMPLE_PYTHON_VIDEO_REPLAYER_TEST
+ PASS_REGULAR_EXPRESSION "Valid video output!"
+ )
+
+endif()
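
The patch applied by this file swaps in a test variant that records the Holoviz render buffer so `EXAMPLE_PYTHON_VIDEO_REPLAYER_RENDER_TEST` can compare the captured frames against `VALIDATION_FRAMES_DIR`. A rough sketch of such a recording chain is shown below; the wiring and dtype choices are assumptions rather than a copy of the actual patch:

```python
import os

from holoscan.core import Application
from holoscan.operators import (
    FormatConverterOp,
    HolovizOp,
    VideoStreamRecorderOp,
    VideoStreamReplayerOp,
)
from holoscan.resources import UnboundedAllocator


class RecordingVideoReplayerApp(Application):
    """Replays the video and records the rendered frames for offline validation."""

    def compose(self):
        replayer = VideoStreamReplayerOp(self, name="replayer", **self.kwargs("replayer"))

        # Expose the rendered frames on Holoviz's render_buffer_output port.
        holoviz_kwargs = self.kwargs("holoviz")
        holoviz_kwargs["enable_render_buffer_output"] = True
        visualizer = HolovizOp(
            self,
            name="holoviz",
            allocator=UnboundedAllocator(self, name="holoviz_allocator"),
            **holoviz_kwargs,
        )

        # The render buffer is RGBA; convert to RGB before recording.
        converter = FormatConverterOp(
            self,
            name="recorder_format_converter",
            pool=UnboundedAllocator(self, name="converter_pool"),
            in_dtype="rgba8888",
            out_dtype="rgb888",
        )
        # directory/basename come from the `recorder` section appended to the test config above.
        recorder = VideoStreamRecorderOp(self, name="recorder", **self.kwargs("recorder"))

        self.add_flow(replayer, visualizer, {("output", "receivers")})
        self.add_flow(visualizer, converter, {("render_buffer_output", "source_video")})
        self.add_flow(converter, recorder)


if __name__ == "__main__":
    config_file = os.path.join(os.path.dirname(__file__), "python_video_replayer_config.yaml")
    app = RecordingVideoReplayerApp()
    app.config(config_file)
    app.run()
```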
diff --git a/examples/video_replayer/python/CMakeLists.txt b/examples/video_replayer/python/CMakeLists.txt
index 4115dfbd..cbc55e2c 100644
--- a/examples/video_replayer/python/CMakeLists.txt
+++ b/examples/video_replayer/python/CMakeLists.txt
@@ -1,4 +1,4 @@
-# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+# SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -40,6 +40,13 @@ install(FILES
COMPONENT "holoscan-examples"
)
+# Install the minimal CMakeLists.txt file
+install(FILES CMakeLists.min.txt
+ RENAME "CMakeLists.txt"
+ DESTINATION "${app_relative_dest_path}"
+ COMPONENT holoscan-examples
+)
+
# Testing
if(HOLOSCAN_BUILD_TESTS)
diff --git a/examples/video_replayer_distributed/README.md b/examples/video_replayer_distributed/README.md
index 0d88ce9b..58fe2830 100644
--- a/examples/video_replayer_distributed/README.md
+++ b/examples/video_replayer_distributed/README.md
@@ -1,7 +1,8 @@
# Distributed Video Replayer
Minimal example to demonstrate the use of the video stream replayer operator to load video from disk in a distributed manner.
-The video frames need to have been converted to a gxf entity format, as shown [here](../../scripts/README.md#convert_video_to_gxf_entitiespy).
+
+The video frames need to have been converted to a gxf entity format to use as input. You can use the `convert_video_to_gxf_entities.py` script installed in `/opt/nvidia/holoscan/bin` or available [on GitHub](https://github.com/nvidia-holoscan/holoscan-sdk/tree/main/scripts#convert_video_to_gxf_entitiespy) (tensors will be loaded on the GPU).
> Note: Support for H.264 streams is in progress and can be found on [HoloHub](https://nvidia-holoscan.github.io/holohub)
@@ -21,8 +22,8 @@ Please refer to the [user guide](https://docs.nvidia.com/holoscan/sdk-user-guide
* **using deb package install**:
```bash
- # [Prerequisite] Download NGC dataset above to `DATA_DIR` (e.g., `/opt/nvidia/data`)
- export HOLOSCAN_INPUT_PATH=
+ /opt/nvidia/holoscan/examples/download_example_data
+ export HOLOSCAN_INPUT_PATH=/opt/nvidia/holoscan/data
# Set the application folder
APP_DIR=/opt/nvidia/holoscan/examples/video_replayer_distributed/cpp
@@ -82,8 +83,8 @@ Please refer to the [user guide](https://docs.nvidia.com/holoscan/sdk-user-guide
```
* **using deb package install**:
```bash
- # [Prerequisite] Download NGC dataset above to `DATA_DIR` (e.g., `/opt/nvidia/data`)
- export HOLOSCAN_INPUT_PATH=
+ /opt/nvidia/holoscan/examples/download_example_data
+ export HOLOSCAN_INPUT_PATH=/opt/nvidia/holoscan/data
export PYTHONPATH=/opt/nvidia/holoscan/python/lib
# Set the application folder
@@ -156,9 +157,9 @@ Refer to the documentation in the [user guide](https://docs.nvidia.com/holoscan/
# in one machine (e.g. IP address `10.2.34.56`) using the port number `10000`,
# and another worker (`fragment2` that renders video to display) in another machine.
# If `--fragments` is not specified, any fragment in the application will be chosen to run.
-# The `--nic ` argument is required when running a distributed application
+# The `--nic ` argument is required when running a distributed application
# across multiple nodes; it instructs the application to use the specified network
-# interface for communicating with other application nodes.
+# interface for communicating with other application nodes.
#
# note: use the following command to get a list of available network interface name and its assigned IP address.
ip -o -4 addr show | awk '{print $2, $4}'
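
For context, the distributed example splits the pipeline into two fragments, one holding the replayer and one holding the visualizer, so the fragments can run on separate nodes with the connection carried over UCX. A condensed sketch of that structure is shown below; the shipped `video_replayer_distributed.py` contains additional details:

```python
import os

from holoscan.core import Application, Fragment
from holoscan.operators import HolovizOp, VideoStreamReplayerOp


class Fragment1(Fragment):
    def compose(self):
        # Reads frames from disk; directory/basename come from the yaml config.
        replayer = VideoStreamReplayerOp(self, name="replayer", **self.kwargs("replayer"))
        self.add_operator(replayer)


class Fragment2(Fragment):
    def compose(self):
        visualizer = HolovizOp(self, name="holoviz", **self.kwargs("holoviz"))
        self.add_operator(visualizer)


class DistributedVideoReplayerApp(Application):
    def compose(self):
        fragment1 = Fragment1(self, name="fragment1")
        fragment2 = Fragment2(self, name="fragment2")
        # The replayer output in fragment1 feeds the visualizer in fragment2;
        # across nodes this connection is carried by UCX.
        self.add_flow(fragment1, fragment2, {("replayer.output", "holoviz.receivers")})


if __name__ == "__main__":
    config_file = os.path.join(os.path.dirname(__file__), "video_replayer_distributed.yaml")
    app = DistributedVideoReplayerApp()
    app.config(config_file)
    app.run()
```

The `--driver`, `--worker`, `--fragments`, and `--nic` flags from the commands above are handled by the application runtime at startup, so the same script can act as driver, worker, or both.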
diff --git a/examples/video_replayer_distributed/cpp/CMakeLists.min.txt b/examples/video_replayer_distributed/cpp/CMakeLists.min.txt
index 9d8a57d1..f962f48c 100644
--- a/examples/video_replayer_distributed/cpp/CMakeLists.min.txt
+++ b/examples/video_replayer_distributed/cpp/CMakeLists.min.txt
@@ -42,22 +42,74 @@ add_dependencies(video_replayer_distributed video_replayer_distributed_yaml)
# Testing
if(BUILD_TESTING)
+ set(RECORDING_DIR ${CMAKE_CURRENT_BINARY_DIR}/recording_output)
+ set(SOURCE_VIDEO_BASENAME video_replayer_output)
+ set(VALIDATION_FRAMES_DIR ${CMAKE_SOURCE_DIR}/testing/validation_frames/video_replayer/)
+
+ file(MAKE_DIRECTORY ${RECORDING_DIR})
+
file(READ ${CMAKE_CURRENT_SOURCE_DIR}/video_replayer_distributed.yaml CONFIG_STRING)
string(REPLACE "count: 0" "count: 10" CONFIG_STRING ${CONFIG_STRING})
set(CONFIG_FILE ${CMAKE_CURRENT_BINARY_DIR}/cpp_video_replayer_distributed_config.yaml)
file(WRITE ${CONFIG_FILE} ${CONFIG_STRING})
+ # Patch the current example to enable recording the rendering window
+ add_custom_command(OUTPUT video_replayer_distributed_test.cpp
+ PRE_LINK
+ COMMAND patch -u -o video_replayer_distributed_test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/video_replayer_distributed.cpp
+ ${CMAKE_SOURCE_DIR}/testing/validation_frames/video_replayer/cpp_video_replayer_distributed.patch
+ )
+
+ # Create the test executable
+ add_executable(video_replayer_distributed_test
+ video_replayer_distributed_test.cpp
+ )
+
+ target_include_directories(video_replayer_distributed_test
+ PRIVATE ${CMAKE_SOURCE_DIR}/testing)
+
+ target_compile_definitions(video_replayer_distributed_test
+ PRIVATE RECORD_OUTPUT RECORDING_DIR="${RECORDING_DIR}"
+ PRIVATE SOURCE_VIDEO_BASENAME="${SOURCE_VIDEO_BASENAME}"
+ )
+
+ target_link_libraries(video_replayer_distributed_test
+ PRIVATE
+ holoscan::core
+ holoscan::ops::holoviz
+ holoscan::ops::video_stream_replayer
+ holoscan::ops::video_stream_recorder
+ holoscan::ops::format_converter
+ )
+
add_test(NAME EXAMPLE_CPP_VIDEO_REPLAYER_DISTRIBUTED_TEST
- COMMAND ${CMAKE_CURRENT_BINARY_DIR}/video_replayer_distributed --config ${CONFIG_FILE}
+ COMMAND ${CMAKE_CURRENT_BINARY_DIR}/video_replayer_distributed_test --config ${CONFIG_FILE}
WORKING_DIRECTORY ${CMAKE_BINARY_DIR}
)
-
+
add_test(NAME EXAMPLE_CPP_VIDEO_REPLAYER_DISTRIBUTED_DRIVER_AND_WORKER_TEST
- COMMAND ${CMAKE_CURRENT_BINARY_DIR}/video_replayer_distributed --config ${CONFIG_FILE} --driver --worker --fragments=all
+ COMMAND ${CMAKE_CURRENT_BINARY_DIR}/video_replayer_distributed_test --config ${CONFIG_FILE} --driver --worker --fragments=all
WORKING_DIRECTORY ${CMAKE_BINARY_DIR}
)
set_tests_properties(EXAMPLE_CPP_VIDEO_REPLAYER_DISTRIBUTED_TEST PROPERTIES
PASS_REGULAR_EXPRESSION "Reach end of file or playback count reaches to the limit. Stop ticking."
+ FAIL_REGULAR_EXPRESSION "initialized independent of a parent entity"
+ )
+
+ # Add a test to check the validity of the frames
+ add_test(NAME EXAMPLE_CPP_VIDEO_REPLAYER_DISTRIBUTED_RENDER_TEST
+ COMMAND python3 ${CMAKE_SOURCE_DIR}/../bin/video_validation.py
+ --source_video_dir ${RECORDING_DIR}
+ --source_video_basename ${SOURCE_VIDEO_BASENAME}
+ --output_dir ${RECORDING_DIR}
+ --validation_frames_dir ${VALIDATION_FRAMES_DIR}
+ WORKING_DIRECTORY ${CMAKE_BINARY_DIR}
+ )
+
+ set_tests_properties(EXAMPLE_CPP_VIDEO_REPLAYER_DISTRIBUTED_RENDER_TEST PROPERTIES
+ DEPENDS EXAMPLE_CPP_VIDEO_REPLAYER_DISTRIBUTED_TEST
+ PASS_REGULAR_EXPRESSION "Valid video output!"
+ FAIL_REGULAR_EXPRESSION "initialized independent of a parent entity"
)
endif()
diff --git a/examples/video_replayer_distributed/cpp/CMakeLists.txt b/examples/video_replayer_distributed/cpp/CMakeLists.txt
index 19a76c24..47520466 100644
--- a/examples/video_replayer_distributed/cpp/CMakeLists.txt
+++ b/examples/video_replayer_distributed/cpp/CMakeLists.txt
@@ -1,4 +1,4 @@
-# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+# SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -93,6 +93,9 @@ if(HOLOSCAN_BUILD_TESTS)
video_replayer_distributed_test.cpp
)
+ target_include_directories(video_replayer_distributed_test
+ PRIVATE ${CMAKE_SOURCE_DIR}/tests)
+
target_compile_definitions(video_replayer_distributed_test
PRIVATE RECORD_OUTPUT RECORDING_DIR="${RECORDING_DIR}"
PRIVATE SOURCE_VIDEO_BASENAME="${SOURCE_VIDEO_BASENAME}"
@@ -119,6 +122,7 @@ if(HOLOSCAN_BUILD_TESTS)
set_tests_properties(EXAMPLE_CPP_VIDEO_REPLAYER_DISTRIBUTED_TEST PROPERTIES
PASS_REGULAR_EXPRESSION "Reach end of file or playback count reaches to the limit. Stop ticking."
+ FAIL_REGULAR_EXPRESSION "initialized independent of a parent entity"
)
# Add a test to check the validity of the frames
@@ -134,6 +138,7 @@ if(HOLOSCAN_BUILD_TESTS)
set_tests_properties(EXAMPLE_CPP_VIDEO_REPLAYER_DISTRIBUTED_RENDER_TEST PROPERTIES
DEPENDS EXAMPLE_CPP_VIDEO_REPLAYER_DISTRIBUTED_TEST
PASS_REGULAR_EXPRESSION "Valid video output!"
+ FAIL_REGULAR_EXPRESSION "initialized independent of a parent entity"
)
endif()
diff --git a/examples/video_replayer_distributed/python/CMakeLists.min.txt b/examples/video_replayer_distributed/python/CMakeLists.min.txt
new file mode 100644
index 00000000..178eefb9
--- /dev/null
+++ b/examples/video_replayer_distributed/python/CMakeLists.min.txt
@@ -0,0 +1,68 @@
+# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+# SPDX-License-Identifier: Apache-2.0
+#
+# Licensed under the Apache License, Version 2.0 (the \"License\");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an \"AS IS\" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# Testing
+if(BUILD_TESTING)
+
+ set(RECORDING_DIR ${CMAKE_CURRENT_BINARY_DIR}/recording_output)
+ set(SOURCE_VIDEO_BASENAME python_video_replayer_distributed_output)
+ set(VALIDATION_FRAMES_DIR ${CMAKE_SOURCE_DIR}/testing/validation_frames/video_replayer/)
+
+ file(MAKE_DIRECTORY ${RECORDING_DIR})
+
+ file(READ ${CMAKE_CURRENT_SOURCE_DIR}/video_replayer_distributed.yaml CONFIG_STRING)
+ string(REPLACE "count: 0" "count: 10" CONFIG_STRING ${CONFIG_STRING})
+ string(APPEND CONFIG_STRING " enable_render_buffer_output: true\n\nrecorder:\n directory: \"${RECORDING_DIR}\"\n basename: \"${SOURCE_VIDEO_BASENAME}\"")
+ set(CONFIG_FILE ${CMAKE_CURRENT_BINARY_DIR}/python_video_replayer_distributed_config.yaml)
+ file(WRITE ${CONFIG_FILE} ${CONFIG_STRING})
+
+ # Patch the current example to enable recording the rendering window
+ add_custom_command(OUTPUT video_replayer_distributed_test.py
+ PRE_LINK
+ COMMAND patch -u -o video_replayer_distributed_test.py ${CMAKE_CURRENT_SOURCE_DIR}/video_replayer_distributed.py
+ ${CMAKE_SOURCE_DIR}/testing/validation_frames/video_replayer/python_video_replayer_distributed.patch
+ )
+
+ add_custom_target(python_video_replayer_distributed_test ALL
+ DEPENDS "video_replayer_distributed_test.py"
+ )
+
+ add_test(NAME EXAMPLE_PYTHON_VIDEO_REPLAYER_DISTRIBUTED_TEST
+ COMMAND python3 video_replayer_distributed_test.py --config python_video_replayer_distributed_config.yaml
+ WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
+ )
+
+ set_tests_properties(EXAMPLE_PYTHON_VIDEO_REPLAYER_DISTRIBUTED_TEST PROPERTIES
+ DEPENDS "video_replayer_distributed_test.py"
+ PASS_REGULAR_EXPRESSION "Reach end of file or playback count reaches to the limit. Stop ticking."
+ )
+
+ # Add a test to check the validity of the frames
+ add_test(NAME EXAMPLE_PYTHON_VIDEO_REPLAYER_DISTRIBUTED_RENDER_TEST
+ COMMAND python3 ${CMAKE_SOURCE_DIR}/../bin/video_validation.py
+ --source_video_dir ${RECORDING_DIR}
+ --source_video_basename ${SOURCE_VIDEO_BASENAME}
+ --output_dir ${RECORDING_DIR}
+ --validation_frames_dir ${VALIDATION_FRAMES_DIR}
+ WORKING_DIRECTORY ${CMAKE_BINARY_DIR}
+ )
+
+ set_tests_properties(EXAMPLE_PYTHON_VIDEO_REPLAYER_DISTRIBUTED_RENDER_TEST PROPERTIES
+ DEPENDS EXAMPLE_PYTHON_VIDEO_REPLAYER_DISTRIBUTED_TEST
+ PASS_REGULAR_EXPRESSION "Valid video output!"
+ )
+
+endif()
diff --git a/examples/video_replayer_distributed/python/CMakeLists.txt b/examples/video_replayer_distributed/python/CMakeLists.txt
index 2f719542..04ad5953 100644
--- a/examples/video_replayer_distributed/python/CMakeLists.txt
+++ b/examples/video_replayer_distributed/python/CMakeLists.txt
@@ -1,4 +1,4 @@
-# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+# SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -41,6 +41,13 @@ install(FILES
COMPONENT "holoscan-examples"
)
+# Install the minimal CMakeLists.txt file
+install(FILES CMakeLists.min.txt
+ RENAME "CMakeLists.txt"
+ DESTINATION "${app_relative_dest_path}"
+ COMPONENT holoscan-examples
+)
+
# Testing
if(HOLOSCAN_BUILD_TESTS)
@@ -75,6 +82,7 @@ if(HOLOSCAN_BUILD_TESTS)
set_tests_properties(EXAMPLE_PYTHON_VIDEO_REPLAYER_DISTRIBUTED_TEST PROPERTIES
DEPENDS "video_replayer_distributed_test.py"
PASS_REGULAR_EXPRESSION "Reach end of file or playback count reaches to the limit. Stop ticking."
+ FAIL_REGULAR_EXPRESSION "initialized independent of a parent entity"
)
# Add a test to check the validity of the frames
@@ -90,6 +98,7 @@ if(HOLOSCAN_BUILD_TESTS)
set_tests_properties(EXAMPLE_PYTHON_VIDEO_REPLAYER_DISTRIBUTED_RENDER_TEST PROPERTIES
DEPENDS EXAMPLE_PYTHON_VIDEO_REPLAYER_DISTRIBUTED_TEST
PASS_REGULAR_EXPRESSION "Valid video output!"
+ FAIL_REGULAR_EXPRESSION "initialized independent of a parent entity"
)
endif()
diff --git a/examples/wrap_operator_as_gxf_extension/ping_rx_native_op/ping_rx_native_op.cpp b/examples/wrap_operator_as_gxf_extension/ping_rx_native_op/ping_rx_native_op.cpp
index f1d0e821..b0baee89 100644
--- a/examples/wrap_operator_as_gxf_extension/ping_rx_native_op/ping_rx_native_op.cpp
+++ b/examples/wrap_operator_as_gxf_extension/ping_rx_native_op/ping_rx_native_op.cpp
@@ -1,5 +1,5 @@
/*
- * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
@@ -17,8 +17,6 @@
#include "ping_rx_native_op.hpp"
-#include
-
using namespace holoscan;
namespace myops {
diff --git a/examples/wrap_operator_as_gxf_extension/ping_tx_native_op/ping_tx_native_op.cpp b/examples/wrap_operator_as_gxf_extension/ping_tx_native_op/ping_tx_native_op.cpp
index 559a062a..4dbdcfc3 100644
--- a/examples/wrap_operator_as_gxf_extension/ping_tx_native_op/ping_tx_native_op.cpp
+++ b/examples/wrap_operator_as_gxf_extension/ping_tx_native_op/ping_tx_native_op.cpp
@@ -1,5 +1,5 @@
/*
- * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
@@ -17,8 +17,6 @@
#include "ping_tx_native_op.hpp"
-#include
-
using namespace holoscan;
namespace myops {
diff --git a/gxf_extensions/CMakeLists.txt b/gxf_extensions/CMakeLists.txt
index 7a4a4e14..406d9cfc 100644
--- a/gxf_extensions/CMakeLists.txt
+++ b/gxf_extensions/CMakeLists.txt
@@ -1,4 +1,4 @@
-# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+# SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -28,5 +28,4 @@ set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/lib/gxf_extensions)
set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/lib/gxf_extensions)
add_subdirectory(gxf_holoscan_wrapper)
-add_subdirectory(stream_playback)
add_subdirectory(ucx)
diff --git a/gxf_extensions/README.md b/gxf_extensions/README.md
index 4b3f20cb..90565970 100644
--- a/gxf_extensions/README.md
+++ b/gxf_extensions/README.md
@@ -4,5 +4,4 @@ See the User Guide for details regarding the extensions in GXF and Holoscan SDK,
- `bayer_demosaic`: includes the `nvidia::holoscan::BayerDemosaic` codelet. It performs color filter array (CFA) interpolation on single-channel 8- or 16-bit unsigned integer inputs and outputs an RGB or RGBA image. This codelet is no longer used in the core SDK since a native `holoscan::Operator` version is now available (instead of wrapping this codelet as a `holoscan::gxf::GXFOperator`). This version is kept as a concrete example of a codelet; a `GXFOperator` wrapping it can still be found in `tests/system/bayer_demosaic_gxf.hpp`, where it is used for test cases.
- `gxf_holoscan_wrapper`: includes the `holoscan::gxf::OperatorWrapper` codelet. It is used as a utility base class to wrap a holoscan operator to interface with the GXF framework.
-- `stream_playback`: includes the `nvidia::holoscan::stream_playback::VideoStreamSerializer` entity serializer to/from a Tensor Object.
- `ucx_holoscan`: includes `nvidia::holoscan::UcxHoloscanComponentSerializer` which is a `nvidia::gxf::ComponentSerializer` that handles serialization and deserialization of `holoscan::Message` and `holoscan::Tensor` types over a Unified Communication X (UCX) network connection. UCX is used by Holoscan SDK to send data between fragments of distributed applications. This extension must be used in combination with standard GXF UCX extension components. Specifically, this `UcxHoloscanComponentSerializer` is intended for use by the `UcxEntitySerializer` where it can operate alongside the `UcxComponentSerializer` that serializes GXF-specific types (`nvidia::gxf::Tensor`, `nvidia::gxf::VideoBuffer`, etc.).
diff --git a/gxf_extensions/gxf_holoscan_wrapper/gxf_holoscan_wrapper_ext.cpp b/gxf_extensions/gxf_holoscan_wrapper/gxf_holoscan_wrapper_ext.cpp
index 0d44e11c..e4cd7480 100644
--- a/gxf_extensions/gxf_holoscan_wrapper/gxf_holoscan_wrapper_ext.cpp
+++ b/gxf_extensions/gxf_holoscan_wrapper/gxf_holoscan_wrapper_ext.cpp
@@ -1,5 +1,5 @@
/*
- * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
@@ -17,7 +17,6 @@
#include "gxf/std/extension_factory_helper.hpp"
#include "holoscan/core/domain/tensor.hpp"
-#include "holoscan/core/gxf/gxf_tensor.hpp"
#include "holoscan/core/message.hpp"
#include "operator_wrapper.hpp"
@@ -28,8 +27,6 @@ GXF_EXT_FACTORY_SET_INFO(0x12d01b4ee06f49ef, 0x93c4961834347385, "HoloscanWrappe
// Register types/components that are used by Holoscan
GXF_EXT_FACTORY_ADD_0(0x61510ca06aa9493b, 0x8a777d0bf87476b7, holoscan::Message,
"Holoscan message type");
-GXF_EXT_FACTORY_ADD(0xa02945eaf20e418c, 0x8e6992b68672ce40, holoscan::gxf::GXFTensor,
- nvidia::gxf::Tensor, "Holoscan's GXF Tensor type");
GXF_EXT_FACTORY_ADD_0(0xa5eb0ed57d7f4aa2, 0xb5865ccca0ef955c, holoscan::Tensor,
"Holoscan's Tensor type");
diff --git a/gxf_extensions/gxf_holoscan_wrapper/operator_wrapper.hpp b/gxf_extensions/gxf_holoscan_wrapper/operator_wrapper.hpp
index 20eb5896..5ddeeed5 100644
--- a/gxf_extensions/gxf_holoscan_wrapper/operator_wrapper.hpp
+++ b/gxf_extensions/gxf_holoscan_wrapper/operator_wrapper.hpp
@@ -1,5 +1,5 @@
/*
- * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
@@ -26,7 +26,7 @@
#include "operator_wrapper_fragment.hpp"
#include "gxf/std/codelet.hpp"
-#include "gxf/std/parameter_parser_std.hpp"
+#include "gxf/core/parameter_parser_std.hpp"
namespace holoscan::gxf {
diff --git a/gxf_extensions/gxf_holoscan_wrapper/operator_wrapper_fragment.hpp b/gxf_extensions/gxf_holoscan_wrapper/operator_wrapper_fragment.hpp
index e8104f0f..9eb9545c 100644
--- a/gxf_extensions/gxf_holoscan_wrapper/operator_wrapper_fragment.hpp
+++ b/gxf_extensions/gxf_holoscan_wrapper/operator_wrapper_fragment.hpp
@@ -1,5 +1,5 @@
/*
- * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
@@ -32,9 +32,7 @@ class OperatorWrapperFragment : public holoscan::Fragment {
public:
OperatorWrapperFragment();
-  GXFExecutor& gxf_executor() {
-    return static_cast<GXFExecutor&>(executor());
-  }
+  GXFExecutor& gxf_executor() { return static_cast<GXFExecutor&>(executor()); }
};
} // namespace holoscan::gxf
diff --git a/gxf_extensions/stream_playback/CMakeLists.txt b/gxf_extensions/stream_playback/CMakeLists.txt
deleted file mode 100644
index 29dff8ed..00000000
--- a/gxf_extensions/stream_playback/CMakeLists.txt
+++ /dev/null
@@ -1,36 +0,0 @@
-# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
-# SPDX-License-Identifier: Apache-2.0
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Create library
-add_library(gxf_stream_playback_lib SHARED
- video_stream_serializer.cpp
- video_stream_serializer.hpp
-)
-target_link_libraries(gxf_stream_playback_lib
- PUBLIC
- GXF::serialization
- yaml-cpp
-)
-
-# Create extension
-add_library(gxf_stream_playback SHARED
- stream_playback_ext.cpp
-)
-target_link_libraries(gxf_stream_playback
- PUBLIC gxf_stream_playback_lib
- PRIVATE holoscan_security_flags
-)
-# Install GXF extension as a component 'holoscan-gxf_extensions'
-install_gxf_extension(gxf_stream_playback)
diff --git a/gxf_extensions/stream_playback/stream_playback_ext.cpp b/gxf_extensions/stream_playback/stream_playback_ext.cpp
deleted file mode 100644
index ea11e832..00000000
--- a/gxf_extensions/stream_playback/stream_playback_ext.cpp
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: Apache-2.0
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#include "gxf/std/extension_factory_helper.hpp"
-
-#include "video_stream_serializer.hpp"
-
-GXF_EXT_FACTORY_BEGIN()
-GXF_EXT_FACTORY_SET_INFO(0xe6c168715f3f428d, 0x96cd24dce2f42f46, "StreamPlaybackExtension",
- "Holoscan StreamPlayback extension", "NVIDIA", "0.2.0", "LICENSE");
-GXF_EXT_FACTORY_ADD(0x7ee08fcc84c94245, 0xa415022b42f4ef39,
- nvidia::holoscan::stream_playback::VideoStreamSerializer,
- nvidia::gxf::EntitySerializer, "VideoStreamSerializer component.");
-GXF_EXT_FACTORY_END()
diff --git a/gxf_extensions/stream_playback/video_stream_serializer.cpp b/gxf_extensions/stream_playback/video_stream_serializer.cpp
deleted file mode 100644
index e0649ee8..00000000
--- a/gxf_extensions/stream_playback/video_stream_serializer.cpp
+++ /dev/null
@@ -1,279 +0,0 @@
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: Apache-2.0
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#include "video_stream_serializer.hpp"
-
-#include
-
-#include
-#include
-#include
-#include
-
-namespace nvidia {
-namespace holoscan {
-namespace stream_playback {
-
-namespace {
-
-// Serializes EntityHeader
-gxf::Expected SerializeEntityHeader(VideoStreamSerializer::EntityHeader header,
- gxf::Endpoint* endpoint) {
- if (!endpoint) { return gxf::Unexpected{GXF_ARGUMENT_NULL}; }
- header.serialized_size = htole64(header.serialized_size);
- header.checksum = htole32(header.checksum);
- header.sequence_number = htole64(header.sequence_number);
- header.flags = htole32(header.flags);
- header.component_count = htole64(header.component_count);
- header.reserved = htole64(header.reserved);
- return endpoint->writeTrivialType(&header).substitute(sizeof(header));
-}
-
-// Deserializes EntityHeader
-gxf::Expected DeserializeEntityHeader(
- gxf::Endpoint* endpoint) {
- if (!endpoint) { return gxf::Unexpected{GXF_ARGUMENT_NULL}; }
- VideoStreamSerializer::EntityHeader header;
- return endpoint->readTrivialType(&header).and_then([&]() {
- header.serialized_size = le64toh(header.serialized_size);
- header.checksum = le32toh(header.checksum);
- header.sequence_number = le64toh(header.sequence_number);
- header.flags = le32toh(header.flags);
- header.component_count = le64toh(header.component_count);
- header.reserved = le64toh(header.reserved);
- return header;
- });
-}
-
-// Serializes ComponentHeader
-gxf::Expected SerializeComponentHeader(VideoStreamSerializer::ComponentHeader header,
- gxf::Endpoint* endpoint) {
- if (!endpoint) { return gxf::Unexpected{GXF_ARGUMENT_NULL}; }
- header.serialized_size = htole64(header.serialized_size);
- header.tid.hash1 = htole64(header.tid.hash1);
- header.tid.hash2 = htole64(header.tid.hash2);
- header.name_size = htole64(header.name_size);
- return endpoint->writeTrivialType(&header).substitute(sizeof(header));
-}
-
-// Deserializes ComponentHeader
-gxf::Expected DeserializeComponentHeader(
- gxf::Endpoint* endpoint) {
- if (!endpoint) { return gxf::Unexpected{GXF_ARGUMENT_NULL}; }
- VideoStreamSerializer::ComponentHeader header;
- return endpoint->readTrivialType(&header).and_then([&]() {
- header.serialized_size = le64toh(header.serialized_size);
- header.tid.hash1 = le64toh(header.tid.hash1);
- header.tid.hash2 = le64toh(header.tid.hash2);
- header.name_size = le64toh(header.name_size);
- return header;
- });
-}
-
-} // namespace
-
-struct VideoStreamSerializer::ComponentEntry {
- ComponentHeader header = {0, GxfTidNull(), 0};
- gxf::UntypedHandle component = gxf::UntypedHandle::Null();
- gxf::Handle serializer = gxf::Handle::Null();
-};
-
-gxf_result_t VideoStreamSerializer::registerInterface(gxf::Registrar* registrar) {
- if (registrar == nullptr) { return GXF_ARGUMENT_NULL; }
- gxf::Expected result;
- result &=
- registrar->parameter(component_serializers_, "component_serializers", "Component serializers",
- "List of serializers for serializing and deserializing components");
- return gxf::ToResultCode(result);
-}
-
-gxf_result_t VideoStreamSerializer::serialize_entity_abi(gxf_uid_t eid, gxf::Endpoint* endpoint,
- uint64_t* size) {
- if (endpoint == nullptr || size == nullptr) { return GXF_ARGUMENT_NULL; }
- FixedVector components;
- FixedVector entries;
- return gxf::ToResultCode(
- gxf::Entity::Shared(context(), eid)
- .map([&](gxf::Entity entity) { return entity.findAll(components); })
- .and_then([&]() { return createComponentEntries(components); })
- .assign_to(entries)
- .and_then([&]() {
- EntityHeader entity_header;
- entity_header.serialized_size = 0; // How can we compute this before serializing?
- entity_header.checksum = 0x00000000;
- entity_header.sequence_number = outgoing_sequence_number_++;
- entity_header.flags = 0x00000000;
- entity_header.component_count = entries.size();
- entity_header.reserved = 0;
- return SerializeEntityHeader(entity_header, endpoint);
- })
- .assign_to(*size)
- .and_then([&]() { return serializeComponents(entries, endpoint); })
- .map([&](size_t serialized_size) { *size += serialized_size; }));
-}
-
-gxf::Expected VideoStreamSerializer::deserialize_entity_header_abi(
- gxf::Endpoint* endpoint) {
- gxf::Entity entity;
-
- gxf_result_t result = gxf::ToResultCode(
- gxf::Entity::New(context())
- .assign_to(entity)
- .and_then([&]() { return DeserializeEntityHeader(endpoint); })
- .map([&](EntityHeader entity_header) {
- if (entity_header.sequence_number != incoming_sequence_number_) {
- incoming_sequence_number_ = entity_header.sequence_number;
- }
- incoming_sequence_number_++;
- return deserializeComponents(entity_header.component_count, entity, endpoint);
- })
- .substitute(entity));
-
- if (result != GXF_SUCCESS) { GXF_LOG_ERROR("Deserialize entity header failed"); }
- return entity;
-}
-
-gxf_result_t VideoStreamSerializer::deserialize_entity_abi(gxf_uid_t eid, gxf::Endpoint* endpoint) {
- if (endpoint == nullptr) { return GXF_ARGUMENT_NULL; }
- gxf::Entity entity;
- return gxf::ToResultCode(gxf::Entity::Shared(context(), eid)
- .assign_to(entity)
- .and_then([&]() { return DeserializeEntityHeader(endpoint); })
- .map([&](EntityHeader entity_header) {
- if (entity_header.sequence_number != incoming_sequence_number_) {
- // Note:: This is a workaround for the issue that the frame count
- // is out of the maximum frame index.
- // Modified to support 'repeat' feature in
- // nvidia::holoscan::stream_playback::VideoStreamReplayer
- // which reuses gxf::EntityReplayer.
- // When 'repeat' parameter is 'true' and the frame count
- // is out of the maximum frame index, this error message
- // is printed with nvidia::gxf::StdEntitySerializer but it
- // is actually not a warning so we provide
- // nvidia::holoscan::stream_playback::VideoStreamSerializer
- // to replace nvidia::gxf::StdEntitySerializer and not to
- // print this warning message.
- incoming_sequence_number_ = entity_header.sequence_number;
- }
- incoming_sequence_number_++;
- return deserializeComponents(entity_header.component_count, entity,
- endpoint);
- }));
-}
-
-gxf::Expected>
-VideoStreamSerializer::createComponentEntries(
- const FixedVectorBase& components) {
- FixedVector entries;
- for (size_t i = 0; i < components.size(); i++) {
- const auto component = components[i];
- if (!component) { return gxf::Unexpected{GXF_ARGUMENT_OUT_OF_RANGE}; }
-
- // Check if component is serializable
- auto component_serializer = findComponentSerializer(component->tid());
- if (!component_serializer) {
- GXF_LOG_WARNING("No serializer found for component '%s' with type ID 0x%016zx%016zx",
- component->name(), component->tid().hash1, component->tid().hash2);
- continue;
- }
-
- // Create component header
- ComponentHeader component_header;
- component_header.serialized_size = 0; // How can we compute this before serializing?
- component_header.tid = component->tid();
- component_header.name_size = std::strlen(component->name());
-
- // Update component list
- const auto result =
- entries.emplace_back(component_header, component.value(), component_serializer.value());
- if (!result) { return gxf::Unexpected{GXF_EXCEEDING_PREALLOCATED_SIZE}; }
- }
-
- return entries;
-}
-
-gxf::Expected VideoStreamSerializer::serializeComponents(
- const FixedVectorBase& entries, gxf::Endpoint* endpoint) {
- size_t size = 0;
- for (size_t i = 0; i < entries.size(); i++) {
- const auto& entry = entries[i];
- if (!entry) { return gxf::Unexpected{GXF_ARGUMENT_OUT_OF_RANGE}; }
- const auto result =
- SerializeComponentHeader(entry->header, endpoint)
- .map([&](size_t component_header_size) { size += component_header_size; })
- .and_then(
- [&]() { return endpoint->write(entry->component.name(), entry->header.name_size); })
- .and_then([&]() { size += entry->header.name_size; })
- .and_then(
- [&]() { return entry->serializer->serializeComponent(entry->component, endpoint); })
- .map([&](size_t component_size) { size += component_size; });
- if (!result) { return gxf::ForwardError(result); }
- }
- return size;
-}
-
-gxf::Expected VideoStreamSerializer::deserializeComponents(size_t component_count,
- gxf::Entity entity,
- gxf::Endpoint* endpoint) {
- for (size_t i = 0; i < component_count; i++) {
- ComponentEntry entry;
- const auto result =
- DeserializeComponentHeader(endpoint)
- .assign_to(entry.header)
- .and_then([&]() { return findComponentSerializer(entry.header.tid); })
- .assign_to(entry.serializer)
- .and_then([&]() -> gxf::Expected {
- try {
- std::string name(entry.header.name_size, '\0');
- return gxf::ExpectedOrError(
- endpoint->read(const_cast(name.data()), name.size()), name);
- } catch (const std::exception& exception) {
- GXF_LOG_ERROR("Failed to deserialize component name: %s", exception.what());
- return gxf::Unexpected{GXF_OUT_OF_MEMORY};
- }
- })
- .map([&](std::string name) { return entity.add(entry.header.tid, name.c_str()); })
- .assign_to(entry.component)
- .and_then([&]() {
- return entry.serializer->deserializeComponent(entry.component, endpoint);
- });
- if (!result) { return gxf::ForwardError(result); }
- }
- return gxf::Success;
-}
-
-gxf::Expected> VideoStreamSerializer::findComponentSerializer(
- gxf_tid_t tid) {
- // Search cache for valid serializer
- const auto search = serializer_cache_.find(tid);
- if (search != serializer_cache_.end()) { return search->second; }
-
- // Search serializer list for valid serializer and cache result
- for (size_t i = 0; i < component_serializers_.get().size(); i++) {
- const auto component_serializer = component_serializers_.get()[i];
- if (!component_serializer) { return gxf::Unexpected{GXF_ARGUMENT_OUT_OF_RANGE}; }
- if (component_serializer.value()->isSupported(tid)) {
- serializer_cache_[tid] = component_serializer.value();
- return component_serializer.value();
- }
- }
-
- return gxf::Unexpected{GXF_QUERY_NOT_FOUND};
-}
-
-} // namespace stream_playback
-} // namespace holoscan
-} // namespace nvidia
diff --git a/gxf_extensions/stream_playback/video_stream_serializer.hpp b/gxf_extensions/stream_playback/video_stream_serializer.hpp
deleted file mode 100644
index 96dafdfb..00000000
--- a/gxf_extensions/stream_playback/video_stream_serializer.hpp
+++ /dev/null
@@ -1,107 +0,0 @@
-/*
- * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- * SPDX-License-Identifier: Apache-2.0
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#ifndef NVIDIA_CLARA_HOLOSCAN_GXF_EXTENSIONS_STREAM_PLAYBACK_VIDEO_STREAM_SERIALIZER_HPP_
-#define NVIDIA_CLARA_HOLOSCAN_GXF_EXTENSIONS_STREAM_PLAYBACK_VIDEO_STREAM_SERIALIZER_HPP_
-
-#include
-
-#include "common/fixed_vector.hpp"
-#include "gxf/serialization/component_serializer.hpp"
-#include "gxf/serialization/entity_serializer.hpp"
-#include "gxf/serialization/tid_hash.hpp"
-
-namespace nvidia::holoscan::stream_playback {
-
-/// @brief Data marshalling codelet for video stream entities.
-///
-/// Serializes and deserializes entities with the provided component serializers.
-/// Little-endian is used over big-endian for better performance on x86 and arm platforms.
-/// Entities are serialized in the following format:
-///
-/// | Entity Header || Component Header | Component Name | Component | ... | ... | ... |
-///
-/// Components will be serialized in the order they are added to the entity.
-/// Components without serializers will be skipped.
-/// Each component will be preceded by a component header and the name of the component.
-/// The component itself will be serialized with a component serializer.
-/// An entity header will be added at the beginning.
-class VideoStreamSerializer : gxf::EntitySerializer {
- public:
-#pragma pack(push, 1)
- // Header preceding entities
- struct EntityHeader {
- uint64_t serialized_size; // Size of the serialized entity in bytes
- uint32_t checksum; // Checksum to verify the integrity of the message
- uint64_t sequence_number; // Sequence number of the message
- uint32_t flags; // Flags to specify delivery options
- uint64_t component_count; // Number of components in the entity
- uint64_t reserved; // Bytes reserved for future use
- };
-#pragma pack(pop)
-
-#pragma pack(push, 1)
- // Header preceding components
- struct ComponentHeader {
- uint64_t serialized_size; // Size of the serialized component in bytes
- gxf_tid_t tid; // Type ID of the component
- uint64_t name_size; // Size of the component name in bytes
- };
-#pragma pack(pop)
-
- gxf_result_t registerInterface(gxf::Registrar* registrar) override;
- gxf_result_t initialize() override { return GXF_SUCCESS; }
- gxf_result_t deinitialize() override { return GXF_SUCCESS; }
-
- gxf_result_t serialize_entity_abi(gxf_uid_t eid, gxf::Endpoint* endpoint,
- uint64_t* size) override;
- gxf_result_t deserialize_entity_abi(gxf_uid_t eid, gxf::Endpoint* endpoint) override;
- gxf::Expected deserialize_entity_header_abi(gxf::Endpoint* endpoint) override;
-
- private:
- // Structure used to organize serializable components
- struct ComponentEntry;
-
- // Populates a list of component entries using a list of component handles
- gxf::Expected> createComponentEntries(
- const FixedVectorBase& components);
- // Serializes a list of components and writes them to an endpoint
- // Returns the total number of bytes serialized
- gxf::Expected serializeComponents(const FixedVectorBase& entries,
- gxf::Endpoint* endpoint);
- // Reads from an endpoint and deserializes a list of components
- gxf::Expected deserializeComponents(size_t component_count, gxf::Entity entity,
- gxf::Endpoint* endpoint);
- // Searches for a component serializer that supports the given type ID
- // Uses the first valid serializer found and caches it for subsequent lookups
- // Returns an Unexpected if no valid serializer is found
- gxf::Expected> findComponentSerializer(gxf_tid_t tid);
-
- gxf::Parameter, kMaxComponents>>
- component_serializers_;
-
- // Table that caches type ID with a valid component serializer
- std::unordered_map, gxf::TidHash>
- serializer_cache_;
- // Sequence number for outgoing messages
- uint64_t outgoing_sequence_number_;
- // Sequence number for incoming messages
- uint64_t incoming_sequence_number_;
-};
-
-} // namespace nvidia::holoscan::stream_playback
-
-#endif // NVIDIA_CLARA_HOLOSCAN_GXF_EXTENSIONS_STREAM_PLAYBACK_VIDEO_STREAM_SERIALIZER_HPP_
diff --git a/gxf_extensions/ucx/ucx_holoscan_component_serializer.cpp b/gxf_extensions/ucx/ucx_holoscan_component_serializer.cpp
index ba43a0e7..ce0e1834 100644
--- a/gxf_extensions/ucx/ucx_holoscan_component_serializer.cpp
+++ b/gxf_extensions/ucx/ucx_holoscan_component_serializer.cpp
@@ -1,5 +1,5 @@
/*
- * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
@@ -28,22 +28,6 @@
namespace nvidia {
namespace gxf {
-// copy of TensorHeader from UcxComponentSerializer needed for GXFTensor serialize/deserialize
-namespace {
-
-#pragma pack(push, 1)
-struct TensorHeader {
- MemoryStorageType storage_type; // CPU or GPU tensor
- PrimitiveType element_type; // Tensor element type
- uint64_t bytes_per_element; // Bytes per tensor element
- uint32_t rank; // Tensor rank
- int32_t dims[Shape::kMaxRank]; // Tensor dimensions
- uint64_t strides[Shape::kMaxRank]; // Tensor strides
-};
-#pragma pack(pop)
-
-} // namespace
-
gxf_result_t UcxHoloscanComponentSerializer::registerInterface(Registrar* registrar) {
Expected result;
result &= registrar->parameter(
@@ -63,87 +47,21 @@ gxf_result_t UcxHoloscanComponentSerializer::initialize() {
Expected UcxHoloscanComponentSerializer::configureSerializers() {
Expected result;
- result &= setSerializer([this](void* component, Endpoint* endpoint) {
- return serializeHoloscanGXFTensor(*static_cast(component), endpoint);
- });
result &= setSerializer([this](void* component, Endpoint* endpoint) {
return serializeHoloscanMessage(*static_cast(component), endpoint);
});
- result &= setSerializer([this](void* component, Endpoint* endpoint) {
- return serializeTensor(*static_cast(component), endpoint);
- });
-
return result;
}
Expected UcxHoloscanComponentSerializer::configureDeserializers() {
Expected result;
- result &= setDeserializer([this](void* component, Endpoint* endpoint) {
- return deserializeHoloscanGXFTensor(endpoint).assign_to(
- *static_cast(component));
- });
result &= setDeserializer([this](void* component, Endpoint* endpoint) {
return deserializeHoloscanMessage(endpoint).assign_to(
*static_cast(component));
});
- result &= setDeserializer([this](void* component, Endpoint* endpoint) {
- return deserializeTensor(endpoint).assign_to(*static_cast(component));
- });
-
return result;
}
-Expected UcxHoloscanComponentSerializer::serializeHoloscanGXFTensor(
- const holoscan::gxf::GXFTensor& tensor, Endpoint* endpoint) {
- GXF_LOG_DEBUG("UcxHoloscanComponentSerializer::serializeHoloscanGXFTensor");
- // Implementation matches UcxComponentSerializer::serializeTensor since holoscan::gxf::Tensor
- // inherits from nvidia::gxf::Tensor.
- TensorHeader header;
- header.storage_type = tensor.storage_type();
- header.element_type = tensor.element_type();
- header.bytes_per_element = tensor.bytes_per_element();
- header.rank = tensor.rank();
- for (size_t i = 0; i < Shape::kMaxRank; i++) {
- header.dims[i] = tensor.shape().dimension(i);
- header.strides[i] = tensor.stride(i);
- }
- auto result = endpoint->write_ptr(tensor.pointer(), tensor.size(), tensor.storage_type());
- if (!result) { return ForwardError(result); }
- auto size = endpoint->writeTrivialType(&header);
- if (!size) { return ForwardError(size); }
- return sizeof(header);
-}
-
-Expected UcxHoloscanComponentSerializer::deserializeHoloscanGXFTensor(
- Endpoint* endpoint) {
- GXF_LOG_DEBUG("UcxHoloscanComponentSerializer::deserializeHoloscanGXFTensor");
- // Implementation is as in UcxComponentSerializer::deserializeTensor is private, but with an
- // additional conversion to GXFTensor at the end.
- if (!endpoint) { return Unexpected{GXF_ARGUMENT_NULL}; }
-
- TensorHeader header;
- auto size = endpoint->readTrivialType(&header);
- if (!size) { return ForwardError(size); }
-
- std::array dims;
- std::memcpy(dims.data(), header.dims, sizeof(header.dims));
- Tensor::stride_array_t strides;
- std::memcpy(strides.data(), header.strides, sizeof(header.strides));
-
- Tensor tensor;
- auto result = tensor.reshapeCustom(Shape(dims, header.rank),
- header.element_type,
- header.bytes_per_element,
- strides,
- header.storage_type,
- allocator_);
- if (!result) { return ForwardError(result); }
- result = endpoint->write_ptr(tensor.pointer(), tensor.size(), tensor.storage_type());
- if (!result) { return ForwardError(result); }
- // Convert to GXFTensor (doesn't need to protect with mutex since 'tensor' is local)
- return holoscan::gxf::GXFTensor(tensor, -1);
-}
-
Expected UcxHoloscanComponentSerializer::serializeHoloscanMessage(
const holoscan::Message& message, Endpoint* endpoint) {
GXF_LOG_DEBUG("UcxHoloscanComponentSerializer::serializeHoloscanMessage");
@@ -197,69 +115,5 @@ Expected UcxHoloscanComponentSerializer::deserializeHoloscanM
return deserialize_func(endpoint);
}
-Expected UcxHoloscanComponentSerializer::serializeTensor(const Tensor& tensor,
- Endpoint* endpoint) {
- TensorHeader header;
- header.storage_type = tensor.storage_type();
- header.element_type = tensor.element_type();
- header.bytes_per_element = tensor.bytes_per_element();
- header.rank = tensor.rank();
- for (size_t i = 0; i < Shape::kMaxRank; i++) {
- header.dims[i] = tensor.shape().dimension(i);
- header.strides[i] = tensor.stride(i);
- }
-
- // Issue 4371324
- // Following the resolution of issue 4272363, the conversion of GXF Tensor to Holoscan
- // GXFTensor now avoids thread contention by utilizing a mutex. However, this mutex is not
- // employed when sending the GXF Tensor to a remote endpoint. Consequently, the tensor pointer
- // may be null during transmission. To address this, the tensor pointer is checked before
- // sending; if it is null, the thread yields and retries, continuing this process for up to 100ms.
- // If the tensor pointer remains null after this duration, an error is returned. This logic
- // ensures a balance between efficient error handling and avoiding excessive delays in tensor
- // transmission.
- holoscan::Timer timer("Waiting time: {:.8f} seconds\n", true, false);
- auto pointer = tensor.pointer();
- while (pointer == nullptr && timer.stop() < 0.1) {
- std::this_thread::yield();
- pointer = tensor.pointer();
- }
- if (pointer == nullptr) {
- GXF_LOG_ERROR("Tensor pointer is still null after 100ms");
- return Unexpected{GXF_NULL_POINTER};
- }
-
- auto result = endpoint->write_ptr(pointer, tensor.size(), tensor.storage_type());
- if (!result) { return ForwardError(result); }
- auto size = endpoint->writeTrivialType(&header);
- if (!size) { return ForwardError(size); }
- return sizeof(header);
-}
-
-Expected UcxHoloscanComponentSerializer::deserializeTensor(Endpoint* endpoint) {
- if (!endpoint) { return Unexpected{GXF_ARGUMENT_NULL}; }
-
- TensorHeader header;
- auto size = endpoint->readTrivialType(&header);
- if (!size) { return ForwardError(size); }
-
- std::array dims;
- std::memcpy(dims.data(), header.dims, sizeof(header.dims));
- Tensor::stride_array_t strides;
- std::memcpy(strides.data(), header.strides, sizeof(header.strides));
-
- Tensor tensor;
- auto result = tensor.reshapeCustom(Shape(dims, header.rank),
- header.element_type,
- header.bytes_per_element,
- strides,
- header.storage_type,
- allocator_);
- if (!result) { return ForwardError(result); }
- result = endpoint->write_ptr(tensor.pointer(), tensor.size(), tensor.storage_type());
- if (!result) { return ForwardError(result); }
- return tensor;
-}
-
} // namespace gxf
} // namespace nvidia
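The removed serializeTensor() above guarded against a transiently null tensor pointer (Issue 4371324) with a bounded spin-wait: yield the thread, re-check the pointer, and give up after roughly 100 ms. Below is a minimal, self-contained sketch of that pattern for reference; `wait_for_pointer` and its callable argument are illustrative helpers, not part of the SDK or GXF API.

```cpp
// Bounded spin-wait sketch (illustrative only): retry while the pointer is null,
// yielding between checks, and stop after the timeout elapses.
#include <chrono>
#include <thread>

template <typename GetPtr>
void* wait_for_pointer(GetPtr get_ptr,
                       std::chrono::milliseconds timeout = std::chrono::milliseconds(100)) {
  const auto deadline = std::chrono::steady_clock::now() + timeout;
  void* ptr = get_ptr();
  while (ptr == nullptr && std::chrono::steady_clock::now() < deadline) {
    std::this_thread::yield();  // let the thread holding the tensor mutex make progress
    ptr = get_ptr();
  }
  return ptr;  // may still be nullptr if the deadline expired; caller reports the error
}
```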
diff --git a/gxf_extensions/ucx/ucx_holoscan_component_serializer.hpp b/gxf_extensions/ucx/ucx_holoscan_component_serializer.hpp
index a9902cfd..0a5deec0 100644
--- a/gxf_extensions/ucx/ucx_holoscan_component_serializer.hpp
+++ b/gxf_extensions/ucx/ucx_holoscan_component_serializer.hpp
@@ -1,5 +1,5 @@
/*
- * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
@@ -25,7 +25,6 @@
#include "gxf/std/allocator.hpp"
#include "gxf/std/tensor.hpp"
#include "holoscan/core/codec_registry.hpp"
-#include "holoscan/core/gxf/gxf_tensor.hpp"
#include "holoscan/core/message.hpp"
namespace nvidia {
@@ -45,19 +44,10 @@ class UcxHoloscanComponentSerializer : public ComponentSerializer {
Expected<void> configureSerializers();
// Configures all deserializer functions
Expected<void> configureDeserializers();
- // Serializes a holoscan::gxf::GXFTensor
- Expected<size_t> serializeHoloscanGXFTensor(const holoscan::gxf::GXFTensor& tensor,
- Endpoint* endpoint);
- // Deserializes a holoscan::gxf::GXFTensor
- Expected<holoscan::gxf::GXFTensor> deserializeHoloscanGXFTensor(Endpoint* endpoint);
// Serializes a holoscan::Message
Expected<size_t> serializeHoloscanMessage(const holoscan::Message& message, Endpoint* endpoint);
// Deserializes a holoscan::Message
Expected<holoscan::Message> deserializeHoloscanMessage(Endpoint* endpoint);
- // Serializes a nvidia::gxf::Tensor
- Expected<size_t> serializeTensor(const Tensor& tensor, Endpoint* endpoint);
- // Deserializes a nvidia::gxf::Tensor
- Expected<Tensor> deserializeTensor(Endpoint* endpoint);
Parameter<Handle<Allocator>> allocator_;
};
diff --git a/include/common/logger/spdlog_logger.hpp b/include/common/logger/spdlog_logger.hpp
new file mode 100644
index 00000000..b005b65d
--- /dev/null
+++ b/include/common/logger/spdlog_logger.hpp
@@ -0,0 +1,58 @@
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef COMMON_LOGGER_SPDLOG_LOGGER_HPP
+#define COMMON_LOGGER_SPDLOG_LOGGER_HPP
+
+#include
+#include
+#include
+#include
+
+#include
+
+namespace nvidia {
+
+/// Namespace for the NVIDIA logger functionality.
+namespace logger {
+
+class SpdlogLogger : public Logger {
+ public:
+ /// Create a logger with the given name.
+ ///
+ /// This constructor creates a logger with the given name and optional logger and log function.
+ /// If no logger or log function is provided, a default spdlog logger will be created.
+ ///
+ /// @param name The name of the logger.
+ /// @param logger The logger to use (default: nullptr).
+ /// @param func The log function to use (default: nullptr).
+ explicit SpdlogLogger(const char* name, const std::shared_ptr<Logger>& logger = nullptr,
+ const LogFunction& func = nullptr);
+
+ /// Return the log pattern.
+ /// @return The reference to the log pattern string.
+ std::string& pattern_string();
+
+ protected:
+ std::string name_; ///< logger name
+};
+
+} // namespace logger
+
+} // namespace nvidia
+
+#endif /* COMMON_LOGGER_SPDLOG_LOGGER_HPP */
diff --git a/include/holoscan/core/application.hpp b/include/holoscan/core/application.hpp
index 5bab1114..4be1ace5 100644
--- a/include/holoscan/core/application.hpp
+++ b/include/holoscan/core/application.hpp
@@ -1,5 +1,5 @@
/*
- * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
@@ -119,7 +119,7 @@ class Application : public Fragment {
*/
template >>
- std::shared_ptr make_fragment(const StringT& name, ArgsT&&... args) {
+ std::shared_ptr make_fragment(StringT name, ArgsT&&... args) {
auto fragment = std::make_shared(std::forward(args)...);
fragment->name(name);
fragment->application(this);
diff --git a/include/holoscan/core/argument_setter.hpp b/include/holoscan/core/argument_setter.hpp
index 12483f5f..6be8cb10 100644
--- a/include/holoscan/core/argument_setter.hpp
+++ b/include/holoscan/core/argument_setter.hpp
@@ -1,5 +1,5 @@
/*
- * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
@@ -266,6 +266,7 @@ class ArgumentSetter {
typename holoscan::type_info::derived_type>(arg_value);
// Initialize the condition in case the condition created by
// Fragment::make_condition() is added to the operator as an argument.
+ // TODO: would like this to be assigned to the same entity as the operator
if (converted_value) { converted_value->initialize(); }
param = converted_value;
@@ -281,6 +282,7 @@ class ArgumentSetter {
typename holoscan::type_info::derived_type>(arg_value);
// Initialize the resource in case the resource created by
// Fragment::make_resource() is added to the operator as an argument.
+ // TODO: would like this to be assigned to the same entity as the operator
if (converted_value) { converted_value->initialize(); }
param = converted_value;
@@ -389,6 +391,7 @@ class ArgumentSetter {
// Initialize the condition in case the condition created by
// Fragment::make_condition() is added to the operator as an argument.
+ // TODO: would like this to be assigned to the same entity as the operator
if (condition) { condition->initialize(); }
converted_value.push_back(condition);
@@ -411,6 +414,7 @@ class ArgumentSetter {
// Initialize the resource in case the resource created by
// Fragment::make_resource() is added to the operator as an argument.
+ // TODO: would like this to be assigned to the same entity as the operator
if (resource) { resource->initialize(); }
converted_value.push_back(resource);
diff --git a/include/holoscan/core/codecs.hpp b/include/holoscan/core/codecs.hpp
index 77a260fd..8e26ac88 100644
--- a/include/holoscan/core/codecs.hpp
+++ b/include/holoscan/core/codecs.hpp
@@ -171,44 +171,45 @@ struct codec {
//////////////////////////////////////////////////////////////////////////////////////////////////
// Codec type 4: serialization of std::vector<bool> only
//
-// Note: Have to serialize std::vector differently than the numeric types due to how it is
-// packed. This is currently inefficient as 8x the data size is transferred due to bit->byte
-// conversion. Could revisit packing the data more efficiently if needed, but likely not
-// worth it if only a small length vector is being sent.
+// Performs bit-packing/unpacking to/from uint8_t type for more efficient serialization.
// codec of std::vector<bool>
template <>
struct codec<std::vector<bool>> {
static expected<size_t, RuntimeError> serialize(const std::vector<bool>& data,
Endpoint* endpoint) {
- ContiguousDataHeader header;
- header.size = data.size();
- header.bytes_per_element = header.size > 0 ? sizeof(data[0]) : 1;
- auto size = endpoint->write_trivial_type(&header);
+ size_t total_bytes = 0;
+ size_t num_bits = data.size();
+ size_t num_bytes = (num_bits + 7) / 8; // the number of bytes needed to store the bits
+ auto size = endpoint->write_trivial_type(&num_bits);
if (!size) { return forward_error(size); }
- size_t total_bytes = size.value();
- expected<size_t, RuntimeError> size2;
- for (const auto b : data) {
- bool bool_b = b;
- size2 = endpoint->write_trivial_type(&bool_b);
- if (!size2) { return forward_error(size2); }
+ total_bytes += size.value();
+ std::vector<uint8_t> packed_data(num_bytes, 0); // Create a vector to store the packed data
+ for (size_t i = 0; i < num_bits; ++i) {
+ if (data[i]) {
+ packed_data[i / 8] |= (1 << (i % 8)); // Pack the bits into the bytes
+ }
}
- total_bytes += size2.value() * header.size;
+ auto result = endpoint->write(packed_data.data(), packed_data.size());
+ if (!result) { return forward_error(result); }
+ total_bytes += result.value();
return total_bytes;
}
static expected<std::vector<bool>, RuntimeError> deserialize(Endpoint* endpoint) {
- ContiguousDataHeader header;
- auto header_size = endpoint->read_trivial_type(&header);
- if (!header_size) { return forward_error(header_size); }
- std::vector data;
- data.resize(header.size);
- expected<size_t, RuntimeError> result;
- for (auto&& b : data) {
- bool bool_b;
- result = endpoint->read_trivial_type(&bool_b);
- if (!result) { return forward_error(result); }
- b = bool_b;
+ size_t num_bits;
+ auto size = endpoint->read_trivial_type(&num_bits);
+ if (!size) { return forward_error(size); }
+ size_t num_bytes =
+ (num_bits + 7) / 8; // Calculate the number of bytes needed to store the bits
+ std::vector<uint8_t> packed_data(num_bytes, 0); // Create a vector to store the packed data
+ auto result = endpoint->read(packed_data.data(), packed_data.size());
+ if (!result) { return forward_error(result); }
+ std::vector<bool> data(num_bits, false); // Create a vector to store the unpacked data
+ for (size_t i = 0; i < num_bits; ++i) {
+ if (packed_data[i / 8] & (1 << (i % 8))) { // Unpack the bits from the bytes
+ data[i] = true;
+ }
}
return data;
}
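For reference, the bit-packing scheme introduced in the codec above can be exercised in isolation: bit i lives in byte i / 8 at position i % 8, so N booleans are transferred as roughly N / 8 bytes (plus the length prefix) instead of one byte per element. The sketch below uses hypothetical `pack_bits`/`unpack_bits` helpers rather than the Endpoint read/write calls, just to show the round trip.

```cpp
// Standalone sketch of the std::vector<bool> bit-packing round trip (not SDK code).
#include <cstdint>
#include <vector>

std::vector<uint8_t> pack_bits(const std::vector<bool>& bits) {
  std::vector<uint8_t> bytes((bits.size() + 7) / 8, 0);  // ceil(N / 8) bytes, zero-initialized
  for (size_t i = 0; i < bits.size(); ++i) {
    if (bits[i]) { bytes[i / 8] |= static_cast<uint8_t>(1u << (i % 8)); }  // set bit i
  }
  return bytes;
}

std::vector<bool> unpack_bits(const std::vector<uint8_t>& bytes, size_t num_bits) {
  std::vector<bool> bits(num_bits, false);
  for (size_t i = 0; i < num_bits; ++i) {
    if (bytes[i / 8] & (1u << (i % 8))) { bits[i] = true; }  // test bit i
  }
  return bits;
}
```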
diff --git a/include/holoscan/core/component.hpp b/include/holoscan/core/component.hpp
index 9afe89f1..4e41e932 100644
--- a/include/holoscan/core/component.hpp
+++ b/include/holoscan/core/component.hpp
@@ -1,5 +1,5 @@
/*
- * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
@@ -23,6 +23,7 @@
#include
#include
#include
+#include
#include
#include
@@ -31,11 +32,11 @@
#include "./arg.hpp"
#include "./forward_def.hpp"
-#define HOLOSCAN_COMPONENT_FORWARD_TEMPLATE() \
- template > && \
- (std::is_same_v> || \
+#define HOLOSCAN_COMPONENT_FORWARD_TEMPLATE() \
+ template > && \
+ (std::is_same_v> || \
std::is_same_v>)>>
#define HOLOSCAN_COMPONENT_FORWARD_ARGS(class_name) \
HOLOSCAN_COMPONENT_FORWARD_TEMPLATE() \
@@ -49,6 +50,10 @@
namespace holoscan {
+namespace gxf {
+class GXFExecutor;
+} // namespace gxf
+
/**
* @brief Base class for all components.
*
@@ -56,9 +61,9 @@ namespace holoscan {
* `holoscan::Condition`, and `holoscan::Resource`.
* It is used to define the common interface for all components.
*/
-class Component {
+class ComponentBase {
public:
- Component() = default;
+ ComponentBase() = default;
/**
* @brief Construct a new Component object.
@@ -66,12 +71,12 @@ class Component {
* @param args The arguments to be passed to the component.
*/
HOLOSCAN_COMPONENT_FORWARD_TEMPLATE()
- explicit Component(ArgT&& arg, ArgsT&&... args) {
+ explicit ComponentBase(ArgT&& arg, ArgsT&&... args) {
add_arg(std::forward(arg));
(add_arg(std::forward(args)), ...);
}
- virtual ~Component() = default;
+ virtual ~ComponentBase() = default;
/**
* @brief Get the identifier of the component.
@@ -164,7 +169,18 @@ class Component {
std::string description() const;
protected:
- friend class Executor;
+ friend class holoscan::Executor;
+ // Make GXFExecutor a friend class so it can call protected initialization methods
+ friend class holoscan::gxf::GXFExecutor;
+
+ // Make Fragment a friend class so it can call reset_graph_entities
+ friend class holoscan::Fragment;
+
+ /// Update parameters based on the specified arguments
+ void update_params_from_args(std::unordered_map<std::string, ParameterWrapper>& params);
+
+ /// Reset the GXF GraphEntity of any arguments that have one
+ virtual void reset_graph_entities();
int64_t id_ = -1; ///< The ID of the component.
std::string name_ = ""; ///< Name of the component
@@ -172,6 +188,32 @@ class Component {
std::vector<Arg> args_; ///< List of arguments
};
+/**
+ * @brief Common class for all non-Operator components
+ *
+ * This class is the base class for all non-Operator components including
+ * `holoscan::Condition`, `holoscan::Resource`, `holoscan::NetworkContext`, and `holoscan::Scheduler`.
+ * It is used to define the common interface for all components.
+ *
+ * `holoscan::Operator` does not inherit from this class as it uses `holoscan::OperatorSpec`
+ * instead of `holoscan::ComponentSpec`.
+ */
+class Component : public ComponentBase {
+ protected:
+ // Make GXFExecutor a friend class so it can call protected initialization methods
+ friend class holoscan::gxf::GXFExecutor;
+
+ using ComponentBase::update_params_from_args;
+
+ /// Update parameters based on the specified arguments
+ void update_params_from_args();
+
+ /// Set the parameters based on defaults (sets GXF parameters for GXF operators)
+ virtual void set_parameters() {}
+
+ std::shared_ptr<ComponentSpec> spec_; ///< The component specification.
+};
+
} // namespace holoscan
#endif /* HOLOSCAN_CORE_COMPONENT_HPP */
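As a rough mental model of the split introduced above (simplified sketch, not the actual headers): `ComponentBase` is now the shared root, `Component` layers the `ComponentSpec` and default-parameter handling on top for non-Operator components, and `Operator` keeps deriving from the root directly because it uses `OperatorSpec`.

```cpp
// Simplified hierarchy sketch; names mirror the classes above but bodies are placeholders.
namespace holoscan_sketch {

class ComponentBase {                      // holds id_, name_, fragment_, args_
 protected:
  virtual void reset_graph_entities() {}   // resets GXF GraphEntity of arguments
};

class Component : public ComponentBase {   // adds spec_ and parameter defaults
 protected:
  virtual void set_parameters() {}
};

class Condition : public Component {};     // likewise Resource, NetworkContext, Scheduler
class Operator : public ComponentBase {};  // uses OperatorSpec instead of ComponentSpec

}  // namespace holoscan_sketch
```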
diff --git a/include/holoscan/core/condition.hpp b/include/holoscan/core/condition.hpp
index dfcacd37..dd9f513d 100644
--- a/include/holoscan/core/condition.hpp
+++ b/include/holoscan/core/condition.hpp
@@ -1,5 +1,5 @@
/*
- * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
@@ -95,6 +95,9 @@
namespace holoscan {
+// Forward declarations
+class Operator;
+
enum class ConditionType {
kNone, ///< No condition
kMessageAvailable, ///< Default for input port (nvidia::gxf::MessageAvailableSchedulingTerm)
@@ -206,8 +209,12 @@ class Condition : public Component {
YAML::Node to_yaml_node() const override;
protected:
- std::shared_ptr<ComponentSpec> spec_; ///< The component specification.
- bool is_initialized_ = false; ///< Whether the condition is initialized.
+ // Add friend classes that can call reset_graph_entities
+ friend class holoscan::Operator;
+
+ using Component::reset_graph_entities;
+
+ bool is_initialized_ = false; ///< Whether the condition is initialized.
};
} // namespace holoscan
diff --git a/include/holoscan/core/conditions/gxf/asynchronous.hpp b/include/holoscan/core/conditions/gxf/asynchronous.hpp
index 999e0c00..8f133168 100644
--- a/include/holoscan/core/conditions/gxf/asynchronous.hpp
+++ b/include/holoscan/core/conditions/gxf/asynchronous.hpp
@@ -1,5 +1,5 @@
/*
- * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
@@ -71,6 +71,8 @@ class AsynchronousCondition : public gxf::GXFCondition {
*/
AsynchronousEventState event_state() const;
+ nvidia::gxf::AsynchronousSchedulingTerm* get() const;
+
private:
AsynchronousEventState event_state_{AsynchronousEventState::READY};
};
diff --git a/include/holoscan/core/conditions/gxf/boolean.hpp b/include/holoscan/core/conditions/gxf/boolean.hpp
index 27782e50..16002b5d 100644
--- a/include/holoscan/core/conditions/gxf/boolean.hpp
+++ b/include/holoscan/core/conditions/gxf/boolean.hpp
@@ -1,5 +1,5 @@
/*
- * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
@@ -39,6 +39,8 @@ class BooleanCondition : public gxf::GXFCondition {
void setup(ComponentSpec& spec) override;
+ nvidia::gxf::BooleanSchedulingTerm* get() const;
+
private:
Parameter<bool> enable_tick_;
};
diff --git a/include/holoscan/core/conditions/gxf/downstream_affordable.hpp b/include/holoscan/core/conditions/gxf/downstream_affordable.hpp
index 6e5be644..ee2cf141 100644
--- a/include/holoscan/core/conditions/gxf/downstream_affordable.hpp
+++ b/include/holoscan/core/conditions/gxf/downstream_affordable.hpp
@@ -1,5 +1,5 @@
/*
- * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
@@ -44,7 +44,11 @@ class DownstreamMessageAffordableCondition : public gxf::GXFCondition {
void initialize() override { GXFCondition::initialize(); }
+ // TODO(GXF4): Expected<void> setTransmitter(Handle<Transmitter> value)
+ // TODO(GXF4): Expected<void> setMinSize(uint64_t value)
+
private:
+ // TODO(GXF4): this is now a std::set<Handle<Transmitter>> transmitters_
Parameter<Handle<Transmitter>> transmitter_;
Parameter<uint64_t> min_size_;
};
diff --git a/include/holoscan/core/conditions/gxf/message_available.hpp b/include/holoscan/core/conditions/gxf/message_available.hpp
index 95519cac..1ea924fb 100644
--- a/include/holoscan/core/conditions/gxf/message_available.hpp
+++ b/include/holoscan/core/conditions/gxf/message_available.hpp
@@ -1,5 +1,5 @@
/*
- * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
@@ -39,21 +39,24 @@ class MessageAvailableCondition : public gxf::GXFCondition {
void receiver(std::shared_ptr