examples/data/train/
- 0
- 1
- ...
- n
examples/data/test/
- 0
- 1
- ...
- n
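The train/ and test/ folders use a class-per-subfolder layout (one subfolder per class label, "0" through "n"). A minimal sketch of consuming that layout with torchvision's ImageFolder follows; the actual loader inside train.py may differ, and the image size and batch size here are assumptions.

```python
# Sketch only: read the class-per-folder layout above with torchvision.
# train.py may use a different loader; resize/batch values are assumptions.
import torch
from torchvision import datasets, transforms

tfm = transforms.Compose([
    transforms.Resize((224, 224)),  # assumed input size
    transforms.ToTensor(),
])

train_set = datasets.ImageFolder("examples/data/train", transform=tfm)
test_set = datasets.ImageFolder("examples/data/test", transform=tfm)

train_loader = torch.utils.data.DataLoader(train_set, batch_size=32, shuffle=True)
test_loader = torch.utils.data.DataLoader(test_set, batch_size=32, shuffle=False)

# Each subfolder name ("0", "1", ..., "n") becomes one class index.
print(train_set.classes)
```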
cd examples && python train.py
cd examples && python transform_model.py
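transform_model.py converts the trained checkpoint into a form the C++ side can load. A minimal sketch of that step with torch.jit.trace is below; the network, checkpoint filename, and input shape are placeholders, not the contents of the real script.

```python
# Sketch only: trace a trained model to TorchScript for the C++ runtime.
# The network, checkpoint name, and input shape below are assumptions.
import torch
import torchvision

model = torchvision.models.resnet18(num_classes=10)          # placeholder network
state = torch.load("examples/checkpoint/model.pth", map_location="cpu")
model.load_state_dict(state)
model.eval()

example = torch.rand(1, 3, 224, 224)                         # dummy input for tracing
traced = torch.jit.trace(model, example)
traced.save("examples/checkpoint/model.pt")                  # load with torch::jit::load in C++
```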
cd makefile/pytorch
mkdir build && cd build && cmake -A x64 ..
or
mkdir build && cd build && cmake -G "Visual Studio 15 2017 Win64" ..
set Command Arguments -> ..\..\..\examples\checkpoint ..\..\..\examples\images
set Environment -> path=%path%;../../../thirdparty/libtorch/lib;../../../thirdparty/opencv/build/x64/vc15/bin;
cd makefile/cuda
mkdir build && cd build && cmake -A x64 ..
or
mkdir build && cd build && cmake -G "Visual Studio 15 2017 Win64" ..
cd makefile/tensorRT/classification
mkdir build && cd build && cmake -A x64 ..
or
mkdir build && cd build && cmake -G "Visual Studio 15 2017 Win64" ..
set Environment -> path=%path%;../../../../thirdparty/TensorRT/lib;
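Before handing the exported ONNX model to the TensorRT classification build, it can help to validate the file with the onnx package listed in the requirements. The filename below is an assumption.

```python
# Sketch only: sanity-check an exported ONNX file before handing it to TensorRT.
# "model.onnx" is an assumed filename.
import onnx

model = onnx.load("examples/checkpoint/model.onnx")
onnx.checker.check_model(model)                # raises if the graph is malformed
print(model.opset_import)                      # confirm the opset TensorRT must support
```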
cd makefile/tensorRT/detection
mkdir build && cd build && cmake -A x64 ..
or
mkdir build && cd build && cmake -G "Visual Studio 15 2017 Win64" ..
set Environment -> path=%path%;../../../../thirdparty/TensorRT/lib;
download VGG16_faster_rcnn_final.caffemodel
thirdparty/
- libtorch
- opencv
- CUDA
- TensorRT
download thirdparty from here.
docker pull zccyman/deepframe
nvidia-docker run -it --name=mydocker zccyman/deepframe /bin/bash
cd workspace && git clone https://github.com/zccyman/pytorch-inference.git
- Windows10
- VS2017
- CMake3.13
- CUDA10.0
- CUDNN7.3
- Python3.5
- ONNX1.1.2
- TensorRT5.0.1
- PyTorch1.0
- Libtorch
- OpenCV4.0.1
- train and transform a PyTorch model
- multi-batch inference of a PyTorch model in C++ (a Python-side sketch follows this list)
- CPU and GPU softmax
- transform a PyTorch model to an ONNX model, and run inference on the ONNX model using TensorRT
- inference of a Caffe model for Faster R-CNN using TensorRT
- build a classification network
- compress a PyTorch model
- object detection PyTorch inference using C++ on Windows platforms
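As a quick Python-side counterpart to the multi-batch inference and softmax items above, the traced model can be batch-tested before moving to the C++ runtime. The file path, batch size, and input shape here are assumptions.

```python
# Sketch only: batch inference + softmax on the traced model from the Python side.
# The C++ runtime does the real work; paths and shapes here are assumptions.
import torch

model = torch.jit.load("examples/checkpoint/model.pt")
model.eval()

batch = torch.rand(4, 3, 224, 224)             # multi-batch dummy input
with torch.no_grad():
    logits = model(batch)
    probs = torch.softmax(logits, dim=1)       # analogous to the CPU/GPU softmax in C++
print(probs.shape)
```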
- "torch.jit.trace" doesn't support nn.DataParallel so far.
- TensorRT doesn't support opsets above 7 so far, but the PyTorch ONNX exporter seems to emit opset 9 (a hedged workaround sketch follows these notes).
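Both notes have Python-side workarounds: trace the module wrapped inside nn.DataParallel rather than the wrapper itself, and, on PyTorch releases whose torch.onnx.export accepts opset_version, request an opset TensorRT can parse. A minimal sketch, assuming a placeholder network, input shape, and target opset:

```python
# Sketch only: work around the two notes above.
# The network, input shape, and target opset value are assumptions.
import torch
import torchvision

model = torchvision.models.resnet18(num_classes=10)
model.eval()
wrapped = torch.nn.DataParallel(model)           # as used during multi-GPU training

example = torch.rand(1, 3, 224, 224)

# 1) torch.jit.trace cannot trace the DataParallel wrapper; trace the inner module.
traced = torch.jit.trace(wrapped.module, example)

# 2) Newer PyTorch releases let torch.onnx.export pin the opset; older ones
#    (e.g. 1.0) emit their default opset, which TensorRT 5 may reject.
torch.onnx.export(wrapped.module, example, "model.onnx", opset_version=7)
```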