Exporter Custom Models Fix #77

Open
wants to merge 3 commits into main
43 changes: 43 additions & 0 deletions docker/Dockerfile
@@ -0,0 +1,43 @@
## Basic Docker environment that sets up OpenPCDet correctly for the CUDA-PointPillars exporter
##
## Prerequisites:
## - docker
## - nvidia driver
## - nvidia container toolkit
##
## Build with:
## docker build -t open_pcdet:latest .
##
## Save with:
## docker save open_pcdet:latest | gzip > open_pcdet.tar.gz
##
## Run with:
## docker run --network host --gpus all --name PointPillars --rm -it open_pcdet:latest
## Debug with:
## docker run --network host --gpus all --name PointPillars --entrypoint /bin/bash -it open_pcdet:latest

FROM nvcr.io/nvidia/pytorch:20.12-py3

## Meta information
LABEL cuda.version="11.1" maintainers="Robert Belshaw <[email protected]>"

WORKDIR /
RUN DEBIAN_FRONTEND=noninteractive apt-get update
RUN pip install --upgrade pip wheel setuptools
RUN pip install spconv-cu113
RUN git clone --branch v0.5.2 https://github.com/open-mmlab/OpenPCDet.git
WORKDIR /OpenPCDet
RUN python setup.py develop
RUN DEBIAN_FRONTEND=noninteractive apt-get install ffmpeg libsm6 libxext6 -y
RUN pip install onnxsim nvidia-pyindex
RUN pip install onnx-graphsurgeon
RUN pip install kornia==0.4.1

# Docker clean-up
RUN rm -rf /var/lib/apt/lists/*

## Create entry point for image
WORKDIR /
COPY ./entrypoint.sh .
ENTRYPOINT ["/bin/bash"]
CMD ["/bin/bash", "-c"]
9 changes: 9 additions & 0 deletions docker/README.md
@@ -0,0 +1,9 @@
# Cuda PointPillars Exporter Docker

## Usage example

```
bash export.sh -m ../model/pointpillar_7728.pth -c ../tool/cfgs/kitti_models/pointpillar.yaml
```

Tested with docker-compose 1.25.0 and docker 20.10.21.
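
For a custom-trained model the same pattern applies; the checkpoint and config paths below are placeholders (both are resolved relative to `export.sh`):

```
bash export.sh -m ../model/my_custom_model.pth -c ../tool/cfgs/custom_models/my_pointpillar.yaml
```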
16 changes: 16 additions & 0 deletions docker/docker-compose.yml
@@ -0,0 +1,16 @@
# email: [email protected]
# Note: requires docker login first
version: '2.4'
services:
  open_pcdet_export:
    image: open_pcdet:latest
    build: .
    user: ${_UID}:${_UID}
    restart: on-failure:2
    entrypoint: /bin/bash ./entrypoint.sh
    volumes:
      - ./entrypoint.sh:/entrypoint.sh
      - ../tool:/exporter
      - ./${_CKPT}:/checkpoint.pth
      - ../tool/cfgs/dataset_configs/kitti_dataset.yaml:/exporter/dataset.yaml
      - ./${_CONFIG}:/pointpillar.yaml
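
`export.sh` just fills in the `_UID`, `_CKPT`, and `_CONFIG` variables this compose file expects and runs the service; an equivalent manual invocation (example paths, run from the `docker` directory) would be:

```
_UID=$(id -u) _CKPT=../model/pointpillar_7728.pth _CONFIG=../tool/cfgs/kitti_models/pointpillar.yaml \
  docker-compose run --rm open_pcdet_export
```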
2 changes: 2 additions & 0 deletions docker/entrypoint.sh
@@ -0,0 +1,2 @@
cd /exporter
python exporter.py --cfg_file /pointpillar.yaml --ckpt /checkpoint.pth --data_path /exporter/dataset.yaml
42 changes: 42 additions & 0 deletions docker/export.sh
@@ -0,0 +1,42 @@
#!/usr/bin/env bash
set -e
cd "$(cd -P -- "$(dirname -- "$0")" && pwd -P)"

## Usage ##
#
# bash export.sh -m path/to/ckpt.pth -c path/to/pointpillar.yaml
#
# note: path is relative to this script
#

while getopts m:c: flag
do
    case "${flag}" in
        m) _CKPT=${OPTARG};;
        c) _CONFIG=${OPTARG};;
    esac
done

if [ ! -f "${_CONFIG}" ]
then
echo "Config .yaml Does not exist at: ${_CONFIG}"
exit
fi

if [ ! -f "${_CKPT}" ]
then
echo "Ckpt .pth Does not exist at: ${_CKPT}"
exit
fi

export _CONFIG
echo "CONFIG: $_CONFIG"
export _CKPT
echo "CKPT: $_CKPT"

export _UID=$(id -u)

# Run the export container
docker-compose run --rm open_pcdet_export
31 changes: 27 additions & 4 deletions tool/exporter.py
@@ -101,9 +101,32 @@ def main():
with torch.no_grad():

MAX_VOXELS = 10000
NUMBER_OF_CLASSES = len(cfg.CLASS_NAMES)

MAX_POINTS_PER_VOXEL = None

DATA_PROCESSOR = cfg.DATA_CONFIG.DATA_PROCESSOR
POINT_CLOUD_RANGE = cfg.DATA_CONFIG.POINT_CLOUD_RANGE
for i in DATA_PROCESSOR:
if i['NAME'] == "transform_points_to_voxels":
MAX_POINTS_PER_VOXEL = i['MAX_POINTS_PER_VOXEL']
VOXEL_SIZES = i['VOXEL_SIZE']
break


if MAX_POINTS_PER_VOXEL is None:
logger.info('Could not parse voxel settings from the config... Exiting')
import sys
sys.exit(1)

VOXEL_SIZE_X = abs(POINT_CLOUD_RANGE[0] - POINT_CLOUD_RANGE[3]) / VOXEL_SIZES[0]
VOXEL_SIZE_Y = abs(POINT_CLOUD_RANGE[1] - POINT_CLOUD_RANGE[4]) / VOXEL_SIZES[1]

FEATURE_SIZE_X = VOXEL_SIZE_X / 2 # head feature map is the BEV grid downsampled by 2
FEATURE_SIZE_Y = VOXEL_SIZE_Y / 2

dummy_voxels = torch.zeros(
(MAX_VOXELS, 32, 4),
(MAX_VOXELS, MAX_POINTS_PER_VOXEL, 4),
dtype=torch.float32,
device='cuda:0')

@@ -121,7 +144,7 @@ def main():
dummy_input['voxels'] = dummy_voxels
dummy_input['voxel_num_points'] = dummy_voxel_num
dummy_input['voxel_coords'] = dummy_voxel_idxs
dummy_input['batch_size'] = 1
dummy_input['batch_size'] = torch.tensor(1)

torch.onnx.export(model, # model being run
dummy_input, # model input (or a tuple for multiple inputs)
@@ -135,12 +158,12 @@
)

onnx_raw = onnx.load("./pointpillar_raw.onnx") # load onnx model
onnx_trim_post = simplify_postprocess(onnx_raw)
onnx_trim_post = simplify_postprocess(onnx_raw, FEATURE_SIZE_X, FEATURE_SIZE_Y, NUMBER_OF_CLASSES)

onnx_simp, check = simplify(onnx_trim_post)
assert check, "Simplified ONNX model could not be validated"

onnx_final = simplify_preprocess(onnx_simp)
onnx_final = simplify_preprocess(onnx_simp, VOXEL_SIZE_X, VOXEL_SIZE_Y, MAX_POINTS_PER_VOXEL)
onnx.save(onnx_final, "pointpillar.onnx")
print('finished exporting onnx')

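
For context, the new parsing loop in `exporter.py` reads these values out of the model config's `DATA_CONFIG` section. In the stock OpenPCDet KITTI PointPillars config the relevant part looks roughly like this (trimmed; custom configs supply their own values, which is what this PR is about):

```
DATA_CONFIG:
    POINT_CLOUD_RANGE: [0, -39.68, -3, 69.12, 39.68, 1]
    DATA_PROCESSOR:
        - NAME: transform_points_to_voxels
          VOXEL_SIZE: [0.16, 0.16, 4]
          MAX_POINTS_PER_VOXEL: 32
```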
20 changes: 11 additions & 9 deletions tool/simplifier_onnx.py
@@ -18,15 +18,15 @@
import onnx_graphsurgeon as gs

@gs.Graph.register()
def replace_with_clip(self, inputs, outputs):
def replace_with_clip(self, inputs, outputs, voxel_array):
for inp in inputs:
inp.outputs.clear()

for out in outputs:
out.inputs.clear()

op_attrs = dict()
op_attrs["dense_shape"] = np.array([496,432])
op_attrs["dense_shape"] = voxel_array

return self.layer(name="PPScatter_0", op="PPScatterPlugin", inputs=inputs, outputs=outputs, attrs=op_attrs)

@@ -36,13 +36,13 @@ def loop_node(graph, current_node, loop_time=0):
current_node = next_node
return next_node

def simplify_postprocess(onnx_model):
def simplify_postprocess(onnx_model, FEATURE_SIZE_X, FEATURE_SIZE_Y, NUMBER_OF_CLASSES):
print("Use onnx_graphsurgeon to adjust postprocessing part in the onnx...")
graph = gs.import_onnx(onnx_model)

cls_preds = gs.Variable(name="cls_preds", dtype=np.float32, shape=(1, 248, 216, 18))
box_preds = gs.Variable(name="box_preds", dtype=np.float32, shape=(1, 248, 216, 42))
dir_cls_preds = gs.Variable(name="dir_cls_preds", dtype=np.float32, shape=(1, 248, 216, 12))
cls_preds = gs.Variable(name="cls_preds", dtype=np.float32, shape=(1, int(FEATURE_SIZE_Y), int(FEATURE_SIZE_X), 2*NUMBER_OF_CLASSES*NUMBER_OF_CLASSES))
box_preds = gs.Variable(name="box_preds", dtype=np.float32, shape=(1, int(FEATURE_SIZE_Y), int(FEATURE_SIZE_X), 14*NUMBER_OF_CLASSES))
dir_cls_preds = gs.Variable(name="dir_cls_preds", dtype=np.float32, shape=(1, int(FEATURE_SIZE_Y), int(FEATURE_SIZE_X), 4*NUMBER_OF_CLASSES))

tmap = graph.tensors()
new_inputs = [tmap["voxels"], tmap["voxel_idxs"], tmap["voxel_num"]]
@@ -73,18 +73,20 @@ def simplify_postprocess(onnx_model):
return gs.export_onnx(graph)


def simplify_preprocess(onnx_model):
def simplify_preprocess(onnx_model, VOXEL_SIZE_X, VOXEL_SIZE_Y, MAX_POINTS_PER_VOXEL):
print("Use onnx_graphsurgeon to modify onnx...")
graph = gs.import_onnx(onnx_model)

tmap = graph.tensors()
MAX_VOXELS = tmap["voxels"].shape[0]

VOXEL_ARRAY = np.array([int(VOXEL_SIZE_Y), int(VOXEL_SIZE_X)]) # dense_shape is (grid_y, grid_x), matching the original hard-coded [496, 432]

# voxels: [V, P, C']
# V is the maximum number of voxels per frame
# P is the maximum number of points per voxel
# C' is the number of channels(features) per point in voxels.
input_new = gs.Variable(name="voxels", dtype=np.float32, shape=(MAX_VOXELS, 32, 10))
input_new = gs.Variable(name="voxels", dtype=np.float32, shape=(MAX_VOXELS, MAX_POINTS_PER_VOXEL, 10))

# voxel_idxs: [V, 4]
# V is the maximum number of voxels per frame
@@ -113,7 +115,7 @@ def simplify_preprocess(onnx_model):
graph.inputs.append(Y)
inputs = [last_node_pillarvfe.outputs[0], X, Y]
outputs = [first_node_after_pillarscatter.inputs[0]]
graph.replace_with_clip(inputs, outputs)
graph.replace_with_clip(inputs, outputs, VOXEL_ARRAY)

# Remove the now-dangling subgraph.
graph.cleanup().toposort()
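
As a rough sanity check (illustration only, not part of the PR), plugging the standard KITTI PointPillars values into the new formulas reproduces the shapes that used to be hard-coded here:

```
# Standard KITTI PointPillars values (3 classes), assumed for illustration.
POINT_CLOUD_RANGE = [0, -39.68, -3, 69.12, 39.68, 1]
VOXEL_SIZES = [0.16, 0.16, 4]
NUMBER_OF_CLASSES = 3

VOXEL_SIZE_X = abs(POINT_CLOUD_RANGE[0] - POINT_CLOUD_RANGE[3]) / VOXEL_SIZES[0]  # ~432 (BEV grid width)
VOXEL_SIZE_Y = abs(POINT_CLOUD_RANGE[1] - POINT_CLOUD_RANGE[4]) / VOXEL_SIZES[1]  # ~496 (BEV grid height)
FEATURE_SIZE_X = VOXEL_SIZE_X / 2  # ~216 (head feature map width)
FEATURE_SIZE_Y = VOXEL_SIZE_Y / 2  # ~248 (head feature map height)

# round() only guards against floating-point error in the divisions above.
assert [round(VOXEL_SIZE_Y), round(VOXEL_SIZE_X)] == [496, 432]      # old hard-coded dense_shape
assert (round(FEATURE_SIZE_Y), round(FEATURE_SIZE_X)) == (248, 216)  # old head feature-map size
assert 2 * NUMBER_OF_CLASSES * NUMBER_OF_CLASSES == 18               # old cls_preds channels (3-class KITTI head)
assert 14 * NUMBER_OF_CLASSES == 42                                  # old box_preds channels
assert 4 * NUMBER_OF_CLASSES == 12                                   # old dir_cls_preds channels
```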