diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 00000000..43d5c682 --- /dev/null +++ b/.dockerignore @@ -0,0 +1,14 @@ +* +!data/kitti_split1/devkit +!data/kitti_split1/setup_split.py +!data/kitti_split1/test.txt +!data/kitti_split1/train.txt +!data/kitti_split1/trainval.txt +!data/kitti_split1/val.txt +!data/kitti_split2/devkit +!data/kitti_split2/kitti_ids_new.mat +!data/kitti_split2/setup_split.py +!docker/requirements.txt +!lib +!models +!scripts diff --git a/.gitignore b/.gitignore new file mode 100644 index 00000000..bdc5dc2a --- /dev/null +++ b/.gitignore @@ -0,0 +1,224 @@ +/data/kitti +/data/kitti_split1/training +/data/kitti_split1/validation +/data/kitti_split2/training +/data/kitti_split2/validation +/output +/weights + +# Created by https://www.toptal.com/developers/gitignore/api/python,virtualenv,jupyternotebooks,visualstudiocode,macos,linux +# Edit at https://www.toptal.com/developers/gitignore?templates=python,virtualenv,jupyternotebooks,visualstudiocode,macos,linux + +### JupyterNotebooks ### +# gitignore template for Jupyter Notebooks +# website: http://jupyter.org/ + +.ipynb_checkpoints +*/.ipynb_checkpoints/* + +# IPython +profile_default/ +ipython_config.py + +# Remove previous ipynb_checkpoints +# git rm -r .ipynb_checkpoints/ + +### Linux ### +*~ + +# temporary files which can be created if a process still has a handle open of a deleted file +.fuse_hidden* + +# KDE directory preferences +.directory + +# Linux trash folder which might appear on any partition or disk +.Trash-* + +# .nfs files are created when an open file is removed but is still being accessed +.nfs* + +### macOS ### +# General +.DS_Store +.AppleDouble +.LSOverride + +# Icon must end with two \r +Icon + +# Thumbnails +._* + +# Files that might appear in the root of a volume +.DocumentRevisions-V100 +.fseventsd +.Spotlight-V100 +.TemporaryItems +.Trashes +.VolumeIcon.icns +.com.apple.timemachine.donotpresent + +# Directories potentially created on remote AFP 
share +.AppleDB +.AppleDesktop +Network Trash Folder +Temporary Items +.apdisk + +### Python ### +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +pip-wheel-metadata/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +target/ + +# Jupyter Notebook + +# IPython + +# pyenv +.python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# PEP 582; used by e.g. 
github.com/David-OConnor/pyflow +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# pytype static type analyzer +.pytype/ + +### VirtualEnv ### +# Virtualenv +# http://iamzed.com/2009/05/07/a-primer-on-virtualenv/ +[Bb]in +[Ii]nclude +[Ll]ib +[Ll]ib64 +[Ll]ocal +[Ss]cripts +pyvenv.cfg +pip-selfcheck.json + +### VisualStudioCode ### +.vscode/* +#!.vscode/settings.json +#!.vscode/tasks.json +#!.vscode/launch.json +#!.vscode/extensions.json +*.code-workspace + +### VisualStudioCode Patch ### +# Ignore all local history of files +.history + +# End of https://www.toptal.com/developers/gitignore/api/python,virtualenv,jupyternotebooks,visualstudiocode,macos,linux \ No newline at end of file diff --git a/docker/Dockerfile b/docker/Dockerfile new file mode 100644 index 00000000..83891c80 --- /dev/null +++ b/docker/Dockerfile @@ -0,0 +1,61 @@ +FROM nvidia/cuda:9.2-cudnn7-devel-ubuntu16.04 + +RUN echo 'debconf debconf/frontend select Noninteractive' | debconf-set-selections + +# Install some basic utilities +RUN apt-get update && apt-get install -y \ + build-essential \ + bzip2 \ + ca-certificates \ + git \ + curl \ + libboost-dev \ + libgl1-mesa-dev \ + libsm6 \ + libx11-6 \ + libxext-dev \ + libxrender1 \ + sudo \ + && rm -rf /var/lib/apt/lists/* + +# Create a working directory +RUN mkdir /work +WORKDIR /work + +# Install Miniconda and Python 3.6 +ENV CONDA_AUTO_UPDATE_CONDA=false +ENV PATH=/root/miniconda/bin:$PATH +RUN curl -sLo ~/miniconda.sh https://repo.continuum.io/archive/Anaconda3-5.2.0-Linux-x86_64.sh \ + && chmod +x ~/miniconda.sh \ + && ~/miniconda.sh -b -p ~/miniconda \ + && rm ~/miniconda.sh \ + && conda install -y 
python==3.6.8 \ + && conda clean -ya + +# CUDA 9.2-specific steps +RUN conda install -y -c pytorch \ + cuda92 \ + "pytorch=0.4.1=py36_cuda9.2.148_cudnn7.1.4_1" \ + "torchvision=0.2.1=py36_1" \ + && conda clean -ya + +# Install Python modules +COPY docker/requirements.txt /tmp/requirements.txt +RUN pip install -r /tmp/requirements.txt + +# Copy data/ and build KITTI devkit +RUN mkdir /work/data +COPY data/kitti_split1 /work/data/kitti_split1 +COPY data/kitti_split2 /work/data/kitti_split2 +RUN cd /work && sh data/kitti_split1/devkit/cpp/build.sh +RUN cd /work && sh data/kitti_split2/devkit/cpp/build.sh + +# Copy lib/ and build NMS +COPY lib /work/lib +RUN cd /work/lib/nms && make + +# Copy models/ +COPY models /work/models + +# Copy scripts/ +COPY scripts /work/scripts diff --git a/docker/README.md b/docker/README.md new file mode 100644 index 00000000..5fe60ef7 --- /dev/null +++ b/docker/README.md @@ -0,0 +1,164 @@ +# Train & Test M3D-RPN with NVIDIA Docker + +[Motoki Kimura](https://github.com/motokimura) + +Tested with: + +``` +- OS: Ubuntu 18.04 LTS +- NVIDIA Driver: 450.51.05 +- Docker: 19.03.12 +- NVIDIA Docker 2: 2.4.0 +``` + +May work with other versions. + +## Setup + +- **NVIDIA Driver** + + Find an appropriate version to your NVIDIA device from [here](https://www.nvidia.com/Download/index.aspx) and install it. + +- **Docker & NVIDIA Docker 2** + + Install [Docker](https://docs.docker.com/engine/install/ubuntu/) +and [NVIDIA Docker 2](https://github.com/NVIDIA/nvidia-docker/wiki/Installation-(version-2.0)). + +- **KITTI dataset** + + Download [KITTI 3D Object Detection](http://www.cvlibs.net/datasets/kitti/eval_object.php?obj_benchmark=3d) dataset and extract them. + + Default path is set to `M3D-RPN/data/kitti`. + +- **M3D-RPN weights** + + Download a zip file by following [the original README](https://github.com/garrickbrazil/M3D-RPN#testing) +and extract it. + + Default path is set to `M3D-RPN/weights/M3D-RPN-Release`. 
+ +## Testing + +Build docker image: + +``` +./docker/build.sh +``` + +Run container: + +``` +./docker/run.sh +``` + +Notice: if your KITTI data or pretrained weights are not located in the default paths described above, +you have to update `KITTI_DIR` and `WEIGHT_DIR` in `run.sh`. + +Now you should be in the container, and can apply trainval splitting by: + +``` +(in container) python data/kitti_split1/setup_split.py +(in container) python data/kitti_split2/setup_split.py +``` + +Start testing: + +``` +# trainval split #1 +(in container) python scripts/test_rpn_3d.py --config=weights/M3D-RPN-Release/m3d_rpn_depth_aware_val1_config.pkl --weight=weights/M3D-RPN-Release/m3d_rpn_depth_aware_val1 + +# trainval split #2 +(in container) python scripts/test_rpn_3d.py --config=weights/M3D-RPN-Release/m3d_rpn_depth_aware_val2_config.pkl --weight=weights/M3D-RPN-Release/m3d_rpn_depth_aware_val2 +``` + +After waiting for a while, you will see: + +``` +# trainval split #1 +test_iter m3d_rpn_depth_aware_val1 2d car --> easy: 0.9024, mod: 0.8367, hard: 0.6769 +test_iter m3d_rpn_depth_aware_val1 gr car --> easy: 0.2594, mod: 0.2118, hard: 0.1790 +test_iter m3d_rpn_depth_aware_val1 3d car --> easy: 0.2027, mod: 0.1706, hard: 0.1521 +test_iter m3d_rpn_depth_aware_val1 2d pedestrian --> easy: 0.6622, mod: 0.5838, hard: 0.5018 +test_iter m3d_rpn_depth_aware_val1 gr pedestrian --> easy: 0.1305, mod: 0.1160, hard: 0.1115 +test_iter m3d_rpn_depth_aware_val1 3d pedestrian --> easy: 0.1142, mod: 0.1128, hard: 0.1034 +test_iter m3d_rpn_depth_aware_val1 2d cyclist --> easy: 0.6680, mod: 0.4901, hard: 0.4206 +test_iter m3d_rpn_depth_aware_val1 gr cyclist --> easy: 0.1196, mod: 0.1013, hard: 0.1013 +test_iter m3d_rpn_depth_aware_val1 3d cyclist --> easy: 0.1050, mod: 0.1001, hard: 0.0909 +``` + +``` +# trainval split #2 +test_iter m3d_rpn_depth_aware_val2 2d car --> easy: 0.9362, mod: 0.8473, hard: 0.6765 +test_iter m3d_rpn_depth_aware_val2 gr car --> easy: 0.2686, mod: 0.2115, hard: 0.1714 
+test_iter m3d_rpn_depth_aware_val2 3d car --> easy: 0.2040, mod: 0.1648, hard: 0.1334 +test_iter m3d_rpn_depth_aware_val2 2d pedestrian --> easy: 0.7691, mod: 0.6016, hard: 0.5192 +test_iter m3d_rpn_depth_aware_val2 gr pedestrian --> easy: 0.1340, mod: 0.1144, hard: 0.1151 +test_iter m3d_rpn_depth_aware_val2 3d pedestrian --> easy: 0.1280, mod: 0.1130, hard: 0.1117 +test_iter m3d_rpn_depth_aware_val2 2d cyclist --> easy: 0.5147, mod: 0.4277, hard: 0.3491 +test_iter m3d_rpn_depth_aware_val2 gr cyclist --> easy: 0.0290, mod: 0.0909, hard: 0.0909 +test_iter m3d_rpn_depth_aware_val2 3d cyclist --> easy: 0.0222, mod: 0.0909, hard: 0.0909 +``` + +Detailed results are saved under `M3D-RPN/data/output` +(defined as `OUT_DIR` in `run.sh`). + +You can test your own model by setting paths to the config and weight files +with `--weight` and `--config` options in the same way. + +## Training + +Build docker image: + +``` +./docker/build.sh +``` + +Run container: + +``` +./docker/run.sh +``` + +Notice: if your KITTI data or pretrained weights are not located in the default paths described above, +you have to update `KITTI_DIR` and `WEIGHT_DIR` defined in `run.sh`. + +Now you should be in the container, and can apply trainval splitting by: + +``` +(in container) python data/kitti_split1/setup_split.py +(in container) python data/kitti_split2/setup_split.py +``` + +Before training, launch visdom server: + +``` +(in container) python -m visdom.server -port 8100 -readonly +``` + +Launch **a new terminal**. +Then, start a new bash session in the container: + +``` +./docker/exec.sh +``` + +Start training: + +``` +# First train the warmup (without depth-aware) +(in container) python scripts/train_rpn_3d.py --config=kitti_3d_multi_warmup + +# Then train the main experiment (with depth-aware) +(in container) python scripts/train_rpn_3d.py --config=kitti_3d_multi_main +``` + +The training status can be monitored at [http://localhost:8100](http://localhost:8100). 
+ +The training config, trained weights, etc. are saved under `M3D-RPN/data/output` +(defined as `OUT_DIR` in `run.sh`). + +You can configure hyperparameters (e.g., trainval split) by changing +`M3D-RPN/scripts/config/kitti_3d_multi_warmup.py` and `M3D-RPN/scripts/config/kitti_3d_multi_main.py`. + +Notice: if you changed the files, you have to re-build and re-launch the container +to reflect the changes. diff --git a/docker/build.sh b/docker/build.sh new file mode 100755 index 00000000..762929df --- /dev/null +++ b/docker/build.sh @@ -0,0 +1,3 @@ +#!/bin/bash + +nvidia-docker build -t m3d_rpn -f docker/Dockerfile . diff --git a/docker/exec.sh b/docker/exec.sh new file mode 100755 index 00000000..bb8e751c --- /dev/null +++ b/docker/exec.sh @@ -0,0 +1,3 @@ +#!/bin/bash + +docker exec -it m3d_rpn /bin/bash diff --git a/docker/requirements.txt b/docker/requirements.txt new file mode 100644 index 00000000..68057c59 --- /dev/null +++ b/docker/requirements.txt @@ -0,0 +1,6 @@ +easydict==1.9 +opencv-python==3.1.0.5 +shapely==1.6.4 +#torch==0.4.1 # installed in Dockerfile +#torchvision==0.2.1 # installed in Dockerfile +visdom==0.1.8.5 diff --git a/docker/run.sh b/docker/run.sh new file mode 100755 index 00000000..dd98ddce --- /dev/null +++ b/docker/run.sh @@ -0,0 +1,25 @@ +#!/bin/bash + +THIS_DIR=$(cd $(dirname $0); pwd) +PROJ_DIR=`dirname ${THIS_DIR}` + +# NOTICE: Change `KITTI_DIR`, `WEIGHT_DIR`, and `OUT_DIR` below based on your settings +# NOTICE: The paths must be absolute ones! 
+ +# Directory containing the input KITTI object3d data +KITTI_DIR=${PROJ_DIR}/data/kitti + +# Directory containing the pretrained weight and config files +WEIGHT_DIR=${PROJ_DIR}/weights + +# Directory to which training/testing results will be saved +OUT_DIR=${PROJ_DIR}/output +mkdir -p -m 777 ${OUT_DIR} + +nvidia-docker run --rm -it --ipc=host \ + -v ${KITTI_DIR}:/work/data/kitti \ + -v ${WEIGHT_DIR}:/work/weights \ + -v ${OUT_DIR}:/work/output \ + -p 8100:8100 \ + --name m3d_rpn \ + m3d_rpn /bin/bash diff --git a/scripts/test_rpn_3d.py b/scripts/test_rpn_3d.py index 715d1901..f76a8e67 100644 --- a/scripts/test_rpn_3d.py +++ b/scripts/test_rpn_3d.py @@ -18,15 +18,43 @@ # ----------------------------------------- from lib.imdb_util import * -conf_path = '/home/garrick/Desktop/M3D-RPN-Release/m3d_rpn_depth_aware_test_config.pkl' -weights_path = '/home/garrick/Desktop/M3D-RPN-Release/m3d_rpn_depth_aware_test' + +def parse_args(argv): + from getopt import getopt + opts, args = getopt(argv, '', ['config=', 'weight=', 'outdir=']) + # defaults (trainval split #1) + conf_path = 'weights/M3D-RPN-Release/m3d_rpn_depth_aware_val1_config.pkl' + weights_path = 'weights/M3D-RPN-Release/m3d_rpn_depth_aware_val1' + outdir = None + # read opts + for opt, arg in opts: + if opt in ('--config'): + conf_path = arg + if opt in ('--weight'): + weights_path = arg + if opt in ('--outdir'): + outdir = arg + + if outdir is None: + # if --outdir option is not used, give the weight file name to output directory + outdir = os.path.basename(weights_path) + + return conf_path, weights_path, outdir + + +conf_path, weights_path, outdir = parse_args(sys.argv[1:]) +print() +print('CONFIG: {}'.format(conf_path)) +print('WEIGHT: {}'.format(weights_path)) +print('OUTDIR: {}'.format(outdir)) +print() # load config conf = edict(pickle_read(conf_path)) conf.pretrained = None data_path = os.path.join(os.getcwd(), 'data') -results_path = os.path.join('output', 'tmp_results', 'data') +results_path = 
os.path.join('output', outdir, 'data') # make directory mkdir_if_missing(results_path, delete_if_exist=True)