From 445b5975d0b168f5aaf4260e3d2553be120eb24b Mon Sep 17 00:00:00 2001 From: ezamalie Date: Thu, 7 Nov 2019 16:37:00 +0300 Subject: [PATCH 01/18] Added test for "mask_rcnn_demo" --- demos/mask_rcnn_demo/models.lst | 5 +++++ demos/tests/cases.py | 10 +++++++++- 2 files changed, 14 insertions(+), 1 deletion(-) create mode 100644 demos/mask_rcnn_demo/models.lst diff --git a/demos/mask_rcnn_demo/models.lst b/demos/mask_rcnn_demo/models.lst new file mode 100644 index 00000000000..1ffb9148c4f --- /dev/null +++ b/demos/mask_rcnn_demo/models.lst @@ -0,0 +1,5 @@ +# This file can be used with the --list option of the model downloader. +mask_rcnn_inception_resnet_v2_atrous_coco +mask_rcnn_inception_v2_coco +mask_rcnn_resnet101_atrous_coco +mask_rcnn_resnet50_atrous_coco diff --git a/demos/tests/cases.py b/demos/tests/cases.py index bc93b671201..0ff14442130 100644 --- a/demos/tests/cases.py +++ b/demos/tests/cases.py @@ -139,7 +139,15 @@ def single_option_cases(key, *args): ], )), - # TODO: mask_rcnn_demo: no models.lst + NativeDemo(subdirectory='mask_rcnn_demo', test_cases=combine_cases( + TestCase(options={'-i': ImageDirectoryArg('semantic-segmentation-adas')}), + device_cases('-d'), + single_option_cases('-m', + ModelArg('mask_rcnn_inception_resnet_v2_atrous_coco'), + ModelArg('mask_rcnn_inception_v2_coco'), + ModelArg('mask_rcnn_resnet101_atrous_coco'), + ModelArg('mask_rcnn_resnet50_atrous_coco')) + )), NativeDemo(subdirectory='multi_channel/face_detection_demo', device_keys=['-d'], From 5cc3b2785994e53141013cf94161ba9032da0a8f Mon Sep 17 00:00:00 2001 From: ezamalie Date: Fri, 8 Nov 2019 15:43:51 +0300 Subject: [PATCH 02/18] WIP: Added test for image_retrieval_demo --- demos/tests/args.py | 5 +++++ demos/tests/cases.py | 9 ++++++++- demos/tests/image_sequences.py | 13 +++++++++++++ 3 files changed, 26 insertions(+), 1 deletion(-) diff --git a/demos/tests/args.py b/demos/tests/args.py index d0dc5da6bbf..388a8361b4f 100644 --- a/demos/tests/args.py +++ b/demos/tests/args.py @@ -30,6 +30,11 @@ def resolve(self, context): def image_net_arg(id): return TestDataArg('ILSVRC2012_img_val/ILSVRC2012_val_{}.JPEG'.format(id)) + +def image_retrieval_arg(id): + return TestDataArg('test-master/{}'.format(id)) + + class ModelArg: def __init__(self, name, precision='FP32'): self.name = name diff --git a/demos/tests/cases.py b/demos/tests/cases.py index 0ff14442130..e416c802901 100644 --- a/demos/tests/cases.py +++ b/demos/tests/cases.py @@ -296,7 +296,14 @@ def single_option_cases(key, *args): )), # TODO: face_recognition_demo: requires face gallery - # TODO: image_retrieval_demo: current images does not suit the usecase, requires user defined gallery + + PythonDemo(subdirectory='image_retrieval_demo', test_cases=combine_cases( + TestCase(options={'--no_show':None, + '-m': ModelArg('image-retrieval-0001')}), + device_cases('-d'), + single_option_cases('-i', *IMAGE_SEQUENCES['image-retrieval-video']), + single_option_cases('-g', *IMAGE_SEQUENCES['image-retrieval-gallery']), + )), PythonDemo(subdirectory='instance_segmentation_demo', device_keys=[], test_cases=combine_cases( TestCase(options={'--no_show': None, diff --git a/demos/tests/image_sequences.py b/demos/tests/image_sequences.py index 51c1a97e1d8..4205285a36c 100644 --- a/demos/tests/image_sequences.py +++ b/demos/tests/image_sequences.py @@ -213,4 +213,17 @@ image_net_arg('00037128'), image_net_arg('00048316'), ], + + 'image-retrieval-gallery': [ + image_retrieval_arg('gallery.txt'), + ], + + 'image-retrieval-video': [ + 
image_retrieval_arg('4946fb41-9da0-4af7-a858-b443bee6d0f6.dav'), + image_retrieval_arg('7f8bd393-0f04-43fe-8cb8-4e0cde7305e6.dav'), + image_retrieval_arg('d0c460d0-4d75-4315-98a8-a0116d3dfb81.dav'), + image_retrieval_arg('636e91cc-4829-40bd-a8bc-18505b943a9b.dav'), + image_retrieval_arg('add09613-b45c-432f-94c0-f785d3ae2a7e.dav'), + + ] } From 8f793a4eda519496dd75d11efe4fda4ae56456fd Mon Sep 17 00:00:00 2001 From: ezamalie Date: Fri, 8 Nov 2019 16:04:48 +0300 Subject: [PATCH 03/18] Added test for 3d_segmentation_demo --- demos/tests/args.py | 4 ++++ demos/tests/cases.py | 7 ++++++- demos/tests/image_sequences.py | 8 +++++++- 3 files changed, 17 insertions(+), 2 deletions(-) diff --git a/demos/tests/args.py b/demos/tests/args.py index 388a8361b4f..b7dd0e80fdd 100644 --- a/demos/tests/args.py +++ b/demos/tests/args.py @@ -31,6 +31,10 @@ def image_net_arg(id): return TestDataArg('ILSVRC2012_img_val/ILSVRC2012_val_{}.JPEG'.format(id)) +def brats_arg(id): + return TestDataArg('HGG_LGG/{}'.format(id)) + + def image_retrieval_arg(id): return TestDataArg('test-master/{}'.format(id)) diff --git a/demos/tests/cases.py b/demos/tests/cases.py index e416c802901..48551022bd5 100644 --- a/demos/tests/cases.py +++ b/demos/tests/cases.py @@ -279,7 +279,12 @@ def single_option_cases(key, *args): ] PYTHON_DEMOS = [ - # TODO: 3d_segmentation_demo: no input data + PythonDemo(subdirectory='3d_segmentation_demo', test_cases=combine_cases( + TestCase(options={'-m': ModelArg('brain-tumor-segmentation-0001'), + '-o': '.'}), + device_cases('-d'), + single_option_cases('-i', *IMAGE_SEQUENCES['brain-tumor-nifti']), + )), PythonDemo(subdirectory='action_recognition', device_keys=['-d'], test_cases=combine_cases( TestCase(options={'--no_show': None, '-i': ImagePatternArg('action-recognition')}), diff --git a/demos/tests/image_sequences.py b/demos/tests/image_sequences.py index 4205285a36c..61c630e0367 100644 --- a/demos/tests/image_sequences.py +++ b/demos/tests/image_sequences.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from args import image_net_arg +from args import image_net_arg, brats_arg, image_retrieval_arg IMAGE_SEQUENCES = { 'action-recognition': [ @@ -214,6 +214,12 @@ image_net_arg('00048316'), ], + 'brain-tumor-nifti': [ + brats_arg('BRATS_485.nii.gz'), + brats_arg('BRATS_501.nii.gz'), + brats_arg('BRATS_750.nii,gz'), + ], + 'image-retrieval-gallery': [ image_retrieval_arg('gallery.txt'), ], From 23d2967f22871d78f4922e73f55d83b0a2c92caf Mon Sep 17 00:00:00 2001 From: ezamalie Date: Wed, 20 Nov 2019 11:37:47 +0300 Subject: [PATCH 04/18] Fix indent --- demos/tests/cases.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/demos/tests/cases.py b/demos/tests/cases.py index 48551022bd5..e9208cb8171 100644 --- a/demos/tests/cases.py +++ b/demos/tests/cases.py @@ -143,10 +143,10 @@ def single_option_cases(key, *args): TestCase(options={'-i': ImageDirectoryArg('semantic-segmentation-adas')}), device_cases('-d'), single_option_cases('-m', - ModelArg('mask_rcnn_inception_resnet_v2_atrous_coco'), - ModelArg('mask_rcnn_inception_v2_coco'), - ModelArg('mask_rcnn_resnet101_atrous_coco'), - ModelArg('mask_rcnn_resnet50_atrous_coco')) + ModelArg('mask_rcnn_inception_resnet_v2_atrous_coco'), + ModelArg('mask_rcnn_inception_v2_coco'), + ModelArg('mask_rcnn_resnet101_atrous_coco'), + ModelArg('mask_rcnn_resnet50_atrous_coco')) )), NativeDemo(subdirectory='multi_channel/face_detection_demo', From a6fed499301abccdd5caaaf2b536da4943c4e268 Mon Sep 17 00:00:00 2001 From: ezamalie Date: Wed, 20 Nov 2019 11:41:05 +0300 Subject: [PATCH 05/18] Fixed readme for mask_rcnn_demo --- demos/mask_rcnn_demo/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/demos/mask_rcnn_demo/README.md b/demos/mask_rcnn_demo/README.md index 7da26a82949..ca448554ccf 100644 --- a/demos/mask_rcnn_demo/README.md +++ b/demos/mask_rcnn_demo/README.md @@ -41,7 +41,7 @@ To run the demo, you can use public or pre-trained models. 
To download the pre-t You can use the following command to do inference on CPU on an image using a trained network: ```sh -./mask_rcnn_demo -i /inputImage.bmp -m /faster_rcnn.xml +./mask_rcnn_demo -i /inputImage.bmp -m /mask_rcnn_inception_resnet_v2_atrous_coco.xml ``` ## Demo Output From 34aeb3aa18d823dd84819b61bc178b4bce97987d Mon Sep 17 00:00:00 2001 From: ezamalie Date: Fri, 22 Nov 2019 19:04:24 +0300 Subject: [PATCH 06/18] Add test for face_recognition_demo --- demos/tests/cases.py | 18 ++++++++++++++++-- demos/tests/image_sequences.py | 6 ++++++ 2 files changed, 22 insertions(+), 2 deletions(-) diff --git a/demos/tests/cases.py b/demos/tests/cases.py index e9208cb8171..b4a36149ef8 100644 --- a/demos/tests/cases.py +++ b/demos/tests/cases.py @@ -300,8 +300,22 @@ def single_option_cases(key, *args): ], )), - # TODO: face_recognition_demo: requires face gallery - + PythonDemo(subdirectory='face_recognition_demo', test_cases=combine_cases( + TestCase(options={'--no_show': None, + '-i': ImagePatternArg('face-detection-adas'), + '-fg': ImageDirectoryArg('face-recognition-gallery') + }), + device_cases('-d_fd', '-d_lm', '-d_reid'), + single_option_cases('-m_fd', + ModelArg('face-detection-adas-0001'), + ModelArg('face-detection-adas-binary-0001', "INT1"), + ModelArg('face-detection-retail-0004'), + ModelArg('face-detection-retail-0005'), + ModelArg('face-detection-retail-0044')), + TestCase(options={'-m_lm': ModelArg('landmarks-regression-retail-0009')}), + TestCase(options={'-m_reid': ModelArg('face-reidentification-retail-0095')}), + )), + PythonDemo(subdirectory='image_retrieval_demo', test_cases=combine_cases( TestCase(options={'--no_show':None, '-m': ModelArg('image-retrieval-0001')}), diff --git a/demos/tests/image_sequences.py b/demos/tests/image_sequences.py index 61c630e0367..8e266c9f87b 100644 --- a/demos/tests/image_sequences.py +++ b/demos/tests/image_sequences.py @@ -51,6 +51,12 @@ image_net_arg('00045630'), ], + 'face-recognition-gallery': [ + image_net_arg('00000184'), + image_net_arg('00008165'), + image_net_arg('00040548'), + ], + 'gaze-estimation-adas': [ image_net_arg('00008165'), image_net_arg('00008170'), From f65712bafc5037385027de72c7e29a4bcf58aff7 Mon Sep 17 00:00:00 2001 From: ezamalie Date: Fri, 22 Nov 2019 19:25:16 +0300 Subject: [PATCH 07/18] "device_cases" -> "device_keys" --- demos/tests/cases.py | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) diff --git a/demos/tests/cases.py b/demos/tests/cases.py index b4a36149ef8..7d1884b0435 100644 --- a/demos/tests/cases.py +++ b/demos/tests/cases.py @@ -139,9 +139,8 @@ def single_option_cases(key, *args): ], )), - NativeDemo(subdirectory='mask_rcnn_demo', test_cases=combine_cases( + NativeDemo(subdirectory='mask_rcnn_demo', device_keys=['-d'], test_cases=combine_cases( TestCase(options={'-i': ImageDirectoryArg('semantic-segmentation-adas')}), - device_cases('-d'), single_option_cases('-m', ModelArg('mask_rcnn_inception_resnet_v2_atrous_coco'), ModelArg('mask_rcnn_inception_v2_coco'), @@ -279,10 +278,9 @@ def single_option_cases(key, *args): ] PYTHON_DEMOS = [ - PythonDemo(subdirectory='3d_segmentation_demo', test_cases=combine_cases( + PythonDemo(subdirectory='3d_segmentation_demo', device_keys=['-d'], test_cases=combine_cases( TestCase(options={'-m': ModelArg('brain-tumor-segmentation-0001'), '-o': '.'}), - device_cases('-d'), single_option_cases('-i', *IMAGE_SEQUENCES['brain-tumor-nifti']), )), @@ -300,12 +298,12 @@ def single_option_cases(key, *args): ], )), - 
PythonDemo(subdirectory='face_recognition_demo', test_cases=combine_cases( + PythonDemo(subdirectory='face_recognition_demo', device_keys=['-d_fd', '-d_lm', '-d_reid'], + test_cases=combine_cases( TestCase(options={'--no_show': None, '-i': ImagePatternArg('face-detection-adas'), '-fg': ImageDirectoryArg('face-recognition-gallery') }), - device_cases('-d_fd', '-d_lm', '-d_reid'), single_option_cases('-m_fd', ModelArg('face-detection-adas-0001'), ModelArg('face-detection-adas-binary-0001', "INT1"), @@ -316,10 +314,9 @@ def single_option_cases(key, *args): TestCase(options={'-m_reid': ModelArg('face-reidentification-retail-0095')}), )), - PythonDemo(subdirectory='image_retrieval_demo', test_cases=combine_cases( + PythonDemo(subdirectory='image_retrieval_demo', device_keys=['-d'], test_cases=combine_cases( TestCase(options={'--no_show':None, '-m': ModelArg('image-retrieval-0001')}), - device_cases('-d'), single_option_cases('-i', *IMAGE_SEQUENCES['image-retrieval-video']), single_option_cases('-g', *IMAGE_SEQUENCES['image-retrieval-gallery']), )), From c9cb026f5201f817da9736eb0e5bef87d414a91f Mon Sep 17 00:00:00 2001 From: ezamalie Date: Mon, 25 Nov 2019 12:08:40 +0300 Subject: [PATCH 08/18] Demos requirements updated --- ci/requirements-demos.txt | 3 +++ 1 file changed, 3 insertions(+) diff --git a/ci/requirements-demos.txt b/ci/requirements-demos.txt index 8682c8aae05..45cfbfec66a 100644 --- a/ci/requirements-demos.txt +++ b/ci/requirements-demos.txt @@ -1,4 +1,7 @@ +joblib==0.14.0 nibabel==2.5.1 numpy==1.17.2 ; python_version >= "3.4" +scikit-learn==0.21.3 scipy==1.3.1 six==1.12.0 # via nibabel +tqdm==4.39.0 From e8dd9b2d03b58735e502498dca225de386e2dd50 Mon Sep 17 00:00:00 2001 From: ezamalie Date: Tue, 26 Nov 2019 11:29:30 +0300 Subject: [PATCH 09/18] Added data source for image_retrieval_demo --- demos/tests/args.py | 2 +- demos/tests/image_sequences.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/demos/tests/args.py b/demos/tests/args.py index b7dd0e80fdd..8a80808511c 100644 --- a/demos/tests/args.py +++ b/demos/tests/args.py @@ -36,7 +36,7 @@ def brats_arg(id): def image_retrieval_arg(id): - return TestDataArg('test-master/{}'.format(id)) + return TestDataArg('Image_Retrieval/{}'.format(id)) class ModelArg: diff --git a/demos/tests/image_sequences.py b/demos/tests/image_sequences.py index 8e266c9f87b..398a0aafd13 100644 --- a/demos/tests/image_sequences.py +++ b/demos/tests/image_sequences.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from args import image_net_arg, brats_arg, image_retrieval_arg +from args import image_net_arg, brats_arg, image_retrieval_arg, TestDataArg IMAGE_SEQUENCES = { 'action-recognition': [ @@ -227,7 +227,7 @@ ], 'image-retrieval-gallery': [ - image_retrieval_arg('gallery.txt'), + TestDataArg('gallery.txt'), ], 'image-retrieval-video': [ From b6b0c17d0bbcd745ca4f369d283527614b176b97 Mon Sep 17 00:00:00 2001 From: ezamalie Date: Tue, 26 Nov 2019 12:04:12 +0300 Subject: [PATCH 10/18] Added data source for 3d_segmentation_demo --- demos/tests/args.py | 2 +- demos/tests/image_sequences.py | 6 ++++-- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/demos/tests/args.py b/demos/tests/args.py index 8a80808511c..1b1d50d91d2 100644 --- a/demos/tests/args.py +++ b/demos/tests/args.py @@ -32,7 +32,7 @@ def image_net_arg(id): def brats_arg(id): - return TestDataArg('HGG_LGG/{}'.format(id)) + return TestDataArg('BraTS/{}'.format(id)) def image_retrieval_arg(id): diff --git a/demos/tests/image_sequences.py b/demos/tests/image_sequences.py index 398a0aafd13..135d8ab8157 100644 --- a/demos/tests/image_sequences.py +++ b/demos/tests/image_sequences.py @@ -222,8 +222,10 @@ 'brain-tumor-nifti': [ brats_arg('BRATS_485.nii.gz'), - brats_arg('BRATS_501.nii.gz'), - brats_arg('BRATS_750.nii,gz'), + brats_arg('BRATS_486.nii.gz'), + brats_arg('BRATS_487.nii.gz'), + brats_arg('BRATS_488.nii.gz'), + brats_arg('BRATS_489.nii.gz'), ], 'image-retrieval-gallery': [ From 1553b8329bf3146f3ceb9e68ad03ffcc528b35b5 Mon Sep 17 00:00:00 2001 From: ezamalie Date: Tue, 26 Nov 2019 12:25:01 +0300 Subject: [PATCH 11/18] Fixed data source for image_retrieval_demo --- demos/tests/image_sequences.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/demos/tests/image_sequences.py b/demos/tests/image_sequences.py index 135d8ab8157..d57262eeff3 100644 --- a/demos/tests/image_sequences.py +++ b/demos/tests/image_sequences.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from args import image_net_arg, brats_arg, image_retrieval_arg, TestDataArg +from args import image_net_arg, brats_arg, image_retrieval_arg IMAGE_SEQUENCES = { 'action-recognition': [ @@ -229,7 +229,7 @@ ], 'image-retrieval-gallery': [ - TestDataArg('gallery.txt'), + image_retrieval_arg('gallery.txt'), ], 'image-retrieval-video': [ From 3995e4e4a6f9d0bf9aedb003a0b7f45a71bcdcfd Mon Sep 17 00:00:00 2001 From: eizamaliev Date: Sat, 30 Nov 2019 12:24:26 +0300 Subject: [PATCH 12/18] FIX --- demos/tests/image_sequences.py | 1 - demos/tests/run_tests.py | 4 +++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/demos/tests/image_sequences.py b/demos/tests/image_sequences.py index d57262eeff3..ecf7bb24432 100644 --- a/demos/tests/image_sequences.py +++ b/demos/tests/image_sequences.py @@ -238,6 +238,5 @@ image_retrieval_arg('d0c460d0-4d75-4315-98a8-a0116d3dfb81.dav'), image_retrieval_arg('636e91cc-4829-40bd-a8bc-18505b943a9b.dav'), image_retrieval_arg('add09613-b45c-432f-94c0-f785d3ae2a7e.dav'), - ] } diff --git a/demos/tests/run_tests.py b/demos/tests/run_tests.py index d248559d390..19a1efd09a9 100644 --- a/demos/tests/run_tests.py +++ b/demos/tests/run_tests.py @@ -18,7 +18,9 @@ Test script for the demos. For the tests to work, the test data directory must contain a "ILSVRC2012_img_val" -subdirectory with the ILSVRC2012 dataset. 
+subdirectory with the ILSVRC2012 dataset, a "BraTS" subdirectory with BraTS 2017 +dataset in NIFTI format and "Image_Retrieval" subdirectory with image retrieval +dataset (images, videos). """ import argparse From ecd0c627e508c57d9fb6882605545a665125e0f7 Mon Sep 17 00:00:00 2001 From: ezamalie Date: Mon, 2 Dec 2019 15:29:10 +0300 Subject: [PATCH 13/18] Update --- demos/python_demos/requirements.txt | 10 +++- demos/tests/args.py | 25 ++++---- demos/tests/cases.py | 60 +++++++++---------- .../{image_sequences.py => data_sequences.py} | 2 +- demos/tests/run_tests.py | 17 +++--- 5 files changed, 62 insertions(+), 52 deletions(-) rename demos/tests/{image_sequences.py => data_sequences.py} (99%) diff --git a/demos/python_demos/requirements.txt b/demos/python_demos/requirements.txt index 1a80eafc6bb..45cfbfec66a 100644 --- a/demos/python_demos/requirements.txt +++ b/demos/python_demos/requirements.txt @@ -1,3 +1,7 @@ -opencv-python -numpy -scipy +joblib==0.14.0 +nibabel==2.5.1 +numpy==1.17.2 ; python_version >= "3.4" +scikit-learn==0.21.3 +scipy==1.3.1 +six==1.12.0 # via nibabel +tqdm==4.39.0 diff --git a/demos/tests/args.py b/demos/tests/args.py index 1b1d50d91d2..9512697c048 100644 --- a/demos/tests/args.py +++ b/demos/tests/args.py @@ -18,7 +18,8 @@ from pathlib import Path ArgContext = collections.namedtuple('ArgContext', - ['source_dir', 'test_data_dir', 'dl_dir', 'model_info', 'image_sequences', 'image_sequence_dir']) + ['source_dir', 'test_data_dir', 'dl_dir', 'model_info', 'data_sequences', 'data_sequence_dir']) + class TestDataArg: def __init__(self, rel_path): @@ -27,6 +28,7 @@ def __init__(self, rel_path): def resolve(self, context): return str(context.test_data_dir / self.rel_path) + def image_net_arg(id): return TestDataArg('ILSVRC2012_img_val/ILSVRC2012_val_{}.JPEG'.format(id)) @@ -47,16 +49,17 @@ def __init__(self, name, precision='FP32'): def resolve(self, context): return str(context.dl_dir / context.model_info[self.name]["subdirectory"] / self.precision / (self.name + '.xml')) -class ImagePatternArg: + +class DataPatternArg: def __init__(self, sequence_name): self.sequence_name = sequence_name def resolve(self, context): - seq_dir = context.image_sequence_dir / self.sequence_name - seq = [Path(image.resolve(context)) - for image in context.image_sequences[self.sequence_name]] + seq_dir = context.data_sequence_dir / self.sequence_name + seq = [Path(data.resolve(context)) + for data in context.data_sequences[self.sequence_name]] - assert len(set(image.suffix for image in seq)) == 1, "all images in the sequence must have the same extension" + assert len(set(data.suffix for data in seq)) == 1, "all images in the sequence must have the same extension" assert '%' not in seq[0].suffix name_format = 'input-%04d' + seq[0].suffix @@ -64,19 +67,21 @@ def resolve(self, context): if not seq_dir.is_dir(): seq_dir.mkdir(parents=True) - for index, image in enumerate(context.image_sequences[self.sequence_name]): - shutil.copyfile(image.resolve(context), str(seq_dir / (name_format % index))) + for index, data in enumerate(context.data_sequences[self.sequence_name]): + shutil.copyfile(data.resolve(context), str(seq_dir / (name_format % index))) return str(seq_dir / name_format) -class ImageDirectoryArg: + +class DataDirectoryArg: def __init__(self, sequence_name): - self.backend = ImagePatternArg(sequence_name) + self.backend = DataPatternArg(sequence_name) def resolve(self, context): pattern = self.backend.resolve(context) return str(Path(pattern).parent) + class DemoFileArg: def __init__(self, 
file_name): self.file_name = file_name diff --git a/demos/tests/cases.py b/demos/tests/cases.py index 7d1884b0435..79faff4afa2 100644 --- a/demos/tests/cases.py +++ b/demos/tests/cases.py @@ -16,7 +16,7 @@ import sys from args import * -from image_sequences import IMAGE_SEQUENCES +from data_sequences import DATA_SEQUENCES MONITORS = {'-u': 'cdm'} TestCase = collections.namedtuple('TestCase', ['options']) @@ -90,7 +90,7 @@ def single_option_cases(key, *args): test_cases=combine_cases( TestCase(options={'-no_show': None, **MONITORS, - '-i': ImagePatternArg('person-vehicle-bike-detection-crossroad')}), + '-i': DataPatternArg('person-vehicle-bike-detection-crossroad')}), TestCase(options={'-m': ModelArg('person-vehicle-bike-detection-crossroad-0078')}), single_option_cases('-m_pa', None, ModelArg('person-attributes-recognition-crossroad-0230')), single_option_cases('-m_reid', None, ModelArg('person-reidentification-retail-0079')), @@ -101,7 +101,7 @@ def single_option_cases(key, *args): test_cases=combine_cases( TestCase(options={'-no_show': None, **MONITORS, - '-i': ImagePatternArg('gaze-estimation-adas')}), + '-i': DataPatternArg('gaze-estimation-adas')}), TestCase(options={ '-m': ModelArg('gaze-estimation-adas-0002'), '-m_fd': ModelArg('face-detection-adas-0001'), @@ -113,7 +113,7 @@ def single_option_cases(key, *args): NativeDemo(subdirectory='human_pose_estimation_demo', device_keys=['-d'], test_cases=combine_cases( TestCase(options={'-no_show': None, **MONITORS, - '-i': ImagePatternArg('human-pose-estimation')}), + '-i': DataPatternArg('human-pose-estimation')}), TestCase(options={'-m': ModelArg('human-pose-estimation-0001')}), )), @@ -122,7 +122,7 @@ def single_option_cases(key, *args): test_cases=combine_cases( TestCase(options={'-no_show': None, **MONITORS, - '-i': ImagePatternArg('face-detection-adas')}), + '-i': DataPatternArg('face-detection-adas')}), TestCase(options={'-m': ModelArg('face-detection-adas-0001')}), [ TestCase(options={}), @@ -140,7 +140,7 @@ def single_option_cases(key, *args): )), NativeDemo(subdirectory='mask_rcnn_demo', device_keys=['-d'], test_cases=combine_cases( - TestCase(options={'-i': ImageDirectoryArg('semantic-segmentation-adas')}), + TestCase(options={'-i': DataDirectoryArg('semantic-segmentation-adas')}), single_option_cases('-m', ModelArg('mask_rcnn_inception_resnet_v2_atrous_coco'), ModelArg('mask_rcnn_inception_v2_coco'), @@ -153,7 +153,7 @@ def single_option_cases(key, *args): test_cases=combine_cases( TestCase(options={'-no_show': None, **MONITORS, - '-i': IMAGE_SEQUENCES['face-detection-adas']}), + '-i': DATA_SEQUENCES['face-detection-adas']}), single_option_cases('-m', ModelArg('face-detection-adas-0001'), ModelArg('face-detection-adas-binary-0001', "INT1"), @@ -166,7 +166,7 @@ def single_option_cases(key, *args): test_cases=combine_cases( TestCase(options={'-no_show': None, **MONITORS, - '-i': IMAGE_SEQUENCES['human-pose-estimation'], + '-i': DATA_SEQUENCES['human-pose-estimation'], '-m': ModelArg('human-pose-estimation-0001')}), )), @@ -175,15 +175,15 @@ def single_option_cases(key, *args): [ TestCase(options={ '-m': ModelArg('face-detection-adas-0001'), - '-i': ImagePatternArg('face-detection-adas'), + '-i': DataPatternArg('face-detection-adas'), }), TestCase(options={ '-m': ModelArg('person-detection-retail-0002'), - '-i': ImagePatternArg('person-detection-retail'), + '-i': DataPatternArg('person-detection-retail'), }), TestCase(options={ '-m': ModelArg('person-detection-retail-0013'), - '-i': ImagePatternArg('person-detection-retail'), + 
'-i': DataPatternArg('person-detection-retail'), }), ], )), @@ -193,7 +193,7 @@ def single_option_cases(key, *args): NativeDemo('pedestrian_tracker_demo', device_keys=['-d_det', '-d_reid'], test_cases=combine_cases( TestCase(options={'-no_show': None, **MONITORS, - '-i': ImagePatternArg('person-detection-retail')}), + '-i': DataPatternArg('person-detection-retail')}), [ TestCase(options={'-m_det': ModelArg('person-detection-retail-0002')}), TestCase(options={'-m_det': ModelArg('person-detection-retail-0013')}), @@ -209,7 +209,7 @@ def single_option_cases(key, *args): test_cases=combine_cases( TestCase(options={'-no_show': None, **MONITORS, - '-i': ImageDirectoryArg('vehicle-license-plate-detection-barrier')}), + '-i': DataDirectoryArg('vehicle-license-plate-detection-barrier')}), TestCase(options={'-m': ModelArg('vehicle-license-plate-detection-barrier-0106')}), single_option_cases('-m_lpr', None, @@ -222,11 +222,11 @@ def single_option_cases(key, *args): [ TestCase(options={ '-m': ModelArg('road-segmentation-adas-0001'), - '-i': ImageDirectoryArg('road-segmentation-adas'), + '-i': DataDirectoryArg('road-segmentation-adas'), }), TestCase(options={ '-m': ModelArg('semantic-segmentation-adas-0001'), - '-i': ImageDirectoryArg('semantic-segmentation-adas'), + '-i': DataDirectoryArg('semantic-segmentation-adas'), }), ], )), @@ -236,7 +236,7 @@ def single_option_cases(key, *args): test_cases=combine_cases( TestCase(options={'-no_show': None, **MONITORS, - '-i': ImagePatternArg('smart-classroom-demo'), + '-i': DataPatternArg('smart-classroom-demo'), '-m_fd': ModelArg('face-detection-adas-0001')}), [ *combine_cases( @@ -262,7 +262,7 @@ def single_option_cases(key, *args): )), NativeDemo(subdirectory='super_resolution_demo', device_keys=['-d'], test_cases=combine_cases( - TestCase(options={'-i': ImageDirectoryArg('single-image-super-resolution')}), + TestCase(options={'-i': DataDirectoryArg('single-image-super-resolution')}), TestCase(options={ '-m': ModelArg('single-image-super-resolution-1033'), }), @@ -271,7 +271,7 @@ def single_option_cases(key, *args): NativeDemo(subdirectory='text_detection_demo', device_keys=['-d_td', '-d_tr'], test_cases=combine_cases( TestCase(options={'-no_show': None, '-dt': 'video', **MONITORS, - '-i': ImagePatternArg('text-detection')}), + '-i': DataPatternArg('text-detection')}), single_option_cases('-m_td', ModelArg('text-detection-0003'), ModelArg('text-detection-0004')), single_option_cases('-m_tr', None, ModelArg('text-recognition-0012')), )), @@ -281,11 +281,11 @@ def single_option_cases(key, *args): PythonDemo(subdirectory='3d_segmentation_demo', device_keys=['-d'], test_cases=combine_cases( TestCase(options={'-m': ModelArg('brain-tumor-segmentation-0001'), '-o': '.'}), - single_option_cases('-i', *IMAGE_SEQUENCES['brain-tumor-nifti']), + single_option_cases('-i', *DATA_SEQUENCES['brain-tumor-nifti']), )), PythonDemo(subdirectory='action_recognition', device_keys=['-d'], test_cases=combine_cases( - TestCase(options={'--no_show': None, '-i': ImagePatternArg('action-recognition')}), + TestCase(options={'--no_show': None, '-i': DataPatternArg('action-recognition')}), [ TestCase(options={ '-m_en': ModelArg('action-recognition-0001-encoder'), @@ -301,8 +301,8 @@ def single_option_cases(key, *args): PythonDemo(subdirectory='face_recognition_demo', device_keys=['-d_fd', '-d_lm', '-d_reid'], test_cases=combine_cases( TestCase(options={'--no_show': None, - '-i': ImagePatternArg('face-detection-adas'), - '-fg': ImageDirectoryArg('face-recognition-gallery') + '-i': 
DataPatternArg('face-detection-adas'), + '-fg': DataDirectoryArg('face-recognition-gallery') }), single_option_cases('-m_fd', ModelArg('face-detection-adas-0001'), @@ -317,13 +317,13 @@ def single_option_cases(key, *args): PythonDemo(subdirectory='image_retrieval_demo', device_keys=['-d'], test_cases=combine_cases( TestCase(options={'--no_show':None, '-m': ModelArg('image-retrieval-0001')}), - single_option_cases('-i', *IMAGE_SEQUENCES['image-retrieval-video']), - single_option_cases('-g', *IMAGE_SEQUENCES['image-retrieval-gallery']), + single_option_cases('-i', *DATA_SEQUENCES['image-retrieval-video']), + single_option_cases('-g', *DATA_SEQUENCES['image-retrieval-gallery']), )), PythonDemo(subdirectory='instance_segmentation_demo', device_keys=[], test_cases=combine_cases( TestCase(options={'--no_show': None, - '-i': ImagePatternArg('instance-segmentation'), + '-i': DataPatternArg('instance-segmentation'), '--delay': '1', '-d': 'CPU', # GPU is not supported '--labels': DemoFileArg('coco_labels.txt')}), @@ -335,8 +335,8 @@ def single_option_cases(key, *args): PythonDemo(subdirectory='multi_camera_multi_person_tracking', device_keys=['-d'], test_cases=combine_cases( TestCase(options={'--no_show': None, - '-i': [ImagePatternArg('multi-camera-multi-person-tracking'), - ImagePatternArg('multi-camera-multi-person-tracking/repeated')], + '-i': [DataPatternArg('multi-camera-multi-person-tracking'), + DataPatternArg('multi-camera-multi-person-tracking/repeated')], '-m': ModelArg('person-detection-retail-0013')}), single_option_cases('--m_reid', ModelArg('person-reidentification-retail-0031'), @@ -346,7 +346,7 @@ def single_option_cases(key, *args): PythonDemo(subdirectory='object_detection_demo_ssd_async', device_keys=['-d'], test_cases=combine_cases( TestCase(options={'--no_show': None, - '-i': ImagePatternArg('object-detection-demo-ssd-async')}), + '-i': DataPatternArg('object-detection-demo-ssd-async')}), single_option_cases('-m', ModelArg('face-detection-adas-0001'), ModelArg('face-detection-adas-binary-0001', "INT1"), @@ -368,11 +368,11 @@ def single_option_cases(key, *args): [ TestCase(options={ '-m': ModelArg('road-segmentation-adas-0001'), - '-i': IMAGE_SEQUENCES['road-segmentation-adas'], + '-i': DATA_SEQUENCES['road-segmentation-adas'], }), TestCase(options={ '-m': ModelArg('semantic-segmentation-adas-0001'), - '-i': IMAGE_SEQUENCES['semantic-segmentation-adas'], + '-i': DATA_SEQUENCES['semantic-segmentation-adas'], }), ], )), diff --git a/demos/tests/image_sequences.py b/demos/tests/data_sequences.py similarity index 99% rename from demos/tests/image_sequences.py rename to demos/tests/data_sequences.py index ecf7bb24432..94bd96bd9ec 100644 --- a/demos/tests/image_sequences.py +++ b/demos/tests/data_sequences.py @@ -14,7 +14,7 @@ from args import image_net_arg, brats_arg, image_retrieval_arg -IMAGE_SEQUENCES = { +DATA_SEQUENCES = { 'action-recognition': [ image_net_arg('00000001'), image_net_arg('00000002'), diff --git a/demos/tests/run_tests.py b/demos/tests/run_tests.py index 19a1efd09a9..853b6bd4a9a 100644 --- a/demos/tests/run_tests.py +++ b/demos/tests/run_tests.py @@ -17,10 +17,11 @@ """ Test script for the demos. -For the tests to work, the test data directory must contain a "ILSVRC2012_img_val" -subdirectory with the ILSVRC2012 dataset, a "BraTS" subdirectory with BraTS 2017 -dataset in NIFTI format and "Image_Retrieval" subdirectory with image retrieval -dataset (images, videos). 
+For the tests to work, the test data directory must contain: +* a "ILSVRC2012_img_val" subdirectory with the ILSVRC2012 dataset; +* a "BraTS" subdirectory with BraTS 2017 dataset in NIFTI format (see http://medicaldecathlon.com); +* a "Image_Retrieval" subdirectory with image retrieval dataset (images, videos) (see https://github.com/19900531/test) + and list of images (see https://github.com/opencv/openvino_training_extensions/blob/develop/tensorflow_toolkit/image_retrieval/data/gallery/gallery.txt) """ import argparse @@ -39,7 +40,7 @@ from args import ArgContext, ModelArg from cases import DEMOS -from image_sequences import IMAGE_SEQUENCES +from data_sequences import DATA_SEQUENCES def parse_args(): parser = argparse.ArgumentParser( @@ -131,8 +132,8 @@ def main(): arg_context = ArgContext( source_dir=demos_dir / demo.subdirectory, dl_dir=dl_dir, - image_sequence_dir=Path(temp_dir) / 'image_seq', - image_sequences=IMAGE_SEQUENCES, + data_sequence_dir=Path(temp_dir) / 'data_seq', + data_sequences=DATA_SEQUENCES, model_info=model_info, test_data_dir=args.test_data_dir, ) @@ -173,7 +174,7 @@ def option_to_args(key, value): print('Exit code:', e.returncode) num_failures += 1 execution_time = -1 - + if args.report_file: collect_result(demo.full_name, device, pipeline, execution_time, args.report_file) From ee38d7ea6e9d49172a2bf3c36ee7cc816c2e0613 Mon Sep 17 00:00:00 2001 From: ezamalie Date: Mon, 2 Dec 2019 16:28:08 +0300 Subject: [PATCH 14/18] FIX --- demos/python_demos/requirements.txt | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/demos/python_demos/requirements.txt b/demos/python_demos/requirements.txt index 45cfbfec66a..351f2634bec 100644 --- a/demos/python_demos/requirements.txt +++ b/demos/python_demos/requirements.txt @@ -1,7 +1,7 @@ -joblib==0.14.0 -nibabel==2.5.1 -numpy==1.17.2 ; python_version >= "3.4" -scikit-learn==0.21.3 -scipy==1.3.1 -six==1.12.0 # via nibabel -tqdm==4.39.0 +joblib +nibabel +numpy +scikit-learn +scipy +six +tqdm From c64e9572d8f6e1ceae54d9d71559dd9a7e8760d8 Mon Sep 17 00:00:00 2001 From: ezamalie Date: Wed, 4 Dec 2019 11:52:47 +0300 Subject: [PATCH 15/18] FIX --- demos/python_demos/requirements.txt | 2 -- demos/tests/run_tests.py | 3 ++- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/demos/python_demos/requirements.txt b/demos/python_demos/requirements.txt index 351f2634bec..e76286bba86 100644 --- a/demos/python_demos/requirements.txt +++ b/demos/python_demos/requirements.txt @@ -1,7 +1,5 @@ -joblib nibabel numpy scikit-learn scipy -six tqdm diff --git a/demos/tests/run_tests.py b/demos/tests/run_tests.py index 853b6bd4a9a..0207134c864 100644 --- a/demos/tests/run_tests.py +++ b/demos/tests/run_tests.py @@ -18,8 +18,9 @@ Test script for the demos. 
For the tests to work, the test data directory must contain: +* a "BraTS" subdirectory with BraTS 2017 dataset in NIFTI format (see http://medicaldecathlon.com, + https://drive.google.com/open?id=1A2IU8Sgea1h3fYLpYtFb2v7NYdMjvEhU); * a "ILSVRC2012_img_val" subdirectory with the ILSVRC2012 dataset; -* a "BraTS" subdirectory with BraTS 2017 dataset in NIFTI format (see http://medicaldecathlon.com); * a "Image_Retrieval" subdirectory with image retrieval dataset (images, videos) (see https://github.com/19900531/test) and list of images (see https://github.com/opencv/openvino_training_extensions/blob/develop/tensorflow_toolkit/image_retrieval/data/gallery/gallery.txt) """ From af8b184bda5851b0739aff0b12feb1ae1d9373e7 Mon Sep 17 00:00:00 2001 From: ezamalie Date: Mon, 16 Dec 2019 11:39:58 +0300 Subject: [PATCH 16/18] FIX: Testing data reduced to one for "one-per-run" demos --- demos/tests/data_sequences.py | 8 -------- 1 file changed, 8 deletions(-) diff --git a/demos/tests/data_sequences.py b/demos/tests/data_sequences.py index 94bd96bd9ec..b9871932fa6 100644 --- a/demos/tests/data_sequences.py +++ b/demos/tests/data_sequences.py @@ -222,10 +222,6 @@ 'brain-tumor-nifti': [ brats_arg('BRATS_485.nii.gz'), - brats_arg('BRATS_486.nii.gz'), - brats_arg('BRATS_487.nii.gz'), - brats_arg('BRATS_488.nii.gz'), - brats_arg('BRATS_489.nii.gz'), ], 'image-retrieval-gallery': [ @@ -234,9 +230,5 @@ 'image-retrieval-video': [ image_retrieval_arg('4946fb41-9da0-4af7-a858-b443bee6d0f6.dav'), - image_retrieval_arg('7f8bd393-0f04-43fe-8cb8-4e0cde7305e6.dav'), - image_retrieval_arg('d0c460d0-4d75-4315-98a8-a0116d3dfb81.dav'), - image_retrieval_arg('636e91cc-4829-40bd-a8bc-18505b943a9b.dav'), - image_retrieval_arg('add09613-b45c-432f-94c0-f785d3ae2a7e.dav'), ] } From 7ef73a13ebb9cc19792ecdd29af1af9211cad047 Mon Sep 17 00:00:00 2001 From: ezamalie Date: Mon, 16 Dec 2019 11:40:41 +0300 Subject: [PATCH 17/18] FIX: gallery.txt no data sequence anymore --- demos/tests/cases.py | 2 +- demos/tests/data_sequences.py | 4 ---- 2 files changed, 1 insertion(+), 5 deletions(-) diff --git a/demos/tests/cases.py b/demos/tests/cases.py index 79faff4afa2..71638601b10 100644 --- a/demos/tests/cases.py +++ b/demos/tests/cases.py @@ -318,7 +318,7 @@ def single_option_cases(key, *args): TestCase(options={'--no_show':None, '-m': ModelArg('image-retrieval-0001')}), single_option_cases('-i', *DATA_SEQUENCES['image-retrieval-video']), - single_option_cases('-g', *DATA_SEQUENCES['image-retrieval-gallery']), + single_option_cases('-g', image_retrieval_arg('gallery.txt')), )), PythonDemo(subdirectory='instance_segmentation_demo', device_keys=[], test_cases=combine_cases( diff --git a/demos/tests/data_sequences.py b/demos/tests/data_sequences.py index b9871932fa6..b38fe2334a4 100644 --- a/demos/tests/data_sequences.py +++ b/demos/tests/data_sequences.py @@ -224,10 +224,6 @@ brats_arg('BRATS_485.nii.gz'), ], - 'image-retrieval-gallery': [ - image_retrieval_arg('gallery.txt'), - ], - 'image-retrieval-video': [ image_retrieval_arg('4946fb41-9da0-4af7-a858-b443bee6d0f6.dav'), ] From 42f6bb1f1b57bde9049a6db5cd4a487e07e3ec51 Mon Sep 17 00:00:00 2001 From: ezamalie Date: Tue, 17 Dec 2019 11:16:41 +0300 Subject: [PATCH 18/18] Remarks are fixed --- demos/tests/data_sequences.py | 16 ++++++++-------- demos/tests/run_tests.py | 2 +- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/demos/tests/data_sequences.py b/demos/tests/data_sequences.py index b38fe2334a4..2692f545650 100644 --- a/demos/tests/data_sequences.py +++ 
b/demos/tests/data_sequences.py @@ -38,6 +38,10 @@ image_net_arg('00000020'), ], + 'brain-tumor-nifti': [ + brats_arg('BRATS_485.nii.gz'), + ], + 'face-detection-adas': [ image_net_arg('00000002'), image_net_arg('00000032'), @@ -83,6 +87,10 @@ image_net_arg('00048311'), ], + 'image-retrieval-video': [ + image_retrieval_arg('4946fb41-9da0-4af7-a858-b443bee6d0f6.dav'), + ], + 'instance-segmentation': [ image_net_arg('00000001'), image_net_arg('00000002'), @@ -219,12 +227,4 @@ image_net_arg('00037128'), image_net_arg('00048316'), ], - - 'brain-tumor-nifti': [ - brats_arg('BRATS_485.nii.gz'), - ], - - 'image-retrieval-video': [ - image_retrieval_arg('4946fb41-9da0-4af7-a858-b443bee6d0f6.dav'), - ] } diff --git a/demos/tests/run_tests.py b/demos/tests/run_tests.py index 0207134c864..61782bf6e22 100644 --- a/demos/tests/run_tests.py +++ b/demos/tests/run_tests.py @@ -18,7 +18,7 @@ Test script for the demos. For the tests to work, the test data directory must contain: -* a "BraTS" subdirectory with BraTS 2017 dataset in NIFTI format (see http://medicaldecathlon.com, +* a "BraTS" subdirectory with brain tumor dataset in NIFTI format (see http://medicaldecathlon.com, https://drive.google.com/open?id=1A2IU8Sgea1h3fYLpYtFb2v7NYdMjvEhU); * a "ILSVRC2012_img_val" subdirectory with the ILSVRC2012 dataset; * a "Image_Retrieval" subdirectory with image retrieval dataset (images, videos) (see https://github.com/19900531/test)
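
The cases.py entries added throughout this series all lean on the combine_cases / single_option_cases helpers from demos/tests/cases.py. Only their call sites appear in the hunks above, so the sketch below is an illustrative reimplementation of how they are assumed to behave: combine_cases takes fixed cases and lists of alternatives and produces the cross product of merged option dictionaries, while single_option_cases turns one key and several values (with None meaning "run once without the option") into such a list of alternatives. Plain strings stand in for ModelArg / DataDirectoryArg to keep the sketch self-contained.

```python
import collections
import itertools

# TestCase mirrors demos/tests/cases.py; the helper bodies below are an
# assumption reconstructed from their call sites, not copied from the repo.
TestCase = collections.namedtuple('TestCase', ['options'])

def single_option_cases(key, *values):
    # One alternative per value; None stands for "run once without this option".
    return [TestCase(options={} if value is None else {key: value}) for value in values]

def combine_cases(*args):
    # Each argument is a fixed TestCase or a list of alternatives; the result is
    # the cross product of the alternatives with their option dicts merged.
    groups = [[arg] if isinstance(arg, TestCase) else list(arg) for arg in args]
    return [TestCase(options={key: value
                              for case in combo
                              for key, value in case.options.items()})
            for combo in itertools.product(*groups)]

# Plain strings stand in for ModelArg/DataDirectoryArg here. This mirrors the
# mask_rcnn_demo entry from PATCH 01: one fixed -i directory combined with four
# alternative -m models yields four test cases.
cases = combine_cases(
    TestCase(options={'-i': 'semantic-segmentation-adas'}),
    single_option_cases('-m',
                        'mask_rcnn_inception_resnet_v2_atrous_coco',
                        'mask_rcnn_inception_v2_coco',
                        'mask_rcnn_resnet101_atrous_coco',
                        'mask_rcnn_resnet50_atrous_coco'))

for case in cases:
    print(case.options)
```

Under these assumptions, the mask_rcnn_demo entry from PATCH 01 expands into four test cases, one per model, each reusing the same -i image directory.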
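
PATCH 13 renames ImagePatternArg / ImageDirectoryArg to DataPatternArg / DataDirectoryArg; their job is to materialize a named entry of DATA_SEQUENCES as a printf-style input pattern that the demos accept via -i. The standalone sketch below mirrors the resolve logic shown in the demos/tests/args.py hunk, with the sequence and scratch directory passed in directly instead of through ArgContext; the example paths in the trailing comment are placeholders, not files from the test data set.

```python
import shutil
from pathlib import Path

def resolve_pattern(sequence, seq_dir):
    """Copy a data sequence into seq_dir as input-0000.<ext>, input-0001.<ext>, ...
    and return the printf-style pattern pointing at the copies."""
    seq = [Path(p) for p in sequence]
    # All files must share one extension, and that extension must not contain '%'.
    assert len(set(p.suffix for p in seq)) == 1, \
        "all files in the sequence must have the same extension"
    assert '%' not in seq[0].suffix

    name_format = 'input-%04d' + seq[0].suffix
    seq_dir = Path(seq_dir)
    seq_dir.mkdir(parents=True, exist_ok=True)
    for index, path in enumerate(seq):
        shutil.copyfile(str(path), str(seq_dir / (name_format % index)))
    return str(seq_dir / name_format)

# For example (placeholder paths):
#   resolve_pattern(['/data/val/a.JPEG', '/data/val/b.JPEG'], '/tmp/data_seq/demo')
# returns '/tmp/data_seq/demo/input-%04d.JPEG', which is what DataPatternArg
# hands to a demo's -i option; DataDirectoryArg passes the parent directory instead.
```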