diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..c9ecf2d --- /dev/null +++ b/.gitignore @@ -0,0 +1,138 @@ +# Initially taken from Github's Python gitignore file + +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +.hypothesis/ +.pytest_cache/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +.python-version + +# celery beat schedule file +celerybeat-schedule + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# vscode +.vscode + +# TF code +tensorflow_code + +# Models +models +proc_data + +# examples +runs +examples/runs + +# pyCharm +.idea/ + +# local folders +data +models +output diff --git a/.gitmodules b/.gitmodules new file mode 100644 index 0000000..bef2b03 --- /dev/null +++ b/.gitmodules @@ -0,0 +1,6 @@ +[submodule "transformers"] + path = transformers + url = git@github.com:huggingface/transformers.git +[submodule "coco_caption"] + path = coco_caption + url = git@github.com:LuoweiZhou/coco-caption.git diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md new file mode 100644 index 0000000..f9ba8cf --- /dev/null +++ b/CODE_OF_CONDUCT.md @@ -0,0 +1,9 @@ +# Microsoft Open Source Code of Conduct + +This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). + +Resources: + +- [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/) +- [Microsoft Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) +- Contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with questions or concerns diff --git a/DOWNLOAD.md b/DOWNLOAD.md new file mode 100644 index 0000000..6e03798 --- /dev/null +++ b/DOWNLOAD.md @@ -0,0 +1,23 @@ +# Download +## Datasets +We provide the extracted image region features, object tags, and the original text annotations for each downstream tasks. +```bash +wget https://biglmdiag.blob.core.windows.net/oscar/datasets/$TASK_NAME.zip +unzip $TASK_NAME.zip -d $DATA_DIR +``` +`TASK_NAME` could be `coco_caption`, `coco_ir`, `vqa`, `GQA`, `nlvr2`. + +## Pre-trained Models +We provide pre-trained *Oscar* models of Bert-base and Bert-large structures, with the name starting with `base` and `large`, respectively. 
+```bash +wget https://biglmdiag.blob.core.windows.net/oscar/pretrained_models/$MODEL_NAME.zip +unzip $MODEL_NAME.zip -d $MODEL_DIR +``` +`MODEL_NAME` could be `base-vg-labels`, `large-vg-labels`, `base-oid-labels`, `base-no-labels`. + +The models are trained with both image region features and object tags. The image region features are extracted by the Faster R-CNN with +ResNet-101, using object and attribute annotations from [Visual Genome](http://visualgenome.org/). +The object tags are from: + 1) the same VisualGenome model, named as `-vg-labels`. Or, + 2) the model trained on object annotations from [Open Images V5](https://storage.googleapis.com/openimages/web/index.html). named as `-oid-labels`. Or, + 3) no object tags provied, serving as baseline, named as `-no-labels`. diff --git a/INSTALL.md b/INSTALL.md new file mode 100644 index 0000000..a003505 --- /dev/null +++ b/INSTALL.md @@ -0,0 +1,38 @@ +## Installation +### Requirements +- Python 3.7 +- Pytorch 1.2 +- torchvision 0.4.0 +- cuda 10.0 + +### Setup with Conda +```bash +# create a new environment +conda create --name oscar python=3.7 +conda activate oscar + +# install pytorch1.2 +conda install pytorch==1.2.0 torchvision==0.4.0 cudatoolkit=10.0 -c pytorch + +export INSTALL_DIR=$PWD + +# install apex +cd $INSTALL_DIR +git clone https://github.com/NVIDIA/apex.git +cd apex +python setup.py install --cuda_ext --cpp_ext + +# install oscar +cd $INSTALL_DIR +git clone --recursive git@github.com:xjli/Oscar.git +cd Oscar/coco_caption +./get_stanford_models.sh +cd .. +python setup.py build develop + +# install requirements +pip install -r requirements.txt + +unset INSTALL_DIR +``` + diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..9e841e7 --- /dev/null +++ b/LICENSE @@ -0,0 +1,21 @@ + MIT License + + Copyright (c) Microsoft Corporation. + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE diff --git a/MODEL_ZOO.md b/MODEL_ZOO.md new file mode 100644 index 0000000..2ab19ac --- /dev/null +++ b/MODEL_ZOO.md @@ -0,0 +1,264 @@ +## Table of Contents +- VQA +- GQA +- NLVR2 +- Image/Text Retrieval +- Image Captioning on COCO + + +## Performance +Task | t2i | t2i | i2t | i2t | IC | IC | IC | IC | NoCaps | NoCaps | VQA | NLVR2 | +--------|-----|-----|-----|-----|-----|-----|------|------|--------|--------|----------|---------| +Metric | R@1 | R@5 | R@1 | R@5 | B@4 | M | C | S | C | S | test-std | test-P | +SoTA_S |39.2 | 68.0|56.6 | 84.5|38.9 |29.2 |129.8 | 22.4 | 61.5 | 9.2 | 70.90 | 53.50 | +SoTA_B |48.4 | 76.7|63.3 | 87.0|39.5 |29.3 |129.3 | 23.2 | 73.1 | 11.2 | 72.54 | 78.87 | +SoTA_L |51.7 | 78.4|66.6 | 89.4| - | - | - | - | - | - | 73.40 | 79.50 | +----- |--- |--- |--- |--- |--- |--- |--- |--- |--- |--- |--- |--- | +Oscar_B |54.0 | 80.8|70.0 | 91.1|40.5 |29.7 |137.6 | 22.8 | 78.8 | 11.7 | 73.44 | 78.44 | +Oscar_L |57.5 | 82.8|73.5 | 92.2|41.7 |30.6 |140.0 | 24.5 | 80.9 | 11.3 | 73.82 | 80.37 | +gain | 5.8 | 4.4| 6.9 | 2.8| 2.2 | 1.3 | 10.7 | 1.3 | 7.8 | 0.5 | 0.42 | 0.87 | + +t2i: text-to-image retrieval; i2t: image-to-text retrieval; IC: image captioning on COCO. + +For reference, we also release the training logs and output. + + +## VQA +Script to finetune for Oscar base model. +Base model is trained on train split and evaluated on the val split. Good for later comparison. + +Training logs: [eval_logs.json](https://biglmdiag.blob.core.windows.net/oscar/exp/vqa/base/base_9m_ep107_1192k_eu1/application_1575931286052_40649/results/eval_logs.json), [output.txt](https://biglmdiag.blob.core.windows.net/oscar/exp/vqa/base/base_9m_ep107_1192k_eu1/application_1575931286052_40649/results/stdout.txt).
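+The released `eval_logs.json` can be loaded directly to inspect the recorded validation results (a minimal sketch; the exact keys depend on the run):
+
+```python
+import json
+
+# Path to the downloaded training log (placeholder).
+with open('eval_logs.json') as f:
+    eval_logs = json.load(f)
+
+# Print whatever evaluation entries the run recorded, in key order.
+for key, value in sorted(eval_logs.items()):
+    print(key, value)
+```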
+Final server results: [results.txt](https://biglmdiag.blob.core.windows.net/oscar/exp/vqa/base/results.txt). +```bash +python oscar/run_vqa.py -j 4 --img_feature_dim 2054 --max_img_seq_length + 50 --data_label_type mask --img_feature_type faster_r-cnn --data_dir datasets/vqa/2k + --model_type bert --model_name_or_path pretrained_models/base-vg-labels/ep_107_1192087 + --task_name vqa_text --do_train --do_lower_case --max_seq_length 128 --per_gpu_eval_batch_size + 256 --per_gpu_train_batch_size 32 --learning_rate 5e-05 --num_train_epochs 25 + --output_dir results --label_file datasets/vqa/cache/trainval_ans2label.pkl + --save_epoch 1 --seed 88 --evaluate_during_training --logging_steps 4000 --drop_out + 0.3 --weight_decay 0.05 --warmup_steps 0 --loss_type bce --img_feat_format pt + --classifier linear --cls_hidden_scale 3 --txt_data_dir datasets/vqa/2k +``` + +Script to finetune for Oscar large model. +Large model is trained on train+val split and evaluated on the val split, for reproduce the paper's best result. + +Training logs: [eval_logs.json](https://biglmdiag.blob.core.windows.net/oscar/exp/vqa/large/ab128_img_large_rr1_ep20_590k_tv_done_good/exp_ab128_img_large_rr1_ep20_590k_tv_0.00003_128_50_dp_0.3_wd_0.05_bce_3linear_s88_abcd/results/eval_logs.json), [output.txt](https://biglmdiag.blob.core.windows.net/oscar/exp/vqa/large/ab128_img_large_rr1_ep20_590k_tv_done_good/exp_ab128_img_large_rr1_ep20_590k_tv_0.00003_128_50_dp_0.3_wd_0.05_bce_3linear_s88_abcd/stdout.txt).
+Final server results: [results.txt](https://biglmdiag.blob.core.windows.net/oscar/exp/vqa/large/results.txt). +```bash +python oscar/run_vqa.py -j 4 --img_feature_dim 2054 --max_img_seq_length + 50 --data_label_type mask --img_feature_type faster_r-cnn --data_dir datasets/vqa/2k + --model_type bert --model_name_or_path pretrained_models/large-vg-labels/ep_20_590000 + --task_name vqa_text --do_train_val --do_lower_case --max_seq_length 128 --per_gpu_eval_batch_size + 256 --per_gpu_train_batch_size 24 --learning_rate 3e-05 --num_train_epochs 25 + --label_file datasets/vqa/cache/trainval_ans2label.pkl --save_epoch 30 + --seed 88 --evaluate_during_training --logging_steps 4000 --drop_out 0.3 --weight_decay + 0.05 --warmup_steps 0 --loss_type bce --save_after_epoch 15 --output_dir results --img_feat_format pt --classifier linear --cls_hidden_scale 3 --txt_data_dir datasets/vqa/2k +``` + + +## GQA +Script to finetune for Oscar base model. + +Training logs: [eval_logs.json](https://biglmdiag.blob.core.windows.net/oscar/exp/gqa/base/ab175_base_ep107_1192k_0.4true_taeb_done_25eps_good/exp_ab175_base_ep107_1192k_0.4true_taeb_b_48_0.00005_165_45_dp_0.3_abce/results/eval_logs.json), [output.txt](https://biglmdiag.blob.core.windows.net/oscar/exp/gqa/base/ab175_base_ep107_1192k_0.4true_taeb_done_25eps_good/exp_ab175_base_ep107_1192k_0.4true_taeb_b_48_0.00005_165_45_dp_0.3_abce/stdout.txt).
+Final server results: [results.txt](https://biglmdiag.blob.core.windows.net/oscar/exp/gqa/base/ab165_img45_1568928610179_62515_test_done_good/results.txt). +```bash +python oscar/run_gqa.py -j 4 --img_feature_dim 2054 --max_img_seq_length + 45 --data_dir datasets/GQA/0.4true --model_type bert --model_name_or_path pretrained_models/base-vg-labels/ep_107_1192087 + --task_name gqa --do_lower_case --max_seq_length 165 --per_gpu_eval_batch_size + 256 --per_gpu_train_batch_size 48 --learning_rate 5e-05 --num_train_epochs 5 --output_dir + results --label_file datasets/GQA/questions1.2/trainval_testdev_all_ans2label.pkl + --img_feature_type faster_r-cnn --data_label_type all --train_data_type all --eval_data_type + bal --label2ans_file datasets/GQA/questions1.2/trainval_testdev_all_label2ans.pkl + --loss_type xe --save_epoch 2 --seed 88 --evaluate_during_training --logging_steps + 4000 --drop_out 0.3 --do_train --weight_decay 0.05 --warmup_steps 0 +``` + +## NLVR2 +Script to finetune for Oscar base model. + +Training logs: [eval_logs.json](https://biglmdiag.blob.core.windows.net/oscar/exp/nlvr2/base/exp_rvln_base_ep107_1192k_wm1w_b72_0.00003_55_40_dp0.3_3mlp_wm10000_abcf_best/results/eval_logs.json), [output.txt](https://biglmdiag.blob.core.windows.net/oscar/exp/nlvr2/base/exp_rvln_base_ep107_1192k_wm1w_b72_0.00003_55_40_dp0.3_3mlp_wm10000_abcf_best/stdout.txt).
+Final server results: [results.txt](https://biglmdiag.blob.core.windows.net/oscar/exp/nlvr2/base/exp_nlvr_base_11123_testall_b24_0.00003_55_43_dp_0.3_mlp_abcj_best/stdout.txt). +```bash +python oscar/run_nlvr.py -j 4 --img_feature_dim 2054 --max_img_seq_length + 40 --data_dir datasets/nlvr2/ft_corpus --model_type bert --model_name_or_path pretrained_models/base-vg-labels/ep_107_1192087 + --task_name nlvr --do_lower_case --max_seq_length 55 --per_gpu_eval_batch_size + 64 --per_gpu_train_batch_size 72 --learning_rate 3e-05 --num_train_epochs 20 --output_dir + results --img_feature_type faster_r-cnn --data_label_type all --train_data_type + all --eval_data_type all --loss_type xe --save_epoch -1 --seed 88 --evaluate_during_training + --logging_steps -1 --drop_out 0.3 --do_train --weight_decay 0.05 --warmup_steps + 10000 --classifier mlp --cls_hidden_scale 3 --num_choice 2 --use_pair +``` + +Script to finetune for Oscar large model. + +Training logs: [eval_logs.json](https://biglmdiag.blob.core.windows.net/oscar/exp/nlvr2/large/large_1583307153868_14140/exp_rvln_large_ep55_1618k_b24_0.00002_seq55_img40_dp0.3_2mlp_wm5000_abcj/results/eval_logs.json), [output.txt](https://biglmdiag.blob.core.windows.net/oscar/exp/nlvr2/large/large_1583307153868_14140/exp_rvln_large_ep55_1618k_b24_0.00002_seq55_img40_dp0.3_2mlp_wm5000_abcj/stdout.txt).
+Final server results: [results.txt](https://biglmdiag.blob.core.windows.net/oscar/exp/nlvr2/large/large_1583307153868_14140/exp_nlvr_large_1583307153868_14140_testall_b24_0.00003_55_43_dp_0.3_mlp_abck/stdout.txt). +```bash +python oscar/run_nlvr.py -j 4 --img_feature_dim 2054 --max_img_seq_length + 40 --data_dir datasets/nlvr2/ft_corpus --model_type bert --model_name_or_path pretrained_models/large-vg-labels/ep_55_1617000 + --task_name nlvr --do_lower_case --max_seq_length 55 --per_gpu_eval_batch_size + 64 --per_gpu_train_batch_size 24 --learning_rate 3e-05 --num_train_epochs 20 --output_dir + results --img_feature_type faster_r-cnn --data_label_type all --train_data_type + all --eval_data_type all --loss_type xe --save_epoch -1 --seed 88 --evaluate_during_training + --logging_steps -1 --drop_out 0.3 --do_train --weight_decay 0.05 --warmup_steps + 5000 --classifier mlp --cls_hidden_scale 2 --num_choice 2 --use_pair +``` + + + +## Image Text Retrieval +Script to finetune for Oscar base model (4 V100 with 16G mem): +```bash +python oscar/retrieval.py \ + --model_name_or_path pretrained_models/base-vg-labels/ep_67_588997 \ + --do_train \ + --do_lower_case \ + --evaluate_during_training \ + --num_captions_per_img_val 20 \ + --eval_caption_index_file minival_caption_indexs_top20.pt \ + --per_gpu_train_batch_size 64 \ + --learning_rate 0.00002 \ + --num_train_epochs 20 \ + --weight_decay 0.05 \ + --save_steps 5000 \ + --add_od_labels \ + --od_label_type vg \ + --max_seq_length 70 \ + --output_dir output/ +``` + +Script to finetune for Oscar large model (8 V100 with 32G mem): +```bash +python oscar/run_retrieval.py \ + --model_name_or_path pretrained_models/large-vg-labels/ep_7_816000 \ + --do_train \ + --do_lower_case \ + --evaluate_during_training \ + --num_captions_per_img_val 20 \ + --eval_caption_index_file minival_caption_indexs_top20.pt \ + --per_gpu_train_batch_size 16 \ + --learning_rate 0.00001 \ + --num_train_epochs 30 \ + --save_steps 5000 \ + --add_od_labels \ + --od_label_type vg \ + --max_seq_length 70 \ + --output_dir output/ +``` + +Script to inference on COCO 1K test set: +```bash +python oscar/run_retrieval.py \ + --do_test \ + --do_eval \ + --test_split test \ + --num_captions_per_img_val 5 \ + --eval_img_keys_file test_img_keys_1k.tsv \ + --cross_image_eval \ + --per_gpu_eval_batch_size 64 \ + --eval_model_dir your_model_for_evaluation # could be base/large models. +``` + +Script to inference on COCO 5K test set: +```bash +python oscar/run_retrieval.py \ + --do_test \ + --do_eval \ + --test_split test \ + --num_captions_per_img_val 5 \ + --eval_img_keys_file test_img_keys.tsv \ + --cross_image_eval \ + --per_gpu_eval_batch_size 64 \ + --eval_model_dir your_model_for_evaluation # could be base/large models. 
+``` + + +## Image Captioning on COCO +Script to finetune for Oscar base model (4 V100 with 16G mem): +1) First train with cross-entropy loss: +```bash +python oscar/run_captioning.py \ + --model_name_or_path pretrained_models/base-vg-labels/ep_67_588997 \ + --do_train \ + --do_lower_case \ + --evaluate_during_training \ + --add_od_labels \ + --learning_rate 0.00003 \ + --per_gpu_train_batch_size 64 \ + --num_train_epochs 30 \ + --save_steps 5000 \ + --output_dir output/ +``` +2) Finetune with CIDEr optimization: +```bash +python oscar/run_captioning.py \ + --model_name_or_path your_checkpoint_from_cross_entropy \ + --do_train \ + --do_lower_case \ + --evaluate_during_training \ + --add_od_labels \ + --learning_rate 0.000005 \ + --per_gpu_train_batch_size 16 \ + --num_train_epochs 5 \ + --scst \ + --save_steps 2000 \ + --output_dir output/ +``` + +Script to finetune for Oscar large model (8 V100 with 32G mem): +1) First train with cross-entropy loss: +```bash +python oscar/run_captioning.py \ + --model_name_or_path pretrained_models/large-vg-labels/ep_7_816000 \ + --do_train \ + --do_lower_case \ + --evaluate_during_training \ + --add_od_labels \ + --learning_rate 0.00001 \ + --per_gpu_train_batch_size 32 \ + --num_train_epochs 30 \ + --save_steps 5000 \ + --output_dir output/ +``` +2) Finetune with CIDEr optimization: +```bash +python oscar/run_captioning.py \ + --model_name_or_path your_checkpoint_from_cross_entropy \ + --do_train \ + --do_lower_case \ + --evaluate_during_training \ + --add_od_labels \ + --learning_rate 0.000005 \ + --per_gpu_train_batch_size 8 \ + --num_train_epochs 5 \ + --scst \ + --save_steps 2000 \ + --output_dir output/ +``` + +Script to inference on COCO test set: +```bash +python oscar/run_captioning.py \ + --do_test \ + --do_eval \ + --test_yaml test.yaml \ + --per_gpu_eval_batch_size 64 \ + --num_beams 5 \ + --max_gen_length 20 \ + --eval_model_dir your_model_for_evaluation # could be bert base/large. +``` diff --git a/README.md b/README.md new file mode 100644 index 0000000..0b91642 --- /dev/null +++ b/README.md @@ -0,0 +1,49 @@ +# Oscar: Object-Semantics Aligned Pre-training for Vision-and-Language Tasks +## Updates +05/15/2020: Released pretrained models, datasets, and code for downstream tasks finetuning. + +## Introduction +This repository contains source code necessary to reproduce the results presented in the paper [Oscar: Object-Semantics Aligned Pre-training for Vision-Language Tasks](https://arxiv.org/abs/2004.06165). +We propose a new cross-modal pre-training method **Oscar** (Object-Semantics Aligned Pre-training). It leverages **object tags** detected in images as anchor points to significantly ease the learning of image-text alignments. We pre-train Oscar on the public corpus of 6.5 million text-image pairs, and fine-tune it on downstream tasks, creating new state-of-the-arts on six well-established vision-language understanding and generation tasks. For more on this project, see the [Microsoft Research Blog post](https://www.microsoft.com/en-us/research/blog). 
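+As a quick sketch of the model interface (the actual finetuning and inference scripts are in [MODEL_ZOO.md](MODEL_ZOO.md)), the snippet below pushes a caption-plus-object-tags token sequence together with a set of region features through `BertImgModel`. The checkpoint path, sequence lengths, and random tensors are placeholders, and the sketch assumes the checkpoint's `config.json` already carries the image-feature fields (`img_feature_dim`, `img_feature_type`); set them on the config manually otherwise.
+
+```python
+import torch
+from oscar.modeling.modeling_bert import BertImgModel
+
+# Placeholder checkpoint directory; see DOWNLOAD.md for the released models.
+model = BertImgModel.from_pretrained('pretrained_models/base-vg-labels/ep_67_588997')
+model.eval()
+
+text_len, num_regions = 35, 50
+input_ids = torch.randint(0, model.config.vocab_size, (1, text_len))       # caption + object-tag tokens
+img_feats = torch.rand(1, num_regions, model.config.img_feature_dim)       # Faster R-CNN region features (+ box geometry)
+attention_mask = torch.ones(1, text_len + num_regions, dtype=torch.long)   # one mask covering text and image regions
+
+with torch.no_grad():
+    sequence_output, pooled_output = model(input_ids, attention_mask=attention_mask, img_feats=img_feats)[:2]
+print(sequence_output.shape)  # (1, text_len + num_regions, hidden_size)
+```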
+ + + + +## Performance +Task | t2i | t2i | i2t | i2t | IC | IC | IC | IC | NoCaps | NoCaps | VQA | NLVR2 | +--------|-----|-----|-----|-----|-----|-----|------|------|--------|--------|----------|---------| +Metric | R@1 | R@5 | R@1 | R@5 | B@4 | M | C | S | C | S | test-std | test-P | +SoTA_S |39.2 | 68.0|56.6 | 84.5|38.9 |29.2 |129.8 | 22.4 | 61.5 | 9.2 | 70.90 | 53.50 | +SoTA_B |48.4 | 76.7|63.3 | 87.0|39.5 |29.3 |129.3 | 23.2 | 73.1 | 11.2 | 72.54 | 78.87 | +SoTA_L |51.7 | 78.4|66.6 | 89.4| - | - | - | - | - | - | 73.40 | 79.50 | +----- |--- |--- |--- |--- |--- |--- |--- |--- |--- |--- |--- |--- | +Oscar_B |54.0 | 80.8|70.0 | 91.1|40.5 |29.7 |137.6 | 22.8 | 78.8 | 11.7 | 73.44 | 78.36 | +Oscar_L |57.5 | 82.8|73.5 | 92.2|41.7 |30.6 |140.0 | 24.5 | 80.9 | 11.3 | 73.82 | 80.05 | +gain | 5.8 | 4.4| 6.9 | 2.8| 2.2 | 1.3 | 10.7 | 1.3 | 7.8 | 0.5 | 0.42 | 0.55 | + +t2i: text-to-image retrieval; i2t: image-to-text retrieval; IC: image captioning on COCO. + + +## Download +We released pre-trained models and datasets for downstream tasks. Please check [DOWNLOAD.md](DOWNLOAD.md) for details. + +## Installation +Check [INSTALL.md](INSTALL.md) for installation instructions. + +## Model Zoo +Check [MODEL_ZOO.md](MODEL_ZOO.md) for scripts to run each downstream tasks and the expected performance. + +## Citations +Please consider citing this paper if you use the code: +``` +@article{li2020oscar, + title={Oscar: Object-Semantics Aligned Pre-training for Vision-Language Tasks}, + author={Li, Xiujun and Yin, Xi and Li, Chunyuan and Hu, Xiaowei and Zhang, Pengchuan and Zhang, Lei and Wang, Lijuan and Hu, Houdong and Dong, Li and Wei, Furu and Choi, Yejin and Gao, Jianfeng}, + journal={arXiv preprint arXiv:2004.06165}, + year={2020} +} +``` + +## License +Oscar is released under the MIT license. See [LICENSE](LICENSE) for details. + diff --git a/SECURITY.md b/SECURITY.md new file mode 100644 index 0000000..f7b8998 --- /dev/null +++ b/SECURITY.md @@ -0,0 +1,41 @@ + + +## Security + +Microsoft takes the security of our software products and services seriously, which includes all source code repositories managed through our GitHub organizations, which include [Microsoft](https://github.com/Microsoft), [Azure](https://github.com/Azure), [DotNet](https://github.com/dotnet), [AspNet](https://github.com/aspnet), [Xamarin](https://github.com/xamarin), and [our GitHub organizations](https://opensource.microsoft.com/). + +If you believe you have found a security vulnerability in any Microsoft-owned repository that meets [Microsoft's definition of a security vulnerability](https://docs.microsoft.com/en-us/previous-versions/tn-archive/cc751383(v=technet.10)), please report it to us as described below. + +## Reporting Security Issues + +**Please do not report security vulnerabilities through public GitHub issues.** + +Instead, please report them to the Microsoft Security Response Center (MSRC) at [https://msrc.microsoft.com/create-report](https://msrc.microsoft.com/create-report). + +If you prefer to submit without logging in, send email to [secure@microsoft.com](mailto:secure@microsoft.com). If possible, encrypt your message with our PGP key; please download it from the [Microsoft Security Response Center PGP Key page](https://www.microsoft.com/en-us/msrc/pgp-key-msrc). + +You should receive a response within 24 hours. If for some reason you do not, please follow up via email to ensure we received your original message. Additional information can be found at [microsoft.com/msrc](https://www.microsoft.com/msrc). 
+ +Please include the requested information listed below (as much as you can provide) to help us better understand the nature and scope of the possible issue: + + * Type of issue (e.g. buffer overflow, SQL injection, cross-site scripting, etc.) + * Full paths of source file(s) related to the manifestation of the issue + * The location of the affected source code (tag/branch/commit or direct URL) + * Any special configuration required to reproduce the issue + * Step-by-step instructions to reproduce the issue + * Proof-of-concept or exploit code (if possible) + * Impact of the issue, including how an attacker might exploit the issue + +This information will help us triage your report more quickly. + +If you are reporting for a bug bounty, more complete reports can contribute to a higher bounty award. Please visit our [Microsoft Bug Bounty Program](https://microsoft.com/msrc/bounty) page for more details about our active programs. + +## Preferred Languages + +We prefer all communications to be in English. + +## Policy + +Microsoft follows the principle of [Coordinated Vulnerability Disclosure](https://www.microsoft.com/en-us/msrc/cvd). + + \ No newline at end of file diff --git a/coco_caption b/coco_caption new file mode 160000 index 0000000..de6f385 --- /dev/null +++ b/coco_caption @@ -0,0 +1 @@ +Subproject commit de6f385503ac9a4305a1dcdc39c02312f9fa13fc diff --git a/docs/oscar.PNG b/docs/oscar.PNG new file mode 100644 index 0000000..76c8757 Binary files /dev/null and b/docs/oscar.PNG differ diff --git a/docs/oscar_logo.png b/docs/oscar_logo.png new file mode 100644 index 0000000..e7aca84 Binary files /dev/null and b/docs/oscar_logo.png differ diff --git a/oscar/__init__.py b/oscar/__init__.py new file mode 100644 index 0000000..3dc1f76 --- /dev/null +++ b/oscar/__init__.py @@ -0,0 +1 @@ +__version__ = "0.1.0" diff --git a/oscar/modeling/__init__.py b/oscar/modeling/__init__.py new file mode 100644 index 0000000..3dc1f76 --- /dev/null +++ b/oscar/modeling/__init__.py @@ -0,0 +1 @@ +__version__ = "0.1.0" diff --git a/oscar/modeling/modeling_bert.py b/oscar/modeling/modeling_bert.py new file mode 100644 index 0000000..aee8cb3 --- /dev/null +++ b/oscar/modeling/modeling_bert.py @@ -0,0 +1,711 @@ +# Copyright (c) 2020 Microsoft Corporation. Licensed under the MIT license. + +from __future__ import absolute_import, division, print_function, unicode_literals +import logging +import math +import torch +from torch import nn +import torch.nn.functional as F +from torch.nn import CrossEntropyLoss, MSELoss +from transformers.pytorch_transformers.modeling_bert import (BertEmbeddings, + BertSelfAttention, BertAttention, BertEncoder, BertLayer, + BertSelfOutput, BertIntermediate, BertOutput, + BertPooler, BertLayerNorm, BertPreTrainedModel, + BertPredictionHeadTransform) +from .modeling_utils import CaptionPreTrainedModel +from ..utils.cbs import ConstrainedBeamSearch, select_best_beam_with_constraints + +logger = logging.getLogger(__name__) + + +class CaptionBertSelfAttention(BertSelfAttention): + """ + Modified from BertSelfAttention to add support for output_hidden_states. 
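+    In addition, `forward` accepts an optional `history_state` tensor; when it is given, keys and values are computed over the concatenation of the cached states and the current hidden states, while queries only cover the current step.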
+ """ + def __init__(self, config): + super(CaptionBertSelfAttention, self).__init__(config) + + def forward(self, hidden_states, attention_mask, head_mask=None, + history_state=None): + if history_state is not None: + x_states = torch.cat([history_state, hidden_states], dim=1) + mixed_query_layer = self.query(hidden_states) + mixed_key_layer = self.key(x_states) + mixed_value_layer = self.value(x_states) + else: + mixed_query_layer = self.query(hidden_states) + mixed_key_layer = self.key(hidden_states) + mixed_value_layer = self.value(hidden_states) + + query_layer = self.transpose_for_scores(mixed_query_layer) + key_layer = self.transpose_for_scores(mixed_key_layer) + value_layer = self.transpose_for_scores(mixed_value_layer) + + # Take the dot product between "query" and "key" to get the raw attention scores. + attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) + attention_scores = attention_scores / math.sqrt(self.attention_head_size) + # Apply the attention mask is (precomputed for all layers in BertModel forward() function) + attention_scores = attention_scores + attention_mask + + # Normalize the attention scores to probabilities. + attention_probs = nn.Softmax(dim=-1)(attention_scores) + + # This is actually dropping out entire tokens to attend to, which might + # seem a bit unusual, but is taken from the original Transformer paper. + attention_probs = self.dropout(attention_probs) + + # Mask heads if we want to + if head_mask is not None: + attention_probs = attention_probs * head_mask + + context_layer = torch.matmul(attention_probs, value_layer) + + context_layer = context_layer.permute(0, 2, 1, 3).contiguous() + new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) + context_layer = context_layer.view(*new_context_layer_shape) + + outputs = (context_layer, attention_probs) if self.output_attentions else (context_layer,) + return outputs + + +class CaptionBertAttention(BertAttention): + """ + Modified from BertAttention to add support for output_hidden_states. + """ + def __init__(self, config): + super(CaptionBertAttention, self).__init__(config) + self.self = CaptionBertSelfAttention(config) + self.output = BertSelfOutput(config) + + def forward(self, input_tensor, attention_mask, head_mask=None, + history_state=None): + self_outputs = self.self(input_tensor, attention_mask, head_mask, history_state) + attention_output = self.output(self_outputs[0], input_tensor) + outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them + return outputs + + +class CaptionBertEncoder(BertEncoder): + """ + Modified from BertEncoder to add support for output_hidden_states. 
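+    It also threads optional per-layer `encoder_history_states` into each `CaptionBertLayer`, so that states cached from earlier decoding steps can be reused during caption generation.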
+ """ + def __init__(self, config): + super(CaptionBertEncoder, self).__init__(config) + self.output_attentions = config.output_attentions + self.output_hidden_states = config.output_hidden_states + self.layer = nn.ModuleList([CaptionBertLayer(config) for _ in range(config.num_hidden_layers)]) + + def forward(self, hidden_states, attention_mask, head_mask=None, + encoder_history_states=None): + all_hidden_states = () + all_attentions = () + for i, layer_module in enumerate(self.layer): + if self.output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + history_state = None if encoder_history_states is None else encoder_history_states[i] + layer_outputs = layer_module( + hidden_states, attention_mask, head_mask[i], + history_state) + hidden_states = layer_outputs[0] + + if self.output_attentions: + all_attentions = all_attentions + (layer_outputs[1],) + + # Add last layer + if self.output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + outputs = (hidden_states,) + if self.output_hidden_states: + outputs = outputs + (all_hidden_states,) + if self.output_attentions: + outputs = outputs + (all_attentions,) + return outputs # outputs, (hidden states), (attentions) + + +class CaptionBertLayer(BertLayer): + """ + Modified from BertLayer to add support for output_hidden_states. + """ + def __init__(self, config): + super(CaptionBertLayer, self).__init__(config) + self.attention = CaptionBertAttention(config) + self.intermediate = BertIntermediate(config) + self.output = BertOutput(config) + + def forward(self, hidden_states, attention_mask, head_mask=None, + history_state=None): + attention_outputs = self.attention(hidden_states, attention_mask, + head_mask, history_state) + attention_output = attention_outputs[0] + intermediate_output = self.intermediate(attention_output) + layer_output = self.output(intermediate_output, attention_output) + outputs = (layer_output,) + attention_outputs[1:] # add attentions if we output them + return outputs + + +class BertImgModel(BertPreTrainedModel): + """ Expand from BertModel to handle image region features as input + """ + def __init__(self, config): + super(BertImgModel, self).__init__(config) + self.embeddings = BertEmbeddings(config) + self.encoder = CaptionBertEncoder(config) + self.pooler = BertPooler(config) + + self.img_dim = config.img_feature_dim + logger.info('BertImgModel Image Dimension: {}'.format(self.img_dim)) + self.img_feature_type = config.img_feature_type + if hasattr(config, 'use_img_layernorm'): + self.use_img_layernorm = config.use_img_layernorm + else: + self.use_img_layernorm = None + + if config.img_feature_type == 'dis_code': + self.code_embeddings = nn.Embedding(config.code_voc, config.code_dim, padding_idx=0) + self.img_embedding = nn.Linear(config.code_dim, self.config.hidden_size, bias=True) + elif config.img_feature_type == 'dis_code_t': # transpose + self.code_embeddings = nn.Embedding(config.code_voc, config.code_dim, padding_idx=0) + self.img_embedding = nn.Linear(config.code_size, self.config.hidden_size, bias=True) + elif config.img_feature_type == 'dis_code_scale': # scaled + self.input_embeddings = nn.Linear(config.code_dim, config.code_size, bias=True) + self.code_embeddings = nn.Embedding(config.code_voc, config.code_dim, padding_idx=0) + self.img_embedding = nn.Linear(config.code_dim, self.config.hidden_size, bias=True) + else: + self.img_embedding = nn.Linear(self.img_dim, self.config.hidden_size, bias=True) + self.dropout = 
nn.Dropout(config.hidden_dropout_prob) + if self.use_img_layernorm: + self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.img_layer_norm_eps) + + self.apply(self.init_weights) + + def _resize_token_embeddings(self, new_num_tokens): + old_embeddings = self.embeddings.word_embeddings + new_embeddings = self._get_resized_embeddings(old_embeddings, new_num_tokens) + self.embeddings.word_embeddings = new_embeddings + return self.embeddings.word_embeddings + + def _prune_heads(self, heads_to_prune): + """ Prunes heads of the model. + heads_to_prune: dict of {layer_num: list of heads to prune in this layer} + See base class PreTrainedModel + """ + for layer, heads in heads_to_prune.items(): + self.encoder.layer[layer].attention.prune_heads(heads) + + def forward(self, input_ids, token_type_ids=None, attention_mask=None, + position_ids=None, head_mask=None, img_feats=None, + encoder_history_states=None): + if attention_mask is None: + attention_mask = torch.ones_like(input_ids) + + if token_type_ids is None: + token_type_ids = torch.zeros_like(input_ids) + + # We create a 3D attention mask from a 2D tensor mask. + # Sizes are [batch_size, 1, 1, to_seq_length] + # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length] + # this attention mask is more simple than the triangular masking of causal attention + # used in OpenAI GPT, we just need to prepare the broadcast dimension here. + if attention_mask.dim() == 2: + extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2) + elif attention_mask.dim() == 3: + extended_attention_mask = attention_mask.unsqueeze(1) + else: + raise NotImplementedError + + # Since attention_mask is 1.0 for positions we want to attend and 0.0 for + # masked positions, this operation will create a tensor which is 0.0 for + # positions we want to attend and -10000.0 for masked positions. + # Since we are adding it to the raw scores before the softmax, this is + # effectively the same as removing these entirely. 
+ extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility + extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0 + + # Prepare head mask if needed + # 1.0 in head_mask indicate we keep the head + # attention_probs has shape bsz x n_heads x N x N + # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] + # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] + if head_mask is not None: + if head_mask.dim() == 1: + head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1) + head_mask = head_mask.expand(self.config.num_hidden_layers, -1, -1, -1, -1) + elif head_mask.dim() == 2: + head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1) # We can specify head_mask for each layer + # switch to float if needed + fp16 compatibility + head_mask = head_mask.to(dtype=next(self.parameters()).dtype) # switch to fload if need + fp16 compatibility + else: + head_mask = [None] * self.config.num_hidden_layers + + embedding_output = self.embeddings(input_ids, position_ids=position_ids, + token_type_ids=token_type_ids) + if encoder_history_states: + assert img_feats is None, "Cannot take image features while using encoder history states" + + if img_feats is not None: + if self.img_feature_type == 'dis_code': + code_emb = self.code_embeddings(img_feats) + img_embedding_output = self.img_embedding(code_emb) + elif self.img_feature_type == 'dis_code_t': # transpose + code_emb = self.code_embeddings(img_feats) + code_emb = code_emb.permute(0, 2, 1) + img_embedding_output = self.img_embedding(code_emb) + elif self.img_feature_type == 'dis_code_scale': # left scaled + code_emb = self.code_embeddings(img_feats) + img_embedding_output = self.img_embedding(code_emb) + else: + img_embedding_output = self.img_embedding(img_feats) + if self.use_img_layernorm: + img_embedding_output = self.LayerNorm(img_embedding_output) + + # add dropout on image embedding + img_embedding_output = self.dropout(img_embedding_output) + + # concatenate two embeddings + embedding_output = torch.cat((embedding_output, img_embedding_output), 1) + + encoder_outputs = self.encoder(embedding_output, + extended_attention_mask, head_mask=head_mask, + encoder_history_states=encoder_history_states) + sequence_output = encoder_outputs[0] + pooled_output = self.pooler(sequence_output) + + # add hidden_states and attentions if they are here + outputs = (sequence_output, pooled_output,) + encoder_outputs[1:] + return outputs + + +def instance_bce_with_logits(logits, labels, reduction='mean'): + assert logits.dim() == 2 + loss = F.binary_cross_entropy_with_logits(logits, labels, reduction=reduction) + if reduction == 'mean': + loss *= labels.size(1) + return loss + + +class ImageBertForSequenceClassification(BertPreTrainedModel): + """ + Modified from BertForSequenceClassification to support oscar training. 
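+    Accepts image region features via `img_feats` (routed to `BertImgModel` when `img_feature_dim > 0`) and supports `kl`, `bce`, and cross-entropy losses with a `linear` or `mlp` classification head.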
+ """ + def __init__(self, config): + super(ImageBertForSequenceClassification, self).__init__(config) + self.num_labels = config.num_labels + self.loss_type = config.loss_type + self.config = config + if config.img_feature_dim > 0: + self.bert = BertImgModel(config) + else: + self.bert = BertModel(config) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + if hasattr(config, 'classifier'): + if not hasattr(config, 'cls_hidden_scale'): + config.cls_hidden_scale = 2 + + if config.classifier == 'linear': + self.classifier = nn.Linear(config.hidden_size, + self.config.num_labels) + elif config.classifier == 'mlp': + self.classifier = nn.Sequential( + nn.Linear(config.hidden_size, config.hidden_size * config.cls_hidden_scale), + nn.ReLU(), + nn.Linear(config.hidden_size * config.cls_hidden_scale, self.config.num_labels) + ) + else: + self.classifier = nn.Linear(config.hidden_size, self.config.num_labels) # original + self.apply(self.init_weights) + + def init_code_embedding(self, em): + self.bert.code_embeddings.weight.data = em.clone() + + def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None, + position_ids=None, head_mask=None, img_feats=None): + outputs = self.bert(input_ids, position_ids=position_ids, token_type_ids=token_type_ids, + attention_mask=attention_mask, head_mask=head_mask, img_feats=img_feats) + pooled_output = outputs[1] + + pooled_output = self.dropout(pooled_output) + logits = self.classifier(pooled_output) + + outputs = (logits,) + outputs[2:] # add hidden states and attention if they are here + if labels is not None: + if self.num_labels == 1: # doing regression + loss_fct = MSELoss() + labels = labels.to(torch.float) + loss = loss_fct(logits.view(-1), labels.view(-1)) + else: + if self.loss_type == 'kl': + # KL Loss: https://github.com/uclanlp/visualbert/blob/master/pytorch_pretrained_bert/modeling.py + loss_fct = torch.nn.KLDivLoss(reduction="batchmean") + log_softmax = torch.nn.LogSoftmax(dim=-1) + reshaped_logits = logits.contiguous().view(-1, 3129) + reshaped_logits = log_softmax(reshaped_logits) + loss = loss_fct(reshaped_logits, labels.contiguous()) + elif self.loss_type == 'bce': # [VQA] + loss = instance_bce_with_logits(logits, labels) + else: # cross_entropy [GQA, Retrieval, Captioning] + loss_fct = CrossEntropyLoss() + loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) + outputs = (loss,) + outputs + return outputs + + +class ImageBertForMultipleChoice(BertPreTrainedModel): + """ + Modified from BertForMultipleChoice to support oscar training. 
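+    Each of the `num_choice` (text, image) inputs is encoded separately; the pooled outputs are concatenated and fed to the classifier (e.g. the NLVR2 setting with two image-text pairs).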
+ """ + def __init__(self, config): + super(ImageBertForMultipleChoice, self).__init__(config) + self.loss_type = config.loss_type + if config.img_feature_dim > 0: + self.bert = BertImgModel(config) # ImageBERT + else: + self.bert = BertModel(config) # original BERT + + self.dropout = nn.Dropout(config.hidden_dropout_prob) + if hasattr(config, 'classifier'): + if not hasattr(config, 'cls_hidden_scale'): config.cls_hidden_scale = 2 + if config.classifier == 'linear': + self.classifier = nn.Linear(config.num_choice*config.hidden_size, self.config.num_labels) + elif config.classifier == 'mlp': + self.classifier = nn.Sequential( + nn.Linear(config.num_choice*config.hidden_size, config.hidden_size*config.cls_hidden_scale), + nn.ReLU(), + nn.Linear(config.hidden_size*config.cls_hidden_scale, self.config.num_labels) + ) + else: + self.classifier = nn.Linear(config.num_choice*config.hidden_size, self.config.num_labels) # original + + self.apply(self.init_weights) + + def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None, + position_ids=None, head_mask=None, img_feats=None): + num_choices = input_ids.shape[1] + + flat_input_ids = input_ids.view(-1, input_ids.size(-1)) + flat_position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None + flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None + flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None + + flat_img_feats = img_feats.view(-1, img_feats.size(-2), img_feats.size(-1)) if img_feats is not None else None + + if isinstance(self.bert, BertImgModel): + outputs = self.bert(flat_input_ids, position_ids=flat_position_ids, token_type_ids=flat_token_type_ids, + attention_mask=flat_attention_mask, head_mask=head_mask, img_feats=flat_img_feats) + else: + outputs = self.bert(flat_input_ids, position_ids=flat_position_ids, token_type_ids=flat_token_type_ids, + attention_mask=flat_attention_mask, head_mask=head_mask) + pooled_output = outputs[1] + + pooled_output = self.dropout(pooled_output) + + # reshaped_pool_output + reshaped_pool_output = pooled_output.view(-1, self.config.num_choice*(pooled_output.shape[1])) + logits = self.classifier(reshaped_pool_output) + + outputs = (logits,) + outputs[2:] # add hidden states and attention if they are here + + if labels is not None: + if self.loss_type == 'bce': + loss = instance_bce_with_logits(logits, labels.view(-1, self.config.num_labels)) + else: + loss_fct = CrossEntropyLoss() + loss = loss_fct(logits, labels) + outputs = (loss,) + outputs + return outputs + + +class BertForImageCaptioning(CaptionPreTrainedModel): + """ + Bert for Image Captioning. 
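+    Training uses masked-token prediction over the caption positions, conditioned on image region features and object tags (`encode_forward`); at inference, captions are generated autoregressively with greedy/beam search or constrained beam search (`generate`).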
+ """ + def __init__(self, config): + super(BertForImageCaptioning, self).__init__(config) + self.config = config + self.bert = BertImgModel(config) + self.transform = BertPredictionHeadTransform(config) + bert_embedding_weight = self.bert.embeddings.word_embeddings.weight + self.decoder = nn.Linear(bert_embedding_weight.size(1), + bert_embedding_weight.size(0), bias=False) + self.loss = nn.CrossEntropyLoss(reduction='mean') + self.drop_worst_ratio = 0.2 + + def forward(self, *args, **kwargs): + is_decode = kwargs.get('is_decode', False) + if is_decode: + return self.generate(*args, **kwargs) + else: + return self.encode_forward(*args, **kwargs) + + def encode_forward(self, input_ids, img_feats, attention_mask, masked_pos, masked_ids=None, + token_type_ids=None, position_ids=None, head_mask=None, + is_training=True, encoder_history_states=None): + outputs = self.bert(input_ids, img_feats=img_feats, attention_mask=attention_mask, + position_ids=position_ids, token_type_ids=token_type_ids, + head_mask=head_mask, + encoder_history_states=encoder_history_states) + sequence_output = outputs[0][:, :masked_pos.shape[-1], :] + + if is_training: + # num_masks_in_batch * hidden_size + sequence_output_masked = sequence_output[masked_pos==1, :] + transformed_output_masked = self.transform(sequence_output_masked) + class_logits = self.decoder(transformed_output_masked) + masked_ids = masked_ids[masked_ids != 0] # remove padding masks + masked_loss = self.loss(class_logits.float(), masked_ids) + outputs = (masked_loss, class_logits,) + outputs[2:] + else: + class_logits = self.decoder(self.transform(sequence_output)) + outputs = (class_logits,) + outputs[2:] + return outputs + + def prepare_inputs_for_generation(self, curr_ids, past=None): + # NOTE: if attention is on, it should be the token used to mask words in training + mask_token_id = self.mask_token_id + batch_size = curr_ids.shape[0] + mask_ids = torch.full( + (batch_size, 1), mask_token_id, dtype=torch.long, device=curr_ids.device + ) + + def _slice(t, start, end): + if t is None: + return t + assert t.shape == (batch_size, self.max_seq_len + self.od_labels_len) + return t[:, start: end] + + def _remove_elements(t, start, end): + if t is None: + return t + assert t.shape == (batch_size, self.max_seq_len + self.od_labels_len) + return torch.cat([t[:, :start], t[:, end:]], dim=1) + + if past is None: + input_ids = torch.cat([curr_ids, mask_ids], dim=1) + + curr_len = input_ids.shape[1] + full_len = self.max_seq_len + self.od_labels_len + self.img_seq_len + assert self.full_attention_mask.shape == (batch_size, + full_len, full_len) + + def _remove_rows_cols(t, row_start, row_end, col_start, col_end): + t00 = t[:, :row_start, :col_start] + t01 = t[:, :row_start, col_end:] + t10 = t[:, row_end:, :col_start] + t11 = t[:, row_end:, col_end:] + res = torch.cat([torch.cat([t00, t01], dim=2), torch.cat([t10, t11], + dim=2)], dim=1) + assert res.shape == (t.shape[0], t.shape[1]-row_end+row_start, + t.shape[2]-col_end+col_start) + return res + + seq_start = curr_len + seq_end = self.max_seq_len + attention_mask = _remove_rows_cols(self.full_attention_mask, seq_start, + seq_end, seq_start, seq_end) + + masked_pos = _remove_elements(self.full_masked_pos, seq_start, seq_end) + token_type_ids = _remove_elements(self.full_token_type_ids, seq_start, seq_end) + position_ids = _remove_elements(self.full_position_ids, seq_start, seq_end) + img_feats = self.img_feats + + if self.add_od_labels: + assert self.od_label_ids.shape[1] == self.od_labels_len + input_ids = 
torch.cat([input_ids, self.od_label_ids], dim=1) + else: + last_token = curr_ids[:, -1:] + # The representation of last token should be re-computed, because + # it depends on both self-attention context and input tensor + input_ids = torch.cat([last_token, mask_ids], dim=1) + start_pos = curr_ids.shape[1] - 1 + end_pos = start_pos + input_ids.shape[1] + masked_pos = _slice(self.full_masked_pos, start_pos, end_pos) + token_type_ids = _slice(self.full_token_type_ids, start_pos, end_pos) + position_ids = _slice(self.full_position_ids, start_pos, end_pos) + + img_feats = None + assert past[0].shape[0] == batch_size + if self.prev_encoded_layers is None: + assert start_pos == 1 # the first token after BOS + assert past[0].shape[1] == 2 + self.od_labels_len + self.img_seq_len + # reorder to [od_labels, img_feats, sentence] + self.prev_encoded_layers = [ + torch.cat([x[:, 2:, :], x[:, :start_pos,:]], dim=1) + for x in past] + s2s = self.full_attention_mask[:, :self.max_seq_len, + :self.max_seq_len] + s2i = self.full_attention_mask[:, :self.max_seq_len, + self.max_seq_len:] + i2s = self.full_attention_mask[:, self.max_seq_len:, + :self.max_seq_len] + i2i = self.full_attention_mask[:, self.max_seq_len:, + self.max_seq_len:] + self.full_attention_mask = torch.cat( + [torch.cat([i2i, i2s], dim=2), + torch.cat([s2i, s2s], dim=2)], + dim=1) + else: + assert start_pos > 1 + assert past[0].shape[1] == 2 + self.prev_encoded_layers = [torch.cat([x, p[:, :-1, :]], dim=1) + for x, p in zip(self.prev_encoded_layers, past)] + + attention_mask = self.full_attention_mask[:, + self.od_labels_len+self.img_seq_len+start_pos: self.od_labels_len+self.img_seq_len+end_pos, + :self.od_labels_len+self.img_seq_len+end_pos] + + return {'input_ids': input_ids, 'img_feats': img_feats, + 'masked_pos': masked_pos, 'attention_mask': attention_mask, + 'token_type_ids': token_type_ids, 'position_ids': position_ids, + 'is_training': False, + 'encoder_history_states': self.prev_encoded_layers} + + def get_output_embeddings(self): + return self.decoder + + def generate(self, img_feats, attention_mask, masked_pos, token_type_ids=None, + position_ids=None, head_mask=None, input_ids=None, max_length=None, + do_sample=None, num_beams=None, temperature=None, top_k=None, top_p=None, + repetition_penalty=None, bos_token_id=None, pad_token_id=None, + eos_token_ids=None, mask_token_id=None, length_penalty=None, num_return_sequences=None, + num_keep_best=1, is_decode=None, + add_od_labels=False, od_labels_start_posid=None, + use_cbs=False, fsm=None, num_constraints=None, + min_constraints_to_satisfy=None, use_hypo=False, + ): + """ Generates captions given image features + """ + assert is_decode + batch_size = img_feats.shape[0] + self.img_seq_len = img_feats.shape[1] + self.max_seq_len = max_length + self.mask_token_id = mask_token_id + self.prev_encoded_layers = None + # NOTE: num_keep_best is not equavilant to num_return_sequences + # num_keep_best is the number of hypotheses to keep in beam search + # num_return_sequences is the repeating times of input, coupled with + # do_sample=True can generate more than one samples per image + self.num_keep_best = num_keep_best + + vocab_size = self.config.vocab_size + if not use_cbs: + num_fsm_states = 1 + else: + b, num_fsm_states, f1, v = fsm.shape + assert b==batch_size and v==vocab_size and f1==num_fsm_states + + self.add_od_labels = add_od_labels + # avoid position_ids collision of caption and od labels + self.od_labels_start_posid = max(od_labels_start_posid, self.max_seq_len) + if 
self.add_od_labels: + # get od labels part from input_ids + assert input_ids.shape[0] == batch_size + od_label_ids = input_ids[:, self.max_seq_len:] + self.od_labels_len = input_ids.shape[1] - self.max_seq_len + self.od_label_ids = self._expand_for_beams(od_label_ids, num_beams, + num_fsm_states) + input_ids = None + else: + self.od_labels_len = 0 + self.od_label_ids = None + assert input_ids.shape == (batch_size, self.max_seq_len) + input_ids = None + + if input_ids is None: + input_ids = torch.full( + (batch_size, 1), bos_token_id, dtype=torch.long, device=next(self.parameters()).device + ) + else: + assert input_ids.dim() == 2, "Input prompt should be of shape (batch_size, sequence length)." + assert input_ids.shape[0] == batch_size, "Input batch size must match image features" + + if position_ids is None: + position_ids = torch.arange(self.max_seq_len, dtype=torch.long, device=input_ids.device) + posids_len = self.max_seq_len + if self.add_od_labels: + od_labels_posids = torch.arange( + self.od_labels_start_posid, + self.od_labels_start_posid + self.od_labels_len, dtype=torch.long, device=input_ids.device) + position_ids = torch.cat([position_ids, od_labels_posids]) + posids_len += self.od_labels_len + position_ids = position_ids.unsqueeze(0).expand([batch_size, posids_len]) + + cur_len = input_ids.shape[1] + assert num_return_sequences == 1, 'not supported num_return_sequences != 1' + effective_batch_size = batch_size + + self.img_feats = self._expand_for_beams(img_feats, num_beams, num_fsm_states) + self.full_attention_mask = self._expand_for_beams(attention_mask, num_beams, num_fsm_states) + self.full_masked_pos = self._expand_for_beams(masked_pos, num_beams, num_fsm_states) + self.full_token_type_ids = self._expand_for_beams(token_type_ids, num_beams, num_fsm_states) + self.full_position_ids = self._expand_for_beams(position_ids, num_beams, num_fsm_states) + self.full_head_mask = self._expand_for_beams(head_mask, num_beams, num_fsm_states) + + if not use_cbs: + if num_beams > 1: + output = self._generate_beam_search( + input_ids, + cur_len, + max_length, + do_sample, + temperature, + top_k, + top_p, + repetition_penalty, + pad_token_id, + eos_token_ids, + effective_batch_size, + length_penalty, + num_beams, + vocab_size, + ) + else: + output = self._generate_no_beam_search( + input_ids, + cur_len, + max_length, + do_sample, + temperature, + top_k, + top_p, + repetition_penalty, + pad_token_id, + eos_token_ids, + effective_batch_size, + ) + else: + assert self.num_keep_best == 1, 'not supported n_best > 1 for CBS' + searcher = ConstrainedBeamSearch(eos_token_ids, max_length, + num_beams, use_hypo=use_hypo) + curr_ids, sum_logprobs = searcher.search( + input_ids, + None, + self._decode_step, + fsm, + ) + curr_ids, sum_logprobs = select_best_beam_with_constraints( + curr_ids, + sum_logprobs, + num_constraints, + min_constraints_to_satisfy, + ) + # (batch_size, n_best, max_len), (batch_size, n_best) + output = (curr_ids.unsqueeze(1), sum_logprobs.unsqueeze(1)) + + return output + + def _expand_for_beams(self, x, num_beams, num_fsm_states): + num_expand = num_beams * num_fsm_states + if x is None or num_expand == 1: + return x + + input_shape = list(x.shape) + expanded_shape = input_shape[:1] + [num_expand] + input_shape[1:] + x = x.unsqueeze(1).expand(expanded_shape) + # (batch_size * num_beams, ...) 
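+        # e.g. (B, L, D) -> (B, num_expand, L, D) -> (B * num_expand, L, D)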
+ x = x.contiguous().view([input_shape[0] * num_expand] + input_shape[1:]) + return x + + def _do_output_past(self, outputs): + return len(outputs) > 1 diff --git a/oscar/modeling/modeling_utils.py b/oscar/modeling/modeling_utils.py new file mode 100644 index 0000000..0763a60 --- /dev/null +++ b/oscar/modeling/modeling_utils.py @@ -0,0 +1,671 @@ +# Copyright (c) 2020 Microsoft Corporation. Licensed under the MIT license. + +from __future__ import absolute_import, division, print_function, unicode_literals + +import torch +import torch.nn.functional as F + +from transformers.pytorch_transformers.modeling_bert import (BertConfig, + load_tf_weights_in_bert, BERT_PRETRAINED_MODEL_ARCHIVE_MAP) +from transformers.pytorch_transformers.modeling_utils import PreTrainedModel + + +class CaptionPreTrainedModel(PreTrainedModel): + """ Expand base class for image captioning modeling. + """ + config_class = BertConfig + pretrained_model_archive_map = BERT_PRETRAINED_MODEL_ARCHIVE_MAP + load_tf_weights = load_tf_weights_in_bert + base_model_prefix = 'bert' + + def __init__(self, config, *inputs, **kwargs): + super(CaptionPreTrainedModel, self).__init__(config, *inputs, **kwargs) + + def prepare_inputs_for_generation(self, input_ids, **kwargs): + return {"input_ids": input_ids} + + def _do_output_past(self, outputs): + has_output_past = hasattr(self.config, "output_past") and self.config.output_past + has_mem_len = hasattr(self.config, "mem_len") and self.config.mem_len + + if has_output_past and not has_mem_len and len(outputs) > 1: + return True + elif has_mem_len and self.config.mem_len > 0 and len(outputs) > 1: + return True + + return False + + def generate( + self, + input_ids=None, + max_length=None, + do_sample=None, + num_beams=None, + temperature=None, + top_k=None, + top_p=None, + repetition_penalty=None, + bos_token_id=None, + pad_token_id=None, + eos_token_ids=None, + length_penalty=None, + num_return_sequences=None, + ): + r""" Generates sequences for models with a LM head. The method currently supports greedy or penalized greedy decoding, sampling with top-k or nucleus sampling + and beam-search. + + Adapted in part from `Facebook's XLM beam search code`_. + + .. _`Facebook's XLM beam search code`: + https://github.com/facebookresearch/XLM/blob/9e6f6814d17be4fe5b15f2e6c43eb2b2d76daeb4/src/model/transformer.py#L529 + + + Parameters: + + input_ids: (`optional`) `torch.LongTensor` of shape `(batch_size, sequence_length)` + The sequence used as a prompt for the generation. If `None` the method initializes + it as an empty `torch.LongTensor` of shape `(1,)`. + + max_length: (`optional`) int + The max length of the sequence to be generated. Between 1 and infinity. Default to 20. + + do_sample: (`optional`) bool + If set to `False` greedy decoding is used. Otherwise sampling is used. Default to greedy sampling. + + num_beams: (`optional`) int + Number of beams for beam search. Must be between 1 and infinity. 1 means no beam search. Default to 1. + + temperature: (`optional`) float + The value used to module the next token probabilities. Must be strictely positive. Default to 1.0. + + top_k: (`optional`) int + The number of highest probability vocabulary tokens to keep for top-k-filtering. Between 1 and infinity. Default to 50. + + top_p: (`optional`) float + The cumulative probability of parameter highest probability vocabulary tokens to keep for nucleus sampling. Must be between 0 and 1. Default to 1. + + repetition_penalty: (`optional`) float + The parameter for repetition penalty. 
Between 1.0 and infinity. 1.0 means no penalty. Default to 1.0. + + bos_token_id: (`optional`) int + Beginning of sentence token if no prompt is provided. Default to 0. + + eos_token_ids: (`optional`) int or list of int + End of sequence token or list of tokens to stop the generation. Default to 0. + length_penalty: (`optional`) float + Exponential penalty to the length. Default to 1. + + num_return_sequences: (`optional`) int + The number of independently computed returned sequences for each element in the batch. Default to 1. + + Examples:: + + tokenizer = AutoTokenizer.from_pretrained('distilgpt2') # Initialize tokenizer + model = AutoModelWithLMHead.from_pretrained('distilgpt2') # Download model and configuration from S3 and cache. + outputs = model.generate(max_length=40, bos_token_id=tokenizer.bos_token_id, eos_token_ids=tokenizer.eos_token_id) # do greedy decoding without beam search + print('Generated: {}'.format(tokenizer.decode(outputs[0], skip_special_tokens=True))) + + tokenizer = AutoTokenizer.from_pretrained('openai-gpt') # Initialize tokenizer + model = AutoModelWithLMHead.from_pretrained('openai-gpt') # Download model and configuration from S3 and cache. + input_context = 'The dog' + input_ids = torch.tensor(tokenizer.encode(input_context)).unsqueeze(0) # encode input context + outputs = model.generate(input_ids=input_ids, do_sample=True, num_beams=5, num_return_sequences=3, temperature=1.5) # generate 3 independent sequences using beam search decoding (5 beams) with sampling from initial context 'The dog' + for i in range(3): # 3 output sequences were generated + print('Generated {}: {}'.format(i, tokenizer.decode(outputs[0][i], skip_special_tokens=True))) + + tokenizer = AutoTokenizer.from_pretrained('distilgpt2') # Initialize tokenizer + model = AutoModelWithLMHead.from_pretrained('distilgpt2') # Download model and configuration from S3 and cache. + input_context = 'The dog' + input_ids = torch.tensor(tokenizer.encode(input_context)).unsqueeze(0) # encode input context + outputs = model.generate(input_ids=input_ids, max_length=40, temperature=0.7, bos_token_id=tokenizer.bos_token_id, eos_token_ids=tokenizer.eos_token_id, num_beams=3) # generate sequences using greedy beam search decoding (3 beams) + print('Generated: {}'.format(tokenizer.decode(outputs[0], skip_special_tokens=True))) + + tokenizer = AutoTokenizer.from_pretrained('ctrl') # Initialize tokenizer + model = AutoModelWithLMHead.from_pretrained('ctrl') # Download model and configuration from S3 and cache. + input_context = 'Legal My neighbor is' # "Legal" is one of the control codes for ctrl + input_ids = torch.tensor(tokenizer.encode(input_context)).unsqueeze(0) # encode input context + outputs = model.generate(input_ids=input_ids, max_length=50, temperature=0.7, repetition_penalty=1.2) # generate sequences using using greedy search + print('Generated: {}'.format(tokenizer.decode(outputs[0], skip_special_tokens=True))) + + """ + + # We cannot generate if the model does not have a LM head + if self.get_output_embeddings() is None: + raise AttributeError( + "You tried to generate sequences with a model that does not have a LM Head." + "Please use another model class (e.g. 
`OpenAIGPTLMHeadModel`, `XLNetLMHeadModel`, `GPT2LMHeadModel`, `CTRLLMHeadModel`, `T5WithLMHeadModel`, `TransfoXLLMHeadModel`)" + ) + + max_length = max_length if max_length is not None else self.config.max_length + do_sample = do_sample if do_sample is not None else self.config.do_sample + num_beams = num_beams if num_beams is not None else self.config.num_beams + temperature = temperature if temperature is not None else self.config.temperature + top_k = top_k if top_k is not None else self.config.top_k + top_p = top_p if top_p is not None else self.config.top_p + repetition_penalty = repetition_penalty if repetition_penalty is not None else self.config.repetition_penalty + bos_token_id = bos_token_id if bos_token_id is not None else self.config.bos_token_id + pad_token_id = pad_token_id if pad_token_id is not None else self.config.pad_token_id + eos_token_ids = eos_token_ids if eos_token_ids is not None else self.config.eos_token_ids + length_penalty = length_penalty if length_penalty is not None else self.config.length_penalty + num_return_sequences = ( + num_return_sequences if num_return_sequences is not None else self.config.num_return_sequences + ) + + if input_ids is not None: + batch_size = input_ids.shape[0] # overriden by the input batch_size + else: + batch_size = 1 + if isinstance(eos_token_ids, int): + eos_token_ids = [eos_token_ids] + + assert isinstance(max_length, int) and max_length > 0, "`max_length` should be a strictely positive integer." + assert isinstance(do_sample, bool), "`do_sample` should be a boolean." + assert isinstance(num_beams, int) and num_beams > 0, "`num_beams` should be a strictely positive integer." + assert temperature > 0, "`temperature` should be strictely positive." + assert isinstance(top_k, int) and top_k >= 0, "`top_k` should be a positive integer." + assert 0 <= top_p <= 1, "`top_p` should be between 0 and 1." + assert repetition_penalty >= 1.0, "`repetition_penalty` should be >= 1." + assert isinstance(bos_token_id, int) and bos_token_id >= 0, "`bos_token_id` should be a positive integer." + assert isinstance(pad_token_id, int) and pad_token_id >= 0, "`pad_token_id` should be a positive integer." + assert isinstance(eos_token_ids, (list, tuple)) and ( + e >= 0 for e in eos_token_ids + ), "`eos_token_ids` should be a positive integer or a list/tuple of positive integers." + assert length_penalty > 0, "`length_penalty` should be strictely positive." + assert ( + isinstance(num_return_sequences, int) and num_return_sequences > 0 + ), "`num_return_sequences` should be a strictely positive integer." + + if input_ids is None: + input_ids = torch.full( + (batch_size, 1), bos_token_id, dtype=torch.long, device=next(self.parameters()).device + ) + else: + assert input_ids.dim() == 2, "Input prompt should be of shape (batch_size, sequence length)." 
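+        # Worked illustration (toy numbers, not part of the original code) of the
+        # num_return_sequences expansion a few lines below: with batch_size=2,
+        # num_return_sequences=3 and a prompt of length cur_len=1, input_ids of shape (2, 1)
+        # is expanded via
+        #     input_ids.unsqueeze(1).expand(2, 3, 1).contiguous().view(6, 1)
+        # so every prompt is decoded three times; the outputs are folded back to
+        # (batch_size, num_return_sequences, -1) before being returned.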
+ + # current position and vocab size + cur_len = input_ids.shape[1] + vocab_size = self.config.vocab_size + + if num_return_sequences != 1: + # Expand input to num return sequences + input_ids = input_ids.unsqueeze(1).expand(batch_size, num_return_sequences, cur_len) + input_ids = input_ids.contiguous().view( + batch_size * num_return_sequences, cur_len + ) # (batch_size * num_return_sequences, cur_len) + effective_batch_size = batch_size * num_return_sequences + else: + effective_batch_size = batch_size + + if num_beams > 1: + output = self._generate_beam_search( + input_ids, + cur_len, + max_length, + do_sample, + temperature, + top_k, + top_p, + repetition_penalty, + pad_token_id, + eos_token_ids, + effective_batch_size, + length_penalty, + num_beams, + vocab_size, + ) + else: + output = self._generate_no_beam_search( + input_ids, + cur_len, + max_length, + do_sample, + temperature, + top_k, + top_p, + repetition_penalty, + pad_token_id, + eos_token_ids, + effective_batch_size, + ) + + if num_return_sequences != 1: + for i in range(len(output)): + output[i] = output[i].view(batch_size, num_return_sequences, -1) + return output + + def _decode_step(self, input_ids, past): + model_inputs = self.prepare_inputs_for_generation(input_ids, past=past) + outputs = self(**model_inputs) # (batch_size * num_beams, cur_len, vocab_size) + token_len = outputs[0].shape[1] + if self.od_labels_len == 0: + next_token_idx = token_len - 1 + else: + if token_len == 2: + assert self._do_output_past(outputs) + next_token_idx = 1 + else: + next_token_idx = token_len - self.od_labels_len - 1 + + next_token_logits = outputs[0][:, next_token_idx, :] # (batch_size * num_beams, vocab_size) + assert outputs[0].shape[1] == model_inputs['input_ids'].shape[1] + + # if model has past, then set the past variable to speed up decoding + if self._do_output_past(outputs): + past = outputs[1] + return next_token_logits, past + + def _generate_no_beam_search( + self, + input_ids, + cur_len, + max_length, + do_sample, + temperature, + top_k, + top_p, + repetition_penalty, + pad_token_id, + eos_token_ids, + batch_size, + ): + """ Generate sequences for each example without beam search (num_beams == 1). + All returned sequence are generated independantly. 
+ """ + assert self.num_keep_best == 1, 'cannot generate >1 sentences in greedy search' + # current position / max lengths / length of generated sentences / unfinished sentences + unfinished_sents = [] + cur_unfinished = input_ids.new(batch_size).fill_(1) + + # log of scores for each sentence in the batch + logprobs = [] + + past = None + + while cur_len < max_length: + model_inputs = self.prepare_inputs_for_generation(input_ids, past=past) + outputs = self(**model_inputs) + if cur_len == 1: + token_len = 2 + self.od_labels_len + next_token_idx = 1 + else: + assert cur_len > 1 + if not self._do_output_past(outputs): + token_len = cur_len + 1 + self.od_labels_len + next_token_idx = cur_len + else: + token_len = 2 + next_token_idx = 1 + + assert outputs[0].shape[1] == token_len + next_token_logits = outputs[0][:, next_token_idx, :] + + # if model has past, then set the past variable to speed up decoding + if self._do_output_past(outputs): + past = outputs[1] + + # repetition penalty from CTRL paper (https://arxiv.org/abs/1909.05858) + if repetition_penalty != 1.0: + for i in range(batch_size): + for previous_token in set(input_ids[i].tolist()): + # if score < 0 then repetition penalty has to multiplied to reduce the previous token probability + if next_token_logits[i, previous_token] < 0: + next_token_logits[i, previous_token] *= repetition_penalty + else: + next_token_logits[i, previous_token] /= repetition_penalty + + if do_sample: + # Temperature (higher temperature => more likely to sample low probability tokens) + if temperature != 1.0: + next_token_logits = next_token_logits / temperature + # Top-p/top-k filtering + next_token_logits = top_k_top_p_filtering(next_token_logits, top_k=top_k, top_p=top_p) + # Sample + next_token = torch.multinomial(F.softmax(next_token_logits, dim=-1), num_samples=1).squeeze(1) + else: + # Greedy decoding + next_token = torch.argmax(next_token_logits, dim=-1) + + # Compute scores + _scores = F.log_softmax(next_token_logits, dim=-1) # (batch_size, vocab_size) + _scores = torch.gather(_scores, -1, next_token.unsqueeze(-1)) # (batch_size, 1) + logprobs.append(_scores) # (batch_size, 1) + unfinished_sents.append(cur_unfinished) + + # update generations and finished sentences + tokens_to_add = next_token * cur_unfinished + pad_token_id * (1 - cur_unfinished) + input_ids = torch.cat([input_ids, tokens_to_add.unsqueeze(-1)], dim=-1) + + #for t in input_ids: + #print(self.tokenizer.convert_ids_to_tokens(t.tolist())) + + for eos_token_id in eos_token_ids: + cur_unfinished = cur_unfinished.mul(tokens_to_add.ne(eos_token_id).long()) + cur_len = cur_len + 1 + + # stop when there is a in each sentence, or if we exceed the maximul length + if cur_unfinished.max() == 0: + break + + # add eos_token_ids to unfinished sentences + if cur_len == max_length: + input_ids[:, -1].masked_fill_(cur_unfinished.to(dtype=torch.bool), eos_token_ids[0]) + + logprobs = torch.cat(logprobs, dim=1) + unfinished_sents = torch.stack(unfinished_sents, dim=1).float() + sum_logprobs = (logprobs * unfinished_sents).sum(dim=1) + + # pad to the same length, otherwise DataParallel will give error + pad_len = max_length - input_ids.shape[1] + if pad_len > 0: + padding_ids = input_ids.new(batch_size, pad_len).fill_(pad_token_id) + input_ids = torch.cat([input_ids, padding_ids], dim=1) + + # (batch_size, n_best, max_len), (batch_size, n_best) + return input_ids.unsqueeze(1), sum_logprobs.unsqueeze(1) + + def _generate_beam_search( + self, + input_ids, + cur_len, + max_length, + do_sample, + 
temperature, + top_k, + top_p, + repetition_penalty, + pad_token_id, + eos_token_ids, + batch_size, + length_penalty, + num_beams, + vocab_size, + ): + """ Generate sequences for each example with beam search. + """ + # Expand input to num beams + input_ids = input_ids.unsqueeze(1).expand(batch_size, num_beams, cur_len) + input_ids = input_ids.contiguous().view(batch_size * num_beams, cur_len) # (batch_size * num_beams, cur_len) + + # generated hypotheses + num_keep_best = self.num_keep_best + generated_hyps = [ + BeamHypotheses(num_keep_best, max_length, length_penalty, early_stopping=False) for _ in range(batch_size) + ] + # NOTE: Expand >1 words to leave some spare tokens to keep the + # beam size, because some sentences may end here and cannot expand + # in the next level + TOPN_PER_BEAM = 2 + + # scores for each sentence in the beam + beam_scores = torch.zeros((batch_size, num_beams), dtype=torch.float, device=input_ids.device) + beam_scores[:, 1:] = -1e9 + beam_scores = beam_scores.view(-1) # shape (batch_size * num_beams,) + + # cache compute states + past = None + + # done sentences + done = [False for _ in range(batch_size)] + + while cur_len < max_length: + model_inputs = self.prepare_inputs_for_generation(input_ids, past=past) + outputs = self(**model_inputs) # (batch_size * num_beams, cur_len, vocab_size) + if cur_len == 1: + token_len = 2 + self.od_labels_len + next_token_idx = 1 + else: + assert cur_len > 1 + if not self._do_output_past(outputs): + token_len = cur_len + 1 + self.od_labels_len + next_token_idx = cur_len + else: + token_len = 2 + next_token_idx = 1 + + assert outputs[0].shape[1] == token_len + scores = outputs[0][:, next_token_idx, :] # (batch_size * num_beams, vocab_size) + assert outputs[0].shape[1] == model_inputs['input_ids'].shape[1] + + # if model has past, then set the past variable to speed up decoding + if self._do_output_past(outputs): + past = outputs[1] + + # repetition penalty (from CTRL paper https://arxiv.org/abs/1909.05858) + if repetition_penalty != 1.0: + for i in range(batch_size * num_beams): + for previous_token in set(input_ids[i].tolist()): + # if score < 0 then repetition penalty has to multiplied to reduce the previous token probability + if scores[i, previous_token] < 0: + scores[i, previous_token] *= repetition_penalty + else: + scores[i, previous_token] /= repetition_penalty + + if do_sample: + # Temperature (higher temperature => more likely to sample low probability tokens) + if temperature != 1.0: + scores = scores / temperature + # Top-p/top-k filtering + scores = top_k_top_p_filtering( + scores, top_k=top_k, top_p=top_p, min_tokens_to_keep=2 + ) # (batch_size * num_beams, vocab_size) + # Sample [TOPN_PER_BEAM] next words for each beam (so we have some spare tokens and match output of greedy beam search) + next_words = torch.multinomial(F.softmax(scores, dim=-1), + num_samples=TOPN_PER_BEAM) # (batch_size * num_beams, TOPN_PER_BEAM) + # Compute next scores + _scores = F.log_softmax(scores, dim=-1) # (batch_size * num_beams, vocab_size) + _scores = torch.gather(_scores, -1, next_words) # (batch_size * num_beams, TOPN_PER_BEAM) + next_scores = _scores + beam_scores[:, None].expand_as(_scores) # (batch_size * num_beams, TOPN_PER_BEAM) + # Match shape of greedy beam search + beam_indices = torch.arange(num_beams) * vocab_size + beam_indices = beam_indices.repeat(batch_size, TOPN_PER_BEAM).to(next_words.device) + next_words = next_words.view(batch_size, TOPN_PER_BEAM * num_beams) # (batch_size, TOPN_PER_BEAM * num_beams) + next_words 
= next_words + beam_indices + next_scores = next_scores.view(batch_size, TOPN_PER_BEAM * num_beams) # (batch_size, TOPN_PER_BEAM * num_beams) + else: + # do greedy beam search + scores = F.log_softmax(scores, dim=-1) # (batch_size * num_beams, vocab_size) + assert scores.size() == (batch_size * num_beams, vocab_size) + # Add the log prob of the new beams to the log prob of the beginning of the sequence (sum of logs == log of the product) + _scores = scores + beam_scores[:, None].expand_as(scores) # (batch_size * num_beams, vocab_size) + # re-organize to group the beam together (we are keeping top hypothesis accross beams) + _scores = _scores.view(batch_size, num_beams * vocab_size) # (batch_size, num_beams * vocab_size) + next_scores, next_words = torch.topk(_scores, TOPN_PER_BEAM * num_beams, dim=1, largest=True, sorted=True) + + assert next_scores.size() == next_words.size() == (batch_size, TOPN_PER_BEAM * num_beams) + + # next batch beam content + # list of (batch_size * num_beams) tuple(next hypothesis score, next word, current position in the batch) + next_batch_beam = [] + + # for each sentence + for batch_ex in range(batch_size): + + # if we are done with this sentence + done[batch_ex] = done[batch_ex] or generated_hyps[batch_ex].is_done(next_scores[batch_ex].max().item()) + if done[batch_ex]: + next_batch_beam.extend([(0, pad_token_id, 0)] * num_beams) # pad the batch + continue + + # next sentence beam content + next_sent_beam = [] + + # next words for this sentence + for idx, score in zip(next_words[batch_ex], next_scores[batch_ex]): + + # get beam and word IDs + beam_id = idx // vocab_size + word_id = idx % vocab_size + + # end of sentence, or next word + if word_id.item() in eos_token_ids or cur_len + 1 == max_length: + generated_hyps[batch_ex].add( + input_ids[batch_ex * num_beams + beam_id, :cur_len].clone(), score.item() + ) + else: + next_sent_beam.append((score, word_id, batch_ex * num_beams + beam_id)) + + # the beam for next step is full + if len(next_sent_beam) == num_beams: + break + + # update next beam content + if cur_len + 1 == max_length: + assert len(next_sent_beam) == 0 + else: + assert len(next_sent_beam) == num_beams + + if len(next_sent_beam) == 0: + next_sent_beam = [(0, pad_token_id, 0)] * num_beams # pad the batch + next_batch_beam.extend(next_sent_beam) + assert len(next_batch_beam) == num_beams * (batch_ex + 1) + + # sanity check / prepare next batch + assert len(next_batch_beam) == batch_size * num_beams + beam_scores = beam_scores.new([x[0] for x in next_batch_beam]) + beam_words = input_ids.new([x[1] for x in next_batch_beam]) + beam_idx = input_ids.new([x[2] for x in next_batch_beam]) + + # re-order batch + input_ids = input_ids[beam_idx, :] + input_ids = torch.cat([input_ids, beam_words.unsqueeze(1)], dim=-1) + + # re-order internal states + if past: + reordered_past = [] + for layer_past in past: + # get the correct batch idx from layer past batch dim + # batch dim of `past` and `mems` is at 1st position + reordered_layer_past = [layer_past[i].unsqueeze(0).clone().detach() for i in beam_idx] + reordered_layer_past = torch.cat(reordered_layer_past, dim=0) + # check that shape matches + assert reordered_layer_past.shape == layer_past.shape + reordered_past.append(reordered_layer_past) + past = tuple(reordered_past) + + # update current length + cur_len = cur_len + 1 + + # stop when we are done with each sentence + if all(done): + break + + # visualize hypotheses + # print([len(x) for x in generated_hyps], cur_len) + # globals().update( locals() ); + 
# !import code; code.interact(local=vars()) + # for ii in range(batch_size): + # for ss, ww in sorted(generated_hyps[ii].hyp, key=lambda x: x[0], reverse=True): + # print("%.3f " % ss + " ".join(self.dico[x] for x in ww.tolist())) + # print("") + + # select the best hypotheses + tgt_len = torch.ones(batch_size, num_keep_best, dtype=torch.long) + logprobs = torch.zeros(batch_size, num_keep_best, + dtype=torch.float).fill_(-1e5).to(input_ids.device) + all_best = [] + + for i, hypotheses in enumerate(generated_hyps): + best = [] + hyp_scores = torch.tensor([x[0] for x in hypotheses.hyp]) + _, best_indices = torch.topk(hyp_scores, + min(num_keep_best, len(hyp_scores)), largest=True) + for best_idx, hyp_idx in enumerate(best_indices): + conf, best_hyp = hypotheses.hyp[hyp_idx] + best.append(best_hyp) + logprobs[i, best_idx] = conf + tgt_len[i, best_idx] = len(best_hyp) + 1 # +1 for the symbol + + all_best.append(best) + + # generate target batch, pad to the same length + decoded = input_ids.new(batch_size, num_keep_best, max_length).fill_(pad_token_id) + for batch_idx, best in enumerate(all_best): + for best_idx, hypo in enumerate(best): + decoded[batch_idx, best_idx, : tgt_len[batch_idx, best_idx] - 1] = hypo + decoded[batch_idx, best_idx, tgt_len[batch_idx, best_idx] - 1] = eos_token_ids[0] + + return decoded, logprobs + + +def top_k_top_p_filtering(logits, top_k=0, top_p=1.0, filter_value=-float("Inf"), min_tokens_to_keep=1): + """ Filter a distribution of logits using top-k and/or nucleus (top-p) filtering + Args: + logits: logits distribution shape (batch size, vocabulary size) + if top_k > 0: keep only top k tokens with highest probability (top-k filtering). + if top_p < 1.0: keep the top tokens with cumulative probability >= top_p (nucleus filtering). + Nucleus filtering is described in Holtzman et al. (http://arxiv.org/abs/1904.09751) + Make sure we keep at least min_tokens_to_keep per batch example in the output + From: https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317 + """ + if top_k > 0: + top_k = min(max(top_k, min_tokens_to_keep), logits.size(-1)) # Safety check + # Remove all tokens with a probability less than the last token of the top-k + indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None] + logits[indices_to_remove] = filter_value + + if top_p < 1.0: + sorted_logits, sorted_indices = torch.sort(logits, descending=True) + cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1) + + # Remove tokens with cumulative probability above the threshold (token with 0 are kept) + sorted_indices_to_remove = cumulative_probs > top_p + if min_tokens_to_keep > 1: + # Keep at least min_tokens_to_keep (set to min_tokens_to_keep-1 because we add the first one below) + sorted_indices_to_remove[..., :min_tokens_to_keep] = 0 + # Shift the indices to the right to keep also the first token above the threshold + sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone() + sorted_indices_to_remove[..., 0] = 0 + + # scatter sorted tensors to original indexing + indices_to_remove = sorted_indices_to_remove.scatter(1, sorted_indices, sorted_indices_to_remove) + logits[indices_to_remove] = filter_value + return logits + + +class BeamHypotheses(object): + def __init__(self, n_hyp, max_length, length_penalty, early_stopping): + """ + Initialize n-best list of hypotheses. 
+ """ + self.max_length = max_length - 1 # ignoring bos_token + self.length_penalty = length_penalty + self.early_stopping = early_stopping + self.n_hyp = n_hyp + self.hyp = [] + self.worst_score = 1e9 + + def __len__(self): + """ + Number of hypotheses in the list. + """ + return len(self.hyp) + + def add(self, hyp, sum_logprobs): + """ + Add a new hypothesis to the list. + """ + score = sum_logprobs / len(hyp) ** self.length_penalty + if len(self) < self.n_hyp or score > self.worst_score: + self.hyp.append((score, hyp)) + if len(self) > self.n_hyp: + sorted_scores = sorted([(s, idx) for idx, (s, _) in enumerate(self.hyp)]) + del self.hyp[sorted_scores[0][1]] + self.worst_score = sorted_scores[1][0] + else: + self.worst_score = min(score, self.worst_score) + + def is_done(self, best_sum_logprobs): + """ + If there are enough hypotheses and that none of the hypotheses being generated + can become better than the worst one in the heap, then we are done with this sentence. + """ + if len(self) < self.n_hyp: + return False + elif self.early_stopping: + return True + else: + return self.worst_score >= best_sum_logprobs / self.max_length ** self.length_penalty + + + + diff --git a/oscar/run_captioning.py b/oscar/run_captioning.py new file mode 100644 index 0000000..2671be1 --- /dev/null +++ b/oscar/run_captioning.py @@ -0,0 +1,882 @@ +# Copyright (c) 2020 Microsoft Corporation. Licensed under the MIT license. + +from __future__ import absolute_import, division, print_function +import argparse +import base64 +import os.path as op +import random, time, json +import numpy as np +import torch +from torch.utils.data import Dataset, DataLoader, RandomSampler, SequentialSampler +from tqdm import tqdm + +from oscar.utils.logger import setup_logger +from oscar.utils.tsv_file import TSVFile +from oscar.utils.tsv_file_ops import tsv_writer +from oscar.utils.misc import (mkdir, set_seed, + load_from_yaml_file, find_file_path_in_yaml) +from oscar.utils.caption_evaluate import (evaluate_on_coco_caption, + evaluate_on_nocaps, ScstRewardCriterion) +from oscar.utils.cbs import ConstraintFilter, ConstraintBoxesReader +from oscar.utils.cbs import FiniteStateMachineBuilder +from oscar.modeling.modeling_bert import BertForImageCaptioning +from transformers.pytorch_transformers import BertTokenizer, BertConfig +from transformers.pytorch_transformers import AdamW, WarmupLinearSchedule, WarmupConstantSchedule + + +class CaptionTSVDataset(Dataset): + def __init__(self, yaml_file, tokenizer=None, add_od_labels=True, + max_img_seq_length=50, max_seq_length=70, max_seq_a_length=40, + is_train=True, mask_prob=0.15, max_masked_tokens=3, **kwargs): + """Constructor. + Args: + yaml file with all required data (image feature, caption, labels, etc) + tokenizer: tokenizer for text processing. + add_od_labels: whether to add labels from yaml file to BERT. + max_img_seq_length: max image sequence length. + max_seq_length: max text sequence length. + max_seq_a_length: max caption sequence length. + is_train: train or test mode. + mask_prob: probability to mask a input token. + max_masked_tokens: maximum number of tokens to be masked in one sentence. + kwargs: other arguments. 
+ """ + self.yaml_file = yaml_file + self.cfg = load_from_yaml_file(yaml_file) + self.root = op.dirname(yaml_file) + self.label_file = find_file_path_in_yaml(self.cfg['label'], self.root) + self.feat_file = find_file_path_in_yaml(self.cfg['feature'], self.root) + self.caption_file = find_file_path_in_yaml(self.cfg.get('caption'), self.root) + + assert op.isfile(self.feat_file) + if add_od_labels: assert op.isfile(self.label_file) + if is_train: assert op.isfile(self.caption_file) and tokenizer is not None + + self.label_tsv = None if not self.label_file else TSVFile(self.label_file) + self.feat_tsv = TSVFile(self.feat_file) + if self.caption_file and op.isfile(self.caption_file): + with open(self.caption_file, 'r') as f: + self.captions = json.load(f) + + self.tokenizer = tokenizer + self.tensorizer = CaptionTensorizer(self.tokenizer, max_img_seq_length, + max_seq_length, max_seq_a_length, mask_prob, max_masked_tokens, + is_train=is_train) + self.add_od_labels = add_od_labels + self.is_train = is_train + self.kwargs = kwargs + self.image_keys = self.prepare_image_keys() + self.key2index = self.prepare_image_key_to_index() + self.key2captions = self.prepare_image_key_to_captions() + + def get_valid_tsv(self): + # based on the order of file size + if self.label_tsv: + return self.label_tsv + if self.feat_tsv: + return self.feat_tsv + + def prepare_image_keys(self): + tsv = self.get_valid_tsv() + return [tsv.seek(i)[0] for i in range(tsv.num_rows())] + + def prepare_image_key_to_index(self): + tsv = self.get_valid_tsv() + return {tsv.seek(i)[0] : i for i in range(tsv.num_rows())} + + def prepare_image_key_to_captions(self): + if self.is_train: + key2captions = {key: [] for key in self.image_keys} + for cap in self.captions: + key2captions[cap['image_id']].append(cap['caption']) + return key2captions + + def get_image_index(self, idx): + if self.is_train: + img_cap_pair = self.captions[idx] + img_key = img_cap_pair['image_id'] + return self.key2index[img_key] + return idx + + def get_image_key(self, idx): + img_idx = self.get_image_index(idx) + return self.image_keys[img_idx] + + def get_image_features(self, img_idx): + feat_info = json.loads(self.feat_tsv.seek(img_idx)[1]) + num_boxes = feat_info['num_boxes'] + features = np.frombuffer(base64.b64decode(feat_info['features']), np.float32 + ).reshape((num_boxes, -1)) + return torch.Tensor(features) + + def get_caption(self, idx): + if self.is_train: + img_cap_pair = self.captions[idx] + return img_cap_pair['caption'] + return "" + + def get_od_labels(self, img_idx): + od_labels = None + if self.add_od_labels: + label_info = json.loads(self.label_tsv.seek(img_idx)[1]) + od_labels = " ".join([l['class'] for l in label_info]) + return od_labels + + def get_caption_file_in_coco_format(self): + cap_file = op.splitext(self.caption_file)[0] + '_coco_format.json' + return cap_file + + def get_captions_by_key(self, key): + assert self.is_train, "cannot get captions for inference" + return self.key2captions[key] + + def __getitem__(self, idx): + img_idx = self.get_image_index(idx) + img_key = self.image_keys[img_idx] + features = self.get_image_features(img_idx) + caption = self.get_caption(idx) + od_labels = self.get_od_labels(img_idx) + example = self.tensorizer.tensorize_example(caption, features, text_b=od_labels) + return img_key, example + + def __len__(self): + if self.is_train: + return len(self.captions) + return self.get_valid_tsv().num_rows() + + +class CaptionTSVDatasetWithConstraints(CaptionTSVDataset): + r""" + Providing inputs for 
inference with Constraint Beam Search + + nms_threshold: float, optional (default = 0.85) + NMS threshold for suppressing generic object class names during constraint filtering, + for two boxes with IoU higher than this threshold, "dog" suppresses "animal". + max_given_constraints: int, optional (default = 3) + Maximum number of constraints which can be specified for CBS decoding. Constraints are + selected based on the prediction confidence score of their corresponding bounding boxes. + """ + + def __init__( + self, yaml_file, + nms_threshold=0.85, + max_given_constraints=3, **kwargs + ): + super().__init__(yaml_file, **kwargs) + boxes_tsvpath = find_file_path_in_yaml(self.cfg['cbs_box'], self.root) + constraint2tokens_tsvpath = find_file_path_in_yaml(self.cfg['cbs_constraint'], self.root) + tokenforms_tsvpath = find_file_path_in_yaml(self.cfg['cbs_tokenforms'], self.root) + hierarchy_jsonpath = find_file_path_in_yaml(self.cfg['cbs_hierarchy'], self.root) + + self._boxes_reader = ConstraintBoxesReader(boxes_tsvpath) + self._constraint_filter = ConstraintFilter( + hierarchy_jsonpath, nms_threshold, max_given_constraints + ) + self._fsm_builder = FiniteStateMachineBuilder(self.tokenizer, + constraint2tokens_tsvpath, tokenforms_tsvpath, + max_given_constraints) + + def __getitem__(self, index): + img_key, example = super().__getitem__(index) + + # Apply constraint filtering to object class names. + constraint_boxes = self._boxes_reader[img_key] + + candidates = self._constraint_filter( + constraint_boxes["boxes"], constraint_boxes["class_names"], constraint_boxes["scores"] + ) + num_constraints = len(candidates) + fsm, nstates = self._fsm_builder.build(candidates) + + return img_key, example + (fsm, num_constraints, ) + + +class CaptionTensorizer(object): + def __init__(self, tokenizer, max_img_seq_length=50, max_seq_length=70, + max_seq_a_length=40, mask_prob=0.15, max_masked_tokens=3, + is_train=True): + """Constructor. + Args: + tokenizer: tokenizer for text processing. + max_img_seq_length: max image sequence length. + max_seq_length: max text sequence length. + max_seq_a_length: max caption sequence length. + is_train: train or test mode. + mask_prob: probability to mask a input token. + max_masked_tokens: maximum number of tokens to be masked in one sentence. + """ + self.tokenizer = tokenizer + self.is_train = is_train + self.max_img_seq_len = max_img_seq_length + self.max_seq_len = max_seq_length + self.max_seq_a_len = max_seq_a_length + self.mask_prob = mask_prob + self.max_masked_tokens = max_masked_tokens + self._triangle_mask = torch.tril(torch.ones((self.max_seq_len, + self.max_seq_len), dtype=torch.long)) + + def tensorize_example(self, text_a, img_feat, text_b=None, + cls_token_segment_id=0, pad_token_segment_id=0, + sequence_a_segment_id=0, sequence_b_segment_id=1): + if self.is_train: + tokens_a = self.tokenizer.tokenize(text_a) + else: + # fake tokens to generate masks + tokens_a = [self.tokenizer.mask_token] * (self.max_seq_a_len - 2) + if len(tokens_a) > self.max_seq_a_len - 2: + tokens_a = tokens_a[:(self.max_seq_a_len - 2)] + + tokens = [self.tokenizer.cls_token] + tokens_a + [self.tokenizer.sep_token] + segment_ids = [cls_token_segment_id] + [sequence_a_segment_id] * (len(tokens) - 1) + seq_a_len = len(tokens) + if text_b: + # pad text_a to keep it in fixed length for better inference. 
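+            # e.g. (toy numbers): with max_seq_a_len=40 and a caption that tokenizes into 12
+            # word pieces, seq_a_len is 14 ([CLS] + 12 tokens + [SEP]) and 26 [PAD] tokens are
+            # appended here, so the object tags in text_b always begin at position 40, the
+            # same value passed to the model as od_labels_start_posid (args.max_seq_a_length).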
+ padding_a_len = self.max_seq_a_len - seq_a_len + tokens += [self.tokenizer.pad_token] * padding_a_len + segment_ids += ([pad_token_segment_id] * padding_a_len) + + tokens_b = self.tokenizer.tokenize(text_b) + if len(tokens_b) > self.max_seq_len - len(tokens) - 1: + tokens_b = tokens_b[: (self.max_seq_len - len(tokens) - 1)] + tokens += tokens_b + [self.tokenizer.sep_token] + segment_ids += [sequence_b_segment_id] * (len(tokens_b) + 1) + + seq_len = len(tokens) + if self.is_train: + masked_pos = torch.zeros(self.max_seq_len, dtype=torch.int) + # randomly mask words for prediction, ignore [CLS] + candidate_masked_idx = list(range(1, seq_a_len)) # only mask text_a + random.shuffle(candidate_masked_idx) + num_masked = min(max(round(self.mask_prob * seq_a_len), 1), self.max_masked_tokens) + num_masked = int(num_masked) + masked_idx = candidate_masked_idx[:num_masked] + masked_idx = sorted(masked_idx) + masked_token = [tokens[i] for i in masked_idx] + for pos in masked_idx: + if random.random() <= 0.8: + # 80% chance to be a ['MASK'] token + tokens[pos] = self.tokenizer.mask_token + elif random.random() <= 0.5: + # 10% chance to be a random word ((1-0.8)*0.5) + from random import randint + i = randint(0, len(self.tokenizer.vocab)) + self.tokenizer._convert_id_to_token(i) + tokens[pos] = self.tokenizer._convert_id_to_token(i) + else: + # 10% chance to remain the same (1-0.8-0.1) + pass + + masked_pos[masked_idx] = 1 + # pad masked tokens to the same length + if num_masked < self.max_masked_tokens: + masked_token = masked_token + ([self.tokenizer.pad_token] * + (self.max_masked_tokens - num_masked)) + masked_ids = self.tokenizer.convert_tokens_to_ids(masked_token) + else: + masked_pos = torch.ones(self.max_seq_len, dtype=torch.int) + + # pad on the right for image captioning + padding_len = self.max_seq_len - seq_len + tokens = tokens + ([self.tokenizer.pad_token] * padding_len) + segment_ids += ([pad_token_segment_id] * padding_len) + input_ids = self.tokenizer.convert_tokens_to_ids(tokens) + + # image features + img_len = img_feat.shape[0] + if img_len > self.max_img_seq_len: + img_feat = img_feat[0 : self.max_img_seq_len, ] + img_len = img_feat.shape[0] + else: + padding_matrix = torch.zeros((self.max_img_seq_len - img_len, + img_feat.shape[1])) + img_feat = torch.cat((img_feat, padding_matrix), 0) + + # prepare attention mask: + # note that there is no attention from caption to image + # because otherwise it will violate the triangle attention + # for caption as caption will have full attention on image. 
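+        # Reading aid for the mask constructed below (C = caption, L = object labels,
+        # R = image regions); rows are query positions, columns are key positions:
+        #
+        #            C              L    R
+        #   C   lower-triangular    1    1
+        #   L           0           1    1
+        #   R           0           1    1
+        #
+        # Labels and regions never attend back to the caption, which preserves the caption's
+        # left-to-right (causal) factorization during generation.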
+ max_len = self.max_seq_len + self.max_img_seq_len + attention_mask = torch.zeros((max_len, max_len), dtype=torch.long) + # C: caption, L: label, R: image region + c_start, c_end = 0, seq_a_len + l_start, l_end = self.max_seq_a_len, seq_len + r_start, r_end = self.max_seq_len, self.max_seq_len + img_len + # triangle mask for caption to caption + attention_mask[c_start : c_end, c_start : c_end].copy_(self._triangle_mask[0 : seq_a_len, 0 : seq_a_len]) + # full attention for L-L, R-R + attention_mask[l_start : l_end, l_start : l_end] = 1 + attention_mask[r_start : r_end, r_start : r_end] = 1 + # full attention for C-L, C-R + attention_mask[c_start : c_end, l_start : l_end] = 1 + attention_mask[c_start : c_end, r_start : r_end] = 1 + # full attention for L-R: + attention_mask[l_start : l_end, r_start : r_end] = 1 + attention_mask[r_start : r_end, l_start : l_end] = 1 + + input_ids = torch.tensor(input_ids, dtype=torch.long) + segment_ids = torch.tensor(segment_ids, dtype=torch.long) + + if self.is_train: + masked_ids = torch.tensor(masked_ids, dtype=torch.long) + return (input_ids, attention_mask, segment_ids, img_feat, masked_pos, masked_ids) + return (input_ids, attention_mask, segment_ids, img_feat, masked_pos) + + +def build_dataset(yaml_file, tokenizer, args, is_train=True): + if not op.isfile(yaml_file): + yaml_file = op.join(args.data_dir, yaml_file) + assert op.isfile(yaml_file) + + if is_train: + return CaptionTSVDataset(yaml_file, tokenizer=tokenizer, + add_od_labels=args.add_od_labels, max_img_seq_length=args.max_img_seq_length, + max_seq_length=args.max_seq_length, max_seq_a_length=args.max_seq_a_length, + is_train=True, mask_prob=args.mask_prob, max_masked_tokens=args.max_masked_tokens) + if args.use_cbs: + dataset_class = CaptionTSVDatasetWithConstraints + else: + dataset_class = CaptionTSVDataset + return dataset_class(yaml_file, tokenizer=tokenizer, + add_od_labels=args.add_od_labels, max_img_seq_length=args.max_img_seq_length, + max_seq_length=args.max_seq_length, max_seq_a_length=args.max_gen_length, + is_train=False) + + +def save_checkpoint(model, tokenizer, args, epoch, global_step): + checkpoint_dir = op.join(args.output_dir, 'checkpoint-{}-{}'.format( + epoch, global_step)) + mkdir(checkpoint_dir) + model_to_save = model.module if hasattr(model, 'module') else model + save_num = 0 + while (save_num < 10): + try: + model_to_save.save_pretrained(checkpoint_dir) + torch.save(args, op.join(checkpoint_dir, 'training_args.bin')) + tokenizer.save_pretrained(checkpoint_dir) + logger.info("Save checkpoint to {}".format(checkpoint_dir)) + break + except: + save_num += 1 + if save_num == 10: + logger.info("Failed to save checkpoint after 10 trails.") + return checkpoint_dir + + +def compute_score_with_logits(logits, labels): + logits = torch.max(logits, -1)[1].data # argmax + scores = logits == labels + return scores + + +def train(args, train_dataset, val_dataset, model, tokenizer): + args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu) + train_sampler = RandomSampler(train_dataset) + train_dataloader = DataLoader(train_dataset, sampler=train_sampler, + batch_size=args.train_batch_size, num_workers=args.num_workers) + + if args.max_steps > 0: + t_total = args.max_steps + args.num_train_epochs = args.max_steps // (len(train_dataloader) // \ + args.gradient_accumulation_steps) + 1 + else: + t_total = len(train_dataloader) // args.gradient_accumulation_steps \ + * args.num_train_epochs + + # Prepare optimizer and scheduler + no_decay = ['bias', 
'LayerNorm.weight'] + grouped_parameters = [ + {'params': [p for n, p in model.named_parameters() if not \ + any(nd in n for nd in no_decay)], 'weight_decay': args.weight_decay}, + {'params': [p for n, p in model.named_parameters() if \ + any(nd in n for nd in no_decay)], 'weight_decay': 0.0} + ] + optimizer = AdamW(grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon) + if args.scheduler == "constant": + scheduler = WarmupConstantSchedule( + optimizer, warmup_steps=args.warmup_steps) + elif args.scheduler == "linear": + scheduler = WarmupLinearSchedule( + optimizer, warmup_steps=args.warmup_steps, t_total=t_total) + else: + raise ValueError("Unknown scheduler type: {}".format(args.scheduler)) + + if args.n_gpu > 1: + model = torch.nn.DataParallel(model) + + logger.info("***** Running training *****") + logger.info(" Num examples = %d", len(train_dataset)) + logger.info(" Num Epochs = %d", args.num_train_epochs) + logger.info(" Batch size per GPU = %d", args.per_gpu_train_batch_size) + logger.info(" Total train batch size (w. parallel, & accumulation) = %d", + args.train_batch_size * args.gradient_accumulation_steps) + logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps) + logger.info(" Total optimization steps = %d", t_total) + + if args.scst: + scst_criterion = ScstRewardCriterion() + logger.info(" SCST training...") + + global_step, global_loss, global_acc =0, 0.0, 0.0 + model.zero_grad() + eval_log = [] + best_score = 0 + for epoch in range(int(args.num_train_epochs)): + for step, (img_keys, batch) in enumerate(train_dataloader): + batch = tuple(t.to(args.device) for t in batch) + + if not args.scst: + model.train() + inputs = {'input_ids': batch[0], 'attention_mask': batch[1], + 'token_type_ids': batch[2], 'img_feats': batch[3], + 'masked_pos': batch[4], 'masked_ids': batch[5] + } + outputs = model(**inputs) + loss, logits = outputs[:2] + masked_ids = inputs['masked_ids'] + masked_ids = masked_ids[masked_ids != 0] + batch_score = compute_score_with_logits(logits, masked_ids) + batch_acc = torch.sum(batch_score.float()) / torch.sum(inputs['masked_pos']) + else: + loss = scst_train_iter(args, train_dataset, model, scst_criterion, img_keys, batch, tokenizer) + batch_acc = scst_criterion.get_score() + + if args.n_gpu > 1: + loss = loss.mean() # mean() to average on multi-gpu parallel training + if args.gradient_accumulation_steps > 1: + loss = loss / args.gradient_accumulation_steps + loss.backward() + torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm) + global_loss += loss.item() + global_acc += batch_acc + if (step + 1) % args.gradient_accumulation_steps == 0: + global_step += 1 + scheduler.step() + optimizer.step() + model.zero_grad() + if global_step % args.logging_steps == 0: + logger.info("Epoch: {}, global_step: {}, lr: {:.6f}, loss: {:.4f} ({:.4f}), " \ + "score: {:.4f} ({:.4f})".format(epoch, global_step, + optimizer.param_groups[0]["lr"], loss, global_loss / global_step, + batch_acc, global_acc / global_step) + ) + + if (args.save_steps > 0 and global_step % args.save_steps == 0) or \ + global_step == t_total: + checkpoint_dir = save_checkpoint(model, tokenizer, args, epoch, global_step) + # evaluation + if args.evaluate_during_training: + logger.info("Perform evaluation at step: %d" % (global_step)) + evaluate_file = evaluate(args, val_dataset, model, tokenizer, + checkpoint_dir) + with open(evaluate_file, 'r') as f: + res = json.load(f) + best_score = max(best_score, res['CIDEr']) + res['epoch'] = epoch + 
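+                        # Illustrative shape of one record appended to eval_logs.json below
+                        # (metric values are made up; keys other than epoch/global_step/best_CIDEr
+                        # come from the caption evaluation output):
+                        #   {"CIDEr": 1.17, ..., "epoch": 3, "global_step": 5000, "best_CIDEr": 1.17}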
res['global_step'] = step + res['best_CIDEr'] = best_score + eval_log.append(res) + with open(args.output_dir + '/eval_logs.json', 'w') as f: + json.dump(eval_log, f) + return global_step, global_loss / global_step + + +def scst_train_iter(args, train_dataset, model, scst_criterion, img_keys, batch, tokenizer): + cls_token_id, sep_token_id, pad_token_id, mask_token_id = tokenizer.convert_tokens_to_ids( + [tokenizer.cls_token, tokenizer.sep_token, tokenizer.pad_token, + tokenizer.mask_token] + ) + inputs = {'is_decode': True, + 'input_ids': batch[0], 'attention_mask': batch[1], + 'token_type_ids': batch[2], 'img_feats': batch[3], + 'masked_pos': batch[4], + 'do_sample': False, + 'bos_token_id': cls_token_id, + 'pad_token_id': pad_token_id, + 'eos_token_ids': [sep_token_id, pad_token_id], + 'mask_token_id': mask_token_id, + # for adding od labels + 'add_od_labels': args.add_od_labels, 'od_labels_start_posid': args.max_seq_a_length, + + # hyperparameters of beam search + 'max_length': args.max_seq_a_length, + 'num_beams': 1, + "temperature": args.temperature, + "top_k": args.top_k, + "top_p": args.top_p, + "repetition_penalty": args.repetition_penalty, + "length_penalty": args.length_penalty, + "num_return_sequences": 1, + "num_keep_best": 1, + } + + model.eval() + with torch.no_grad(): + greedy_res_raw, _ = model(**inputs) + greedy_res_raw.squeeze_(1) # batch_size * max_len + + model.train() + inputs['do_sample'] = True + sample_res_raw, sample_logprobs = model(**inputs) + sample_res_raw.squeeze_(1) + sample_logprobs.squeeze_(1) + assert sample_logprobs.requires_grad == True + assert sample_res_raw.requires_grad == False + + def _ids_to_captions(all_ids): + captions = [] + for ids in all_ids: + c = tokenizer.decode(ids.tolist(), skip_special_tokens=True) + captions.append(c) + return captions + + greedy_res = _ids_to_captions(greedy_res_raw) + sample_res = _ids_to_captions(sample_res_raw) + gt_res = [train_dataset.get_captions_by_key(k) for k in img_keys] + + loss = scst_criterion(gt_res, greedy_res, sample_res, sample_logprobs) + return loss + + +def get_predict_file(output_dir, yaml_file, args): + cc = ['pred'] + # make sure it works with/without / in end of the path. + data = op.basename(op.join(args.data_dir, '')[:-1]) + split = op.basename(yaml_file) + assert split.endswith('.yaml') + split = split[:-5] + cc.append(data) + cc.append(split) + cc.append('beam{}'.format(args.num_beams)) + cc.append('max{}'.format(args.max_gen_length)) + if args.add_od_labels: + cc.append('odlabels') + if args.num_keep_best != 1: + cc.append('best{}'.format(args.num_keep_best)) + if args.use_cbs: + cc.append('cbs{}'.format(args.min_constraints_to_satisfy)) + if args.output_hidden_states: + cc.append('hidden') + return op.join(output_dir, '{}.tsv'.format('.'.join(cc))) + + +def get_evaluate_file(predict_file): + assert predict_file.endswith('.tsv') + fpath = op.splitext(predict_file)[0] + return fpath + '.eval.json' + + +def get_evaluate_method(predict_file): + if 'nocaps' in op.basename(predict_file): + return 'nocaps' + else: + return 'coco' + + +def evaluate(args, val_dataset, model, tokenizer, output_dir): + assert op.isdir(output_dir) + predict_file = get_predict_file(output_dir, val_dataset.yaml_file, args) + if op.isfile(predict_file): + logger.info('Skip predict. {} already exists'.format(predict_file)) + else: + test(args, val_dataset, model, tokenizer, predict_file) + + evaluate_file = get_evaluate_file(predict_file) + if op.isfile(evaluate_file): + logger.info('Skip evaluation. 
{} already exists'.format(evaluate_file)) + return evaluate_file + + eval_method = get_evaluate_method(predict_file) + if eval_method == 'coco': + gt_file = val_dataset.get_caption_file_in_coco_format() + result = evaluate_on_coco_caption(predict_file, gt_file, outfile=evaluate_file) + else: + split = 'val' if 'val' in op.basename(val_dataset.yaml_file) else 'test' + result = evaluate_on_nocaps(split, predict_file, + data_dir=args.data_dir, evaluate_file=evaluate_file) + logger.info("evaluation result: {}".format(str(result))) + return evaluate_file + + +def test(args, test_dataset, model, tokenizer, predict_file): + args.test_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu) + test_sampler = SequentialSampler(test_dataset) + cache_file = predict_file + + test_dataloader = DataLoader(test_dataset, sampler=test_sampler, + batch_size=args.test_batch_size, num_workers=args.num_workers) + + cls_token_id, sep_token_id, pad_token_id, mask_token_id, period_token_id = \ + tokenizer.convert_tokens_to_ids( [tokenizer.cls_token, + tokenizer.sep_token, tokenizer.pad_token, tokenizer.mask_token, '.'] + ) + model.eval() + + def gen_rows(): + time_meter = 0 + # restore existing results for long running inference tasks + exist_key2pred = {} + tmp_file = cache_file + '.tmp.copy' + if op.isfile(tmp_file): + with open(tmp_file, 'r') as fp: + for line in fp: + parts = line.strip().split('\t') + if len(parts) == 2: + exist_key2pred[parts[0]] = parts[1] + + with torch.no_grad(): + for step, (img_keys, batch) in tqdm(enumerate(test_dataloader)): + is_exist = True + for k in img_keys: + if k not in exist_key2pred: + is_exist = False + break + if is_exist: + for k in img_keys: + yield k, exist_key2pred[k] + continue + batch = tuple(t.to(args.device) for t in batch) + inputs = {'is_decode': True, + 'input_ids': batch[0], 'attention_mask': batch[1], + 'token_type_ids': batch[2], 'img_feats': batch[3], + 'masked_pos': batch[4], + 'do_sample': False, + 'bos_token_id': cls_token_id, + 'pad_token_id': pad_token_id, + 'eos_token_ids': [sep_token_id, pad_token_id], + 'mask_token_id': mask_token_id, + # for adding od labels + 'add_od_labels': args.add_od_labels, 'od_labels_start_posid': args.max_seq_a_length, + + # hyperparameters of beam search + 'max_length': args.max_gen_length, + 'num_beams': args.num_beams, + "temperature": args.temperature, + "top_k": args.top_k, + "top_p": args.top_p, + "repetition_penalty": args.repetition_penalty, + "length_penalty": args.length_penalty, + "num_return_sequences": args.num_return_sequences, + "num_keep_best": args.num_keep_best, + } + if args.use_cbs: + inputs.update({'use_cbs': True, + 'fsm': batch[5], + 'num_constraints': batch[6], + 'min_constraints_to_satisfy': args.min_constraints_to_satisfy, + }) + tic = time.time() + # captions, logprobs + outputs = model(**inputs) + time_meter += time.time() - tic + all_caps = outputs[0] # batch_size * num_keep_best * max_len + all_confs = torch.exp(outputs[1]) + + for img_key, caps, confs in zip(img_keys, all_caps, all_confs): + res = [] + for cap, conf in zip(caps, confs): + cap = tokenizer.decode(cap.tolist(), skip_special_tokens=True) + res.append({'caption': cap, 'conf': conf.item()}) + if isinstance(img_key, torch.Tensor): + img_key = img_key.item() + yield img_key, json.dumps(res) + + logger.info("Inference model computing time: {} seconds per batch".format(time_meter / (step+1))) + + tsv_writer(gen_rows(), cache_file) + return predict_file + + +def restore_training_settings(args): + assert not args.do_train + assert 
args.do_test or args.do_eval + # restore training settings, check hasattr for backward compatibility + train_args = torch.load(op.join(args.eval_model_dir, 'training_args.bin')) + if hasattr(train_args, 'max_seq_a_length'): + max_od_labels_len = train_args.max_seq_length - train_args.max_seq_a_length + max_seq_length = args.max_gen_length + max_od_labels_len + args.max_seq_length = max_seq_length + logger.warning('Override max_seq_length to {} = max_gen_length:{} + od_labels_len:{}'.format( + max_seq_length, args.max_gen_length, max_od_labels_len)) + + override_params = ['max_seq_a_length', 'do_lower_case', 'add_od_labels', + 'max_img_seq_length', 'img_feature_dim', + 'img_feature_type'] + for param in override_params: + if hasattr(train_args, param): + train_v = getattr(train_args, param) + test_v = getattr(args, param) + if train_v != test_v: + logger.warning('Override {} with train args: {} -> {}'.format(param, + test_v, train_v)) + setattr(args, param, train_v) + return args + + +def main(): + parser = argparse.ArgumentParser() + parser.add_argument("--data_dir", default='datasets/coco_caption', type=str, required=False, + help="The input data dir with all required files.") + parser.add_argument("--train_yaml", default='train.yaml', type=str, required=False, + help="yaml file for training.") + parser.add_argument("--test_yaml", default='test.yaml', type=str, required=False, + help="yaml file for testing.") + parser.add_argument("--val_yaml", default='val.yaml', type=str, required=False, + help="yaml file used for validation during training.") + parser.add_argument("--model_name_or_path", default=None, type=str, required=False, + help="Path to pre-trained model or model type.") + parser.add_argument("--output_dir", default='output/', type=str, required=False, + help="The output directory to save checkpoint and test results.") + parser.add_argument("--loss_type", default='sfmx', type=str, + help="Loss function types: support kl, x2, sfmx") + parser.add_argument("--config_name", default="", type=str, + help="Pretrained config name or path if not the same as model_name.") + parser.add_argument("--tokenizer_name", default="", type=str, + help="Pretrained tokenizer name or path if not the same as model_name.") + parser.add_argument("--max_seq_length", default=70, type=int, + help="The maximum total input sequence length after tokenization. 
" + "Sequences longer than this will be truncated, " + "sequences shorter will be padded.") + parser.add_argument("--max_seq_a_length", default=40, type=int, + help="The maximum sequence length for caption.") + parser.add_argument("--do_train", action='store_true', help="Whether to run training.") + parser.add_argument("--do_test", action='store_true', help="Whether to run inference.") + parser.add_argument("--do_eval", action='store_true', help="Whether to run evaluation.") + parser.add_argument("--do_lower_case", action='store_true', + help="Set this flag if you are using an uncased model.") + parser.add_argument("--mask_prob", default=0.15, type=float, + help= "Probability to mask input sentence during training.") + parser.add_argument("--max_masked_tokens", type=int, default=3, + help="The max number of masked tokens per sentence.") + parser.add_argument("--add_od_labels", default=False, action='store_true', + help="Whether to add object detection labels or not") + parser.add_argument("--drop_out", default=0.1, type=float, help="Drop out in BERT.") + parser.add_argument("--max_img_seq_length", default=50, type=int, + help="The maximum total input image sequence length.") + parser.add_argument("--img_feature_dim", default=2054, type=int, + help="The Image Feature Dimension.") + parser.add_argument("--img_feature_type", default='frcnn', type=str, + help="Image feature type.") + parser.add_argument("--per_gpu_train_batch_size", default=64, type=int, + help="Batch size per GPU/CPU for training.") + parser.add_argument("--per_gpu_eval_batch_size", default=64, type=int, + help="Batch size per GPU/CPU for evaluation.") + parser.add_argument("--output_mode", default='classification', type=str, + help="output mode, support classification or regression.") + parser.add_argument("--num_labels", default=2, type=int, + help="num_labels is 2 for classification and 1 for regression.") + parser.add_argument('--gradient_accumulation_steps', type=int, default=1, + help="Number of updates steps to accumulate before backward.") + parser.add_argument("--learning_rate", default=3e-5, type=float, help="The initial lr.") + parser.add_argument("--weight_decay", default=0.05, type=float, help="Weight deay.") + parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam.") + parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") + parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup.") + parser.add_argument("--scheduler", default='linear', type=str, help="constant or linear or") + parser.add_argument("--num_workers", default=4, type=int, help="Workers in dataloader.") + parser.add_argument("--num_train_epochs", default=40, type=int, + help="Total number of training epochs to perform.") + parser.add_argument("--max_steps", default=-1, type=int, + help="Total number of training steps. Override num_train_epochs.") + parser.add_argument('--logging_steps', type=int, default=20, help="Log every X steps.") + parser.add_argument('--save_steps', type=int, default=-1, + help="Save checkpoint every X steps. 
Will also perform evaluatin.") + parser.add_argument("--evaluate_during_training", action='store_true', + help="Run evaluation during training at each save_steps.") + parser.add_argument("--no_cuda", action='store_true', help="Avoid using CUDA.") + parser.add_argument('--seed', type=int, default=88, help="random seed for initialization.") + parser.add_argument('--scst', action='store_true', help='Self-critical sequence training') + # for generation + parser.add_argument("--eval_model_dir", type=str, default='', + help="Model directory for evaluation.") + parser.add_argument('--max_gen_length', type=int, default=20, + help="max length of generated sentences") + parser.add_argument('--output_hidden_states', action='store_true', + help="Turn on for fast decoding") + parser.add_argument('--num_return_sequences', type=int, default=1, + help="repeating times per image") + parser.add_argument('--num_beams', type=int, default=5, help="beam search width") + parser.add_argument('--num_keep_best', type=int, default=1, + help="number of hypotheses to keep in beam search") + parser.add_argument('--temperature', type=float, default=1, + help="temperature in softmax for sampling") + parser.add_argument('--top_k', type=int, default=0, + help="filter distribution for sampling") + parser.add_argument('--top_p', type=float, default=1, + help="filter distribution for sampling") + parser.add_argument('--repetition_penalty', type=int, default=1, + help="repetition penalty from CTRL paper (https://arxiv.org/abs/1909.05858)") + parser.add_argument('--length_penalty', type=int, default=1, + help="beam search length penalty") + # for Constrained Beam Search + parser.add_argument('--use_cbs', action='store_true', + help='Use constrained beam search for decoding') + parser.add_argument('--min_constraints_to_satisfy', type=int, default=2, + help="minimum number of constraints to satisfy") + args = parser.parse_args() + + global logger + + args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") + args.n_gpu = torch.cuda.device_count() + + output_dir = args.output_dir + mkdir(output_dir) + + logger = setup_logger("vlpretrain", output_dir, 0) + logger.warning("Device: %s, n_gpu: %s", args.device, args.n_gpu) + set_seed(args.seed, args.n_gpu) + + # Load pretrained model and tokenizer + config_class, model_class, tokenizer_class = BertConfig, BertForImageCaptioning, BertTokenizer + if args.do_train: + assert args.model_name_or_path is not None + config = config_class.from_pretrained(args.config_name if args.config_name else \ + args.model_name_or_path, num_labels=args.num_labels, finetuning_task='image_captioning') + if args.scst: + # avoid using too much memory + config.output_hidden_states = True + tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name if args.tokenizer_name \ + else args.model_name_or_path, do_lower_case=args.do_lower_case) + config.img_feature_dim = args.img_feature_dim + config.img_feature_type = args.img_feature_type + config.hidden_dropout_prob = args.drop_out + config.loss_type = args.loss_type + model = model_class.from_pretrained(args.model_name_or_path, + from_tf=bool('.ckpt' in args.model_name_or_path), config=config) + else: + checkpoint = args.eval_model_dir + assert op.isdir(checkpoint) + config = config_class.from_pretrained(checkpoint) + config.output_hidden_states = args.output_hidden_states + tokenizer = tokenizer_class.from_pretrained(checkpoint) + logger.info("Evaluate the following checkpoint: %s", checkpoint) + model = 
model_class.from_pretrained(checkpoint, config=config) + + model.to(args.device) + logger.info("Training/evaluation parameters %s", args) + if args.do_train: + train_dataset = build_dataset(op.join(args.data_dir, args.train_yaml), tokenizer, args) + val_dataset = build_dataset(op.join(args.data_dir, args.val_yaml), + tokenizer, args, is_train=False) + global_step, avg_loss = train(args, train_dataset, val_dataset, model, tokenizer) + logger.info("Training done: total_step = %s, avg loss = %s", global_step, avg_loss) + + # inference and evaluation + if args.do_test or args.do_eval: + args = restore_training_settings(args) + test_dataset = build_dataset(op.join(args.data_dir, args.test_yaml), + tokenizer, args, is_train=False) + if args.n_gpu > 1: + model = torch.nn.DataParallel(model) + + if not args.do_eval: + predict_file = get_predict_file(checkpoint, test_dataset.yaml_file, args) + test(args, test_dataset, model, tokenizer, predict_file) + logger.info("Prediction results saved to: {}".format(predict_file)) + else: + evaluate_file = evaluate(args, test_dataset, model, tokenizer, + checkpoint) + logger.info("Evaluation results saved to: {}".format(evaluate_file)) + +if __name__ == "__main__": + main() diff --git a/oscar/run_gqa.py b/oscar/run_gqa.py new file mode 100644 index 0000000..a702028 --- /dev/null +++ b/oscar/run_gqa.py @@ -0,0 +1,1084 @@ +# Copyright (c) 2020 Microsoft Corporation. Licensed under the MIT license. + +from __future__ import absolute_import, division, print_function + +import argparse +import glob +import logging +import os +import copy, time, json + +import sys +sys.path.insert(0, '.') + +import numpy as np +import torch +import torch.nn as nn +from torch.utils.data import (Dataset, DataLoader, RandomSampler, SequentialSampler, TensorDataset) +from torch.utils.data.distributed import DistributedSampler +import _pickle as cPickle + +from oscar.modeling.modeling_bert import ImageBertForSequenceClassification +from transformers.pytorch_transformers import WEIGHTS_NAME, BertTokenizer, BertConfig +from transformers.pytorch_transformers import AdamW, WarmupLinearSchedule, WarmupConstantSchedule + +from oscar.utils.misc import set_seed +from oscar.utils.task_utils import (_truncate_seq_pair, convert_examples_to_features_vqa, + output_modes, processors) + +logger = logging.getLogger(__name__) + +MODEL_CLASSES = { + 'bert': (BertConfig, ImageBertForSequenceClassification, BertTokenizer), +} + + +log_json = [] + + +def _load_dataset(args, name): + processor = processors[args.task_name]() + labels = processor.get_labels(args.label_file) + + if name == 'train': + if args.train_data_type == 'bal': + examples = processor.get_train_examples(args.data_dir, 'gqa_bal_qla_train.json') #[0: debug_size] + else: + examples = processor.get_train_examples(args.data_dir, 'gqa_all_qla_train.json') #[0: debug_size] + elif name == 'val': + if args.eval_data_type == 'bal': + examples = processor.get_dev_examples(args.data_dir, 'gqa_bal_qla_val.json') #[0: debug_size] + else: + examples = processor.get_dev_examples(args.data_dir, 'gqa_all_qla_val.json') #[0: debug_size] + elif name == 'train+val': # depreciated + if args.data_label_type == 'mask': + examples = processor.get_train_examples(args.data_dir, 'train+val2014_qla_mrcnn.json') + else: + examples = processor.get_train_examples(args.data_dir, 'train+val2014_qla.json') + elif name == 'test': # test-submission + if args.data_label_type == 'bal': + examples = processor.get_test_examples(args.data_dir, 'gqa_all_qla_submission.json') + else: 
+            examples = processor.get_test_examples(args.data_dir, 'gqa_all_qla_submission.json')
+    elif name == 'test-dev': # test-dev set
+        if args.data_label_type == 'bal':
+            examples = processor.get_dev_examples(args.data_dir, 'gqa_bal_qla_testdev.json')
+        else:
+            examples = processor.get_dev_examples(args.data_dir, 'gqa_all_qla_testdev.json')
+
+    return examples, labels
+
+
+def _load_img_features(args):
+    t_start = time.time()
+    if args.img_feature_type == 'faster_r-cnn':
+        if args.img_feature_dim == 2048: # object features
+            feat_file_name = 'gqa_img_frcnn_feats_obj.pt'
+        else: # object + spatial features
+            feat_file_name = 'gqa_img_frcnn_feats.pt'
+    else:
+        feat_file_name = 'gqa_img_frcnn_feats.pt'
+    img_features = torch.load(os.path.join(args.data_dir, feat_file_name))
+    t_end = time.time()
+    logger.info('Info: loading {0:s} features using {1:.2f} secs'.format(feat_file_name, (t_end - t_start)))
+
+    return img_features
+
+
+class GQADataset(Dataset):
+    """ GQA Dataset """
+
+    def __init__(self, args, name, img_features, tokenizer, label_pos_feats=None):
+        super(GQADataset, self).__init__()
+        assert name in ['train', 'val', 'test-dev', 'test', 'train+val']
+
+        #t_start = time.time()
+        #if args.img_feature_type == 'faster_r-cnn':
+        #    if args.img_feature_dim == 2048: # object features
+        #        feat_file_name = 'gqa_img_frcnn_feats_obj_{}.pt'.format(name)
+        #    else: # object + spatial features
+        #        feat_file_name = 'gqa_img_frcnn_feats_{}.pt'.format(name)
+        #else:
+        #    feat_file_name = '{}_img_feats.pt'.format(name)
+        #self.img_features = torch.load(os.path.join(args.data_dir, feat_file_name))
+        #t_end = time.time()
+        #logger.info('Info: loading {0:s} features using {1:.2f} secs'.format(feat_file_name, (t_end-t_start)))
+
+        self.img_features = img_features
+        self.label_pos_feats = label_pos_feats
+        self.output_mode = output_modes[args.task_name]
+        self.tokenizer = tokenizer
+        self.args = args
+        self.name = name
+
+        self.examples, self.labels = _load_dataset(args, name)
+        self.label_map = {label: i for i, label in enumerate(self.labels)}
+
+        if self.args.load_fast:
+            self.features = self.tensorize(cls_token_at_end=bool(self.args.model_type in ['xlnet']), # xlnet has a cls token at the end
+                                           cls_token=self.tokenizer.cls_token,
+                                           sep_token=self.tokenizer.sep_token,
+                                           cls_token_segment_id=2 if self.args.model_type in ['xlnet'] else 0,
+                                           pad_on_left=bool(self.args.model_type in ['xlnet']), # pad on the left for xlnet
+                                           pad_token_segment_id=4 if self.args.model_type in ['xlnet'] else 0)
+        else:
+            pass
+
+        logger.info('%s Data Examples: %d' % (name, len(self.examples)))
+
+    def tensorize(self, cls_token_at_end=False, pad_on_left=False,
+                  cls_token='[CLS]', sep_token='[SEP]', pad_token=0,
+                  sequence_a_segment_id=0, sequence_b_segment_id=1,
+                  cls_token_segment_id=1, pad_token_segment_id=0,
+                  mask_padding_with_zero=True):
+
+        # debug:
+        debug_size = 500
+        features = []
+
+        for (ex_index, example) in enumerate(self.examples[0: ]):
+            if len(example.label) == 0: continue
+            if ex_index % 10000 == 0: logger.info("Tensorizing example %d of %d" % (ex_index, len(self.examples)))
+
+            tokens_a = self.tokenizer.tokenize(example.text_a)
+
+            tokens_b = None
+            if example.text_b:
+                tokens_b = self.tokenizer.tokenize(example.text_b)
+                # Modifies `tokens_a` and `tokens_b` in place so that the total length is less than the specified length.
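+                # (_truncate_seq_pair trims one token at a time from whichever of the two
+                # sequences is currently longer, so text_a and text_b are shortened evenly
+                # until the pair fits within max_seq_length.)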
+ # Account for [CLS], [SEP], [SEP] with "- 3" + _truncate_seq_pair(tokens_a, tokens_b, self.args.max_seq_length - 3) + else: + # Account for [CLS] and [SEP] with "- 2" + if len(tokens_a) > self.args.max_seq_length - 2: + tokens_a = tokens_a[:(self.args.max_seq_length - 2)] + + tokens = tokens_a + [sep_token] + segment_ids = [sequence_a_segment_id] * len(tokens) + + if tokens_b: + tokens += tokens_b + [sep_token] + segment_ids += [sequence_b_segment_id] * (len(tokens_b) + 1) + + if cls_token_at_end: + tokens = tokens + [cls_token] + segment_ids = segment_ids + [cls_token_segment_id] + else: + tokens = [cls_token] + tokens + segment_ids = [cls_token_segment_id] + segment_ids + + input_ids = self.tokenizer.convert_tokens_to_ids(tokens) + + # The mask has 1 for real tokens and 0 for padding tokens. Only real tokens are attended to. + input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids) + + # Zero-pad up to the sequence length. + padding_length = self.args.max_seq_length - len(input_ids) + if pad_on_left: + input_ids = ([pad_token] * padding_length) + input_ids + input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask + segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids + else: + input_ids = input_ids + ([pad_token] * padding_length) + input_mask = input_mask + ([0 if mask_padding_with_zero else 1] * padding_length) + segment_ids = segment_ids + ([pad_token_segment_id] * padding_length) + + assert len(input_ids) == self.args.max_seq_length + assert len(input_mask) == self.args.max_seq_length + assert len(segment_ids) == self.args.max_seq_length + + # image features + img_feat = self.img_features[example.img_key] # torch + #img_feat = self.img_features.item().get(example.img_key) # numpy + if img_feat.shape[0] > self.args.max_img_seq_length: + img_feat = img_feat[0:self.args.max_img_seq_length, ] + if self.args.max_img_seq_length > 0: + input_mask = input_mask + [1 if mask_padding_with_zero else 0] * img_feat.shape[0] + # segment_ids += [sequence_b_segment_id] * img_feat.shape[0] + else: + if self.args.max_img_seq_length > 0: + input_mask = input_mask + [1 if mask_padding_with_zero else 0] * img_feat.shape[0] + # segment_ids = segment_ids + [sequence_b_segment_id] * img_feat.shape[0] + padding_matrix = torch.zeros((self.args.max_img_seq_length - img_feat.shape[0], img_feat.shape[1])) + img_feat = torch.cat((img_feat, padding_matrix), 0) + if self.args.max_img_seq_length > 0: + input_mask = input_mask + ([0 if mask_padding_with_zero else 1] * padding_matrix.shape[0]) + # segment_ids = segment_ids + [pad_token_segment_id] * padding_matrix.shape[0] + + if self.args.output_mode == "classification": + label_id = [self.label_map[l] for l in example.label] + score = example.score + elif self.args.output_mode == "regression": + label_id = float(example.label) + else: + raise KeyError(self.args.output_mode) + + if ex_index < 5: + logger.info("*** Example ***") + logger.info("guid: %s" % (example.guid)) + logger.info("tokens: %s" % " ".join([str(x) for x in tokens])) + logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids])) + logger.info("input_mask: %s" % " ".join([str(x) for x in input_mask])) + logger.info("segment_ids: %s" % " ".join([str(x) for x in segment_ids])) + logger.info("label: %s (id = %s)" % (example.label, label_id)) + logger.info("score: %s (score = %s)" % (example.score, score)) + + new_scores = target_tensor(len(self.labels), label_id, score) + #features.append(InputFeat(input_ids=input_ids, input_mask=input_mask, 
segment_ids=segment_ids, label_id=label_id, score=score, img_feat=img_feat)) + features.append((torch.tensor(input_ids, dtype=torch.long), + torch.tensor(input_mask, dtype=torch.long), + torch.tensor(segment_ids, dtype=torch.long), + torch.tensor([label_id[0]], dtype=torch.long), + torch.tensor(new_scores, dtype=torch.float), img_feat)) + + return features + + def tensorize_example(self, example, cls_token_at_end=False, pad_on_left=False, + cls_token='[CLS]', sep_token='[SEP]', pad_token=0, + sequence_a_segment_id=0, sequence_b_segment_id=1, + cls_token_segment_id=1, pad_token_segment_id=0, + mask_padding_with_zero=True): + + tokens_a = self.tokenizer.tokenize(example.text_a) + + tokens_b = None + if example.text_b: + txt_b_arr = example.text_b.split(';') + txt_label_ixs = [] + for txt_b_ix, txt_b_ele in enumerate(txt_b_arr): + tokens_b_ele = self.tokenizer.tokenize(txt_b_ele) + txt_label_ixs.extend([txt_b_ix] * len(tokens_b_ele)) + txt_b = example.text_b.replace(';', ' ').strip() + tokens_b = self.tokenizer.tokenize(txt_b) + assert len(tokens_b) == len(txt_label_ixs) + + # Modifies `tokens_a` and `tokens_b` in place so that the total length is less than the specified length. + # Account for [CLS], [SEP], [SEP] with "- 3" + _truncate_seq_pair(tokens_a, tokens_b, self.args.max_seq_length - 3) + txt_label_ixs = txt_label_ixs[0:len(tokens_b)] + + # original + #if example.text_b: + # txt_b = example.text_b.replace(';', ' ').strip() + # tokens_b = self.tokenizer.tokenize(txt_b) + # _truncate_seq_pair(tokens_a, tokens_b, self.args.max_seq_length - 3) + else: # Account for [CLS] and [SEP] with "- 2" + if len(tokens_a) > self.args.max_seq_length - 2: + tokens_a = tokens_a[:(self.args.max_seq_length - 2)] + + tokens = tokens_a + [sep_token] + segment_ids = [sequence_a_segment_id] * len(tokens) + + if tokens_b: + tokens += tokens_b + [sep_token] + segment_ids += [sequence_b_segment_id] * (len(tokens_b) + 1) + + if cls_token_at_end: + tokens = tokens + [cls_token] + segment_ids = segment_ids + [cls_token_segment_id] + else: + tokens = [cls_token] + tokens + segment_ids = [cls_token_segment_id] + segment_ids + + input_ids = self.tokenizer.convert_tokens_to_ids(tokens) + + # The mask has 1 for real tokens and 0 for padding tokens. Only real tokens are attended to. + input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids) + + # Zero-pad up to the sequence length. 
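+        # The text side is padded (or truncated above) to exactly max_seq_length tokens;
+        # the image regions appended further down are padded separately to
+        # max_img_seq_length, and both parts extend the same attention mask.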
+ padding_length = self.args.max_seq_length - len(input_ids) + if pad_on_left: + input_ids = ([pad_token] * padding_length) + input_ids + input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask + segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids + else: + input_ids = input_ids + ([pad_token] * padding_length) + input_mask = input_mask + ([0 if mask_padding_with_zero else 1] * padding_length) + segment_ids = segment_ids + ([pad_token_segment_id] * padding_length) + + assert len(input_ids) == self.args.max_seq_length + assert len(input_mask) == self.args.max_seq_length + assert len(segment_ids) == self.args.max_seq_length + + # image features + if self.args.img_feature_type.startswith('dis_code'): + img_feat = self.img_features[example.img_key] + + if self.args.img_feature_type == 'dis_code_ln': # for discrete code image representation + img_feat = img_feat.reshape(-1, img_feat.shape[0]) + + if self.args.img_feature_type == 'dis_code_t': # transposed + input_mask = input_mask + [1 if mask_padding_with_zero else 0] * 64 + else: + input_mask = input_mask + [1 if mask_padding_with_zero else 0] * img_feat.shape[0] + else: + img_feat = self.img_features[example.img_key] #[:, 0:self.args.img_feature_dim] # torch + + if img_feat.shape[0] > self.args.max_img_seq_length: + img_feat = img_feat[0:self.args.max_img_seq_length, ] + if self.args.max_img_seq_length > 0: + input_mask = input_mask + [1 if mask_padding_with_zero else 0] * img_feat.shape[0] + # segment_ids += [sequence_b_segment_id] * img_feat.shape[0] + else: + if self.args.max_img_seq_length > 0: + input_mask = input_mask + [1 if mask_padding_with_zero else 0] * img_feat.shape[0] + # segment_ids = segment_ids + [sequence_b_segment_id] * img_feat.shape[0] + padding_matrix = torch.zeros((self.args.max_img_seq_length - img_feat.shape[0], img_feat.shape[1])) + img_feat = torch.cat((img_feat, padding_matrix), 0) + if self.args.max_img_seq_length > 0: + input_mask = input_mask + ([0 if mask_padding_with_zero else 1] * padding_matrix.shape[0]) + # segment_ids = segment_ids + [pad_token_segment_id] * padding_matrix.shape[0] + + if self.args.output_mode == "classification": + if (example.label is None): + label_id = [0] + score = [0] + elif len(example.label) == 0: + label_id = [0] + score = [0] + else: + label_id = [self.label_map[l] for l in example.label] + score = example.score + elif self.args.output_mode == "regression": + if len(example.label) == 0: + label_id = 0 + else: + label_id = float(example.label) + else: + raise KeyError(self.args.output_mode) + + if self.args.img_feature_type in ['dis_code', 'dis_code_t']: + img_feat = img_feat.type(torch.long) + elif self.args.img_feature_type in ['dis_code_ln']: + #img_feat = img_feat.reshape(-1, img_feat.shape[0]) + img_feat = img_feat.type(torch.float) + + return (torch.tensor(input_ids, dtype=torch.long), + torch.tensor(input_mask, dtype=torch.long), + torch.tensor(segment_ids, dtype=torch.long), + torch.tensor([label_id[0]], dtype=torch.long), + torch.tensor([label_id[0]], dtype=torch.long), + img_feat, + torch.tensor([example.q_id], dtype=torch.long)) + + def __getitem__(self, index): + if self.args.load_fast: + example = self.features[index] + else: + entry = self.examples[index] + example = self.tensorize_example(entry, + cls_token_at_end=bool(self.args.model_type in ['xlnet']), # xlnet has a cls token at the end + cls_token=self.tokenizer.cls_token, + sep_token=self.tokenizer.sep_token, + cls_token_segment_id=2 if self.args.model_type in 
['xlnet'] else 0, + pad_on_left=bool(self.args.model_type in ['xlnet']), # pad on the left for xlnet + pad_token_segment_id=4 if self.args.model_type in ['xlnet'] else 0) + return example + + def __len__(self): + return len(self.examples) + + +def trim_batch(batch): + """ new batch func + :param batch: + :return: + """ + print('batch size', len(batch)) + + batch_size = len(batch) + batch_tensors = [] + for ele in batch[0]: + print(ele.shape, ele.size()) + zero_tensor = torch.zeros(([batch_size] + list(ele.size()))) + batch_tensors.append(zero_tensor) + + for b_id, b in enumerate(batch): + print(b_id, len(b)) + for ele_id, ele in enumerate(b): + print(ele_id, ele.shape) + batch_tensors[ele_id][b_id] = ele + return batch_tensors + + +def train(args, train_dataset, eval_dataset, model, tokenizer): + """ Train the model """ + args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu) + train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset) + train_dataloader = DataLoader(train_dataset, num_workers=args.workers, sampler=train_sampler, batch_size=args.train_batch_size) #, collate_fn=trim_batch) + + if args.max_steps > 0: + t_total = args.max_steps + args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1 + else: + t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs + + # Prepare optimizer and schedule (linear warmup and decay) + no_decay = ['bias', 'LayerNorm.weight'] + optimizer_grouped_parameters = [ + {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], 'weight_decay': args.weight_decay}, + {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0} + ] + optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon) + scheduler = WarmupLinearSchedule(optimizer, warmup_steps=args.warmup_steps, t_total=t_total) + + if args.fp16: + try: + from apex import amp + except ImportError: + raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.") + model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level) + + # multi-gpu training (should be after apex fp16 initialization) + if args.n_gpu > 1: + model = torch.nn.DataParallel(model) + + # Distributed training (should be after apex fp16 initialization) + if args.local_rank != -1: + model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True) + + # Train! + logger.info("***** Running training *****") + logger.info(" Num examples = %d", len(train_dataset)) + logger.info(" Num Epochs = %d", args.num_train_epochs) + logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size) + logger.info(" Total train batch size (w. 
parallel, distributed & accumulation) = %d", + args.train_batch_size * args.gradient_accumulation_steps * (torch.distributed.get_world_size() if args.local_rank != -1 else 1)) + logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps) + logger.info(" Total optimization steps = %d", t_total) + + global_step = 0 + tr_loss, logging_loss = 0.0, 0.0 + model.zero_grad() + #train_iterator = trange(int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0]) + set_seed(args.seed, args.n_gpu) # Added here for reproductibility (even between python 2 and 3) + + best_score = 0 + best_model = { + 'epoch': 0, + 'model_state': model.state_dict(), + 'optimizer_state': optimizer.state_dict() + } + + for epoch in range(int(args.num_train_epochs)): + #for epoch in train_iterator: + #epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0]) + total_loss = 0 + total_norm = 0 + count_norm = 0 + + t_start = time.time() + for step, batch in enumerate(train_dataloader): + #for step, batch in enumerate(epoch_iterator): + model.train() + batch = tuple(t.to(args.device) for t in batch) + inputs = {'input_ids': batch[0], + 'attention_mask': batch[1], + 'token_type_ids': batch[2] if args.model_type in ['bert', 'xlnet'] else None, # XLM don't use segment_ids + 'labels': batch[3], + 'img_feats': None if args.img_feature_dim == -1 else batch[5]} + outputs = model(**inputs) + + #loss = outputs[0] # model outputs are always tuple in pytorch-transformers (see doc) + loss, logits = outputs[:2] + + if args.n_gpu > 1: loss = loss.mean() # mean() to average on multi-gpu parallel training + + if args.gradient_accumulation_steps > 1: + loss = loss / args.gradient_accumulation_steps + + if args.fp16: + with amp.scale_loss(loss, optimizer) as scaled_loss: + scaled_loss.backward() + torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm) + else: + loss.backward() + total_norm += torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm) + count_norm += 1 + + #batch_score = compute_score_with_logits(logits, batch[4]).sum() + #train_score += batch_score.item() + + tr_loss += loss.item() + total_loss += loss.item() + if (step + 1) % args.gradient_accumulation_steps == 0: + scheduler.step() # Update learning rate schedule + optimizer.step() + model.zero_grad() + global_step += 1 + + if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:# Log metrics + if args.local_rank == -1 and args.evaluate_during_training: # Only evaluate when single GPU otherwise metrics may not average well + logger.info("Epoch: %d, global_step: %d" % (epoch, global_step)) + eval_result, eval_score = evaluate(args, model, eval_dataset, prefix=global_step) + if eval_score > best_score: + best_score = eval_score + best_model['epoch'] = epoch + best_model['model'] = copy.deepcopy(model) + + logger.info("EVALERR: {}%".format(100 * best_score)) + logging_loss = tr_loss + + #if args.max_steps > 0 and global_step > args.max_steps: + # epoch_iterator.close() + # break + + t_end = time.time() + logger.info('Train Time Cost: %.3f' % (t_end-t_start)) + + # evaluation + logger.info("Epoch: %d" % (epoch)) + eval_result, eval_score = evaluate(args, model, eval_dataset, prefix=global_step) + if eval_score > best_score: + best_score = eval_score + best_model['epoch'] = epoch + best_model['model'] = copy.deepcopy(model) + #best_model['optimizer'] = copy.deepcopy(optimizer.state_dict()) + + # save checkpoints + if 
args.local_rank in [-1, 0] and args.save_epoch > 0 and epoch % args.save_epoch == 0: # Save model checkpoint + output_dir = os.path.join(args.output_dir, 'checkpoint-{}'.format(epoch)) + if not os.path.exists(output_dir): os.makedirs(output_dir) + model_to_save = best_model['model'].module if hasattr(model, 'module') else best_model['model'] # Take care of distributed/parallel training + + save_num = 0 + while (save_num < 10): + try: + model_to_save.save_pretrained(output_dir) + torch.save(args, os.path.join(output_dir, 'training_args.bin')) + tokenizer.save_pretrained(output_dir) + break + except: + save_num += 1 + logger.info("Saving model checkpoint {0} to {1}".format(epoch, output_dir)) + + epoch_log = {'epoch': epoch, 'eval_score': eval_score, 'best_score':best_score} + log_json.append(epoch_log) + + with open(args.output_dir + '/eval_logs.json', 'w') as fp: + json.dump(log_json, fp) + + logger.info("PROGRESS: {}%".format(round(100*(epoch + 1) / args.num_train_epochs, 4))) + logger.info("EVALERR: {}%".format(100*best_score)) + logger.info("LOSS: {}%".format(total_loss / len(train_dataset))) + + with open(args.output_dir + '/eval_logs.json', 'w') as fp: + json.dump(log_json, fp) + + if args.local_rank in [-1, 0]: # Save the final model checkpoint + output_dir = os.path.join(args.output_dir, 'best-{}'.format(best_model['epoch'])) + if not os.path.exists(output_dir): os.makedirs(output_dir) + model_to_save = best_model['model'].module if hasattr(model, 'module') else best_model['model'] # Take care of distributed/parallel training + + save_num = 0 + while (save_num < 10): + try: + model_to_save.save_pretrained(output_dir) + torch.save(args, os.path.join(output_dir, 'training_args.bin')) + tokenizer.save_pretrained(output_dir) + break + except: + save_num += 1 + logger.info("Saving the best model checkpoint epoch {} to {}".format(best_model['epoch'], output_dir)) + + return global_step, tr_loss / global_step + +def evaluate(args, model, eval_dataset=None, prefix=""): + # Loop to handle MNLI double evaluation (matched, mis-matched) + eval_task_names = ("mnli", "mnli-mm") if args.task_name == "mnli" else (args.task_name,) + eval_outputs_dirs = (args.output_dir, args.output_dir + '-MM') if args.task_name == "mnli" else (args.output_dir,) + + #if args.n_gpu > 1: model = torch.nn.DataParallel(model) # debug: single-gpu or multi-gpus + + results = [] + t_start = time.time() + for eval_task, eval_output_dir in zip(eval_task_names, eval_outputs_dirs): + if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]: os.makedirs(eval_output_dir) + + args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu) + # Note that DistributedSampler samples randomly + eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset) + eval_dataloader = DataLoader(eval_dataset, num_workers=args.workers, sampler=eval_sampler, batch_size=args.eval_batch_size) + + # Eval! 
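+        # Accuracy below is top-1: the argmax over the answer logits is compared
+        # against the ground-truth label id (batch[3]) for every example.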
+ logger.info("***** Running evaluation {} *****".format(prefix)) + logger.info(" Num examples = %d", len(eval_dataset)) + logger.info(" Batch size = %d", args.eval_batch_size) + eval_loss = 0.0 + nb_eval_steps = 0 + preds = None + out_label_ids = None + num_data = 0 + correct_num = 0 + + for batch in eval_dataloader: + #for batch in tqdm(eval_dataloader, desc="Evaluating"): + model.eval() + batch = tuple(t.to(args.device) for t in batch) + + with torch.no_grad(): + inputs = {'input_ids': batch[0], + 'attention_mask': batch[1], + 'token_type_ids': batch[2] if args.model_type in ['bert', 'xlnet'] else None, # XLM don't use segment_ids + 'labels': batch[3], + 'img_feats': None if args.img_feature_dim == -1 else batch[5]} + outputs = model(**inputs) + tmp_eval_loss, logits = outputs[:2] + + eval_loss += tmp_eval_loss.mean().item() + + #logger.info('logits: %s, batch[3]: %s' % (str(logits.shape), str(batch[3].shape))) + #logger.info('correct: %s' % (str(logits.argmax(1) == batch[3].view(-1)))) + + correct = logits.argmax(1) == batch[3].view(-1) + correct_num += correct.sum().item() + num_data += logits.size(0) + + # debug + #val, idx = logits.max(1) + #logger.info('idx: %s, batch[4]: %s' % (str(idx.shape), str(batch[3].shape))) + #for i in range(idx.size(0)): + # logger.info('idx: %d, pred: %d, real: %d' % (idx[i].item(), eval_dataset.labels[idx[i].item()], batch[3][i].item())) + + nb_eval_steps += 1 + + acc = float(correct_num) / len(eval_dataloader.dataset) + + logger.info("Eval Results:") + logger.info("Eval Accuracy: %.3f" % (100*acc)) + logger.info("Eval Loss: %.3f" % (eval_loss)) + + t_end = time.time() + logger.info('Eva Time Cost: %.3f' % (t_end - t_start)) + + return results, acc + +def test(args, model, eval_dataset=None, prefix=""): + # Loop to handle MNLI double evaluation (matched, mis-matched) + eval_task_names = ("mnli", "mnli-mm") if args.task_name == "mnli" else (args.task_name,) + eval_outputs_dirs = (args.output_dir, args.output_dir + '-MM') if args.task_name == "mnli" else (args.output_dir,) + + label2ans = cPickle.load(open(args.label2ans_file, 'rb')) + logger.info('label2ans: %d' % (len(label2ans))) + + results = [] + t_start = time.time() + for eval_task, eval_output_dir in zip(eval_task_names, eval_outputs_dirs): + if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]: os.makedirs(eval_output_dir) + + args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu) + # Note that DistributedSampler samples randomly + eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset) + eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size) + + # Eval + logger.info("***** Running Test {} *****".format(prefix)) + logger.info(" Num examples = %d", len(eval_dataset)) + logger.info(" Batch size = %d", args.eval_batch_size) + + for batch in eval_dataloader: + #for batch in tqdm(eval_dataloader, desc="Evaluating"): + model.eval() + batch = tuple(t.to(args.device) for t in batch) + + with torch.no_grad(): + inputs = {'input_ids': batch[0], + 'attention_mask': batch[1], + 'token_type_ids': batch[2] if args.model_type in ['bert', 'xlnet'] else None, # XLM don't use segment_ids + 'labels': None, + 'img_feats': None if args.img_feature_dim == -1 else batch[5]} + outputs = model(**inputs) + logits = outputs[0] + + val, idx = logits.max(1) + #logger.info('idx: %s, batch[6]: %s' % (str(idx.shape), str(batch[6].shape))) + + for i in range(idx.size(0)): + result = {} + 
result['questionId'] = str(batch[6][i].item()) + result['prediction'] = label2ans[eval_dataset.labels[idx[i].item()]] + results.append(result) + + #logger.info('q_id: {0}, answer: {1}'.format(result['question_id'], result['answer'])) + + with open(args.output_dir + ('/{}_results.json'.format(eval_dataset.name)), 'w') as fp: + json.dump(results, fp) + + t_end = time.time() + logger.info('# questions: %d' % (len(results))) + logger.info('Test Time Cost: %.3f' % (t_end - t_start)) + + +def load_and_cache_examples(args, task, tokenizer, evaluate=False): + processor = processors[task]() + output_mode = output_modes[task] + + label_list = processor.get_labels(args.label_file) + + t_start = time.time() + examples = processor.get_dev_examples(args.data_dir) if evaluate else processor.get_train_examples(args.data_dir) + + #img_features = torch.load(os.path.join(args.data_dir, 'val_img_feats.pt' if evaluate else 'train_img_feats.pt')) + #img_features = torch.load(os.path.join(args.data_dir, 'val_img_frcnn_feats.pt' if evaluate else 'train_img_frcnn_feats.pt')) + img_features = np.load(os.path.join(args.data_dir, 'val_img_frcnn_feats.npy' if evaluate else 'train_img_frcnn_feats.npy')) + + features = convert_examples_to_features_vqa(examples, img_features, label_list, args.max_img_seq_length, args.max_seq_length, + tokenizer, output_mode, + cls_token_at_end=bool(args.model_type in ['xlnet']), # xlnet has a cls token at the end + cls_token=tokenizer.cls_token, + sep_token=tokenizer.sep_token, + cls_token_segment_id=2 if args.model_type in ['xlnet'] else 0, + pad_on_left=bool(args.model_type in ['xlnet']), # pad on the left for xlnet + pad_token_segment_id=4 if args.model_type in ['xlnet'] else 0) + + #if args.local_rank in [-1, 0]: + # logger.info("Saving features into cached file %s", cached_features_file) + # torch.save(features, cached_features_file) + t_end = time.time() + logger.info('Info: loading features using %.5f secs' % (t_end-t_start)) + + + # Convert to Tensors and build dataset + all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long) # batch*max_seq_len + all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long) + all_segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long) + if output_mode == "classification": + labels = torch.tensor([f.label_id[0] for f in features], dtype=torch.long) + targets = torch.tensor([target_tensor(len(label_list), f.label_id, f.score) for f in features], dtype=torch.float) + + if args.img_feature_dim > 0: # change here + t_start = time.time() + img_feat_np = np.zeros((labels.shape[0], args.max_img_seq_length, args.img_feature_dim)) + for f_id, f in enumerate(features): + img_feat_np[f_id] = f.img_feat + + img_feats = torch.from_numpy(img_feat_np) + + #img_feats = torch.empty((labels.shape[0], args.max_img_seq_length, args.img_feature_dim)) + #for f_id, f in enumerate(features): + # img_feats[f_id] = f.img_feat + + t_end = time.time() + logger.info('Info: convert image tensor features using %.5f secs' % (t_end - t_start)) + + #img_feats = torch.stack([f.img_feat[:,-args.img_feature_dim:] for f in features]) + #img_feats = torch.stack([f.img_feat for f in features]) + #img_feats = img_feats.type(torch.long) + + #print('targets:', targets.shape) + print('img_feats:', img_feats.shape) + elif output_mode == "regression": + all_label_ids = torch.tensor([f.label_id for f in features], dtype=torch.float) + + if args.img_feature_dim == -1: + dataset = TensorDataset(all_input_ids, all_input_mask, 
all_segment_ids, labels, targets) + else: + dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, labels, targets, img_feats) + return dataset + +def target_tensor(len, labels, scores): + """ create the target by labels and scores """ + target = [0]*len + for id, l in enumerate(labels): + target[l] = scores[id] + + return target + + +def main(): + parser = argparse.ArgumentParser() + + ## Required parameters + parser.add_argument("--data_dir", default=None, type=str, required=True, + help="The input data dir. Should contain the .tsv files (or other data files) for the task.") + parser.add_argument("--model_type", default=None, type=str, required=True, + help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys())) + parser.add_argument("--model_name_or_path", default=None, type=str, required=True, + help="Path to pre-trained model or shortcut name") + parser.add_argument("--task_name", default=None, type=str, required=True, + help="The name of the task to train selected in the list: " + ", ".join(processors.keys())) + parser.add_argument("--output_dir", default=None, type=str, required=True, + help="The output directory where the model predictions and checkpoints will be written.") + parser.add_argument("--label_file", type=str, default=None, help="Label Dictionary") + parser.add_argument("--label2ans_file", type=str, default=None, help="Label to Answer Dictionary") + + parser.add_argument("--data_label_type", default='bal', type=str, help="bal or all") + parser.add_argument("--train_data_type", default='bal', type=str, help="bal or all") + parser.add_argument("--eval_data_type", default='bal', type=str, help="bal or all") + parser.add_argument("--loss_type", default='kl', type=str, help="kl or xe") + + parser.add_argument("--spatial_dim", default=6, type=int, help="spatial_dim") + + parser.add_argument("--max_label_pos_length", default=45, type=int, help="The maximum total input label position sequence length.") + + ## Other parameters + parser.add_argument("--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name") + parser.add_argument("--tokenizer_name", default="", type=str, help="Pretrained tokenizer name or path if not the same as model_name") + parser.add_argument("--cache_dir", default="", type=str, help="Where do you want to store the pre-trained models downloaded from s3") + parser.add_argument("--max_seq_length", default=128, type=int, + help="The maximum total input sequence length after tokenization. 
Sequences longer than this will be truncated, sequences shorter will be padded.") + parser.add_argument("--do_train", action='store_true', help="Whether to run training.") + parser.add_argument("--do_train_val", action='store_true', help="Whether to run training.") + parser.add_argument("--do_eval", action='store_true', help="Whether to run eval on the dev set.") + parser.add_argument("--do_test", action='store_true', help="Whether to run test on the test set.") + parser.add_argument("--do_test_dev", action='store_true', help="Whether to run test on the test-dev set.") + parser.add_argument("--evaluate_during_training", action='store_true', help="Rul evaluation during training at each logging step.") + parser.add_argument("--do_lower_case", action='store_true', help="Set this flag if you are using an uncased model.") + + parser.add_argument("--drop_out", default=0.1, type=float, help="Drop out for BERT.") + parser.add_argument("--classifier", default='linear', type=str, help="linear or mlp") + parser.add_argument("--cls_hidden_scale", default=2, type=int, help="cls_hidden_scale: for classifier") + + parser.add_argument("--max_img_seq_length", default=30, type=int, help="The maximum total input image sequence length.") + parser.add_argument("--img_feature_dim", default=2054, type=int, help="The Image Feature Dimension.") + parser.add_argument("--img_feature_type", default='faster_r-cnn', type=str, help="faster_r-cnn or mask_r-cnn") + parser.add_argument("--code_voc", default=512, type=int, help="dis_code_voc: 256, 512") + parser.add_argument("--code_level", default='top', type=str, help="code level: top, botttom, both") + + parser.add_argument("--per_gpu_train_batch_size", default=8, type=int, help="Batch size per GPU/CPU for training.") + parser.add_argument("--per_gpu_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.") + parser.add_argument('--gradient_accumulation_steps', type=int, default=1, + help="Number of updates steps to accumulate before performing a backward/update pass.") + parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.") + parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight deay if we apply some.") + parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.") + parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") + parser.add_argument("--num_train_epochs", default=3.0, type=float, help="Total number of training epochs to perform.") + parser.add_argument("--max_steps", default=-1, type=int, help="If > 0: set total number of training steps to perform. 
Override num_train_epochs.") + parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.") + + parser.add_argument('--logging_steps', type=int, default=50, help="Log every X updates steps.") + parser.add_argument('--save_steps', type=int, default=50, help="Save checkpoint every X updates steps.") + parser.add_argument('--save_epoch', type=int, default=5, help="Save checkpoint every X epochs.") + parser.add_argument("--eval_all_checkpoints", action='store_true', + help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number") + parser.add_argument("--no_cuda", action='store_true', help="Avoid using CUDA when available") + parser.add_argument('--overwrite_output_dir', action='store_true', help="Overwrite the content of the output directory") + parser.add_argument('--overwrite_cache', action='store_true', help="Overwrite the cached training and evaluation sets") + parser.add_argument('--seed', type=int, default=42, help="random seed for initialization") + + parser.add_argument('--fp16', action='store_true', help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit") + parser.add_argument('--fp16_opt_level', type=str, default='O1', + help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']." + "See details at https://nvidia.github.io/apex/amp.html") + parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank") + parser.add_argument('--server_ip', type=str, default='', help="For distant debugging.") + parser.add_argument('--server_port', type=str, default='', help="For distant debugging.") + + parser.add_argument("--philly", action='store_true', help="Use Philly: reset the output dir") + parser.add_argument("--load_fast", action='store_true', help="Load Tensor Fast") + parser.add_argument('-j', '--workers', default=0, type=int, metavar='N', help='number of data loading workers (default: 4)') + + #args = '--data_dir ../vqa/ban-vqa/data/qal_pairs --model_type bert --model_name_or_path bert-base-uncased --task_name vqa_text ' \ + # '--do_train --do_eval --do_lower_case --max_seq_length 40 --per_gpu_eval_batch_size 16 --per_gpu_train_batch_size 16 --learning_rate 2e-5 ' \ + # '--num_train_epochs 20.0 --output_dir ./results/vqa_text --label_file ../vqa/ban-vqa/data/cache/trainval_ans2label.pkl ' \ + # '--save_steps 5000 --overwrite_output_dir --max_img_seq_length 1 --img_feature_dim 565 --img_feature_type dis_code ' + + #args = '--data_dir ../vqa/ban-vqa/data/qal_pairs --model_type bert --model_name_or_path bert-base-uncased --task_name vqa_text ' \ + # '--do_train --do_eval --do_lower_case --max_seq_length 40 --per_gpu_eval_batch_size 16 --per_gpu_train_batch_size 16 --learning_rate 2e-5 ' \ + # '--num_train_epochs 20.0 --output_dir ./results/vqa_text --label_file ../vqa/ban-vqa/data/cache/trainval_ans2label.pkl ' \ + # '--save_steps 5000 --overwrite_output_dir --max_img_seq_length 10 --img_feature_dim 565 --img_feature_type other ' + + #args = parser.parse_args(args.split()) + + args = parser.parse_args() + + if args.philly: # use philly + logger.info('Info: Use Philly, all the output folders are reset.') + args.output_dir = os.path.join(os.getenv('PT_OUTPUT_DIR'), args.output_dir) + logger.info('OUTPUT_DIR:', args.output_dir) + + #if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train and not args.overwrite_output_dir: + # raise ValueError("Output directory ({}) already exists and 
is not empty. Use --overwrite_output_dir to overcome.".format(args.output_dir)) + + if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train: logger.info("Output Directory Exists.") + + # Setup distant debugging if needed + if args.server_ip and args.server_port: + # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script + import ptvsd + print("Waiting for debugger attach") + ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True) + ptvsd.wait_for_attach() + + # Setup CUDA, GPU & distributed training + if args.local_rank == -1 or args.no_cuda: + device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") + args.n_gpu = torch.cuda.device_count() + else: # Initializes the distributed backend which will take care of sychronizing nodes/GPUs + torch.cuda.set_device(args.local_rank) + device = torch.device("cuda", args.local_rank) + torch.distributed.init_process_group(backend='nccl') + args.n_gpu = 1 + args.device = device + + # Setup logging + logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s', + datefmt = '%m/%d/%Y %H:%M:%S', level = logging.INFO if args.local_rank in [-1, 0] else logging.WARN) + logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", + args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16) + + # Set seed + set_seed(args.seed, args.n_gpu) + + # Prepare GLUE task + args.task_name = args.task_name.lower() + if args.task_name not in processors: + raise ValueError("Task not found: %s" % (args.task_name)) + + processor = processors[args.task_name]() + args.output_mode = output_modes[args.task_name] + label_list = processor.get_labels(args.label_file) + num_labels = len(label_list) + logger.info('Task Name: {}, #Labels: {}'.format(args.task_name, num_labels)) + + # Load pretrained model and tokenizer + if args.local_rank not in [-1, 0]: + torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab + + args.model_type = args.model_type.lower() + config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type] + config = config_class.from_pretrained( + args.config_name if args.config_name else args.model_name_or_path, + num_labels=num_labels, finetuning_task=args.task_name, + ) + tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name if args.tokenizer_name else args.model_name_or_path, do_lower_case=args.do_lower_case) + + # discrete code + config.img_feature_dim = args.img_feature_dim + config.img_feature_type = args.img_feature_type + config.code_voc = args.code_voc + config.hidden_dropout_prob = args.drop_out + config.loss_type = args.loss_type + config.classifier = args.classifier + config.cls_hidden_scale = args.cls_hidden_scale + config.spatial_dim = args.spatial_dim + + # load discrete code + if args.img_feature_type in ['dis_code', 'dis_code_t']: + logger.info('Load discrete code from: {}'.format(args.data_dir)) + t_start = time.time() + train_code = torch.load(os.path.join(args.data_dir, 'vqvae', 'train.pt')) + t_end = time.time() + logger.info('Load time: %.3f' % (t_end - t_start)) + + if args.code_level == 'top': + config.code_dim = train_code['embeddings_t'].shape[0] + config.code_size = train_code['feats_top'][list(train_code['feats_top'].keys())[0]].shape[0] + elif args.code_level == 'bottom': + config.code_dim = train_code['embeddings_b'].shape[0] + 
config.code_size = train_code['feats_bottom'][list(train_code['feats_bottom'].keys())[0]].shape[0] + elif args.code_level == 'both': + config.code_dim = train_code['embeddings_t'].shape[0] + train_code['embeddings_b'].shape[0] + + model = model_class.from_pretrained(args.model_name_or_path, from_tf=bool('.ckpt' in args.model_name_or_path), config=config) + if args.img_feature_type in ['dis_code', 'dis_code_t']: + logger.info('Initializing the code embedding with {}'.format(args.code_level)) + if args.code_level == 'top': + model.init_code_embedding(train_code['embeddings_t'].t()) + elif args.code_level == 'bottom': + model.init_code_embedding(train_code['embeddings_b'].t()) + + if args.local_rank == 0: + torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab + + model.to(args.device) + + logger.info("Training/evaluation parameters %s", args) + + # load image features + img_features = _load_img_features(args) + label_pos_feats = None + + #if args.do_eval: + eval_dataset = GQADataset(args, 'val', img_features, tokenizer, label_pos_feats) + #eval_dataset = GQADataset(args, 'test-dev', img_features, tokenizer) # test-dev as val + + if args.do_test: + test_dataset = GQADataset(args, 'test', img_features, tokenizer, label_pos_feats) + + if args.do_test_dev: + test_dev_dataset = GQADataset(args, 'test-dev', img_features, tokenizer, label_pos_feats) + + # Training + if args.do_train: + train_dataset = GQADataset(args, 'train', img_features, tokenizer, label_pos_feats) + global_step, tr_loss = train(args, train_dataset, eval_dataset, model, tokenizer) + logger.info(" global_step = %s, average loss = %s", global_step, tr_loss) + + # Training on train+val + if args.do_train_val: # depreciated + train_dataset = GQADataset(args, 'train+val', img_features, tokenizer) + global_step, tr_loss = train(args, train_dataset, eval_dataset, model, tokenizer) + logger.info(" global_step = %s, average loss = %s", global_step, tr_loss) + + # Saving best-practices: if you use defaults names for the model, you can reload it using from_pretrained() + if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0): + # Create output directory if needed + if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]: os.makedirs(args.output_dir) + + logger.info("Saving model checkpoint to %s", args.output_dir) + # Save a trained model, configuration and tokenizer using `save_pretrained()`. 
They can then be reloaded using `from_pretrained()` + #model_to_save = model.module if hasattr(model, 'module') else model # Take care of distributed/parallel training + #model_to_save.save_pretrained(args.output_dir) + + tokenizer.save_pretrained(args.output_dir) + + # Good practice: save your training arguments together with the trained model + torch.save(args, os.path.join(args.output_dir, 'training_args.bin')) + + # Load a trained model and vocabulary that you have fine-tuned + #model = model_class.from_pretrained(args.output_dir) + #tokenizer = tokenizer_class.from_pretrained(args.output_dir) + #model.to(args.device) + + + # Evaluation + #results = {} + if args.do_eval and args.local_rank in [-1, 0]: + checkpoints = [args.output_dir] + if args.eval_all_checkpoints: + checkpoints = list(os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + '/**/' + WEIGHTS_NAME, recursive=True))) + logging.getLogger("pytorch_transformers.modeling_utils").setLevel(logging.WARN) # Reduce logging + logger.info("Evaluate the following checkpoints: %s", checkpoints) + + for checkpoint in checkpoints: + global_step = checkpoint.split('-')[-1] if len(checkpoints) > 1 else "" + model = model_class.from_pretrained(checkpoint, config=config) + model.to(args.device) + result, score = evaluate(args, model, eval_dataset, prefix=global_step) + #result = dict((k + '_{}'.format(global_step), v) for k, v in result.items()) + #results.update(result) + + # Test-Dev + if args.do_test_dev and args.local_rank in [-1, 0]: + checkpoints = [args.output_dir] + if args.eval_all_checkpoints: + checkpoints = list(os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + '/**/' + WEIGHTS_NAME, recursive=True))) + logging.getLogger("pytorch_transformers.modeling_utils").setLevel(logging.WARN) # Reduce logging + logger.info("Evaluate the following checkpoints: %s", checkpoints) + + for checkpoint in checkpoints: + global_step = checkpoint.split('-')[-1] if len(checkpoints) > 1 else "" + model = model_class.from_pretrained(checkpoint, config=config) + model.to(args.device) + result, score = evaluate(args, model, test_dev_dataset, prefix=global_step) + #test(args, model, test_dev_dataset, prefix=global_step) + + # Test-Submission + if args.do_test and args.local_rank in [-1, 0]: + checkpoints = [args.output_dir] + if args.eval_all_checkpoints: + checkpoints = list(os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + '/**/' + WEIGHTS_NAME, recursive=True))) + logging.getLogger("pytorch_transformers.modeling_utils").setLevel(logging.WARN) # Reduce logging + logger.info("Evaluate the following checkpoints: %s", checkpoints) + + for checkpoint in checkpoints: + global_step = checkpoint.split('-')[-1] if len(checkpoints) > 1 else "" + model = model_class.from_pretrained(checkpoint) + model.to(args.device) + test(args, model, test_dataset, prefix=global_step) + + +if __name__ == "__main__": + main() diff --git a/oscar/run_nlvr.py b/oscar/run_nlvr.py new file mode 100644 index 0000000..9f427b3 --- /dev/null +++ b/oscar/run_nlvr.py @@ -0,0 +1,925 @@ +# coding=utf-8 +# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. +# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. +# Copyright (c) 2020 Microsoft Corporation. Licensed under the MIT license. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" Finetuning the library models for sequence classification on VQA (Bert, XLM, XLNet). +Debug: +CUDA_VISIBLE_DEVICES=0,1,2,3 python ./examples/run_nlvr_baselines.py -j 4 --img_feature_dim 2054 --max_img_seq_length 45 --data_label_type mask --img_feature_type faster_r-cnn --data_dir ../data/nlvr/nlvr2/feats_deb --model_type bert --model_name_or_path bert-base-uncased --task_name nlvr --do_train --do_lower_case --max_seq_length 45 --per_gpu_eval_batch_size 16 --per_gpu_train_batch_size 8 --learning_rate 5e-5 --num_train_epochs 2 --output_dir results/nlvr_test --save_epoch 1 --seed 88 --evaluate_during_training --logging_steps 10 --drop_out 0.3 --weight_decay 0.05 --warmup_steps 4000 --loss_type xe --save_steps -1 --use_pair --eval_data_type all --train_data_type all --classifier mlp + +# load pre-trained model +CUDA_VISIBLE_DEVICES=0 python ./examples/run_vqa_baselines.py --data_dir ../vqa/ban-vqa/data/qal_pairs --model_type bert --model_name_or_path ../vqa/ban-vqa/data/qal_pairs/vqa_txt_model/ep_36 --task_name vqa_text --do_train --do_eval --do_low er_case --max_seq_length 96 --per_gpu_eval_batch_size 48 --per_gpu_train_batch_size 48 --learning_rate 2e-5 - -num_train_epochs 20.0 --output_dir ./results/vqa_text --label_file ../vqa/ban-vqa/data/cache/trainval_ans2la bel.pkl --save_steps 5000 --overwrite_output_dir --max_img_seq_length -1 --img_feature_dim -1 + +# test +CUDA_VISIBLE_DEVICES=0,1,2,3 python ./examples/run_vqa_baselines.py --img_feature_dim 2054 --max_img_seq_length 45 --data_dir ../vqa/ban-vqa/data/qal_pairs --model_type bert --model_name_or_path ./models/results/best-17 --task_name vqa_text --do_eval --do_lower_case --max_seq_length 128 --per_gpu_eval_batch_size 64 --per_gpu_train_batch_size 64 --learning_rate 5e-5 --num_train_epochs 40 --output_dir ./models/results --label_file ../vqa/ban-vqa/data/cache/trainval_ans2label.pkl --save_steps 50000 --img_feature_type faster_r-cnn --data_label_type mask --eval_all_checkpoints --do_test --label2ans_file ../vqa/ban-vqa/data/cache/trainval_label2ans.pkl + +""" + + +from __future__ import absolute_import, division, print_function + +import argparse +import glob +import logging +import os +import random, copy, time, json + +import sys +sys.path.insert(0, '.') + +import numpy as np +import torch +import torch.nn as nn +from torch.utils.data import (Dataset, DataLoader, RandomSampler, SequentialSampler, TensorDataset) +from torch.utils.data.distributed import DistributedSampler +from tqdm import tqdm, trange +import _pickle as cPickle + +from transformers.pytorch_transformers import (WEIGHTS_NAME, BertConfig, BertTokenizer) +from transformers.pytorch_transformers import AdamW, WarmupLinearSchedule + +from oscar.modeling.modeling_bert import ImageBertForMultipleChoice, ImageBertForSequenceClassification + +from torch.optim import Adamax +from oscar.utils.task_utils import (_truncate_seq_pair, output_modes, processors) + +logger = logging.getLogger(__name__) + +ALL_MODELS = sum((tuple(conf.pretrained_config_archive_map.keys()) for conf in (BertConfig,)), ()) + +MODEL_CLASSES = { + 'bert': (BertConfig, 
ImageBertForSequenceClassification, BertTokenizer), +} + + +log_json = [] + +def _load_dataset(args, name): + processor = processors[args.task_name]() + labels = processor.get_labels() + + if name == 'train': + examples = processor.get_train_examples(args.data_dir, args.use_label_seq, 'nlvr2_train.json') + elif name == 'val': + if args.eval_data_type == 'bal': + examples = processor.get_dev_examples(args.data_dir, args.use_label_seq, 'nlvr2_balanced_dev.json') + elif args.eval_data_type == 'unbal': + examples = processor.get_dev_examples(args.data_dir, args.use_label_seq, 'nlvr2_unbalanced_dev.json') + else: + examples = processor.get_dev_examples(args.data_dir, args.use_label_seq, 'nlvr2_dev.json') + elif name == 'test1': # test-submission + if args.data_label_type == 'bal': + examples = processor.get_test_examples(args.data_dir, args.use_label_seq, 'nlvr2_balanced_test1.json') + elif args.eval_data_type == 'unbal': + examples = processor.get_test_examples(args.data_dir, args.use_label_seq, 'nlvr2_unbalanced_test1.json') + else: + examples = processor.get_test_examples(args.data_dir, args.use_label_seq, 'nlvr2_test1.json') + elif name == 'val+test1': + if args.eval_data_type == 'bal': + examples = processor.get_dev_examples(args.data_dir, args.use_label_seq, 'nlvr2_balanced_dev.json') + elif args.eval_data_type == 'unbal': + examples = processor.get_dev_examples(args.data_dir, args.use_label_seq, 'nlvr2_unbalanced_dev.json') + else: + examples = processor.get_dev_examples(args.data_dir, args.use_label_seq, 'nlvr2_dev_test1.json') + + return examples, labels + +def _load_img_features(args): + t_start = time.time() + if args.img_feature_type == 'faster_r-cnn': + if args.img_feature_dim == 2048: # object features + feat_file_name = 'nlvr2_img_frcnn_feats.pt' + else: # object + spatial features + feat_file_name = 'nlvr2_img_frcnn_feats.pt' + else: + feat_file_name = 'nlvr2_img_frcnn_feats.pt' + img_features = torch.load(os.path.join(args.data_dir, feat_file_name)) + t_end = time.time() + logger.info('Info: loading {0:s} features using {1:.2f} secs'.format(feat_file_name, (t_end - t_start))) + + return img_features + + +class NLVRDataset(Dataset): + """ NLVR2 Dataset """ + + def __init__(self, args, name, img_features, tokenizer): + super(NLVRDataset, self).__init__() + assert name in ['train', 'val', 'test1', 'val+test1'] + + self.img_features = img_features + self.output_mode = output_modes[args.task_name] + self.tokenizer = tokenizer + self.args = args + self.name = name + + self.examples, self.labels = _load_dataset(args, name) + self.label_map = {label: i for i, label in enumerate(self.labels)} + + logger.info('%s Data Examples: %d' % (name, len(self.examples))) + + def tensorize_example(self, example, cls_token_at_end=False, pad_on_left=False, + cls_token='[CLS]', sep_token='[SEP]', pad_token=0, + sequence_a_segment_id=0, sequence_b_segment_id=1, + cls_token_segment_id=1, pad_token_segment_id=0, + mask_padding_with_zero=True): + + tokens_a = self.tokenizer.tokenize(example.text_a) + + tokens_b = None + if example.text_b: + text_b = example.text_b['left'] + ' ' + example.text_b['right'] + tokens_b = self.tokenizer.tokenize(text_b) + # Modifies `tokens_a` and `tokens_b` in place so that the total length is less than the specified length. 
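+            # (text_b joins the left and right image descriptions with a space above, so the
+            # statement in text_a and the combined two-image text are truncated together by
+            # _truncate_seq_pair until they fit within max_seq_length.)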
+ # Account for [CLS], [SEP], [SEP] with "- 3" + _truncate_seq_pair(tokens_a, tokens_b, self.args.max_seq_length - 3) + else: + # Account for [CLS] and [SEP] with "- 2" + if len(tokens_a) > self.args.max_seq_length - 2: + tokens_a = tokens_a[:(self.args.max_seq_length - 2)] + + tokens = tokens_a + [sep_token] + segment_ids = [sequence_a_segment_id] * len(tokens) + + if tokens_b: + tokens += tokens_b + [sep_token] + segment_ids += [sequence_b_segment_id] * (len(tokens_b) + 1) + + if cls_token_at_end: + tokens = tokens + [cls_token] + segment_ids = segment_ids + [cls_token_segment_id] + else: + tokens = [cls_token] + tokens + segment_ids = [cls_token_segment_id] + segment_ids + + input_ids = self.tokenizer.convert_tokens_to_ids(tokens) + + # The mask has 1 for real tokens and 0 for padding tokens. Only real tokens are attended to. + input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids) + + # Zero-pad up to the sequence length. + padding_length = self.args.max_seq_length - len(input_ids) + if pad_on_left: + input_ids = ([pad_token] * padding_length) + input_ids + input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask + segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids + else: + input_ids = input_ids + ([pad_token] * padding_length) + input_mask = input_mask + ([0 if mask_padding_with_zero else 1] * padding_length) + segment_ids = segment_ids + ([pad_token_segment_id] * padding_length) + + assert len(input_ids) == self.args.max_seq_length + assert len(input_mask) == self.args.max_seq_length + assert len(segment_ids) == self.args.max_seq_length + + # image features + if self.args.img_feature_type.startswith('dis_code'): + img_feat = self.img_features[example.img_key] + + if self.args.img_feature_type == 'dis_code_ln': # for discrete code image representation + img_feat = img_feat.reshape(-1, img_feat.shape[0]) + + if self.args.img_feature_type == 'dis_code_t': # transposed + input_mask = input_mask + [1 if mask_padding_with_zero else 0] * 64 + else: + input_mask = input_mask + [1 if mask_padding_with_zero else 0] * img_feat.shape[0] + else: + img_key_left = example.img_key['left'] + img_key_right = example.img_key['right'] + img_feat_left = self.img_features[img_key_left] + img_feat_right = self.img_features[img_key_right] + img_feat = torch.cat((img_feat_left, img_feat_right), 0) + if img_feat.shape[0] > 2*self.args.max_img_seq_length: + img_feat = img_feat[0: 2*self.args.max_img_seq_length, ] + if self.args.max_img_seq_length > 0: + input_mask = input_mask + [1 if mask_padding_with_zero else 0] * img_feat.shape[0] + # segment_ids += [sequence_b_segment_id] * img_feat.shape[0] + else: + if self.args.max_img_seq_length > 0: + input_mask = input_mask + [1 if mask_padding_with_zero else 0] * img_feat.shape[0] + # segment_ids = segment_ids + [sequence_b_segment_id] * img_feat.shape[0] + padding_matrix = torch.zeros((2*self.args.max_img_seq_length - img_feat.shape[0], img_feat.shape[1])) + img_feat = torch.cat((img_feat, padding_matrix), 0) + if self.args.max_img_seq_length > 0: + input_mask = input_mask + ([0 if mask_padding_with_zero else 1] * padding_matrix.shape[0]) + # segment_ids = segment_ids + [pad_token_segment_id] * padding_matrix.shape[0] + + if self.args.output_mode == "classification": + if (example.label is None): + label_id = [0] + score = [0] + else: + label_id = [example.label] #[self.label_map[l] for l in example.label] + score = example.score + elif self.args.output_mode == "regression": + if len(example.label) == 0: + 
label_id = 0 + else: + label_id = float(example.label) + else: + raise KeyError(self.args.output_mode) + + if self.args.img_feature_type in ['dis_code', 'dis_code_t']: + img_feat = img_feat.type(torch.long) + elif self.args.img_feature_type in ['dis_code_ln']: + #img_feat = img_feat.reshape(-1, img_feat.shape[0]) + img_feat = img_feat.type(torch.float) + + return (torch.tensor(input_ids, dtype=torch.long), + torch.tensor(input_mask, dtype=torch.long), + torch.tensor(segment_ids, dtype=torch.long), + torch.tensor([label_id[0]], dtype=torch.long), + img_feat, + torch.tensor([example.q_id], dtype=torch.long)) + + def tensorize_example_pair(self, example, cls_token_at_end=False, pad_on_left=False, + cls_token='[CLS]', sep_token='[SEP]', pad_token=0, + sequence_a_segment_id=0, sequence_b_segment_id=1, + cls_token_segment_id=1, pad_token_segment_id=0, + mask_padding_with_zero=True): + + tokens_a = self.tokenizer.tokenize(example.text_a) + + choices = [] + for choice_key in example.img_key: + tokens_b = None + + if example.text_b: + #tokens_b = self.tokenizer.tokenize(example.text_b[choice_key]) + # Modifies `tokens_a` and `tokens_b` in place so that the total length is less than the specified length. + # Account for [CLS], [SEP], [SEP] with "- 3" + #_truncate_seq_pair(tokens_a, tokens_b, self.args.max_seq_length - 3) + + tokens_b = self.tokenizer.tokenize(example.text_b[choice_key]) + _truncate_seq_pair(tokens_a, tokens_b, self.args.max_seq_length - 3) + else: + # Account for [CLS] and [SEP] with "- 2" + if len(tokens_a) > self.args.max_seq_length - 2: + tokens_a = tokens_a[:(self.args.max_seq_length - 2)] + + tokens = tokens_a + [sep_token] + segment_ids = [sequence_a_segment_id] * len(tokens) + + if tokens_b: + tokens += tokens_b + [sep_token] + segment_ids += [sequence_b_segment_id] * (len(tokens_b) + 1) + + if cls_token_at_end: + tokens = tokens + [cls_token] + segment_ids = segment_ids + [cls_token_segment_id] + else: + tokens = [cls_token] + tokens + segment_ids = [cls_token_segment_id] + segment_ids + + input_ids = self.tokenizer.convert_tokens_to_ids(tokens) + + # The mask has 1 for real tokens and 0 for padding tokens. Only real tokens are attended to. + input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids) + + # Zero-pad up to the sequence length. 
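+ # Illustrative example (assuming the BERT defaults passed from __getitem__, i.e.
+ # cls_token_at_end=False, pad_on_left=False, pad_token=0, all segment ids 0):
+ # with max_seq_length = 8 and five real tokens ([CLS] w1 w2 w3 [SEP]),
+ #   input_ids   -> [id_cls, id_w1, id_w2, id_w3, id_sep, 0, 0, 0]
+ #   input_mask  -> [1, 1, 1, 1, 1, 0, 0, 0]
+ #   segment_ids -> [0, 0, 0, 0, 0, 0, 0, 0]
+ # The image-region entries appended further below then extend input_mask beyond
+ # max_seq_length, up to max_seq_length + max_img_seq_length positions.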
+ padding_length = self.args.max_seq_length - len(input_ids) + if pad_on_left: + input_ids = ([pad_token] * padding_length) + input_ids + input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask + segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids + else: + input_ids = input_ids + ([pad_token] * padding_length) + input_mask = input_mask + ([0 if mask_padding_with_zero else 1] * padding_length) + segment_ids = segment_ids + ([pad_token_segment_id] * padding_length) + + assert len(input_ids) == self.args.max_seq_length + assert len(input_mask) == self.args.max_seq_length + assert len(segment_ids) == self.args.max_seq_length + + # img + img_key = example.img_key[choice_key] + img_feat = self.img_features[img_key] + + if img_feat.shape[0] > self.args.max_img_seq_length: + img_feat = img_feat[0: self.args.max_img_seq_length, ] + if self.args.max_img_seq_length > 0: + input_mask = input_mask + [1 if mask_padding_with_zero else 0] * img_feat.shape[0] + # segment_ids += [sequence_b_segment_id] * img_feat.shape[0] + else: + if self.args.max_img_seq_length > 0: + input_mask = input_mask + [1 if mask_padding_with_zero else 0] * img_feat.shape[0] + # segment_ids = segment_ids + [sequence_b_segment_id] * img_feat.shape[0] + padding_matrix = torch.zeros((self.args.max_img_seq_length - img_feat.shape[0], img_feat.shape[1])) + img_feat = torch.cat((img_feat, padding_matrix), 0) + if self.args.max_img_seq_length > 0: + input_mask = input_mask + ([0 if mask_padding_with_zero else 1] * padding_matrix.shape[0]) + # segment_ids = segment_ids + [pad_token_segment_id] * padding_matrix.shape[0] + + choices.append((tokens, input_ids, input_mask, segment_ids, img_feat)) + + if self.args.output_mode == "classification": + if example.label is None: + label_id = [0] + else: + label_id = [example.label] #[self.label_map[l] for l in example.label] + elif self.args.output_mode == "regression": + if len(example.label) == 0: + label_id = 0 + else: + label_id = float(example.label) + else: + raise KeyError(self.args.output_mode) + + choice_input_ids = [choice[1] for choice in choices] + choice_input_mask = [choice[2] for choice in choices] + choice_input_segs = [choice[3] for choice in choices] + choice_input_imgs = [choice[4] for choice in choices] + + choice_img_feats = torch.stack(choice_input_imgs) + + return (torch.tensor(choice_input_ids, dtype=torch.long), + torch.tensor(choice_input_mask, dtype=torch.long), + torch.tensor(choice_input_segs, dtype=torch.long), + torch.tensor(label_id[0], dtype=torch.long), + choice_img_feats, + torch.tensor([example.q_id], dtype=torch.long)) + + def __getitem__(self, index): + entry = self.examples[index] + if self.args.use_pair: + example = self.tensorize_example_pair(entry, + cls_token_at_end=bool(self.args.model_type in ['xlnet']), # xlnet has a cls token at the end + cls_token=self.tokenizer.cls_token, + sep_token=self.tokenizer.sep_token, + cls_token_segment_id=2 if self.args.model_type in ['xlnet'] else 0, + pad_on_left=bool(self.args.model_type in ['xlnet']), # pad on the left for xlnet + pad_token_segment_id=4 if self.args.model_type in ['xlnet'] else 0) + else: + example = self.tensorize_example(entry, + cls_token_at_end=bool(self.args.model_type in ['xlnet']), # xlnet has a cls token at the end + cls_token=self.tokenizer.cls_token, + sep_token=self.tokenizer.sep_token, + cls_token_segment_id=2 if self.args.model_type in ['xlnet'] else 0, + pad_on_left=bool(self.args.model_type in ['xlnet']), # pad on the left for xlnet + 
pad_token_segment_id=4 if self.args.model_type in ['xlnet'] else 0) + return example + + def __len__(self): + return len(self.examples) + + +def set_seed(args): + random.seed(args.seed) + np.random.seed(args.seed) + torch.manual_seed(args.seed) + if args.n_gpu > 0: + torch.cuda.manual_seed_all(args.seed) + +def train(args, train_dataset, eval_dataset, model, tokenizer): + """ Train the model """ + + args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu) + train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset) + train_dataloader = DataLoader(train_dataset, num_workers=args.workers, sampler=train_sampler, batch_size=args.train_batch_size) + + if args.max_steps > 0: + t_total = args.max_steps + args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1 + else: + t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs + + # Prepare optimizer and schedule (linear warmup and decay) + no_decay = ['bias', 'LayerNorm.weight'] + optimizer_grouped_parameters = [ + {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], 'weight_decay': args.weight_decay}, + {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0} + ] + if args.optim == 'AdamW': + optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon) + elif args.optim == 'Adamax': + optimizer = Adamax(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon) + scheduler = WarmupLinearSchedule(optimizer, warmup_steps=args.warmup_steps, t_total=t_total) + + if args.fp16: + try: + from apex import amp + except ImportError: + raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.") + model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level) + + # multi-gpu training (should be after apex fp16 initialization) + if args.n_gpu > 1: + model = torch.nn.DataParallel(model) + + # Distributed training (should be after apex fp16 initialization) + if args.local_rank != -1: + model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True) + + # Train! + logger.info("***** Running training *****") + logger.info(" Num examples = %d", len(train_dataset)) + logger.info(" Num Epochs = %d", args.num_train_epochs) + logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size) + logger.info(" Total train batch size (w. 
parallel, distributed & accumulation) = %d", + args.train_batch_size * args.gradient_accumulation_steps * (torch.distributed.get_world_size() if args.local_rank != -1 else 1)) + logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps) + logger.info(" Total optimization steps = %d", t_total) + + global_step = 0 + tr_loss, logging_loss = 0.0, 0.0 + model.zero_grad() + #train_iterator = trange(int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0]) + set_seed(args) # Added here for reproductibility (even between python 2 and 3) + + best_score = 0 + best_model = { + 'epoch': 0, + 'model_state': model.state_dict(), + 'optimizer_state': optimizer.state_dict() + } + + for epoch in range(int(args.num_train_epochs)): + #for epoch in train_iterator: + #epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0]) + total_loss = 0 + total_norm = 0 + count_norm = 0 + + t_start = time.time() + for step, batch in enumerate(train_dataloader): + #for step, batch in enumerate(epoch_iterator): + model.train() + batch = tuple(t.to(args.device) for t in batch) + inputs = {'input_ids': batch[0], + 'attention_mask': batch[1], + 'token_type_ids': batch[2] if args.model_type in ['bert', 'xlnet'] else None, # XLM don't use segment_ids + 'labels': batch[3], + 'img_feats': None if args.img_feature_dim == -1 else batch[4]} + outputs = model(**inputs) + + #loss = outputs[0] # model outputs are always tuple in pytorch-transformers (see doc) + loss, logits = outputs[:2] + + if args.n_gpu > 1: loss = loss.mean() # mean() to average on multi-gpu parallel training + + if args.gradient_accumulation_steps > 1: + loss = loss / args.gradient_accumulation_steps + + if args.fp16: + with amp.scale_loss(loss, optimizer) as scaled_loss: + scaled_loss.backward() + torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm) + else: + loss.backward() + total_norm += torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm) + count_norm += 1 + + #batch_score = compute_score_with_logits(logits, batch[4]).sum() + #train_score += batch_score.item() + + tr_loss += loss.item() + total_loss += loss.item() + if (step + 1) % args.gradient_accumulation_steps == 0: + scheduler.step() # Update learning rate schedule + optimizer.step() + model.zero_grad() + global_step += 1 + + if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:# Log metrics + if args.local_rank == -1 and args.evaluate_during_training: # Only evaluate when single GPU otherwise metrics may not average well + logger.info("Epoch: %d, global_step: %d" % (epoch, global_step)) + eval_result, eval_score = evaluate(args, model, eval_dataset, prefix=global_step) + if eval_score > best_score: + best_score = eval_score + best_model['epoch'] = epoch + best_model['model'] = copy.deepcopy(model) + + logger.info("EVALERR: {}%".format(100 * best_score)) + logging_loss = tr_loss + + #if args.max_steps > 0 and global_step > args.max_steps: + # epoch_iterator.close() + # break + + t_end = time.time() + logger.info('Train Time Cost: %.3f' % (t_end-t_start)) + + # evaluation + logger.info("Epoch: %d" % (epoch)) + eval_result, eval_score = evaluate(args, model, eval_dataset, prefix=global_step) + if eval_score > best_score: + best_score = eval_score + best_model['epoch'] = epoch + best_model['model'] = copy.deepcopy(model) + #best_model['optimizer'] = copy.deepcopy(optimizer.state_dict()) + + # save checkpoints + if args.local_rank in 
[-1, 0] and args.save_epoch > 0 and epoch % args.save_epoch == 0: # Save model checkpoint + output_dir = os.path.join(args.output_dir, 'checkpoint-{}'.format(epoch)) + if not os.path.exists(output_dir): os.makedirs(output_dir) + model_to_save = best_model['model'].module if hasattr(model, 'module') else best_model['model'] # Take care of distributed/parallel training + + save_num = 0 + while (save_num < 10): + try: + model_to_save.save_pretrained(output_dir) + torch.save(args, os.path.join(output_dir, 'training_args.bin')) + tokenizer.save_pretrained(output_dir) + break + except: + save_num += 1 + logger.info("Saving model checkpoint {0} to {1}".format(epoch, output_dir)) + + epoch_log = {'epoch': epoch, 'eval_score': eval_score, 'best_score':best_score} + log_json.append(epoch_log) + + with open(args.output_dir + '/eval_logs.json', 'w') as fp: + json.dump(log_json, fp) + + logger.info("PROGRESS: {}%".format(round(100*(epoch + 1) / args.num_train_epochs, 4))) + logger.info("EVALERR: {}%".format(100*best_score)) + logger.info("LOSS: {}%".format(total_loss / len(train_dataset))) + + with open(args.output_dir + '/eval_logs.json', 'w') as fp: + json.dump(log_json, fp) + + if args.local_rank in [-1, 0]: # Save the final model checkpoint + output_dir = os.path.join(args.output_dir, 'best-{}'.format(best_model['epoch'])) + if not os.path.exists(output_dir): os.makedirs(output_dir) + model_to_save = best_model['model'].module if hasattr(model, 'module') else best_model['model'] # Take care of distributed/parallel training + + save_num = 0 + while (save_num < 10): + try: + model_to_save.save_pretrained(output_dir) + torch.save(args, os.path.join(output_dir, 'training_args.bin')) + tokenizer.save_pretrained(output_dir) + break + except: + save_num += 1 + logger.info("Saving the best model checkpoint epoch {} to {}".format(best_model['epoch'], output_dir)) + + return global_step, tr_loss / global_step + +def evaluate(args, model, eval_dataset=None, prefix=""): + # Loop to handle MNLI double evaluation (matched, mis-matched) + eval_task_names = ("mnli", "mnli-mm") if args.task_name == "mnli" else (args.task_name,) + eval_outputs_dirs = (args.output_dir, args.output_dir + '-MM') if args.task_name == "mnli" else (args.output_dir,) + + #if args.n_gpu > 1: model = torch.nn.DataParallel(model) # debug: single-gpu or multi-gpus + + results = [] + t_start = time.time() + for eval_task, eval_output_dir in zip(eval_task_names, eval_outputs_dirs): + if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]: os.makedirs(eval_output_dir) + + args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu) + # Note that DistributedSampler samples randomly + eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset) + eval_dataloader = DataLoader(eval_dataset, num_workers=args.workers, sampler=eval_sampler, batch_size=args.eval_batch_size) + + # Eval! 
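+ # How accuracy is computed below: the model emits one logit per class (two classes
+ # for NLVR2, true/false), the prediction is the argmax over dim 1, and accuracy is
+ # the count of predictions matching the gold labels in batch[3] divided by the
+ # total number of examples in the evaluation dataset.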
+ logger.info("***** Running evaluation {} *****".format(prefix)) + logger.info(" Num examples = %d", len(eval_dataset)) + logger.info(" Batch size = %d", args.eval_batch_size) + eval_loss = 0.0 + nb_eval_steps = 0 + num_data = 0 + correct_num = 0 + + for batch in eval_dataloader: + #for batch in tqdm(eval_dataloader, desc="Evaluating"): + model.eval() + batch = tuple(t.to(args.device) for t in batch) + + with torch.no_grad(): + inputs = {'input_ids': batch[0], + 'attention_mask': batch[1], + 'token_type_ids': batch[2] if args.model_type in ['bert', 'xlnet'] else None, # XLM don't use segment_ids + 'labels': batch[3], + 'img_feats': None if args.img_feature_dim == -1 else batch[4]} + outputs = model(**inputs) + tmp_eval_loss, logits = outputs[:2] + eval_loss += tmp_eval_loss.mean().item() + num_data += logits.size(0) + + #logger.info('logits: {}, batch[3]: {}'.format(logits.shape, batch[3].shape)) + + val, idx = logits.max(1) + batch_acc = torch.sum(idx == batch[3].view(-1)).item() + #logger.info('idx: {}, batch[3].view(-1):{}, batch_acc: {}'.format(idx.shape, batch[3].view(-1).shape, batch_acc)) + correct_num += batch_acc + + # correct = logits.argmax(1) == batch[3].view(-1) + # correct_num += correct.sum().item() + + nb_eval_steps += 1 + + acc = float(correct_num) / len(eval_dataloader.dataset) + + logger.info("Eval Results:") + logger.info("Eval Accuracy: {}".format(100*acc)) + logger.info("EVALERR: {}%".format(100 * acc)) + logger.info("Eval Loss: %.3f" % (eval_loss)) + + t_end = time.time() + logger.info('Eva Time Cost: %.3f' % (t_end - t_start)) + + return results, acc + +def test(args, model, eval_dataset=None, prefix=""): + # Loop to handle MNLI double evaluation (matched, mis-matched) + eval_task_names = ("mnli", "mnli-mm") if args.task_name == "mnli" else (args.task_name,) + eval_outputs_dirs = (args.output_dir, args.output_dir + '-MM') if args.task_name == "mnli" else (args.output_dir,) + + label2ans = cPickle.load(open(args.label2ans_file, 'rb')) + logger.info('label2ans: %d' % (len(label2ans))) + + results = [] + t_start = time.time() + for eval_task, eval_output_dir in zip(eval_task_names, eval_outputs_dirs): + if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]: os.makedirs(eval_output_dir) + + args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu) + # Note that DistributedSampler samples randomly + eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset) + eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size) + + # Eval + logger.info("***** Running Test {} *****".format(prefix)) + logger.info(" Num examples = %d", len(eval_dataset)) + logger.info(" Batch size = %d", args.eval_batch_size) + + for batch in eval_dataloader: + #for batch in tqdm(eval_dataloader, desc="Evaluating"): + model.eval() + batch = tuple(t.to(args.device) for t in batch) + + with torch.no_grad(): + inputs = {'input_ids': batch[0], + 'attention_mask': batch[1], + 'token_type_ids': batch[2] if args.model_type in ['bert', 'xlnet'] else None, # XLM don't use segment_ids + 'labels': None, + 'img_feats': None if args.img_feature_dim == -1 else batch[5]} + outputs = model(**inputs) + logits = outputs[0] + + val, idx = logits.max(1) + #logger.info('idx: %s, batch[6]: %s' % (str(idx.shape), str(batch[6].shape))) + + for i in range(idx.size(0)): + result = {} + result['questionId'] = str(batch[6][i].item()) + result['prediction'] = label2ans[eval_dataset.labels[idx[i].item()]] + 
results.append(result) + + #logger.info('q_id: {0}, answer: {1}'.format(result['question_id'], result['answer'])) + + with open(args.output_dir + ('/{}_results.json'.format(eval_dataset.name)), 'w') as fp: + json.dump(results, fp) + + t_end = time.time() + logger.info('# questions: %d' % (len(results))) + logger.info('Test Time Cost: %.3f' % (t_end - t_start)) + + + +def main(): + parser = argparse.ArgumentParser() + + ## Required parameters + parser.add_argument("--data_dir", default=None, type=str, required=True, + help="The input data dir. Should contain the .tsv files (or other data files) for the task.") + parser.add_argument("--model_type", default=None, type=str, required=True, + help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys())) + parser.add_argument("--model_name_or_path", default=None, type=str, required=True, + help="Path to pre-trained model or shortcut name selected in the list: " + ", ".join(ALL_MODELS)) + parser.add_argument("--task_name", default=None, type=str, required=True, + help="The name of the task to train selected in the list: " + ", ".join(processors.keys())) + parser.add_argument("--output_dir", default=None, type=str, required=True, + help="The output directory where the model predictions and checkpoints will be written.") + + parser.add_argument("--data_label_type", default='bal', type=str, help="bal or all") + parser.add_argument("--train_data_type", default='bal', type=str, help="bal or all") + parser.add_argument("--eval_data_type", default='bal', type=str, help="bal or all") + parser.add_argument("--loss_type", default='kl', type=str, help="kl or xe") + parser.add_argument("--use_layernorm", action='store_true', help="use_layernorm") + parser.add_argument("--use_label_seq", action='store_true', help="use_label_seq") + parser.add_argument("--use_pair", action='store_true', help="use_pair") + parser.add_argument("--num_choice", default=2, type=int, help="num_choice") + + ## Other parameters + parser.add_argument("--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name") + parser.add_argument("--tokenizer_name", default="", type=str, help="Pretrained tokenizer name or path if not the same as model_name") + parser.add_argument("--cache_dir", default="", type=str, help="Where do you want to store the pre-trained models downloaded from s3") + parser.add_argument("--max_seq_length", default=128, type=int, + help="The maximum total input sequence length after tokenization. 
Sequences longer than this will be truncated, sequences shorter will be padded.") + parser.add_argument("--do_train", action='store_true', help="Whether to run training.") + parser.add_argument("--do_eval", action='store_true', help="Whether to run eval on the dev set.") + parser.add_argument("--do_test", action='store_true', help="Whether to run test on the test set.") + parser.add_argument("--evaluate_during_training", action='store_true', help="Run evaluation during training at each logging step.") + parser.add_argument("--do_lower_case", action='store_true', help="Set this flag if you are using an uncased model.") + + parser.add_argument("--drop_out", default=0.1, type=float, help="Drop out for BERT.") + parser.add_argument("--classifier", default='linear', type=str, help="linear or mlp") + parser.add_argument("--cls_hidden_scale", default=2, type=int, help="cls_hidden_scale: for classifier") + + parser.add_argument("--max_img_seq_length", default=30, type=int, help="The maximum total input image sequence length.") + parser.add_argument("--img_feature_dim", default=2054, type=int, help="The Image Feature Dimension.") + parser.add_argument("--img_feature_type", default='faster_r-cnn', type=str, help="faster_r-cnn or mask_r-cnn") + parser.add_argument("--code_voc", default=512, type=int, help="dis_code_voc: 256, 512") + parser.add_argument("--code_level", default='top', type=str, help="code level: top, bottom, both") + + parser.add_argument("--per_gpu_train_batch_size", default=8, type=int, help="Batch size per GPU/CPU for training.") + parser.add_argument("--per_gpu_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.") + parser.add_argument('--gradient_accumulation_steps', type=int, default=1, + help="Number of update steps to accumulate before performing a backward/update pass.") + parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.") + parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.") + parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.") + parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") + parser.add_argument("--num_train_epochs", default=3.0, type=float, help="Total number of training epochs to perform.") + parser.add_argument("--max_steps", default=-1, type=int, help="If > 0: set total number of training steps to perform. 
Override num_train_epochs.") + parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.") + parser.add_argument("--optim", default='AdamW', type=str, help="optim: AdamW, Adamax") + + parser.add_argument('--logging_steps', type=int, default=50, help="Log every X updates steps.") + parser.add_argument('--save_steps', type=int, default=50, help="Save checkpoint every X updates steps.") + parser.add_argument('--save_epoch', type=int, default=5, help="Save checkpoint every X epochs.") + parser.add_argument("--eval_all_checkpoints", action='store_true', + help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number") + parser.add_argument("--no_cuda", action='store_true', help="Avoid using CUDA when available") + parser.add_argument('--overwrite_output_dir', action='store_true', help="Overwrite the content of the output directory") + parser.add_argument('--overwrite_cache', action='store_true', help="Overwrite the cached training and evaluation sets") + parser.add_argument('--seed', type=int, default=42, help="random seed for initialization") + + parser.add_argument('--fp16', action='store_true', help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit") + parser.add_argument('--fp16_opt_level', type=str, default='O1', + help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']." + "See details at https://nvidia.github.io/apex/amp.html") + parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank") + parser.add_argument('--server_ip', type=str, default='', help="For distant debugging.") + parser.add_argument('--server_port', type=str, default='', help="For distant debugging.") + + parser.add_argument("--philly", action='store_true', help="Use Philly: reset the output dir") + parser.add_argument('-j', '--workers', default=0, type=int, metavar='N', help='number of data loading workers (default: 4)') + + args = parser.parse_args() + + if args.philly: # use philly + logger.info('Info: Use Philly, all the output folders are reset.') + args.output_dir = os.path.join(os.getenv('PT_OUTPUT_DIR'), args.output_dir) + logger.info('OUTPUT_DIR:', args.output_dir) + + if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train: logger.info("Output Directory Exists.") + + # Setup distant debugging if needed + if args.server_ip and args.server_port: + # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script + import ptvsd + print("Waiting for debugger attach") + ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True) + ptvsd.wait_for_attach() + + # Setup CUDA, GPU & distributed training + if args.local_rank == -1 or args.no_cuda: + device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") + args.n_gpu = torch.cuda.device_count() + else: # Initializes the distributed backend which will take care of sychronizing nodes/GPUs + torch.cuda.set_device(args.local_rank) + device = torch.device("cuda", args.local_rank) + torch.distributed.init_process_group(backend='nccl') + args.n_gpu = 1 + args.device = device + + # Setup logging + logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s', + datefmt = '%m/%d/%Y %H:%M:%S', level = logging.INFO if args.local_rank in [-1, 0] else logging.WARN) + logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 
16-bits training: %s", + args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16) + + # Set seed + set_seed(args) + + # Prepare GLUE task + args.task_name = args.task_name.lower() + if args.task_name not in processors: + raise ValueError("Task not found: %s" % (args.task_name)) + + processor = processors[args.task_name]() + args.output_mode = output_modes[args.task_name] + label_list = processor.get_labels() + num_labels = len(label_list) + logger.info('Task Name: {}, #Labels: {}'.format(args.task_name, num_labels)) + + # Load pretrained model and tokenizer + if args.local_rank not in [-1, 0]: + torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab + + args.model_type = args.model_type.lower() + config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type] + if args.use_pair: + model_class = ImageBertForMultipleChoice + config = config_class.from_pretrained(args.config_name if args.config_name else args.model_name_or_path, num_labels=num_labels, finetuning_task=args.task_name) + tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name if args.tokenizer_name else args.model_name_or_path, do_lower_case=args.do_lower_case) + + # new config: discrete code + config.img_feature_dim = args.img_feature_dim + config.img_feature_type = args.img_feature_type + config.code_voc = args.code_voc + config.hidden_dropout_prob = args.drop_out + config.loss_type = args.loss_type + config.use_layernorm = args.use_layernorm + config.classifier = args.classifier + config.cls_hidden_scale = args.cls_hidden_scale + config.num_choice = args.num_choice + + model = model_class.from_pretrained(args.model_name_or_path, from_tf=bool('.ckpt' in args.model_name_or_path), config=config) + + if args.local_rank == 0: + torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab + + total_params = sum(p.numel() for p in model.parameters()) + logger.info('Model Parameters: {}'.format(total_params)) + + model.to(args.device) + + logger.info("Training/Evaluation parameters %s", args) + + # load image features + img_features = _load_img_features(args) + + #if args.do_eval: + eval_dataset = NLVRDataset(args, 'val', img_features, tokenizer) + + if args.do_test: + test_dataset = NLVRDataset(args, 'test1', img_features, tokenizer) + + # Training + if args.do_train: + train_dataset = NLVRDataset(args, 'train', img_features, tokenizer) + global_step, tr_loss = train(args, train_dataset, eval_dataset, model, tokenizer) + logger.info(" global_step = %s, average loss = %s", global_step, tr_loss) + + # Saving best-practices: if you use defaults names for the model, you can reload it using from_pretrained() + if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0): + # Create output directory if needed + if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]: os.makedirs(args.output_dir) + + logger.info("Saving model checkpoint to %s", args.output_dir) + # Save a trained model, configuration and tokenizer using `save_pretrained()`. 
They can then be reloaded using `from_pretrained()` + #model_to_save = model.module if hasattr(model, 'module') else model # Take care of distributed/parallel training + #model_to_save.save_pretrained(args.output_dir) + + tokenizer.save_pretrained(args.output_dir) + + # Good practice: save your training arguments together with the trained model + torch.save(args, os.path.join(args.output_dir, 'training_args.bin')) + + # Load a trained model and vocabulary that you have fine-tuned + #model = model_class.from_pretrained(args.output_dir) + #tokenizer = tokenizer_class.from_pretrained(args.output_dir) + #model.to(args.device) + + + # Evaluation + #results = {} + if args.do_eval and args.local_rank in [-1, 0]: + checkpoints = [args.output_dir] + if args.eval_all_checkpoints: + checkpoints = list(os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + '/**/' + WEIGHTS_NAME, recursive=True))) + logging.getLogger("pytorch_transformers.modeling_utils").setLevel(logging.WARN) # Reduce logging + logger.info("Evaluate the following checkpoints: %s", checkpoints) + + for checkpoint in checkpoints: + global_step = checkpoint.split('-')[-1] if len(checkpoints) > 1 else "" + model = model_class.from_pretrained(checkpoint, config=config) + model.to(args.device) + result, score = evaluate(args, model, eval_dataset, prefix=global_step) + #result = dict((k + '_{}'.format(global_step), v) for k, v in result.items()) + #results.update(result) + + # Test-Submission + if args.do_test and args.local_rank in [-1, 0]: + checkpoints = [args.output_dir] + if args.eval_all_checkpoints: + checkpoints = list(os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + '/**/' + WEIGHTS_NAME, recursive=True))) + logging.getLogger("pytorch_transformers.modeling_utils").setLevel(logging.WARN) # Reduce logging + logger.info("Evaluate the following checkpoints: %s", checkpoints) + + for checkpoint in checkpoints: + global_step = checkpoint.split('-')[-1] if len(checkpoints) > 1 else "" + model = model_class.from_pretrained(checkpoint) + model.to(args.device) + #test(args, model, test_dataset, prefix=global_step) + result, score = evaluate(args, model, test_dataset, prefix=global_step) + + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/oscar/run_retrieval.py b/oscar/run_retrieval.py new file mode 100644 index 0000000..d2bbf9b --- /dev/null +++ b/oscar/run_retrieval.py @@ -0,0 +1,623 @@ +# Copyright (c) 2020 Microsoft Corporation. Licensed under the MIT license. + +from __future__ import absolute_import, division, print_function +import argparse +import os +import os.path as op +import random, json +import numpy as np +import torch +import torch.nn as nn +from torch.utils.data import Dataset, DataLoader, RandomSampler, SequentialSampler +from tqdm import tqdm + +from oscar.utils.logger import setup_logger +from oscar.utils.misc import mkdir, set_seed +from oscar.modeling.modeling_bert import ImageBertForSequenceClassification +from transformers.pytorch_transformers import BertTokenizer, BertConfig +from transformers.pytorch_transformers import AdamW, WarmupLinearSchedule, WarmupConstantSchedule + + +class RetrievalDataset(Dataset): + """ Image/Text Retrieval Dataset""" + def __init__(self, tokenizer, args, split='train', is_train=True): + """ + tokenizer: tokenizer to process caption text. + args: configureation parameters including max_seq_length, etc. + split: used to infer the data used for training or testing. 
+ All files are in .pt format of a dictionary with image keys and + image features (pytorch tensors), captions (list of str, support multiple + captions per image), labels (list of dictionary or str of all labels), + + """ + super(RetrievalDataset, self).__init__() + feature_file = op.join(args.data_dir, '{}_img_{}_feats.pt'.format(split, args.img_feature_type)) + caption_file = op.join(args.data_dir, '{}_captions.pt'.format(split)) + self.features = torch.load(feature_file) + self.captions = torch.load(caption_file) + self.img_keys = list(self.features.keys()) + if not type(self.captions[self.img_keys[0]]) == list: + self.captions = {k: json.loads(self.captions[k]) for k in self.img_keys} + assert len(self.features) == len(self.captions), \ + "the length of image features and captions does not match!" + + if args.add_od_labels: + label_file = op.join(args.data_dir, '{}_{}_labels.pt'.format(split, args.od_label_type)) + self.labels = torch.load(label_file) + + if is_train: + self.num_captions_per_img = args.num_captions_per_img_train + else: + self.num_captions_per_img = args.num_captions_per_img_val + if args.eval_img_keys_file: + # select a subset of image keys for evaluation. eg. COCO 1k and 5k + # eval_img_keys_file is a list of image keys saved in tsv file + with open(op.join(args.data_dir, args.eval_img_keys_file), 'r') as f: + img_keys = f.readlines() + self.img_keys = [int(k.strip()) for k in img_keys] + self.features = {k: self.features[k] for k in self.img_keys} + self.captions = {k: self.captions[k] for k in self.img_keys} + if args.add_od_labels: + self.labels = {k: self.labels[k] for k in self.img_keys} + + if args.eval_caption_index_file: + # hard negative image/caption indexs for retrieval re-rank setting. + # useful for mini val set to monitor the performance during training. + # However, it cannot be used together with cross image evaluation. 
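+ # caption_indexs maps each image key to a short list of (img_key, cap_idx)
+ # candidate pairs (the hard negatives used for re-ranking, plus the positives),
+ # so at evaluation time each image is scored only against that candidate list
+ # rather than the full image-by-caption cross product used by --cross_image_eval.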
+ self.has_caption_indexs = True + assert not args.cross_image_eval + caption_index_file = op.join(args.data_dir, args.eval_caption_index_file) + self.caption_indexs = torch.load(caption_index_file) + if not type(self.caption_indexs[self.img_keys[0]]) == list: + self.caption_indexs = {k: json.loads(self.caption_indexs[k]) for k in self.img_keys} + else: + self.has_caption_indexs = False + self.is_train = is_train + self.output_mode = args.output_mode + self.tokenizer = tokenizer + self.max_seq_len = args.max_seq_length + self.max_img_seq_len = args.max_img_seq_length + self.args = args + + def get_image_caption_index(self, index): + # return img_idx to access features and [img_key, cap_idx] to access caption + if not self.is_train and self.args.cross_image_eval: + img_idx = index // (self.num_captions_per_img * len(self.img_keys)) + cap_idx = index % (self.num_captions_per_img * len(self.img_keys)) + img_idx1 = cap_idx // self.num_captions_per_img + cap_idx1 = cap_idx % self.num_captions_per_img + return img_idx, [self.img_keys[img_idx1], cap_idx1] + if not self.is_train and self.has_caption_indexs: + img_idx = index // self.num_captions_per_img + cap_idx = index % self.num_captions_per_img + img_key1, cap_idx1 = self.caption_indexs[self.img_keys[img_idx]][cap_idx] + return img_idx, [img_key1, cap_idx1] + img_idx = index // self.num_captions_per_img + cap_idx = index % self.num_captions_per_img + return img_idx, [self.img_keys[img_idx], cap_idx] + + def get_label(self, index): + img_idx, cap_idx = self.get_image_caption_index(index) + return 1 if self.img_keys[img_idx] == cap_idx[0] else 0 + + def get_od_labels(self, img_key): + if self.args.add_od_labels: + if type(self.labels[img_key]) == str: + od_labels = self.labels[img_key] + else: + od_labels = ' '.join([l['class'] for l in self.labels[img_key]]) + return od_labels + + def tensorize_example(self, text_a, img_feat, text_b=None, + cls_token_segment_id=0, pad_token_segment_id=0, + sequence_a_segment_id=0, sequence_b_segment_id=1): + tokens_a = self.tokenizer.tokenize(text_a) + if len(tokens_a) > self.args.max_seq_length - 2: + tokens_a = tokens_a[:(self.args.max_seq_length - 2)] + + tokens = [self.tokenizer.cls_token] + tokens_a + [self.tokenizer.sep_token] + segment_ids = [cls_token_segment_id] + [sequence_a_segment_id] * (len(tokens_a) + 1) + seq_a_len = len(tokens) + if text_b: + tokens_b = self.tokenizer.tokenize(text_b) + if len(tokens_b) > self.max_seq_len - len(tokens) - 1: + tokens_b = tokens_b[: (self.max_seq_len - len(tokens) - 1)] + tokens += tokens_b + [self.tokenizer.sep_token] + segment_ids += [sequence_b_segment_id] * (len(tokens_b) + 1) + + seq_len = len(tokens) + seq_padding_len = self.max_seq_len - seq_len + tokens += [self.tokenizer.pad_token] * seq_padding_len + segment_ids += [pad_token_segment_id] * seq_padding_len + input_ids = self.tokenizer.convert_tokens_to_ids(tokens) + + # image features + img_len = img_feat.shape[0] + if img_len > self.max_img_seq_len: + img_feat = img_feat[0 : self.max_img_seq_len, :] + img_len = img_feat.shape[0] + img_padding_len = 0 + else: + img_padding_len = self.max_img_seq_len - img_len + padding_matrix = torch.zeros((img_padding_len, img_feat.shape[1])) + img_feat = torch.cat((img_feat, padding_matrix), 0) + + # generate attention_mask + att_mask_type = self.args.att_mask_type + if att_mask_type == "CLR": + attention_mask = [1] * seq_len + [0] * seq_padding_len + \ + [1] * img_len + [0] * img_padding_len + else: + # use 2D mask to represent the attention + max_len = 
self.max_seq_len + self.max_img_seq_len + attention_mask = torch.zeros((max_len, max_len), dtype=torch.long) + # full attention of C-C, L-L, R-R + c_start, c_end = 0, seq_a_len + l_start, l_end = seq_a_len, seq_len + r_start, r_end = self.max_seq_len, self.max_seq_len + img_len + attention_mask[c_start : c_end, c_start : c_end] = 1 + attention_mask[l_start : l_end, l_start : l_end] = 1 + attention_mask[r_start : r_end, r_start : r_end] = 1 + if att_mask_type == 'CL': + attention_mask[c_start : c_end, l_start : l_end] = 1 + attention_mask[l_start : l_end, c_start : c_end] = 1 + elif att_mask_type == 'CR': + attention_mask[c_start : c_end, r_start : r_end] = 1 + attention_mask[r_start : r_end, c_start : c_end] = 1 + elif att_mask_type == 'LR': + attention_mask[l_start : l_end, r_start : r_end] = 1 + attention_mask[r_start : r_end, l_start : l_end] = 1 + else: + raise ValueError("Unsupported attention mask type {}".format(att_mask_type)) + + input_ids = torch.tensor(input_ids, dtype=torch.long) + attention_mask = torch.tensor(attention_mask, dtype=torch.long) + segment_ids = torch.tensor(segment_ids, dtype=torch.long) + return (input_ids, attention_mask, segment_ids, img_feat) + + def __getitem__(self, index): + if self.is_train: + img_idx, cap_idxs = self.get_image_caption_index(index) + img_key = self.img_keys[img_idx] + feature = self.features[img_key] + caption = self.captions[cap_idxs[0]][cap_idxs[1]] + od_labels = self.get_od_labels(img_key) + example = self.tensorize_example(caption, feature, text_b=od_labels) + + # select a negative pair + neg_img_indexs = list(range(0, img_idx)) + list(range(img_idx + 1, len(self.img_keys))) + img_idx_neg = random.choice(neg_img_indexs) + if random.random() <= 0.5: + # randomly select a negative caption from a different image. 
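+ # Training is framed as binary matched/unmatched classification: each index
+ # yields the positive (caption, image) pair plus one sampled negative, either
+ # the same image with a caption drawn from a different image (this branch) or
+ # the same caption with a different image's features and tags (the else branch
+ # below). The two examples are concatenated into example_pair with labels 1 and 0.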
+ cap_idx_neg = random.randint(0, self.num_captions_per_img - 1) + caption_neg = self.captions[self.img_keys[img_idx_neg]][cap_idx_neg] + example_neg = self.tensorize_example(caption_neg, feature, text_b=od_labels) + else: + # randomly select a negative image + feature_neg = self.features[self.img_keys[img_idx_neg]] + od_labels_neg = self.get_od_labels(self.img_keys[img_idx_neg]) + example_neg = self.tensorize_example(caption, feature_neg, text_b=od_labels_neg) + + example_pair = tuple(list(example) + [1] + list(example_neg) + [0]) + return index, example_pair + else: + img_idx, cap_idxs = self.get_image_caption_index(index) + img_key = self.img_keys[img_idx] + feature = self.features[img_key] + caption = self.captions[cap_idxs[0]][cap_idxs[1]] + od_labels = self.get_od_labels(img_key) + example = self.tensorize_example(caption, feature, text_b=od_labels) + label = 1 if img_key == cap_idxs[0] else 0 + return index, tuple(list(example) + [label]) + + def __len__(self): + if not self.is_train and self.args.cross_image_eval: + return len(self.img_keys) ** 2 * self.num_captions_per_img + return len(self.img_keys) * self.num_captions_per_img + + +def compute_score_with_logits(logits, labels): + if logits.shape[1] > 1: + logits = torch.max(logits, 1)[1].data # argmax + scores = logits == labels + else: + scores = torch.zeros_like(labels).cuda() + for i, (logit, label) in enumerate(zip(logits, labels)): + logit_ = torch.sigmoid(logit) + if (logit_ >= 0.5 and label == 1) or (logit_ < 0.5 and label == 0): + scores[i] = 1 + return scores + + +def compute_ranks(dataset, results): + labels = np.array([dataset.get_label(i) for i in range(len(dataset))]) + similarities = np.array([results[i] for i in range(len(dataset))]) + if dataset.has_caption_indexs: + num_captions_per_img = dataset.num_captions_per_img + else: + num_captions_per_img = len(dataset.img_keys) * dataset.num_captions_per_img + labels = np.reshape(labels, [-1, num_captions_per_img]) + similarities = np.reshape(similarities, [-1, num_captions_per_img]) + i2t_ranks, t2i_ranks = [], [] + for lab, sim in zip(labels, similarities): + inds = np.argsort(sim)[::-1] + rank = num_captions_per_img + for r, ind in enumerate(inds): + if lab[ind] == 1: + rank = r + break + i2t_ranks.append(rank) + if not dataset.has_caption_indexs: + labels = np.swapaxes(labels, 0, 1) + similarities = np.swapaxes(similarities, 0, 1) + for lab, sim in zip(labels, similarities): + inds = np.argsort(sim)[::-1] + rank = num_captions_per_img + for r, ind in enumerate(inds): + if lab[ind] == 1: + rank = r + break + t2i_ranks.append(rank) + return i2t_ranks, t2i_ranks + + +def save_checkpoint(model, tokenizer, args, epoch, global_step): + checkpoint_dir = op.join(args.output_dir, 'checkpoint-{}-{}'.format( + epoch, global_step)) + mkdir(checkpoint_dir) + model_to_save = model.module if hasattr(model, 'module') else model + save_num = 0 + while (save_num < 10): + try: + model_to_save.save_pretrained(checkpoint_dir) + torch.save(args, op.join(checkpoint_dir, 'training_args.bin')) + tokenizer.save_pretrained(checkpoint_dir) + logger.info("Save checkpoint to {}".format(checkpoint_dir)) + break + except: + save_num += 1 + if save_num == 10: + logger.info("Failed to save checkpoint after 10 trails.") + return + + +def train(args, train_dataset, val_dataset, model, tokenizer): + args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu) + train_sampler = RandomSampler(train_dataset) + train_dataloader = DataLoader(train_dataset, sampler=train_sampler, + 
batch_size=args.train_batch_size, num_workers=args.num_workers) + + if args.max_steps > 0: + t_total = args.max_steps + args.num_train_epochs = args.max_steps // (len(train_dataloader) // \ + args.gradient_accumulation_steps) + 1 + else: + t_total = len(train_dataloader) // args.gradient_accumulation_steps \ + * args.num_train_epochs + + # Prepare optimizer and scheduler + no_decay = ['bias', 'LayerNorm.weight'] + grouped_parameters = [ + {'params': [p for n, p in model.named_parameters() if not \ + any(nd in n for nd in no_decay)], 'weight_decay': args.weight_decay}, + {'params': [p for n, p in model.named_parameters() if \ + any(nd in n for nd in no_decay)], 'weight_decay': 0.0} + ] + optimizer = AdamW(grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon) + if args.scheduler == "constant": + scheduler = WarmupConstantSchedule( + optimizer, warmup_steps=args.warmup_steps) + elif args.scheduler == "linear": + scheduler = WarmupLinearSchedule( + optimizer, warmup_steps=args.warmup_steps, t_total=t_total) + else: + raise ValueError("Unknown scheduler type: {}".format(args.scheduler)) + + if args.n_gpu > 1: + model = torch.nn.DataParallel(model) + + logger.info("***** Running training *****") + logger.info(" Num examples = %d", len(train_dataset)) + logger.info(" Num Epochs = %d", args.num_train_epochs) + logger.info(" Batch size per GPU = %d", args.per_gpu_train_batch_size) + logger.info(" Total train batch size (w. parallel, & accumulation) = %d", + args.train_batch_size * args.gradient_accumulation_steps) + logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps) + logger.info(" Total optimization steps = %d", t_total) + + global_step, global_loss, global_acc =0, 0.0, 0.0 + model.zero_grad() + log_json = [] + best_score = 0 + for epoch in range(int(args.num_train_epochs)): + for step, (_, batch) in enumerate(train_dataloader): + model.train() + batch = tuple(t.to(args.device) for t in batch) + inputs = { + 'input_ids': torch.cat((batch[0], batch[5]), dim=0), + 'attention_mask': torch.cat((batch[1], batch[6]), dim=0), + 'token_type_ids': torch.cat((batch[2], batch[7]), dim=0), + 'img_feats': torch.cat((batch[3], batch[8]), dim=0), + 'labels': torch.cat((batch[4], batch[9]), dim=0) + } + outputs = model(**inputs) + loss, logits = outputs[:2] + if args.n_gpu > 1: + loss = loss.mean() # mean() to average on multi-gpu parallel training + if args.gradient_accumulation_steps > 1: + loss = loss / args.gradient_accumulation_steps + loss.backward() + torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm) + batch_score = compute_score_with_logits(logits, inputs['labels']).sum() + batch_acc = batch_score.item() / (args.train_batch_size * 2) + global_loss += loss.item() + global_acc += batch_acc + if (step + 1) % args.gradient_accumulation_steps == 0: + global_step += 1 + scheduler.step() + optimizer.step() + model.zero_grad() + if global_step % args.logging_steps == 0: + logger.info("Epoch: {}, global_step: {}, lr: {:.6f}, loss: {:.4f} ({:.4f}), " \ + "score: {:.4f} ({:.4f})".format(epoch, global_step, + optimizer.param_groups[0]["lr"], loss, global_loss / global_step, + batch_acc, global_acc / global_step) + ) + + if (args.save_steps > 0 and global_step % args.save_steps == 0) or \ + global_step == t_total: + save_checkpoint(model, tokenizer, args, epoch, global_step) + # evaluation + if args.evaluate_during_training: + logger.info("Perform evaluation at step: %d" % (global_step)) + test_result = test(args, model, val_dataset) + eval_result = 
evaluate(val_dataset, test_result) + rank_accs = eval_result['i2t_retrieval'] + if rank_accs['R@1'] > best_score: + best_score = rank_accs['R@1'] + epoch_log = {'epoch': epoch, 'global_step': global_step, + 'R1': rank_accs['R@1'], 'R5': rank_accs['R@5'], + 'R10': rank_accs['R@10'], 'best_R1':best_score} + log_json.append(epoch_log) + with open(args.output_dir + '/eval_logs.json', 'w') as fp: + json.dump(log_json, fp) + return global_step, global_loss / global_step + + +def test(args, model, eval_dataset): + args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu) + eval_sampler = SequentialSampler(eval_dataset) + eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, + batch_size=args.eval_batch_size, num_workers=args.num_workers) + + logger.info("Num examples = {}".format(len(eval_dataset))) + logger.info("Evaluation batch size = {}".format(args.eval_batch_size)) + model.eval() + results = {} + softmax = nn.Softmax(dim=1) + for indexs, batch in tqdm(eval_dataloader): + batch = tuple(t.to(args.device) for t in batch) + with torch.no_grad(): + inputs = { + 'input_ids': batch[0], + 'attention_mask': batch[1], + 'token_type_ids': batch[2], + 'img_feats': batch[3], + 'labels': batch[4] + } + _, logits = model(**inputs)[:2] + if args.num_labels == 2: + probs = softmax(logits) + result = probs[:, 1] # the confidence to be a matched pair + else: + result = logits + result = [_.to(torch.device("cpu")) for _ in result] + results.update({idx.item(): res.item() for idx, res in zip(indexs, result)}) + return results + + +def evaluate(eval_dataset, test_results): + i2t_ranks, t2i_ranks = compute_ranks(eval_dataset, test_results) + rank = [1, 5, 10] + i2t_accs = [sum([_ < r for _ in i2t_ranks]) / len(i2t_ranks) for r in rank] + logger.info("I2T Retrieval: {:.4f} @ R1, {:.4f} @ R5, {:.4f} @ R10".format( + i2t_accs[0], i2t_accs[1], i2t_accs[2])) + eval_result = {"i2t_retrieval": {"R@1": i2t_accs[0], "R@5": i2t_accs[1], "R@10": i2t_accs[2]}} + if t2i_ranks: + t2i_accs = [sum([_ < r for _ in t2i_ranks]) / len(t2i_ranks) for r in rank] + logger.info("T2I Retrieval: {:.4f} @ R1, {:.4f} @ R5, {:.4f} @ R10".format( + t2i_accs[0], t2i_accs[1], t2i_accs[2])) + eval_result["t2i_retrieval"] = {"R@1": t2i_accs[0], "R@5": t2i_accs[1], "R@10": t2i_accs[2]} + return eval_result + + +def get_predict_file(args): + cc = [] + data = op.basename(op.join(args.data_dir, '')[:-1]) + if data != 'coco_ir': + cc.append(data) + cc.append(args.test_split) + if args.add_od_labels: + cc.append('wlabels{}'.format(args.od_label_type)) + return op.join(args.eval_model_dir, '{}.results.pt'.format('.'.join(cc))) + + +def restore_training_settings(args): + assert not args.do_train and (args.do_test or args.do_eval) + train_args = torch.load(op.join(args.eval_model_dir, 'training_args.bin')) + override_params = ['do_lower_case', 'img_feature_type', 'max_seq_length', + 'max_img_seq_length', 'add_od_labels', 'od_label_type'] + for param in override_params: + if hasattr(train_args, param): + train_v = getattr(train_args, param) + test_v = getattr(args, param) + if train_v != test_v: + logger.warning('Override {} with train args: {} -> {}'.format(param, + test_v, train_v)) + setattr(args, param, train_v) + return args + + +def main(): + parser = argparse.ArgumentParser() + parser.add_argument("--data_dir", default='datasets/coco_ir', type=str, required=False, + help="The input data dir with all required files.") + parser.add_argument("--model_name_or_path", default=None, type=str, required=False, + help="Path to 
pre-trained model or model type. required for training.") + parser.add_argument("--output_dir", default='output/', type=str, required=False, + help="The output directory to save checkpoint and test results.") + parser.add_argument("--loss_type", default='sfmx', type=str, + help="Loss function types: support kl, sfmx") + parser.add_argument("--config_name", default="", type=str, + help="Pretrained config name or path if not the same as model_name.") + parser.add_argument("--tokenizer_name", default="", type=str, + help="Pretrained tokenizer name or path if not the same as model_name.") + parser.add_argument("--max_seq_length", default=70, type=int, + help="The maximum total input sequence length after tokenization. " + "Sequences longer than this will be truncated, " + "sequences shorter will be padded." + "This number is calculated on COCO dataset" + "If add object detection labels, the suggested length should be 70.") + parser.add_argument("--do_train", action='store_true', help="Whether to run training.") + parser.add_argument("--do_test", action='store_true', help="Whether to run inference.") + parser.add_argument("--do_eval", action='store_true', help="Whether to run performance valuation." + "do not activate if we want to inference on dataset without gt labels.") + parser.add_argument("--test_split", default='test', type=str, help='data split name.') + parser.add_argument("--eval_img_keys_file", default='', type=str, + help="image key tsv to select a subset of images for evaluation. " + "This is useful in 5-folds evaluation. The topn index file is not " + "needed in this case.") + parser.add_argument("--eval_caption_index_file", default='', type=str, + help="index of a list of (img_key, cap_idx) for each image." + "this is used to perform re-rank using hard negative samples." + "useful for validation set to monitor the performance during training.") + parser.add_argument("--cross_image_eval", action='store_true', + help="perform cross image inference, ie. each image with all texts from other images.") + parser.add_argument("--add_od_labels", default=False, action='store_true', + help="Whether to add object detection labels or not.") + parser.add_argument("--od_label_type", default='vg', type=str, + help="label type, support vg, gt, oid") + parser.add_argument("--att_mask_type", default='CLR', type=str, + help="attention mask type, support ['CL', 'CR', 'LR', 'CLR']" + "C: caption, L: labels, R: image regions; CLR is full attention by default." + "CL means attention between caption and labels." 
+ "please pay attention to the order CLR, which is the default concat order.") + parser.add_argument("--do_lower_case", action='store_true', + help="Set this flag if you are using an uncased model.") + parser.add_argument("--drop_out", default=0.1, type=float, help="Drop out in BERT.") + parser.add_argument("--max_img_seq_length", default=50, type=int, + help="The maximum total input image sequence length.") + parser.add_argument("--img_feature_dim", default=2054, type=int, + help="The Image Feature Dimension.") + parser.add_argument("--img_feature_type", default='frcnn', type=str, + help="Image feature type.") + parser.add_argument("--per_gpu_train_batch_size", default=32, type=int, + help="Batch size per GPU/CPU for training.") + parser.add_argument("--per_gpu_eval_batch_size", default=64, type=int, + help="Batch size per GPU/CPU for evaluation.") + parser.add_argument("--output_mode", default='classification', type=str, + help="output mode, support classification or regression.") + parser.add_argument("--num_labels", default=2, type=int, + help="num_labels is 2 for classification and 1 for regression.") + parser.add_argument("--num_captions_per_img_train", default=5, type=int, + help="number of positive matched captions for each training image.") + parser.add_argument("--num_captions_per_img_val", default=5, type=int, + help="number of captions for each testing image.") + parser.add_argument('--gradient_accumulation_steps', type=int, default=1, + help="Number of updates steps to accumulate before backward.") + parser.add_argument("--learning_rate", default=2e-5, type=float, help="The initial lr.") + parser.add_argument("--weight_decay", default=0.05, type=float, help="Weight deay.") + parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam.") + parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") + parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup.") + parser.add_argument("--scheduler", default='linear', type=str, help="constant or linear.") + parser.add_argument("--num_workers", default=4, type=int, help="Workers in dataloader.") + parser.add_argument("--num_train_epochs", default=20, type=int, + help="Total number of training epochs to perform.") + parser.add_argument("--max_steps", default=-1, type=int, + help="Total number of training steps. Override num_train_epochs.") + parser.add_argument('--logging_steps', type=int, default=20, help="Log every X steps.") + parser.add_argument('--save_steps', type=int, default=-1, + help="Save checkpoint every X steps. 
Will also perform evaluatin.") + parser.add_argument("--evaluate_during_training", action='store_true', + help="Run evaluation during training at each save_steps.") + parser.add_argument("--eval_model_dir", type=str, default='', + help="Model directory for evaluation.") + parser.add_argument("--no_cuda", action='store_true', help="Avoid using CUDA.") + parser.add_argument('--seed', type=int, default=88, help="random seed for initialization.") + args = parser.parse_args() + + global logger + mkdir(args.output_dir) + logger = setup_logger("vlpretrain", args.output_dir, 0) + + args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") + args.n_gpu = torch.cuda.device_count() + set_seed(args.seed, args.n_gpu) + logger.warning("Device: %s, n_gpu: %s", args.device, args.n_gpu) + logger.info('output_mode: {}, #Labels: {}'.format(args.output_mode, args.num_labels)) + + config_class, tokenizer_class = BertConfig, BertTokenizer + model_class = ImageBertForSequenceClassification + if args.do_train: + config = config_class.from_pretrained(args.config_name if args.config_name else \ + args.model_name_or_path, num_labels=args.num_labels, finetuning_task='ir') + tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name if args.tokenizer_name \ + else args.model_name_or_path, do_lower_case=args.do_lower_case) + config.img_feature_dim = args.img_feature_dim + config.img_feature_type = args.img_feature_type + config.hidden_dropout_prob = args.drop_out + config.loss_type = args.loss_type + model = model_class.from_pretrained(args.model_name_or_path, + from_tf=bool('.ckpt' in args.model_name_or_path), config=config) + else: + checkpoint = args.eval_model_dir + assert op.isdir(checkpoint) + config = config_class.from_pretrained(checkpoint) + tokenizer = tokenizer_class.from_pretrained(checkpoint) + logger.info("Evaluate the following checkpoint: %s", checkpoint) + model = model_class.from_pretrained(checkpoint, config=config) + + model.to(args.device) + logger.info("Training/evaluation parameters %s", args) + if args.do_train: + train_dataset = RetrievalDataset(tokenizer, args, 'train', is_train=True) + if args.evaluate_during_training: + val_dataset = RetrievalDataset(tokenizer, args, 'minival', is_train=False) + else: + val_dataset = None + global_step, avg_loss = train(args, train_dataset, val_dataset, model, tokenizer) + logger.info("Training done: total_step = %s, avg loss = %s", global_step, avg_loss) + + # inference and evaluation + if args.do_test or args.do_eval: + args = restore_training_settings(args) + test_dataset = RetrievalDataset(tokenizer, args, args.test_split, is_train=False) + checkpoint = args.eval_model_dir + assert op.isdir(checkpoint) + logger.info("Evaluate the following checkpoint: %s", checkpoint) + model = model_class.from_pretrained(checkpoint, config=config) + model.to(args.device) + if args.n_gpu > 1: + model = torch.nn.DataParallel(model) + + pred_file = get_predict_file(args) + if op.isfile(pred_file): + logger.info("Prediction file exist, skip inference.") + if args.do_eval: + test_result = torch.load(pred_file) + else: + test_result = test(args, model, test_dataset) + torch.save(test_result, pred_file) + logger.info("Prediction results saved to {}.".format(pred_file)) + + if args.do_eval: + eval_result = evaluate(test_dataset, test_result) + result_file = op.splitext(pred_file)[0] + '.eval.json' + with open(result_file, 'w') as f: + json.dump(eval_result, f) + logger.info("Evaluation results saved to {}.".format(result_file)) + + +if 
__name__ == "__main__": + main() diff --git a/oscar/run_vqa.py b/oscar/run_vqa.py new file mode 100644 index 0000000..cd19ce6 --- /dev/null +++ b/oscar/run_vqa.py @@ -0,0 +1,1222 @@ +# Copyright (c) 2020 Microsoft Corporation. Licensed under the MIT license. + +from __future__ import absolute_import, division, print_function + +import argparse +import glob +import logging +import os +import copy, time, json +import base64 + +import sys +sys.path.insert(0, '.') + +import numpy as np +import torch +import torch.nn as nn +from torch.utils.data import (Dataset, DataLoader, RandomSampler, SequentialSampler, TensorDataset) +from torch.utils.data.distributed import DistributedSampler +import _pickle as cPickle + +from oscar.modeling.modeling_bert import ImageBertForSequenceClassification +from transformers.pytorch_transformers import WEIGHTS_NAME, BertTokenizer, BertConfig +from transformers.pytorch_transformers import AdamW, WarmupLinearSchedule, WarmupConstantSchedule + +from oscar.utils.misc import set_seed +from oscar.utils.task_utils import (_truncate_seq_pair, convert_examples_to_features_vqa, + output_modes, processors) + +logger = logging.getLogger(__name__) + +MODEL_CLASSES = { + 'bert': (BertConfig, ImageBertForSequenceClassification, BertTokenizer), +} + + +log_json = [] +debug_size = 500 + + +def _load_dataset(args, name): + processor = processors[args.task_name]() + labels = processor.get_labels(args.label_file) + + if name == 'train': + if args.data_label_type == 'mask': + if args.use_vg: + #examples = processor.get_train_examples(args.data_dir, 'train2014_vg_qla_mrcnn.json') + examples = processor.get_train_examples(args.txt_data_dir, 'train2014_vg_qla_mrcnn.json') + else: + examples = processor.get_train_examples(args.txt_data_dir, 'train2014_qla_mrcnn.json') + else: + examples = processor.get_train_examples(args.txt_data_dir, 'train2014_qla.json') + elif name == 'val': + if args.data_label_type == 'mask': + if args.use_vg_dev: + examples = processor.get_dev_examples(args.txt_data_dir, 'vg_qla_mrcnn.json') + else: + examples = processor.get_dev_examples(args.txt_data_dir, 'val2014_qla_mrcnn.json') + else: + examples = processor.get_dev_examples(args.txt_data_dir, 'val2014_qla.json') + elif name == 'train+val': + if args.data_label_type == 'mask': + examples = processor.get_train_examples(args.txt_data_dir, 'train+val2014_qla_mrcnn.json') + #examples = processor.get_train_examples(args.data_dir, 'train+val2014_qla_mrcnn.json') + else: + examples = processor.get_train_examples(args.txt_data_dir, 'train+val2014_qla.json') + elif name == 'test2015': + if args.data_label_type == 'mask': + examples = processor.get_test_examples(args.data_dir, 'test2015_qla_mrcnn.json') + else: + examples = processor.get_test_examples(args.data_dir, 'test2014_qla.json') + elif name == 'test-dev2015': + if args.data_label_type == 'mask': + examples = processor.get_test_examples(args.data_dir, 'test-dev2015_qla_mrcnn.json') + else: + examples = processor.get_test_examples(args.data_dir, 'test2014_qla.json') + + return examples, labels + + +class VQADataset(Dataset): + """ VQA Dataset """ + + def __init__(self, args, name, tokenizer): + super(VQADataset, self).__init__() + assert name in ['train', 'val', 'test-dev2015', 'test2015', 'train+val'] + + self.args = args + self.name = name + + # load image features + t_start = time.time() + self.img_feature_file = None + self.img_feat_offset_map = None + + if args.img_feature_type == 'faster_r-cnn': + if args.img_feat_format == 'pt': + if args.img_feature_dim == 
2048: # object features + self.img_features = torch.load(os.path.join(args.data_dir, '{}_img_frcnn_obj_feats.pt'.format(name))) + else: # object + spatial features + if args.use_vg_dev: + self.img_features = torch.load(os.path.join(args.data_dir, 'train+val_img_frcnn_feats.pt')) + else: + self.img_features = torch.load(os.path.join(args.data_dir, '{}_img_frcnn_feats.pt'.format(name))) + elif args.img_feat_format == 'tsv': + self.load_img_tsv_features() + elif args.img_feature_type == 'mask_r-cnn': + self.img_features = torch.load(os.path.join(args.data_dir, '{}_img_mask_rcnn_feats.pt'.format(name))) + elif args.img_feature_type.startswith('dis_code'): #in ['dis_code', 'dis_code_t']: # discrete code + self.img_features = torch.load(os.path.join(args.data_dir, 'vqvae', '{}.pt'.format(name)))['feats_{}'.format(args.code_level)] + else: + self.img_features = torch.load(os.path.join(args.data_dir, '{}_img_feats.pt'.format(name))) + t_end = time.time() + logger.info('Info: loading {0} features using {1:.2f} secs'.format(name, (t_end-t_start))) + + self.output_mode = output_modes[args.task_name] + self.tokenizer = tokenizer + + self.examples, self.labels = _load_dataset(args, name) + self.label_map = {label: i for i, label in enumerate(self.labels)} + + if self.args.load_fast: + self.features = self.tensorize(args, cls_token_at_end=bool(self.args.model_type in ['xlnet']), # xlnet has a cls token at the end + cls_token=self.tokenizer.cls_token, + sep_token=self.tokenizer.sep_token, + cls_token_segment_id=2 if self.args.model_type in ['xlnet'] else 0, + pad_on_left=bool(self.args.model_type in ['xlnet']), # pad on the left for xlnet + pad_token_segment_id=4 if self.args.model_type in ['xlnet'] else 0) + else: + pass + + logger.info('%s Data Examples: %d' % (name, len(self.examples))) + + + def tensorize(self, cls_token_at_end=False, pad_on_left=False, + cls_token='[CLS]', sep_token='[SEP]', pad_token=0, + sequence_a_segment_id=0, sequence_b_segment_id=1, + cls_token_segment_id=1, pad_token_segment_id=0, + mask_padding_with_zero=True): + + # debug: + debug_size = 500 + features = [] + + for (ex_index, example) in enumerate(self.examples[0: ]): + if len(example.label) == 0: continue + if ex_index % 10000 == 0: logger.info("Tensorizing example %d of %d" % (ex_index, len(self.examples))) + + tokens_a = self.tokenizer.tokenize(example.text_a) + + tokens_b = None + if example.text_b: + tokens_b = self.tokenizer.tokenize(example.text_b) + # Modifies `tokens_a` and `tokens_b` in place so that the total length is less than the specified length. + # Account for [CLS], [SEP], [SEP] with "- 3" + _truncate_seq_pair(tokens_a, tokens_b, self.args.max_seq_length - 3) + else: + # Account for [CLS] and [SEP] with "- 2" + if len(tokens_a) > self.args.max_seq_length - 2: + tokens_a = tokens_a[:(self.args.max_seq_length - 2)] + + tokens = tokens_a + [sep_token] + segment_ids = [sequence_a_segment_id] * len(tokens) + + if tokens_b: + tokens += tokens_b + [sep_token] + segment_ids += [sequence_b_segment_id] * (len(tokens_b) + 1) + + if cls_token_at_end: + tokens = tokens + [cls_token] + segment_ids = segment_ids + [cls_token_segment_id] + else: + tokens = [cls_token] + tokens + segment_ids = [cls_token_segment_id] + segment_ids + + input_ids = self.tokenizer.convert_tokens_to_ids(tokens) + + # The mask has 1 for real tokens and 0 for padding tokens. Only real tokens are attended to. + input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids) + + # Zero-pad up to the sequence length. 
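+            # pad_on_left is set by the caller (True only for XLNet-style models); either branch below
+            # pads input_ids, input_mask and segment_ids out to exactly max_seq_length.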
+ padding_length = self.args.max_seq_length - len(input_ids) + if pad_on_left: + input_ids = ([pad_token] * padding_length) + input_ids + input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask + segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids + else: + input_ids = input_ids + ([pad_token] * padding_length) + input_mask = input_mask + ([0 if mask_padding_with_zero else 1] * padding_length) + segment_ids = segment_ids + ([pad_token_segment_id] * padding_length) + + assert len(input_ids) == self.args.max_seq_length + assert len(input_mask) == self.args.max_seq_length + assert len(segment_ids) == self.args.max_seq_length + + # image features + img_feat = self.img_features[example.img_key] # torch + #img_feat = self.img_features.item().get(example.img_key) # numpy + if img_feat.shape[0] > self.args.max_img_seq_length: + img_feat = img_feat[0:self.args.max_img_seq_length, ] + if self.args.max_img_seq_length > 0: + input_mask = input_mask + [1 if mask_padding_with_zero else 0] * img_feat.shape[0] + # segment_ids += [sequence_b_segment_id] * img_feat.shape[0] + else: + if self.args.max_img_seq_length > 0: + input_mask = input_mask + [1 if mask_padding_with_zero else 0] * img_feat.shape[0] + # segment_ids = segment_ids + [sequence_b_segment_id] * img_feat.shape[0] + padding_matrix = torch.zeros((self.args.max_img_seq_length - img_feat.shape[0], img_feat.shape[1])) + img_feat = torch.cat((img_feat, padding_matrix), 0) + if self.args.max_img_seq_length > 0: + input_mask = input_mask + ([0 if mask_padding_with_zero else 1] * padding_matrix.shape[0]) + # segment_ids = segment_ids + [pad_token_segment_id] * padding_matrix.shape[0] + + if self.args.output_mode == "classification": + label_id = [self.label_map[l] for l in example.label] + score = example.score + elif self.args.output_mode == "regression": + label_id = float(example.label) + else: + raise KeyError(self.args.output_mode) + + if ex_index < 5: + logger.info("*** Example ***") + logger.info("guid: %s" % (example.guid)) + logger.info("tokens: %s" % " ".join([str(x) for x in tokens])) + logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids])) + logger.info("input_mask: %s" % " ".join([str(x) for x in input_mask])) + logger.info("segment_ids: %s" % " ".join([str(x) for x in segment_ids])) + logger.info("label: %s (id = %s)" % (example.label, label_id)) + logger.info("score: %s (score = %s)" % (example.score, score)) + + new_scores = target_tensor(len(self.labels), label_id, score) + #features.append(InputFeat(input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids, label_id=label_id, score=score, img_feat=img_feat)) + features.append((torch.tensor(input_ids, dtype=torch.long), + torch.tensor(input_mask, dtype=torch.long), + torch.tensor(segment_ids, dtype=torch.long), + torch.tensor([label_id[0]], dtype=torch.long), + torch.tensor(new_scores, dtype=torch.float), img_feat)) + + return features + + def tensorize_example(self, example, cls_token_at_end=False, pad_on_left=False, + cls_token='[CLS]', sep_token='[SEP]', pad_token=0, + sequence_a_segment_id=0, sequence_b_segment_id=1, + cls_token_segment_id=1, pad_token_segment_id=0, + mask_padding_with_zero=True): + + tokens_a = self.tokenizer.tokenize(example.text_a) + + tokens_b = None + if example.text_b: + tokens_b = self.tokenizer.tokenize(example.text_b) + # Modifies `tokens_a` and `tokens_b` in place so that the total length is less than the specified length. 
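+            # _truncate_seq_pair (from oscar.utils.task_utils) is assumed to follow the usual BERT recipe:
+            # tokens are dropped from the longer of the two sequences until the pair fits.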
+ # Account for [CLS], [SEP], [SEP] with "- 3" + _truncate_seq_pair(tokens_a, tokens_b, self.args.max_seq_length - 3) + else: + # Account for [CLS] and [SEP] with "- 2" + if len(tokens_a) > self.args.max_seq_length - 2: + tokens_a = tokens_a[:(self.args.max_seq_length - 2)] + + tokens = tokens_a + [sep_token] + segment_ids = [sequence_a_segment_id] * len(tokens) + + if tokens_b: + tokens += tokens_b + [sep_token] + segment_ids += [sequence_b_segment_id] * (len(tokens_b) + 1) + + if cls_token_at_end: + tokens = tokens + [cls_token] + segment_ids = segment_ids + [cls_token_segment_id] + else: + tokens = [cls_token] + tokens + segment_ids = [cls_token_segment_id] + segment_ids + + input_ids = self.tokenizer.convert_tokens_to_ids(tokens) + + # The mask has 1 for real tokens and 0 for padding tokens. Only real tokens are attended to. + input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids) + + # Zero-pad up to the sequence length. + padding_length = self.args.max_seq_length - len(input_ids) + if pad_on_left: + input_ids = ([pad_token] * padding_length) + input_ids + input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask + segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids + else: + input_ids = input_ids + ([pad_token] * padding_length) + input_mask = input_mask + ([0 if mask_padding_with_zero else 1] * padding_length) + segment_ids = segment_ids + ([pad_token_segment_id] * padding_length) + + assert len(input_ids) == self.args.max_seq_length + assert len(input_mask) == self.args.max_seq_length + assert len(segment_ids) == self.args.max_seq_length + + # image features + if self.args.img_feature_type.startswith('dis_code'): + img_feat = self.img_features[example.img_key] + + if self.args.img_feature_type == 'dis_code_ln': # for discrete code image representation + img_feat = img_feat.reshape(-1, img_feat.shape[0]) + + if self.args.img_feature_type == 'dis_code_t': # transposed + input_mask = input_mask + [1 if mask_padding_with_zero else 0] * 64 + else: + input_mask = input_mask + [1 if mask_padding_with_zero else 0] * img_feat.shape[0] + else: + if self.args.img_feat_format == 'pt': + img_feat = self.img_features[example.img_key] #[:, 0:self.args.img_feature_dim] # torch + elif self.args.img_feat_format == 'tsv': + img_features = self.get_img_feature(str(example.img_key)) + img_feat = torch.from_numpy(img_features) + + if img_feat.shape[0] > self.args.max_img_seq_length: + img_feat = img_feat[0:self.args.max_img_seq_length, ] + if self.args.max_img_seq_length > 0: + input_mask = input_mask + [1 if mask_padding_with_zero else 0] * img_feat.shape[0] + # segment_ids += [sequence_b_segment_id] * img_feat.shape[0] + else: + if self.args.max_img_seq_length > 0: + input_mask = input_mask + [1 if mask_padding_with_zero else 0] * img_feat.shape[0] + # segment_ids = segment_ids + [sequence_b_segment_id] * img_feat.shape[0] + padding_matrix = torch.zeros((self.args.max_img_seq_length - img_feat.shape[0], img_feat.shape[1])) + img_feat = torch.cat((img_feat, padding_matrix), 0) + if self.args.max_img_seq_length > 0: + input_mask = input_mask + ([0 if mask_padding_with_zero else 1] * padding_matrix.shape[0]) + # segment_ids = segment_ids + [pad_token_segment_id] * padding_matrix.shape[0] + + if self.args.output_mode == "classification": + if (example.label is None): + label_id = [0] + score = [0] + elif len(example.label) == 0: + label_id = [0] + score = [0] + else: + label_id = [self.label_map[l] for l in example.label] + score = example.score + elif 
self.args.output_mode == "regression": + if len(example.label) == 0: + label_id = 0 + else: + label_id = float(example.label) + else: + raise KeyError(self.args.output_mode) + + new_scores = target_tensor(len(self.labels), label_id, score) + + # features.append(InputFeat(input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids, label_id=label_id, score=score, img_feat=img_feat)) + if self.args.img_feature_type in ['dis_code', 'dis_code_t']: + img_feat = img_feat.type(torch.long) + elif self.args.img_feature_type in ['dis_code_ln']: + #img_feat = img_feat.reshape(-1, img_feat.shape[0]) + img_feat = img_feat.type(torch.float) + + return (torch.tensor(input_ids, dtype=torch.long), + torch.tensor(input_mask, dtype=torch.long), + torch.tensor(segment_ids, dtype=torch.long), + torch.tensor([label_id[0]], dtype=torch.long), + torch.tensor(new_scores, dtype=torch.float), + img_feat, + torch.tensor([example.q_id], dtype=torch.long)) + + def __getitem__(self, index): + if self.args.load_fast: + example = self.features[index] + else: + entry = self.examples[index] + example = self.tensorize_example(entry, + cls_token_at_end=bool(self.args.model_type in ['xlnet']), # xlnet has a cls token at the end + cls_token=self.tokenizer.cls_token, + sep_token=self.tokenizer.sep_token, + cls_token_segment_id=2 if self.args.model_type in ['xlnet'] else 0, + pad_on_left=bool(self.args.model_type in ['xlnet']), # pad on the left for xlnet + pad_token_segment_id=4 if self.args.model_type in ['xlnet'] else 0) + return example + + def __len__(self): + return len(self.examples) + + # tsv feature loading + def load_img_tsv_features(self): + self.check_img_feature_file() + self.check_img_feature_offset_map() + + def check_img_feature_file(self): + if self.img_feature_file is None: + img_feature_path = os.path.join(self.args.img_feat_dir, '{}_img_frcnn_feats.tsv'.format(self.name)) + t_s = time.time() + self.img_feature_file = open(img_feature_path, 'r') + t_e = time.time() + logger.info("Open {} image time: {}".format(self.name, (t_e - t_s))) + + def check_img_feature_offset_map(self): + """ load the image feature offset map """ + if self.img_feat_offset_map is None: + img_feature_path = os.path.join(self.args.img_feat_dir, '{}_img_frcnn_feats_offset_map.json'.format(self.name)) + t_s = time.time() + self.img_feat_offset_map = json.load(open(img_feature_path)) + t_e = time.time() + logger.info("Load {} images: {}, time: {}".format(self.name, len(self.img_feat_offset_map), (t_e - t_s))) + + def get_img_feature(self, image_id): + """ decode the image feature """ + self.check_img_feature_file() + self.check_img_feature_offset_map() + + if image_id in self.img_feat_offset_map: + img_offset = self.img_feat_offset_map[image_id] + self.img_feature_file.seek(img_offset, 0) + arr = [s.strip() for s in self.img_feature_file.readline().split('\t')] + num_boxes = int(arr[1]) + feat = np.frombuffer(base64.b64decode(arr[2]), dtype=np.float32).reshape((-1, self.args.img_feature_dim)) + return feat + + return None + + +def instance_bce_with_logits(logits, labels, reduction='mean'): + assert logits.dim() == 2 + + loss = nn.functional.binary_cross_entropy_with_logits(logits, labels, reduction=reduction) + if reduction == 'mean': + loss *= labels.size(1) + return loss + + +def compute_score_with_logits(logits, labels): + logits = torch.max(logits, 1)[1].data # argmax + one_hots = torch.zeros(*labels.size()).cuda() + one_hots.scatter_(1, logits.view(-1, 1), 1) + scores = (one_hots * labels) + return scores + + +def 
trim_batch(batch): + """ new batch func + :param batch: + :return: + """ + print('batch size', len(batch)) + + batch_size = len(batch) + batch_tensors = [] + for ele in batch[0]: + print(ele.shape, ele.size()) + zero_tensor = torch.zeros(([batch_size] + list(ele.size()))) + batch_tensors.append(zero_tensor) + + for b_id, b in enumerate(batch): + print(b_id, len(b)) + for ele_id, ele in enumerate(b): + print(ele_id, ele.shape) + batch_tensors[ele_id][b_id] = ele + return batch_tensors + + +def train(args, train_dataset, eval_dataset, model, tokenizer): + """ Train the model """ + #if args.local_rank in [-1, 0]: tb_writer = SummaryWriter() + + args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu) + train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset) + train_dataloader = DataLoader(train_dataset, num_workers=args.workers, sampler=train_sampler, batch_size=args.train_batch_size) #, collate_fn=trim_batch) + + if args.max_steps > 0: + t_total = args.max_steps + args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1 + else: + t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs + + # Prepare optimizer and schedule (linear warmup and decay) + no_decay = ['bias', 'LayerNorm.weight'] + optimizer_grouped_parameters = [ + {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], 'weight_decay': args.weight_decay}, + {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0} + ] + optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon) + #scheduler = WarmupLinearSchedule(optimizer, warmup_steps=args.warmup_steps, t_total=t_total) # original + + if args.scheduler == "constant": + scheduler = WarmupConstantSchedule(optimizer, warmup_steps=args.warmup_steps) + elif args.scheduler == "linear": + scheduler = WarmupLinearSchedule(optimizer, warmup_steps=args.warmup_steps, t_total=t_total) + + if args.fp16: + try: + from apex import amp + except ImportError: + raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.") + model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level) + + # multi-gpu training (should be after apex fp16 initialization) + if args.n_gpu > 1: + model = torch.nn.DataParallel(model) + + # Distributed training (should be after apex fp16 initialization) + if args.local_rank != -1: + model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True) + + # Train! + logger.info("***** Running training *****") + logger.info(" Num examples = %d", len(train_dataset)) + logger.info(" Num Epochs = %d", args.num_train_epochs) + logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size) + logger.info(" Total train batch size (w. 
parallel, distributed & accumulation) = %d", + args.train_batch_size * args.gradient_accumulation_steps * (torch.distributed.get_world_size() if args.local_rank != -1 else 1)) + logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps) + logger.info(" Total optimization steps = %d", t_total) + + global_step = 0 + tr_loss, logging_loss = 0.0, 0.0 + model.zero_grad() + #train_iterator = trange(int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0]) + set_seed(args.seed, args.n_gpu) # Added here for reproductibility (even between python 2 and 3) + + best_score = 0 + best_model = { + 'epoch': 0, + 'model': copy.deepcopy(model), #model.state_dict(), + 'optimizer_state': optimizer.state_dict() + } + + #eval_dataset = load_and_cache_examples(args, args.task_name, tokenizer, evaluate=True) + + for epoch in range(int(args.num_train_epochs)): + #for epoch in train_iterator: + #epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0]) + total_loss = 0 + train_score = 0 + total_norm = 0 + count_norm = 0 + + if args.adjust_dp and epoch>=3: + logger.info("change droput ratio {} to 0.3".format(args.drop_out)) + if hasattr(model, 'module'): + model.module.dropout.p = 0.3 + model.module.bert.dropout.p = 0.3 + model.module.bert.embeddings.dropout.p = 0.3 + else: + model.dropout.p = 0.3 + model.bert.dropout.p = 0.3 + model.bert.embeddings.dropout.p = 0.3 + + if args.adjust_loss and epoch>=args.adjust_loss_epoch: + logger.info("\t change loss type from kl to bce") + model.loss_type = 'bce' + + # debug + #epoch = 20 + #global_step = epoch*math.ceil(len(train_dataset)/(args.train_batch_size * args.gradient_accumulation_steps * (torch.distributed.get_world_size() if args.local_rank != -1 else 1))) + + t_start = time.time() + for step, batch in enumerate(train_dataloader): + model.train() + batch = tuple(t.to(args.device) for t in batch) + inputs = {'input_ids': batch[0], + 'attention_mask': batch[1], + 'token_type_ids': batch[2] if args.model_type in ['bert', 'xlnet'] else None, # XLM don't use segment_ids + 'labels': batch[4], + 'img_feats': None if args.img_feature_dim == -1 else batch[5]} + outputs = model(**inputs) + + #loss = outputs[0] # model outputs are always tuple in pytorch-transformers (see doc) + loss, logits = outputs[:2] + + #loss = instance_bce_with_logits(logits, batch[4]) + + if args.n_gpu > 1: loss = loss.mean() # mean() to average on multi-gpu parallel training + + if args.gradient_accumulation_steps > 1: + loss = loss / args.gradient_accumulation_steps + + if args.fp16: + with amp.scale_loss(loss, optimizer) as scaled_loss: + scaled_loss.backward() + torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm) + else: + loss.backward() + total_norm += torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm) + count_norm += 1 + + batch_score = compute_score_with_logits(logits, batch[4]).sum() + train_score += batch_score.item() + + tr_loss += loss.item() + if (step + 1) % args.gradient_accumulation_steps == 0: + scheduler.step() # Update learning rate schedule + optimizer.step() + model.zero_grad() + global_step += 1 + + if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:# Log metrics + if args.local_rank not in [-1, 0]: + torch.distributed.barrier() + + if args.local_rank in [-1, 0] and args.evaluate_during_training: + #if args.local_rank == -1 and args.evaluate_during_training: # Only evaluate when single GPU otherwise 
metrics may not average well + logger.info("Epoch: %d, global_step: %d" % (epoch, global_step)) + eval_result, eval_score, upper_bound = evaluate(args, model, eval_dataset, prefix=global_step) + if eval_score > best_score: + best_score = eval_score + best_model['epoch'] = epoch + best_model['model'] = copy.deepcopy(model) + + logger.info("EVALERR: {}%".format(100 * best_score)) + + if args.local_rank == 0: + torch.distributed.barrier() + + logging_loss = tr_loss + + #if args.max_steps > 0 and global_step > args.max_steps: + # epoch_iterator.close() + # break + + # evaluation + logger.info("Epoch: %d, global_step: %d" % (epoch, global_step)) + eval_result, eval_score, upper_bound = evaluate(args, model, eval_dataset, prefix=global_step) + if eval_score > best_score: + best_score = eval_score + best_model['epoch'] = epoch + best_model['model'] = copy.deepcopy(model) + #best_model['optimizer'] = copy.deepcopy(optimizer.state_dict()) + + # save checkpoints + if (args.local_rank in [-1, 0]) and (args.save_epoch>0 and epoch%args.save_epoch == 0) and (epoch>args.save_after_epoch): + output_dir = os.path.join(args.output_dir, 'checkpoint-{}-{}'.format(epoch, global_step)) + if not os.path.exists(output_dir): os.makedirs(output_dir) + model_to_save = best_model['model'].module if hasattr(model, 'module') else best_model['model'] # Take care of distributed/parallel training + + save_num = 0 + while (save_num < 10): + try: + logger.info("Saving model attempt: {}".format(save_num)) + model_to_save.save_pretrained(output_dir) + torch.save(args, os.path.join(output_dir, 'training_args.bin')) + tokenizer.save_pretrained(output_dir) + break + except: + save_num += 1 + logger.info("Saving model checkpoint {0} to {1}".format(epoch, output_dir)) + + epoch_log = {'epoch': epoch, 'eval_score': eval_score, 'best_score':best_score} + log_json.append(epoch_log) + if args.local_rank in [-1, 0]: + with open(args.output_dir + '/eval_logs.json', 'w') as fp: + json.dump(log_json, fp) + + logger.info("PROGRESS: {}%".format(round(100*(epoch + 1) / args.num_train_epochs, 4))) + logger.info("EVALERR: {}%".format(100*best_score)) + + t_end = time.time() + logger.info('Epoch: %d, Train Time: %.3f' % (epoch, t_end - t_start)) + + #if args.max_steps > 0 and global_step > args.max_steps: + # train_iterator.close() + # break + + if args.local_rank in [-1, 0]: # Save the final model checkpoint + with open(args.output_dir + '/eval_logs.json', 'w') as fp: + json.dump(log_json, fp) + + output_dir = os.path.join(args.output_dir, 'best-{}'.format(best_model['epoch'])) + if not os.path.exists(output_dir): os.makedirs(output_dir) + model_to_save = best_model['model'].module if hasattr(model, 'module') else best_model['model'] # Take care of distributed/parallel training + + save_num = 0 + while (save_num < 10): + try: + model_to_save.save_pretrained(output_dir) + torch.save(args, os.path.join(output_dir, 'training_args.bin')) + tokenizer.save_pretrained(output_dir) + break + except: + save_num += 1 + logger.info("Saving the best model checkpoint epoch {} to {}".format(best_model['epoch'], output_dir)) + + return global_step, tr_loss / global_step + + +def evaluate(args, model, eval_dataset=None, prefix=""): + # Loop to handle MNLI double evaluation (matched, mis-matched) + eval_task_names = ("mnli", "mnli-mm") if args.task_name == "mnli" else (args.task_name,) + eval_outputs_dirs = (args.output_dir, args.output_dir + '-MM') if args.task_name == "mnli" else (args.output_dir,) + + #if args.n_gpu > 1: model = torch.nn.DataParallel(model) 
# debug: single-gpu or multi-gpus + + results = [] + t_start = time.time() + for eval_task, eval_output_dir in zip(eval_task_names, eval_outputs_dirs): + if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]: os.makedirs(eval_output_dir) + + args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu) + # Note that DistributedSampler samples randomly + eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset) + eval_dataloader = DataLoader(eval_dataset, num_workers=args.workers, sampler=eval_sampler, batch_size=args.eval_batch_size) + + # Eval! + logger.info("***** Running evaluation {} *****".format(prefix)) + logger.info(" Num examples = %d", len(eval_dataset)) + logger.info(" Batch size = %d", args.eval_batch_size) + eval_loss = 0.0 + nb_eval_steps = 0 + preds = None + out_label_ids = None + num_data = 0 + score = 0 + upper_bound = 0 + results_dict = {} + + for batch in eval_dataloader: + #for batch in tqdm(eval_dataloader, desc="Evaluating"): + model.eval() + batch = tuple(t.to(args.device) for t in batch) + + with torch.no_grad(): + inputs = {'input_ids': batch[0], + 'attention_mask': batch[1], + 'token_type_ids': batch[2] if args.model_type in ['bert', 'xlnet'] else None, # XLM don't use segment_ids + 'labels': batch[4], + 'img_feats': None if args.img_feature_dim == -1 else batch[5]} + outputs = model(**inputs) + tmp_eval_loss, logits = outputs[:2] + + eval_loss += tmp_eval_loss.mean().item() + + # batch_score = compute_score_with_logits(logits, batch[4]).sum() + batch_score = torch.sum( + compute_score_with_logits(logits, batch[4]), 1) + # update results_dict + results_dict.update( + {qa_ind: score for qa_ind, score in + zip(batch[6].view(-1).tolist(), batch_score.tolist())} + ) + score += batch_score.sum().item() + #upper_bound += (batch[4].max(1)[0]).sum().item() + num_data += logits.size(0) + + # debug + #val, idx = logits.max(1) + #logger.info('idx: %s, batch[4]: %s' % (str(idx.shape), str(batch[3].shape))) + #for i in range(idx.size(0)): + # logger.info('idx: %d, pred: %d, real: %d' % (idx[i].item(), eval_dataset.labels[idx[i].item()], batch[3][i].item())) + + nb_eval_steps += 1 + + #if preds is None: + # preds = logits.detach().cpu().numpy() + # out_label_ids = inputs['labels'].detach().cpu().numpy() + #else: + # preds = np.append(preds, logits.detach().cpu().numpy(), axis=0) + # out_label_ids = np.append(out_label_ids, inputs['labels'].detach().cpu().numpy(), axis=0) + + score = score / len(eval_dataloader.dataset) + upper_bound = upper_bound / len(eval_dataloader.dataset) + + logger.info("Eval Results:") + logger.info("Eval Score: %.3f" % (100*score)) + logger.info("EVALERR: {}%".format(100*score)) + logger.info("Eval Upper Bound: %.3f" % (100*upper_bound)) + # with open(os.path.join(args.data_dir, 'val_results.json'), + # 'w') as f: + # json.dump(results_dict, f) + + t_end = time.time() + logger.info('Eva Time Cost: %.3f' % (t_end - t_start)) + + #eval_loss = eval_loss / nb_eval_steps + #if args.output_mode == "classification": + # preds = np.argmax(preds, axis=1) + #elif args.output_mode == "regression": + # preds = np.squeeze(preds) + #result = compute_metrics(eval_task, preds, out_label_ids) + #results.update(result) + + #output_eval_file = os.path.join(eval_output_dir, "eval_results.txt") + #with open(output_eval_file, "w") as writer: + # logger.info("***** Eval results {} *****".format(prefix)) + # for key in sorted(result.keys()): + # logger.info(" %s = %s", key, str(result[key])) + # 
writer.write("%s = %s\n" % (key, str(result[key]))) + + return results, score, upper_bound + + +def test(args, model, eval_dataset=None, prefix=""): + # Loop to handle MNLI double evaluation (matched, mis-matched) + eval_task_names = ("mnli", "mnli-mm") if args.task_name == "mnli" else (args.task_name,) + eval_outputs_dirs = (args.output_dir, args.output_dir + '-MM') if args.task_name == "mnli" else (args.output_dir,) + + label2ans = cPickle.load(open(args.label2ans_file, 'rb')) + logger.info('label2ans: %d' % (len(label2ans))) + + results = [] + t_start = time.time() + for eval_task, eval_output_dir in zip(eval_task_names, eval_outputs_dirs): + if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]: os.makedirs(eval_output_dir) + + args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu) + # Note that DistributedSampler samples randomly + eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset) + eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size) + + # Eval + logger.info("***** Running Test {} *****".format(prefix)) + logger.info(" Num examples = %d", len(eval_dataset)) + logger.info(" Batch size = %d", args.eval_batch_size) + + for batch in eval_dataloader: + #for batch in tqdm(eval_dataloader, desc="Evaluating"): + model.eval() + batch = tuple(t.to(args.device) for t in batch) + + with torch.no_grad(): + inputs = {'input_ids': batch[0], + 'attention_mask': batch[1], + 'token_type_ids': batch[2] if args.model_type in ['bert', 'xlnet'] else None, # XLM don't use segment_ids + 'labels': None, + 'img_feats': None if args.img_feature_dim == -1 else batch[5]} + outputs = model(**inputs) + logits = outputs[0] + + val, idx = logits.max(1) + #logger.info('idx: %s, batch[6]: %s' % (str(idx.shape), str(batch[6].shape))) + + for i in range(idx.size(0)): + #logger.info('idx: %d, batch: %d' % (idx[i].item(), batch[6][i].item())) + result = {} + result['question_id'] = batch[6][i].item() + result['answer'] = label2ans[eval_dataset.labels[idx[i].item()]] #label2ans[idx[i].item()] + results.append(result) + + if len(results) % 2000 == 0: + logger.info("PROGRESS: {}%".format(round(100*len(results)/len(eval_dataset), 4))) + #logger.info('q_id: {0}, answer: {1}'.format(result['question_id'], result['answer'])) + + with open(args.output_dir + ('/{}_results.json'.format(eval_dataset.name)), 'w') as fp: + json.dump(results, fp) + + t_end = time.time() + logger.info('# questions: %d' % (len(results))) + logger.info('Test Time Cost: %.3f' % (t_end - t_start)) + + +def load_and_cache_examples(args, task, tokenizer, evaluate=False): + processor = processors[task]() + output_mode = output_modes[task] + + label_list = processor.get_labels(args.label_file) + + t_start = time.time() + examples = processor.get_dev_examples(args.data_dir) if evaluate else processor.get_train_examples(args.data_dir) + + #img_features = torch.load(os.path.join(args.data_dir, 'val_img_feats.pt' if evaluate else 'train_img_feats.pt')) + #img_features = torch.load(os.path.join(args.data_dir, 'val_img_frcnn_feats.pt' if evaluate else 'train_img_frcnn_feats.pt')) + img_features = np.load(os.path.join(args.data_dir, 'val_img_frcnn_feats.npy' if evaluate else 'train_img_frcnn_feats.npy')) + + features = convert_examples_to_features_vqa(examples, img_features, label_list, args.max_img_seq_length, args.max_seq_length, + tokenizer, output_mode, + cls_token_at_end=bool(args.model_type in ['xlnet']), # xlnet has a cls 
token at the end + cls_token=tokenizer.cls_token, + sep_token=tokenizer.sep_token, + cls_token_segment_id=2 if args.model_type in ['xlnet'] else 0, + pad_on_left=bool(args.model_type in ['xlnet']), # pad on the left for xlnet + pad_token_segment_id=4 if args.model_type in ['xlnet'] else 0) + + #if args.local_rank in [-1, 0]: + # logger.info("Saving features into cached file %s", cached_features_file) + # torch.save(features, cached_features_file) + t_end = time.time() + logger.info('Info: loading features using %.5f secs' % (t_end-t_start)) + + + # Convert to Tensors and build dataset + all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long) # batch*max_seq_len + all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long) + all_segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long) + if output_mode == "classification": + labels = torch.tensor([f.label_id[0] for f in features], dtype=torch.long) + targets = torch.tensor([target_tensor(len(label_list), f.label_id, f.score) for f in features], dtype=torch.float) + + if args.img_feature_dim > 0: # change here + t_start = time.time() + img_feat_np = np.zeros((labels.shape[0], args.max_img_seq_length, args.img_feature_dim)) + for f_id, f in enumerate(features): + img_feat_np[f_id] = f.img_feat + + img_feats = torch.from_numpy(img_feat_np) + + #img_feats = torch.empty((labels.shape[0], args.max_img_seq_length, args.img_feature_dim)) + #for f_id, f in enumerate(features): + # img_feats[f_id] = f.img_feat + + t_end = time.time() + logger.info('Info: convert image tensor features using %.5f secs' % (t_end - t_start)) + + #img_feats = torch.stack([f.img_feat[:,-args.img_feature_dim:] for f in features]) + #img_feats = torch.stack([f.img_feat for f in features]) + #img_feats = img_feats.type(torch.long) + + #print('targets:', targets.shape) + print('img_feats:', img_feats.shape) + elif output_mode == "regression": + all_label_ids = torch.tensor([f.label_id for f in features], dtype=torch.float) + + if args.img_feature_dim == -1: + dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, labels, targets) + else: + dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, labels, targets, img_feats) + return dataset + +def target_tensor(len, labels, scores): + """ create the target by labels and scores """ + target = [0]*len + for id, l in enumerate(labels): + target[l] = scores[id] + + return target + + +def main(): + parser = argparse.ArgumentParser() + + ## Required parameters + parser.add_argument("--data_dir", default=None, type=str, required=True, + help="The input data dir. Should contain the .tsv files (or other data files) for the task.") + parser.add_argument("--txt_data_dir", default=None, type=str, required=True, + help="The input text data dir. 
Should contain the .json files (or other data files) for the task.") + + parser.add_argument("--model_type", default=None, type=str, required=True, + help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys())) + parser.add_argument("--model_name_or_path", default=None, type=str, required=True, + help="Path to pre-trained model or shortcut name") + parser.add_argument("--task_name", default=None, type=str, required=True, + help="The name of the task to train selected in the list: " + ", ".join(processors.keys())) + parser.add_argument("--output_dir", default=None, type=str, required=True, + help="The output directory where the model predictions and checkpoints will be written.") + parser.add_argument("--label_file", type=str, default=None, help="Label Dictionary") + parser.add_argument("--label2ans_file", type=str, default=None, help="Label to Answer Dictionary") + + parser.add_argument("--img_feat_dir", default=None, type=str, help="The input img_feat_dir.") + parser.add_argument("--img_feat_format", default='pt', type=str, help="img_feat_format: pt or tsv.") + + parser.add_argument("--data_label_type", default='faster', type=str, help="faster or mask") + parser.add_argument("--loss_type", default='kl', type=str, help="kl or xe") + parser.add_argument("--use_vg", action='store_true', help="Use VG-QA or not.") + parser.add_argument("--use_vg_dev", action='store_true', help="Use VG-QA as validation.") + + ## Other parameters + parser.add_argument("--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name") + parser.add_argument("--tokenizer_name", default="", type=str, help="Pretrained tokenizer name or path if not the same as model_name") + parser.add_argument("--cache_dir", default="", type=str, help="Where do you want to store the pre-trained models downloaded from s3") + parser.add_argument("--max_seq_length", default=128, type=int, + help="The maximum total input sequence length after tokenization. 
Sequences longer than this will be truncated, sequences shorter will be padded.")
+    parser.add_argument("--do_train", action='store_true', help="Whether to run training.")
+    parser.add_argument("--do_train_val", action='store_true', help="Whether to run training on train+val.")
+    parser.add_argument("--do_eval", action='store_true', help="Whether to run eval on the dev set.")
+    parser.add_argument("--do_test", action='store_true', help="Whether to run test on the test set.")
+    parser.add_argument("--do_test_dev", action='store_true', help="Whether to run test on the test-dev set.")
+    parser.add_argument("--evaluate_during_training", action='store_true', help="Run evaluation during training at each logging step.")
+    parser.add_argument("--do_lower_case", action='store_true', help="Set this flag if you are using an uncased model.")
+
+    parser.add_argument("--drop_out", default=0.1, type=float, help="Dropout rate for BERT.")
+    parser.add_argument("--adjust_dp", action='store_true', help="Adjust the dropout rate for BERT during training.")
+
+    parser.add_argument("--adjust_loss", action='store_true', help="Adjust the loss type for BERT during training.")
+    parser.add_argument("--adjust_loss_epoch", default=-1, type=int, help="Epoch after which to adjust the loss type.")
+    parser.add_argument("--classifier", default='linear', type=str, help="linear or mlp")
+    parser.add_argument("--cls_hidden_scale", default=2, type=int, help="cls_hidden_scale: for classifier")
+
+    parser.add_argument("--hard_label", action='store_true', help="Soft Label or Hard Label.")
+
+    parser.add_argument("--max_img_seq_length", default=30, type=int, help="The maximum total input image sequence length.")
+    parser.add_argument("--img_feature_dim", default=2054, type=int, help="The Image Feature Dimension.")
+    parser.add_argument("--img_feature_type", default='faster_r-cnn', type=str, help="faster_r-cnn or mask_r-cnn")
+    parser.add_argument("--code_voc", default=512, type=int, help="dis_code_voc: 256, 512")
+    parser.add_argument("--code_level", default='top', type=str, help="code level: top, bottom, both")
+
+    parser.add_argument("--per_gpu_train_batch_size", default=8, type=int, help="Batch size per GPU/CPU for training.")
+    parser.add_argument("--per_gpu_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.")
+    parser.add_argument('--gradient_accumulation_steps', type=int, default=1,
+                        help="Number of update steps to accumulate before performing a backward/update pass.")
+    parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.")
+    parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
+    parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
+    parser.add_argument("--scheduler", default='linear', type=str, help="constant or linear.")
+    parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
+    parser.add_argument("--num_train_epochs", default=3.0, type=float, help="Total number of training epochs to perform.")
+    parser.add_argument("--max_steps", default=-1, type=int, help="If > 0: set total number of training steps to perform. Overrides num_train_epochs.")
+    parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
+
+    parser.add_argument('--logging_steps', type=int, default=50, help="Log every X update steps.")
+    parser.add_argument('--save_steps', type=int, default=-1, help="Save checkpoint every X update steps.")
+    parser.add_argument('--save_epoch', type=int, default=5, help="Save checkpoint every X epochs.")
+    parser.add_argument('--save_after_epoch', type=int, default=-1, help="Only save checkpoints after this epoch.")
+    parser.add_argument("--eval_all_checkpoints", action='store_true',
+                        help="Evaluate all checkpoints starting with the same prefix as model_name and ending with the step number")
+    parser.add_argument("--no_cuda", action='store_true', help="Avoid using CUDA when available")
+    parser.add_argument('--overwrite_output_dir', action='store_true', help="Overwrite the content of the output directory")
+    parser.add_argument('--overwrite_cache', action='store_true', help="Overwrite the cached training and evaluation sets")
+    parser.add_argument('--seed', type=int, default=42, help="random seed for initialization")
+
+    parser.add_argument('--fp16', action='store_true', help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit")
+    parser.add_argument('--fp16_opt_level', type=str, default='O1',
+                        help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
+                             "See details at https://nvidia.github.io/apex/amp.html")
+    parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank")
+    parser.add_argument('--server_ip', type=str, default='', help="For distant debugging.")
+    parser.add_argument('--server_port', type=str, default='', help="For distant debugging.")
+
+    parser.add_argument("--philly", action='store_true', help="Use Philly: reset the output dir")
+    parser.add_argument("--load_fast", action='store_true', help="Tensorize all examples up front (load fast).")
+    parser.add_argument('-j', '--workers', default=0, type=int, metavar='N', help='number of data loading workers (default: 0)')
+
+    #args = '--data_dir ../vqa/ban-vqa/data/qal_pairs --model_type bert --model_name_or_path bert-base-uncased --task_name vqa_text ' \
+    #       '--do_train --do_eval --do_lower_case --max_seq_length 40 --per_gpu_eval_batch_size 16 --per_gpu_train_batch_size 16 --learning_rate 2e-5 ' \
+    #       '--num_train_epochs 20.0 --output_dir ./results/vqa_text --label_file ../vqa/ban-vqa/data/cache/trainval_ans2label.pkl ' \
+    #       '--save_steps 5000 --overwrite_output_dir --max_img_seq_length 1 --img_feature_dim 565 --img_feature_type dis_code '
+
+    #args = '--data_dir ../vqa/ban-vqa/data/qal_pairs --model_type bert --model_name_or_path bert-base-uncased --task_name vqa_text ' \
+    #       '--do_train --do_eval --do_lower_case --max_seq_length 40 --per_gpu_eval_batch_size 16 --per_gpu_train_batch_size 16 --learning_rate 2e-5 ' \
+    #       '--num_train_epochs 20.0 --output_dir ./results/vqa_text --label_file ../vqa/ban-vqa/data/cache/trainval_ans2label.pkl ' \
+    #       '--save_steps 5000 --overwrite_output_dir --max_img_seq_length 10 --img_feature_dim 565 --img_feature_type other '
+
+    #args = parser.parse_args(args.split())
+
+    args = parser.parse_args()
+
+    if args.philly:  # use philly
+        logger.info('Info: Use Philly, all the output folders are reset.')
+        args.output_dir = os.path.join(os.getenv('PT_OUTPUT_DIR'), args.output_dir)
+        logger.info('OUTPUT_DIR: %s', args.output_dir)
+
+    #if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and 
args.do_train and not args.overwrite_output_dir: + # raise ValueError("Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.".format(args.output_dir)) + + if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train: logger.info("Output Directory Exists.") + + # Setup distant debugging if needed + if args.server_ip and args.server_port: + # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script + import ptvsd + logger.info("Waiting for debugger attach") + ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True) + ptvsd.wait_for_attach() + + # Setup CUDA, GPU & distributed training + if args.local_rank == -1 or args.no_cuda: + device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") + args.n_gpu = torch.cuda.device_count() + else: # Initializes the distributed backend which will take care of sychronizing nodes/GPUs + torch.cuda.set_device(args.local_rank) + device = torch.device("cuda", args.local_rank) + torch.distributed.init_process_group(backend='nccl') + args.n_gpu = 1 + args.device = device + + # Setup logging + logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s', + datefmt = '%m/%d/%Y %H:%M:%S', level = logging.INFO if args.local_rank in [-1, 0] else logging.WARN) + logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", + args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16) + + # Set seed + set_seed(args.seed, args.n_gpu) + + # Prepare GLUE task + args.task_name = args.task_name.lower() + if args.task_name not in processors: + raise ValueError("Task not found: %s" % (args.task_name)) + + processor = processors[args.task_name]() + args.output_mode = output_modes[args.task_name] + label_list = processor.get_labels(args.label_file) + num_labels = len(label_list) + logger.info('Task Name: {}, #Labels: {}'.format(args.task_name, num_labels)) + + # Load pretrained model and tokenizer + if args.local_rank not in [-1, 0]: + torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab + + args.model_type = args.model_type.lower() + config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type] + config = config_class.from_pretrained( + args.config_name if args.config_name else args.model_name_or_path, + num_labels=num_labels, finetuning_task=args.task_name, + ) + tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name if args.tokenizer_name else args.model_name_or_path, do_lower_case=args.do_lower_case) + + # discrete code + config.img_feature_dim = args.img_feature_dim + config.img_feature_type = args.img_feature_type + config.code_voc = args.code_voc + config.hidden_dropout_prob = args.drop_out + config.loss_type = args.loss_type + config.classifier = args.classifier + config.cls_hidden_scale = args.cls_hidden_scale + + # load discrete code + if args.img_feature_type in ['dis_code', 'dis_code_t']: + logger.info('Load discrete code from: {}'.format(args.data_dir)) + t_start = time.time() + train_code = torch.load(os.path.join(args.data_dir, 'vqvae', 'train.pt')) + t_end = time.time() + logger.info('Load time: %.3f' % (t_end - t_start)) + + if args.code_level == 'top': + config.code_dim = train_code['embeddings_t'].shape[0] + config.code_size = train_code['feats_top'][list(train_code['feats_top'].keys())[0]].shape[0] + elif args.code_level == 
'bottom': + config.code_dim = train_code['embeddings_b'].shape[0] + config.code_size = train_code['feats_bottom'][list(train_code['feats_bottom'].keys())[0]].shape[0] + elif args.code_level == 'both': + config.code_dim = train_code['embeddings_t'].shape[0] + train_code['embeddings_b'].shape[0] + + model = model_class.from_pretrained(args.model_name_or_path, from_tf=bool('.ckpt' in args.model_name_or_path), config=config) + if args.img_feature_type in ['dis_code', 'dis_code_t']: + logger.info('Initializing the code embedding with {}'.format(args.code_level)) + if args.code_level == 'top': + model.init_code_embedding(train_code['embeddings_t'].t()) + elif args.code_level == 'bottom': + model.init_code_embedding(train_code['embeddings_b'].t()) + + if args.local_rank == 0: + torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab + + model.to(args.device) + + logger.info("Training/evaluation parameters %s", args) + + #if args.do_eval: + eval_dataset = VQADataset(args, 'val', tokenizer) + + if args.do_test: + test_dataset = VQADataset(args, 'test2015', tokenizer) + + if args.do_test_dev: + test_dev_dataset = VQADataset(args, 'test-dev2015', tokenizer) + + # Training + if args.do_train: + #train_dataset = load_and_cache_examples(args, args.task_name, tokenizer, evaluate=False) + train_dataset = VQADataset(args, 'train', tokenizer) + global_step, tr_loss = train(args, train_dataset, eval_dataset, model, tokenizer) + logger.info(" global_step = %s, average loss = %s", global_step, tr_loss) + + # Training on train+val + if args.do_train_val: + train_dataset = VQADataset(args, 'train+val', tokenizer) + global_step, tr_loss = train(args, train_dataset, eval_dataset, model, tokenizer) + logger.info(" global_step = %s, average loss = %s", global_step, tr_loss) + + # Saving best-practices: if you use defaults names for the model, you can reload it using from_pretrained() + if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0): + # Create output directory if needed + if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]: os.makedirs(args.output_dir) + + logger.info("Saving model checkpoint to %s", args.output_dir) + # Save a trained model, configuration and tokenizer using `save_pretrained()`. 
They can then be reloaded using `from_pretrained()` + #model_to_save = model.module if hasattr(model, 'module') else model # Take care of distributed/parallel training + #model_to_save.save_pretrained(args.output_dir) + + tokenizer.save_pretrained(args.output_dir) + + # Good practice: save your training arguments together with the trained model + torch.save(args, os.path.join(args.output_dir, 'training_args.bin')) + + # Load a trained model and vocabulary that you have fine-tuned + #model = model_class.from_pretrained(args.output_dir) + #tokenizer = tokenizer_class.from_pretrained(args.output_dir) + #model.to(args.device) + + + # Evaluation + #results = {} + if args.do_eval and args.local_rank in [-1, 0]: + checkpoints = [args.output_dir] + if args.eval_all_checkpoints: + checkpoints = list(os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + '/**/' + WEIGHTS_NAME, recursive=True))) + logging.getLogger("pytorch_transformers.modeling_utils").setLevel(logging.WARN) # Reduce logging + logger.info("Evaluate the following checkpoints: %s", checkpoints) + + for checkpoint in checkpoints: + global_step = checkpoint.split('-')[-1] if len(checkpoints) > 1 else "" + model = model_class.from_pretrained(checkpoint, config=config) + model.to(args.device) + result, score, upper_bound = evaluate(args, model, eval_dataset, prefix=global_step) + #result = dict((k + '_{}'.format(global_step), v) for k, v in result.items()) + #results.update(result) + + # Test-Dev + if args.do_test_dev and args.local_rank in [-1, 0]: + checkpoints = [args.output_dir] + if args.eval_all_checkpoints: + checkpoints = list(os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + '/**/' + WEIGHTS_NAME, recursive=True))) + logging.getLogger("pytorch_transformers.modeling_utils").setLevel(logging.WARN) # Reduce logging + logger.info("Evaluate the following checkpoints: %s", checkpoints) + + for checkpoint in checkpoints: + global_step = checkpoint.split('-')[-1] if len(checkpoints) > 1 else "" + model = model_class.from_pretrained(checkpoint) + model.to(args.device) + test(args, model, test_dev_dataset, prefix=global_step) + + # Test + if args.do_test and args.local_rank in [-1, 0]: + checkpoints = [args.output_dir] + if args.eval_all_checkpoints: + checkpoints = list(os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + '/**/' + WEIGHTS_NAME, recursive=True))) + logging.getLogger("pytorch_transformers.modeling_utils").setLevel(logging.WARN) # Reduce logging + logger.info("Evaluate the following checkpoints: %s", checkpoints) + + for checkpoint in checkpoints: + global_step = checkpoint.split('-')[-1] if len(checkpoints) > 1 else "" + model = model_class.from_pretrained(checkpoint) + model.to(args.device) + test(args, model, test_dataset, prefix=global_step) + + +if __name__ == "__main__": + main() diff --git a/oscar/utils/__init__.py b/oscar/utils/__init__.py new file mode 100644 index 0000000..3dc1f76 --- /dev/null +++ b/oscar/utils/__init__.py @@ -0,0 +1 @@ +__version__ = "0.1.0" diff --git a/oscar/utils/caption_evaluate.py b/oscar/utils/caption_evaluate.py new file mode 100644 index 0000000..5699253 --- /dev/null +++ b/oscar/utils/caption_evaluate.py @@ -0,0 +1,293 @@ +# Copyright (c) 2020 Microsoft Corporation. Licensed under the MIT license. 
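+# Utilities for caption evaluation: COCO-style metric computation (via coco_caption), nocaps
+# submission through the EvalAI CLI, and an SCST (self-critical sequence training) reward built on CIDEr.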
+ +from collections import OrderedDict, defaultdict +import json +import os.path as op +from pprint import pprint +import torch +import re +import subprocess +import tempfile +import time +from typing import Dict, Optional + +from coco_caption.pycocotools.coco import COCO +from coco_caption.pycocoevalcap.eval import COCOEvalCap +from coco_caption.pycocoevalcap.cider.cider import Cider + +CiderD_scorer = Cider(df='corpus') + + +def evaluate_on_nocaps(split, predict_file, data_dir='data/nocaps/', evaluate_file=None): + ''' + NOTE: Put the auth file in folder ~/.evalai/ + ''' + if not evaluate_file: + evaluate_file = op.splitext(predict_file)[0] + '.eval.json' + if op.isfile(evaluate_file): + print('{} already exists'.format(evaluate_file)) + with open(evaluate_file, 'r') as fp: + metrics = json.load(fp) + return metrics + + image_info_file = op.join(data_dir, + 'nocaps_{}_image_info.json'.format(split)) + image_info = json.load(open(image_info_file)) + open_image_id2id = {} + for it in image_info['images']: + open_image_id2id[it['open_images_id']] = it['id'] + predictions = [] + cap_id = 0 + with open(predict_file, 'r') as fp: + for line in fp: + p = line.strip().split('\t') + predictions.append( + {'image_id': open_image_id2id[p[0]], + 'caption': json.loads(p[1])[0]['caption'], + 'id': cap_id}) + cap_id += 1 + if split == 'test': + print('Are you sure to submit test split result at: {}'.format(predict_file)) + import ipdb;ipdb.set_trace() + nocapseval = NocapsEvaluator(phase=split) + metrics = nocapseval.evaluate(predictions) + pprint(metrics) + with open(evaluate_file, 'w') as fp: + json.dump(metrics, fp) + return metrics + + +def evaluate_on_coco_caption(res_file, label_file, outfile=None): + """ + res_tsv: TSV file, each row is [image_key, json format list of captions]. + Each caption is a dict, with fields "caption", "conf". + label_file: JSON file of ground truth captions in COCO format. 
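+    outfile: optional path for saving the computed metrics as JSON; if not given, the result
+        dict is only printed and returned.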
+ """ + assert label_file.endswith('.json') + if res_file.endswith('.tsv'): + res_file_coco = op.splitext(res_file)[0] + '_coco_format.json' + convert_tsv_to_coco_format(res_file, res_file_coco) + else: + raise ValueError('unknown prediction result file format: {}'.format(res_file)) + + coco = COCO(label_file) + cocoRes = coco.loadRes(res_file_coco) + cocoEval = COCOEvalCap(coco, cocoRes, 'corpus') + + # evaluate on a subset of images by setting + # cocoEval.params['image_id'] = cocoRes.getImgIds() + # please remove this line when evaluating the full validation set + cocoEval.params['image_id'] = cocoRes.getImgIds() + + # evaluate results + # SPICE will take a few minutes the first time, but speeds up due to caching + cocoEval.evaluate() + result = cocoEval.eval + if not outfile: + print(result) + else: + with open(outfile, 'w') as fp: + json.dump(result, fp, indent=4) + return result + + +def convert_tsv_to_coco_format(res_tsv, outfile, + sep='\t', key_col=0, cap_col=1): + results = [] + with open(res_tsv) as fp: + for line in fp: + parts = line.strip().split(sep) + key = parts[key_col] + if cap_col < len(parts): + caps = json.loads(parts[cap_col]) + assert len(caps) == 1, 'cannot evaluate multiple captions per image' + cap = caps[0].get('caption', '') + else: + # empty caption generated + cap = "" + results.append( + {'image_id': key, + 'caption': cap} + ) + with open(outfile, 'w') as fp: + json.dump(results, fp) + + +class ScstRewardCriterion(torch.nn.Module): + CIDER_REWARD_WEIGHT = 1 + + def __init__(self): + self.greedy_score = None + super().__init__() + + def forward(self, gt_res, greedy_res, sample_res, sample_logprobs): + batch_size = len(gt_res) + + # must keep order to get evaluation for each item in batch + res = OrderedDict() + for i in range(batch_size): + res[i] = [sample_res[i]] + for i in range(batch_size): + res[batch_size + i] = [greedy_res[i]] + + gts = OrderedDict() + for i in range(batch_size): + gts[i] = gt_res[i] + for i in range(batch_size): + gts[batch_size + i] = gt_res[i] + + _, batch_cider_scores = CiderD_scorer.compute_score(gts, res) + scores = self.CIDER_REWARD_WEIGHT * batch_cider_scores + # sample - greedy + reward = scores[:batch_size] - scores[batch_size:] + self.greedy_score = scores[batch_size:].mean() + + reward = torch.as_tensor(reward, device=sample_logprobs.device, dtype=torch.float) + loss = - sample_logprobs * reward + loss = loss.mean() + return loss + + def get_score(self): + return self.greedy_score + + +class NocapsEvaluator(object): + r""" + Code from https://github.com/nocaps-org/updown-baseline/blob/master/updown/utils/evalai.py + + A utility class to submit model predictions on nocaps splits to EvalAI, and retrieve model + performance based on captioning metrics (such as CIDEr, SPICE). + + Extended Summary + ---------------- + This class and the training script together serve as a working example for "EvalAI in the + loop", showing how evaluation can be done remotely on privately held splits. Annotations + (captions) and evaluation-specific tools (e.g. `coco-caption `_) + are not required locally. This enables users to select best checkpoint, perform early + stopping, learning rate scheduling based on a metric, etc. without actually doing evaluation. + + Parameters + ---------- + phase: str, optional (default = "val") + Which phase to evaluate on. One of "val" or "test". + + Notes + ----- + This class can be used for retrieving metrics on both, val and test splits. 
However, we + recommend to avoid using it for test split (at least during training). Number of allowed + submissions to test split on EvalAI are very less, and can exhaust in a few iterations! However, + the number of submissions to val split are practically infinite. + """ + + def __init__(self, phase: str = "val"): + + # Constants specific to EvalAI. + self._challenge_id = 355 + self._phase_id = 742 if phase == "val" else 743 + + def evaluate( + self, predictions, iteration: Optional[int] = None + ) -> Dict[str, Dict[str, float]]: + r""" + Take the model predictions (in COCO format), submit them to EvalAI, and retrieve model + performance based on captioning metrics. + + Parameters + ---------- + predictions: List[Prediction] + Model predictions in COCO format. They are a list of dicts with keys + ``{"image_id": int, "caption": str}``. + iteration: int, optional (default = None) + Training iteration where the checkpoint was evaluated. + + Returns + ------- + Dict[str, Dict[str, float]] + Model performance based on all captioning metrics. Nested dict structure:: + + { + "B1": {"in-domain", "near-domain", "out-domain", "entire"}, # BLEU-1 + "B2": {"in-domain", "near-domain", "out-domain", "entire"}, # BLEU-2 + "B3": {"in-domain", "near-domain", "out-domain", "entire"}, # BLEU-3 + "B4": {"in-domain", "near-domain", "out-domain", "entire"}, # BLEU-4 + "METEOR": {"in-domain", "near-domain", "out-domain", "entire"}, + "ROUGE-L": {"in-domain", "near-domain", "out-domain", "entire"}, + "CIDEr": {"in-domain", "near-domain", "out-domain", "entire"}, + "SPICE": {"in-domain", "near-domain", "out-domain", "entire"}, + } + + """ + # Save predictions as a json file first. + _, predictions_filename = tempfile.mkstemp(suffix=".json", text=True) + with open(predictions_filename, "w") as f: + json.dump(predictions, f) + + submission_command = ( + f"evalai challenge {self._challenge_id} phase {self._phase_id} " + f"submit --file {predictions_filename}" + ) + + submission_command_subprocess = subprocess.Popen( + submission_command.split(), + stdout=subprocess.PIPE, + stdin=subprocess.PIPE, + stderr=subprocess.STDOUT, + ) + + # This terminal output will have submission ID we need to check. + submission_command_stdout = submission_command_subprocess.communicate(input=b"N\n")[ + 0 + ].decode("utf-8") + + submission_id_regex = re.search("evalai submission ([0-9]+)", submission_command_stdout) + try: + # Get an integer submission ID (as a string). + submission_id = submission_id_regex.group(0).split()[-1] # type: ignore + except: + # Very unlikely, but submission may fail because of some glitch. Retry for that. + return self.evaluate(predictions) + + if iteration is not None: + print(f"Submitted predictions for iteration {iteration}, submission id: {submission_id}.") + else: + print(f"Submitted predictions, submission_id: {submission_id}") + + # Placeholder stdout for a pending submission. + result_stdout: str = "The Submission is yet to be evaluated." + num_tries: int = 0 + + # Query every 10 seconds for result until it appears. + while "CIDEr" not in result_stdout: + + time.sleep(10) + result_stdout = subprocess.check_output( + ["evalai", "submission", submission_id, "result"] + ).decode("utf-8") + num_tries += 1 + + # Raise error if it takes more than 5 minutes. + if num_tries == 30: + raise ConnectionError("Unable to get results from EvalAI within 5 minutes!") + + # Convert result to json. 
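Before the result parsing below, a usage sketch of the `NocapsEvaluator` defined in this file. The image ids and captions are made up, and the call assumes the `evalai` CLI is installed and authenticated as described in the class notes; prediction entries follow the documented `{"image_id": int, "caption": str}` format:

```python
from oscar.utils.caption_evaluate import NocapsEvaluator

# Hypothetical predictions in the documented format (one entry per nocaps image).
predictions = [
    {"image_id": 0, "caption": "a dog running on the beach"},
    {"image_id": 1, "caption": "a plate of food on a wooden table"},
]

evaluator = NocapsEvaluator(phase="val")   # val phase; test submissions are rate-limited
metrics = evaluator.evaluate(predictions)  # blocks, polling EvalAI until results arrive
print(metrics["CIDEr"]["entire"])          # returned dict is flattened as {metric: {domain: value}}
```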
+ metrics = json.loads(result_stdout, encoding="utf-8") + + # keys: {"in-domain", "near-domain", "out-domain", "entire"} + # In each of these, keys: {"B1", "B2", "B3", "B4", "METEOR", "ROUGE-L", "CIDEr", "SPICE"} + metrics = { + "in-domain": metrics[0]["in-domain"], + "near-domain": metrics[1]["near-domain"], + "out-domain": metrics[2]["out-domain"], + "entire": metrics[3]["entire"], + } + + # Restructure the metrics dict for better tensorboard logging. + # keys: {"B1", "B2", "B3", "B4", "METEOR", "ROUGE-L", "CIDEr", "SPICE"} + # In each of these, keys: keys: {"in-domain", "near-domain", "out-domain", "entire"} + flipped_metrics: Dict[str, Dict[str, float]] = defaultdict(dict) + for key, val in metrics.items(): + for subkey, subval in val.items(): + flipped_metrics[subkey][key] = subval + + return flipped_metrics + diff --git a/oscar/utils/cbs.py b/oscar/utils/cbs.py new file mode 100644 index 0000000..b5a792e --- /dev/null +++ b/oscar/utils/cbs.py @@ -0,0 +1,852 @@ +# Copyright (c) 2020 Microsoft Corporation. Licensed under the MIT license. +# Copyright (c) 2019, Yufei Wang, Karan Desai. Licensed under the MIT license. +# Code is modified from https://github.com/nocaps-org/updown-baseline + +import anytree +import base64 +import json +import numpy as np +import os.path as op +import torch +from typing import Callable, Dict, List, Optional, Tuple + +from oscar.modeling.modeling_utils import BeamHypotheses + +StepFunctionType = Callable[ + [torch.Tensor, List[torch.Tensor]], Tuple[torch.Tensor, List[torch.Tensor]] +] + + +def _enlarge_single_tensor(t, batch_size, num_fsm_states, beam_size): + # shape: (batch_size * beam_size, *) + _, *last_dims = t.size() + return ( + t.view(batch_size, 1, 1, *last_dims) + .expand(batch_size, num_fsm_states, beam_size, *last_dims) + .reshape(-1, *last_dims) + ) + + +class ConstrainedBeamSearch(object): + r""" + Implements Constrained Beam Search for decoding the most likely sequences conditioned on a + Finite State Machine with specified state transitions. + """ + + def __init__( + self, + eos_token_ids: List[int], + max_steps: int = 20, + beam_size: int = 5, + per_node_beam_size: Optional[int] = None, + use_hypo: bool = False, + tokenizer=None, + ): + self._eos_token_ids = eos_token_ids + self.max_steps = max_steps + self.beam_size = beam_size + self.per_node_beam_size = per_node_beam_size or self.beam_size + self.num_keep_best = 1 + self.length_penalty = 1 + self.use_hypo = use_hypo + self.tokenizer = tokenizer + + def search( + self, + start_predictions: torch.Tensor, + start_state: List[torch.Tensor], + step: StepFunctionType, + fsm: torch.Tensor, + ) -> Tuple[torch.Tensor, torch.Tensor]: + r""" + Given a starting state, a step function, and an FSM adjacency matrix, apply Constrained + Beam Search to find most likely target sequences satisfying specified constraints in FSM. + + .. note:: + + If your step function returns ``-inf`` for some log probabilities + (like if you're using a masked log-softmax) then some of the "best" + sequences returned may also have ``-inf`` log probability. Specifically + this happens when the beam size is smaller than the number of actions + with finite log probability (non-zero probability) returned by the step function. + Therefore if you're using a mask you may want to check the results from ``search`` + and potentially discard sequences with non-finite log probability. + + Parameters + ---------- + start_predictions : torch.Tensor + A tensor containing the initial predictions with shape ``(batch_size, )``. 
These are + usually just ``@@BOUNDARY@@`` token indices. + start_state : ``Dict[str, torch.Tensor]`` + The initial state passed to the ``step`` function. Each value of the state dict + should be a tensor of shape ``(batch_size, *)``, where ``*`` means any other + number of dimensions. + step : ``StepFunctionType`` + A function that is responsible for computing the next most likely tokens, given the + current state and the predictions from the last time step. The function should accept + two arguments. The first being a tensor of shape ``(group_size,)``, representing the + index of the predicted tokens from the last time step, and the second being the + current state. The ``group_size`` will be ``batch_size * beam_size * num_fsm_states`` + except in the initial step, for which it will just be ``batch_size``. The function is + expected to return a tuple, where the first element is a tensor of shape + ``(group_size, vocab_size)`` containing the log probabilities of the tokens for the + next step, and the second element is the updated state. The tensor in the state should + have shape ``(group_size, *)``, where ``*`` means any other number of dimensions. + + Returns + ------- + Tuple[torch.Tensor, torch.Tensor] + Tuple of ``(predictions, log_probabilities)``, where ``predictions`` + has shape ``(batch_size, num_fsm_states, beam_size, max_steps)`` + and ``log_probabilities`` has shape ``(batch_size, num_fsm_states, beam_size)``. + """ + # shape: (batch_size, num_fsm_states, num_fsm_states, vocab_size) + batch_size, num_fsm_states, _, vocab_size = fsm.size() + + # generated hypotheses + generated_hyps = [ + [BeamHypotheses(self.num_keep_best, self.max_steps, self.length_penalty, early_stopping=False) + for _ in range(num_fsm_states)] + for bb in range(batch_size) + ] + + # List of (batch_size, num_fsm_states, beam_size) tensors. One for each time step. Does not + # include the start symbols, which are implicit. + predictions: List[torch.Tensor] = [] + + # List of (batch_size, num_fsm_states, beam_size) tensors. One for each time step. None for + # the first. Stores the index n for the parent prediction. + backpointers: List[torch.Tensor] = [] + + # Calculate the first timestep. This is done outside the main loop because we are going + # from a single decoder input (the output from the encoder) to the top `beam_size` + # decoder outputs per FSM state. On the other hand, within the main loop we are going + # from the `beam_size` elements of the beam (per FSM state) to `beam_size`^2 candidates + # from which we will select the top `beam_size` elements for the next iteration. 
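For clarity on the `step` callback contract described in the docstring above, here is a minimal shapes-only sketch. A random scorer stands in for the real decoder, the vocabulary size is an assumption (BERT-base), and the search code applies `log_softmax` to whatever the callback returns:

```python
import torch

# Toy `step` function: takes the token ids decoded so far (one row per
# batch x FSM-state x beam entry) plus an opaque decoder state, and returns a
# (group_size, vocab_size) tensor of next-token scores with the updated state.
VOCAB_SIZE = 30522  # assumption: BERT-base vocabulary size

def toy_step(curr_ids: torch.Tensor, state):
    group_size = curr_ids.size(0)
    scores = torch.randn(group_size, VOCAB_SIZE)  # placeholder for model logits
    return scores, state

# Shapes only: batch of 2, 8 FSM states, beam size 5 -> 80 rows in the group.
dummy_ids = torch.zeros(2 * 8 * 5, 1, dtype=torch.long)
scores, _ = toy_step(dummy_ids, state=None)
print(scores.shape)  # torch.Size([80, 30522])
```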
+ + curr_ids = ( + start_predictions.expand(batch_size, self.beam_size*num_fsm_states) + .reshape(batch_size*self.beam_size*num_fsm_states, 1) + ) + # shape: start_class_log_probabilities (batch_size, vocab_size) + start_class_logits, state = step(curr_ids, start_state) + start_class_log_probabilities = torch.nn.functional.log_softmax(start_class_logits, dim=-1) + start_class_log_probabilities = start_class_log_probabilities[:batch_size, :] + vocab_size = start_class_log_probabilities.size(-1) + + start_state_predictions = start_class_log_probabilities.view( + batch_size, 1, vocab_size + ).expand(batch_size, num_fsm_states, vocab_size) + + start_state_predictions = start_state_predictions.masked_fill( + (1 - fsm[:, 0, :, :]).to(dtype=torch.bool), float("-inf") + ) + + # (batch_size, num_fsm_states, beam_size) + start_top_log_probabilities, start_predicted_classes = start_state_predictions.topk( + self.beam_size + ) + # shape: (batch_size, num_fsm_states, beam_size) + last_log_probabilities = start_top_log_probabilities + + predictions.append(start_predicted_classes.view(batch_size, -1)) + + log_probs_after_end = torch.full((1, vocab_size), float("-inf")).to( + start_predictions.device + ) + log_probs_after_end[:, self._eos_token_ids] = 0.0 + + #state = { + #key: _enlarge_single_tensor(value, batch_size, num_fsm_states, self.beam_size) + #for (key, value) in state.items() + #} + + step_state_mask = fsm.view( + batch_size, num_fsm_states, num_fsm_states, 1, vocab_size + ).expand(batch_size, num_fsm_states, num_fsm_states, self.beam_size, vocab_size) + + curr_len = curr_ids.shape[1] + for timestep in range(self.max_steps - curr_len - 1): + # shape: (batch_size * beam_size * num_fsm_states, ) + last_predictions = predictions[-1].reshape( + batch_size * self.beam_size * num_fsm_states + ) + cur_finished = (last_predictions==self._eos_token_ids[0]) + for eos_token in self._eos_token_ids[1:]: + cur_finished = (cur_finished | (last_predictions==eos_token)) + if cur_finished.all(): + break + + curr_ids = torch.cat([curr_ids, last_predictions.unsqueeze(-1)], dim=1) + + class_logits, state = step(curr_ids, state) + class_log_probabilities = torch.nn.functional.log_softmax(class_logits, dim=-1) + #last_predictions_expanded = ( + #last_predictions.view(-1) + #.unsqueeze(-1) + #.expand(batch_size * num_fsm_states * self.beam_size, vocab_size) + #) + cur_finished_expanded = ( + cur_finished.unsqueeze(-1) + .expand(batch_size * num_fsm_states * self.beam_size, vocab_size) + ) + + cleaned_log_probabilities = torch.where( + #last_predictions_expanded == self._eos_token_ids, + cur_finished_expanded, + log_probs_after_end, + class_log_probabilities, + ) + cleaned_log_probabilities = cleaned_log_probabilities.view( + batch_size, num_fsm_states, self.beam_size, vocab_size + ) + + device = start_predictions.device + restricted_predicted_classes = torch.LongTensor( + batch_size, num_fsm_states, self.beam_size + ).to(start_predictions.device) + restricted_beam_log_probs = torch.FloatTensor( + batch_size, num_fsm_states, self.beam_size + ).to(start_predictions.device) + restricted_beam_indices = torch.LongTensor( + batch_size, num_fsm_states, self.beam_size + ).to(start_predictions.device) + + expanded_last_log_probabilities = last_log_probabilities.view( + batch_size, num_fsm_states, self.beam_size, 1 + ).expand(batch_size, num_fsm_states, self.beam_size, self.per_node_beam_size) + + for i in range(num_fsm_states): + # shape (batch_size, num_fsm_states, self.beam_size, vocab_size) + state_log_probabilities = 
cleaned_log_probabilities + + state_log_probabilities = state_log_probabilities.masked_fill( + (1 - step_state_mask[:, :, i, :, :]).to(dtype=torch.bool), -1e20 + ) + top_log_probabilities, predicted_classes = state_log_probabilities.topk( + self.per_node_beam_size + ) + summed_top_log_probabilities = ( + top_log_probabilities + expanded_last_log_probabilities + ) + # shape: (batch_size, old_num_fsm_states * beam_size * per_node_beam_size) + reshaped_summed = summed_top_log_probabilities.reshape(batch_size, -1) + + # shape: (batch_size, old_num_fsm_states * beam_size * per_node_beam_size) + reshaped_predicted_classes = predicted_classes.reshape(batch_size, -1) + + if not self.use_hypo: + # shape (batch_size, beam_size) + state_beam_log_probs, state_beam_indices = reshaped_summed.topk(self.beam_size) + # shape (batch_size, beam_size) + state_predicted_classes = reshaped_predicted_classes.gather(1, state_beam_indices) + else: + # shape (batch_size, beam_size*per_node_beam_size) + candidate_beam_log_probs, candidate_beam_indices = reshaped_summed.topk( + self.beam_size*self.per_node_beam_size, sorted=True, largest=True) + # shape (batch_size, beam_size*per_node_beam_size) + candidate_predicted_classes = reshaped_predicted_classes.gather(1, candidate_beam_indices) + next_batch_beam = [] + for batch_ex in range(batch_size): + next_sent_beam = [] + for word_id, beam_id, log_prob in zip(candidate_predicted_classes[batch_ex], + candidate_beam_indices[batch_ex], + candidate_beam_log_probs[batch_ex]): + if word_id.item() in self._eos_token_ids: + generated_hyps[batch_ex][i].add( + curr_ids[batch_ex * self.beam_size*num_fsm_states + beam_id/self.per_node_beam_size, :].clone(), + log_prob.item() + ) + else: + next_sent_beam.append((word_id, beam_id, log_prob)) + if len(next_sent_beam) == self.beam_size: + break + assert len(next_sent_beam) == self.beam_size + next_batch_beam.extend(next_sent_beam) + state_predicted_classes = torch.tensor([x[0] for x in next_batch_beam], + device=device).reshape(batch_size, self.beam_size) + state_beam_indices = torch.tensor([x[1] for x in next_batch_beam], + device=device).reshape(batch_size, self.beam_size) + state_beam_log_probs = torch.tensor([x[2] for x in next_batch_beam], + device=device).reshape(batch_size, self.beam_size) + + restricted_predicted_classes[:, i, :] = state_predicted_classes + restricted_beam_indices[:, i, :] = state_beam_indices + restricted_beam_log_probs[:, i, :] = state_beam_log_probs + + restricted_predicted_classes = restricted_predicted_classes.view(batch_size, -1) + predictions.append(restricted_predicted_classes) + + backpointer = restricted_beam_indices / self.per_node_beam_size + backpointers.append(backpointer.view(batch_size, -1)) + + last_log_probabilities = restricted_beam_log_probs.view(batch_size, num_fsm_states, -1) + + def track_back_state(state_tensor): + _, *last_dims = state_tensor.size() + # shape: (batch_size, beam_size, *) + expanded_backpointer = backpointer.view( + batch_size, num_fsm_states * self.beam_size, *([1] * len(last_dims)) + ).expand(batch_size, num_fsm_states * self.beam_size, *last_dims) + + # shape: (batch_size * beam_size, *) + return ( + state_tensor.reshape(batch_size, num_fsm_states * self.beam_size, *last_dims) + .gather(1, expanded_backpointer) + .reshape(batch_size * num_fsm_states * self.beam_size, *last_dims) + ) + # reorder states + if state is not None: + state = tuple(track_back_state(value) for value in state) + curr_ids = track_back_state(curr_ids) + + last_predictions = 
predictions[-1].reshape( + batch_size * self.beam_size * num_fsm_states + ) + curr_ids = torch.cat([curr_ids, last_predictions.unsqueeze(-1)], dim=1) + # Reconstruct the sequences. + # shape: [(batch_size, beam_size, 1)] + reconstructed_predictions = [predictions[-1].unsqueeze(2)] + + # shape: (batch_size, beam_size) + cur_backpointers = backpointers[-1] + + for timestep in range(len(predictions) - 2, 0, -1): + # shape: (batch_size, beam_size, 1) + cur_preds = predictions[timestep].gather(1, cur_backpointers).unsqueeze(2) + + reconstructed_predictions.append(cur_preds) + + # shape: (batch_size, beam_size) + cur_backpointers = backpointers[timestep - 1].gather(1, cur_backpointers) + + # shape: (batch_size, beam_size, 1) + final_preds = predictions[0].gather(1, cur_backpointers).unsqueeze(2) + + reconstructed_predictions.append(final_preds) + + # shape: (batch_size, beam_size, max_steps) + all_predictions = torch.cat(list(reversed(reconstructed_predictions)), 2) + all_predictions = all_predictions.view(batch_size, num_fsm_states, self.beam_size, -1) + assert (all_predictions == curr_ids.reshape(batch_size, num_fsm_states, + self.beam_size, -1)[:,:,:,1:]).all() + + if self.use_hypo: + decoded = all_predictions.new(batch_size, num_fsm_states, 1, + self.max_steps).fill_(self._eos_token_ids[0]) + scores = last_log_probabilities.new(batch_size, num_fsm_states, + 1).fill_(-1e5) + for batch_ex in range(batch_size): + for i in range(num_fsm_states): + beam = all_predictions[batch_ex, i, 0, :] + log_prob = last_log_probabilities[batch_ex, i, 0] + generated_hyps[batch_ex][i].add( + beam.clone(), + log_prob.item() + ) + hyps = generated_hyps[batch_ex][i].hyp + assert len(hyps) == 1 + score, sent = hyps[0] + decoded[batch_ex, i, 0, :len(sent)] = sent + scores[batch_ex, i, 0] = score + all_predictions = decoded + last_log_probabilities = scores + + # pad to the same length, otherwise DataParallel will give error + pad_len = self.max_steps - all_predictions.shape[-1] + if pad_len > 0: + padding_ids = all_predictions.new( + batch_size, num_fsm_states, self.beam_size, + pad_len).fill_(self._eos_token_ids[0]) + all_predictions = torch.cat([all_predictions, padding_ids], dim=-1) + + return all_predictions, last_log_probabilities + + +def select_best_beam_with_constraints( + beams: torch.Tensor, + beam_log_probabilities: torch.Tensor, + given_constraints: torch.Tensor, + min_constraints_to_satisfy: int, +) -> Tuple[torch.Tensor, torch.Tensor]: + r""" + Select the best beam which satisfies specified minimum constraints out of a total number of + given constraints. + + .. note:: + + The implementation of this function goes hand-in-hand with the FSM building implementation + in :meth:`~updown.utils.constraints.FiniteStateMachineBuilder.build` - it defines which + state satisfies which (basically, how many) constraints. If the "definition" of states + change, then selection of beams also changes accordingly. + + Parameters + ---------- + beams: torch.Tensor + A tensor of shape ``(batch_size, num_states, beam_size, max_decoding_steps)`` containing + decoded beams by :class:`~updown.modules.cbs.ConstrainedBeamSearch`. These beams are + sorted according to their likelihood (descending) in ``beam_size`` dimension. + beam_log_probabilities: torch.Tensor + A tensor of shape ``(batch_size, num_states, beam_size)`` containing likelihood of decoded + beams. + given_constraints: torch.Tensor + A tensor of shape ``(batch_size, )`` containing number of constraints given at the start + of decoding. 
+ min_constraints_to_satisfy: int + Minimum number of constraints to satisfy. This is either 2, or ``given_constraints`` if + they are less than 2. Beams corresponding to states not satisfying at least these number + of constraints will be dropped. Only up to 3 supported. + + Returns + ------- + Tuple[torch.Tensor, torch.Tensor] + Decoded sequence (beam) which has highest likelihood among beams satisfying constraints. + """ + batch_size, num_states, beam_size, max_decoding_steps = beams.size() + + best_beams: List[torch.Tensor] = [] + best_beam_log_probabilities: List[torch.Tensor] = [] + + for i in range(batch_size): + # fmt: off + valid_states = [ + s for s in range(2 ** given_constraints[i].item()) + if bin(s).count("1") >= min(given_constraints[i], min_constraints_to_satisfy) + ] + # fmt: on + + valid_beams = beams[i, valid_states, 0, :] + valid_beam_log_probabilities = beam_log_probabilities[i, valid_states, 0] + + selected_index = torch.argmax(valid_beam_log_probabilities) + best_beams.append(valid_beams[selected_index, :]) + best_beam_log_probabilities.append(valid_beam_log_probabilities[selected_index]) + + # shape: (batch_size, max_decoding_steps) + return (torch.stack(best_beams).long().to(beams.device), + torch.stack(best_beam_log_probabilities).to(beams.device)) + + +def load_wordforms(wordforms_tsvpath): + wordforms = {} + with open(wordforms_tsvpath, "r") as fp: + for line in fp: + parts = line.strip().split('\t') + wordforms[parts[0]] = parts[1].split(',') + return wordforms + + +class ConstraintBoxesReader(object): + r""" + A reader for annotation files containing detected bounding boxes. + For our use cases, the detections are from an object detector trained using Open Images. + """ + def __init__(self, boxes_tsvpath): + self._image_key_to_boxes = {} + with open(boxes_tsvpath, 'r') as fp: + for line in fp: + parts = line.strip().split('\t') + img_key = parts[0] + labels = json.loads(parts[1]) + boxes, class_names, scores = [], [], [] + for box in labels: + boxes.append(box['rect']) + class_names.append(box['class'].lower()) + scores.append(box['conf']) + boxes = np.array(boxes) + scores = np.array(scores) + self._image_key_to_boxes[img_key] = {"boxes": boxes, "class_names": class_names, "scores": scores} + + def __len__(self): + return len(self._image_key_to_boxes) + + def __getitem__(self, image_key): + # Some images may not have any boxes, handle that case too. + if image_key not in self._image_key_to_boxes: + return {"boxes": np.array([]), "class_names": [], "scores": + np.array([])} + else: + return self._image_key_to_boxes[image_key] + + +class ConstraintFilter(object): + r""" + A helper class to perform constraint filtering for providing sensible set of constraint words + while decoding. + + Extended Summary + ---------------- + The original work proposing `Constrained Beam Search `_ + selects constraints randomly. + + We remove certain categories from a fixed set of "blacklisted" categories, which are either + too rare, not commonly uttered by humans, or well covered in COCO. We resolve overlapping + detections (IoU >= 0.85) by removing the higher-order of the two objects (e.g. , a "dog" would + suppress a ‘mammal’) based on the Open Images class hierarchy (keeping both if equal). + Finally, we take the top-k objects based on detection confidence as constraints. + + Parameters + ---------- + hierarchy_jsonpath: str + Path to a JSON file containing a hierarchy of Open Images object classes. 
+ nms_threshold: float, optional (default = 0.85) + NMS threshold for suppressing generic object class names during constraint filtering, + for two boxes with IoU higher than this threshold, "dog" suppresses "animal". + max_given_constraints: int, optional (default = 3) + Maximum number of constraints which can be specified for CBS decoding. Constraints are + selected based on the prediction confidence score of their corresponding bounding boxes. + """ + + # fmt: off + BLACKLIST: List[str] = [ + "auto part", "bathroom accessory", "bicycle wheel", "boy", "building", "clothing", + "door handle", "fashion accessory", "footwear", "girl", "hiking equipment", "human arm", + "human beard", "human body", "human ear", "human eye", "human face", "human foot", + "human hair", "human hand", "human head", "human leg", "human mouth", "human nose", + "land vehicle", "mammal", "man", "person", "personal care", "plant", "plumbing fixture", + "seat belt", "skull", "sports equipment", "tire", "tree", "vehicle registration plate", + "wheel", "woman", "__background__", + ] + # fmt: on + + REPLACEMENTS: Dict[str, str] = { + "band-aid": "bandaid", + "wood-burning stove": "wood burning stove", + "kitchen & dining room table": "table", + "salt and pepper shakers": "salt and pepper", + "power plugs and sockets": "power plugs", + "luggage and bags": "luggage", + } + + def __init__( + self, hierarchy_jsonpath, nms_threshold, max_given_constraints + ): + def __read_hierarchy(node, parent=None): + # Cast an ``anytree.AnyNode`` (after first level of recursion) to dict. + attributes = dict(node) + children = attributes.pop("Subcategory", []) + + node = anytree.AnyNode(parent=parent, **attributes) + for child in children: + __read_hierarchy(child, parent=node) + return node + + # Read the object class hierarchy as a tree, to make searching easier. + self._hierarchy = __read_hierarchy(json.load(open(hierarchy_jsonpath))) + + self._nms_threshold = nms_threshold + self._max_given_constraints = max_given_constraints + + def __call__(self, boxes: np.ndarray, class_names: List[str], scores: np.ndarray) -> List[str]: + + # Remove padding boxes (which have prediction confidence score = 0), and remove boxes + # corresponding to all blacklisted classes. These will never become CBS constraints. + keep_indices = [] + for i in range(len(class_names)): + if scores[i] > 0 and class_names[i] not in self.BLACKLIST: + keep_indices.append(i) + + boxes = boxes[keep_indices] + class_names = [class_names[i] for i in keep_indices] + scores = scores[keep_indices] + + # Perform non-maximum suppression according to category hierarchy. For example, for highly + # overlapping boxes on a dog, "dog" suppresses "animal". + keep_indices = self._nms(boxes, class_names) + boxes = boxes[keep_indices] + class_names = [class_names[i] for i in keep_indices] + scores = scores[keep_indices] + + # Retain top-k constraints based on prediction confidence score. + class_names_and_scores = sorted(list(zip(class_names, scores)), key=lambda t: -t[1]) + class_names_and_scores = class_names_and_scores[: self._max_given_constraints] + + # Replace class name according to ``self.REPLACEMENTS``. + class_names = [self.REPLACEMENTS.get(t[0], t[0]) for t in class_names_and_scores] + + # Drop duplicates. + class_names = list(set(class_names)) + return class_names + + def _nms(self, boxes: np.ndarray, class_names: List[str]): + if len(class_names) == 0: + return [] + + # For object class, get the height of its corresponding node in the hierarchy tree. 
+ # Less height => finer-grained class name => higher score. + heights = np.array( + [ + anytree.search.findall(self._hierarchy, lambda node: node.LabelName.lower() in c)[0].height + for c in class_names + ] + ) + # Get a sorting of the heights in ascending order, i.e. higher scores first. + score_order = heights.argsort() + + # Compute areas for calculating intersection over union. Add 1 to avoid division by zero + # for zero area (padding/dummy) boxes. + x1, y1, x2, y2 = boxes[:, 0], boxes[:, 1], boxes[:, 2], boxes[:, 3] + areas = (x2 - x1 + 1) * (y2 - y1 + 1) + + # Fill "keep_boxes" with indices of boxes to keep, move from left to right in + # ``score_order``, keep current box index (score_order[0]) and suppress (discard) other + # indices of boxes having lower IoU threshold with current box from ``score_order``. + # list. Note the order is a sorting of indices according to scores. + keep_box_indices = [] + + while score_order.size > 0: + # Keep the index of box under consideration. + current_index = score_order[0] + keep_box_indices.append(current_index) + + # For the box we just decided to keep (score_order[0]), compute its IoU with other + # boxes (score_order[1:]). + xx1 = np.maximum(x1[score_order[0]], x1[score_order[1:]]) + yy1 = np.maximum(y1[score_order[0]], y1[score_order[1:]]) + xx2 = np.minimum(x2[score_order[0]], x2[score_order[1:]]) + yy2 = np.minimum(y2[score_order[0]], y2[score_order[1:]]) + + intersection = np.maximum(0.0, xx2 - xx1 + 1) * np.maximum(0.0, yy2 - yy1 + 1) + union = areas[score_order[0]] + areas[score_order[1:]] - intersection + + # Perform NMS for IoU >= 0.85. Check score, boxes corresponding to object + # classes with smaller/equal height in hierarchy cannot be suppressed. + keep_condition = np.logical_or( + heights[score_order[1:]] >= heights[score_order[0]], + intersection / union <= self._nms_threshold, + ) + + # Only keep the boxes under consideration for next iteration. + score_order = score_order[1:] + score_order = score_order[np.where(keep_condition)[0]] + + return keep_box_indices + + +class FiniteStateMachineBuilder(object): + r""" + A helper class to build a Finite State Machine for Constrained Beam Search, as per the + state transitions shown in Figures 7 through 9 from our + `paper appendix `_. + + The FSM is constructed on a per-example basis, and supports up to three constraints, + with each constraint being an Open Image class having up to three words (for example + ``salt and pepper``). Each word in the constraint may have several word-forms (for + example ``dog``, ``dogs``). + + .. note:: Providing more than three constraints may work but it is not tested. + + **Details on Finite State Machine Representation** + + .. image:: ../_static/fsm.jpg + + The FSM is representated as an adjacency matrix. Specifically, it is a tensor of shape + ``(num_total_states, num_total_states, vocab_size)``. In this, ``fsm[S1, S2, W] = 1`` indicates + a transition from "S1" to "S2" if word "W" is decoded. For example, consider **Figure 9**. + The decoding is at initial state (``q0``), constraint word is ``D1``, while any other word + in the vocabulary is ``Dx``. Then we have:: + + fsm[0, 0, D1] = 0 and fsm[0, 1, D1] = 1 # arrow from q0 to q1 + fsm[0, 0, Dx] = 1 and fsm[0, 1, Dx] = 0 # self-loop on q0 + + Consider up to "k" (3) constraints and up to "w" (3) words per constraint. We define these + terms (as members in the class). + + .. 
code-block:: + + _num_main_states = 2 ** k (8) + _total_states = num_main_states * w (24) + + First eight states are considered as "main states", and will always be a part of the FSM. For + less than "k" constraints, some states will be unreachable, hence "useless". These will be + ignored automatically. + + For any multi-word constraint, we use extra "sub-states" after first ``2 ** k`` states. We + make connections according to **Figure 7-8** for such constraints. We dynamically trim unused + sub-states to save computation during decoding. That said, ``num_total_states`` dimension is + at least 8. + + A state "q" satisfies number of constraints equal to the number of "1"s in the binary + representation of that state. For example: + + - state "q0" (000) satisfies 0 constraints. + - state "q1" (001) satisfies 1 constraint. + - state "q2" (010) satisfies 1 constraint. + - state "q3" (011) satisfies 2 constraints. + + and so on. Only main states fully satisfy constraints. + + Parameters + ---------- + tokenizer: BertTokenizer + wordforms_tsvpath: str + Path to a TSV file containing two fields: first is the name of Open Images object class + and second field is a comma separated list of words (possibly singular and plural forms + of the word etc.) which could be CBS constraints. + max_given_constraints: int, optional (default = 3) + Maximum number of constraints which could be given while cbs decoding. Up to three + supported. + max_words_per_constraint: int, optional (default = 3) + Maximum number of words per constraint for multi-word constraints. Note that these are + for multi-word object classes (for example: ``fire hydrant``) and not for multiple + "word-forms" of a word, like singular-plurals. Up to three supported. + """ + + def __init__( + self, + tokenizer, + constraint2tokens_tsvpath, + tokenforms_tsvpath, + max_given_constraints, + max_words_per_constraint = 4, + ): + self._tokenizer = tokenizer + self._max_given_constraints = max_given_constraints + self._max_words_per_constraint = max_words_per_constraint + + self._num_main_states = 2 ** max_given_constraints + self._num_total_states = self._num_main_states * max_words_per_constraint + + self._wordforms: Dict[str, List[str]] = load_wordforms(tokenforms_tsvpath) + self._constraint2tokens = load_wordforms(constraint2tokens_tsvpath) + + def build(self, constraints: List[str]): + r""" + Build a finite state machine given a list of constraints. + + Parameters + ---------- + constraints: List[str] + A list of up to three (possibly) multi-word constraints, in our use-case these are + Open Images object class names. + + Returns + ------- + Tuple[torch.Tensor, int] + A finite state machine as an adjacency matrix, index of the next available unused + sub-state. This is later used to trim the unused sub-states from FSM. + """ + assert len(constraints) <= self._max_given_constraints + fsm = torch.zeros(self._num_total_states, self._num_total_states, dtype=torch.uint8) + + # Self loops for all words on main states. 
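For intuition, before the builder fills in the real transitions below, a hand-built miniature of this adjacency-matrix representation: a single one-word constraint (so two main states) over a toy five-token vocabulary, with token id 3 standing in for the constraint word `D1` from the figure description above:

```python
import torch

vocab_size, num_states, d1 = 5, 2, 3

fsm = torch.zeros(num_states, num_states, vocab_size, dtype=torch.uint8)
fsm[0, 0, :] = 1   # self-loop on q0 for every word ...
fsm[0, 0, d1] = 0  # ... except the constraint word,
fsm[0, 1, d1] = 1  # which moves q0 -> q1 (constraint satisfied)
fsm[1, 1, :] = 1   # once satisfied, q1 loops on everything

# Decoding token 3 in state q0 transitions to q1; any other token stays in q0.
assert fsm[0, 1, d1] == 1 and fsm[0, 0, d1] == 0
print(fsm[0].nonzero())  # the allowed transitions out of q0
```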
+ fsm[range(self._num_main_states), range(self._num_main_states)] = 1 + + fsm = fsm.unsqueeze(-1).repeat(1, 1, self._tokenizer.vocab_size) + + substate_idx = self._num_main_states + for i, constraint in enumerate(constraints): + fsm, substate_idx = self._add_nth_constraint(fsm, i + 1, substate_idx, constraint) + + return fsm, substate_idx + + def _add_nth_constraint(self, fsm: torch.Tensor, n: int, substate_idx: int, constraint: str): + r""" + Given an (incomplete) FSM matrix with transitions for "(n - 1)" constraints added, add + all transitions for the "n-th" constraint. + + Parameters + ---------- + fsm: torch.Tensor + A tensor of shape ``(num_total_states, num_total_states, vocab_size)`` representing an + FSM under construction. + n: int + The cardinality of constraint to be added. Goes as 1, 2, 3... (not zero-indexed). + substate_idx: int + An index which points to the next unused position for a sub-state. It starts with + ``(2 ** num_main_states)`` and increases according to the number of multi-word + constraints added so far. The calling method, :meth:`build` keeps track of this. + constraint: str + A (possibly) multi-word constraint, in our use-case it is an Open Images object class + name. + + Returns + ------- + Tuple[torch.Tensor, int] + FSM with added connections for the constraint and updated ``substate_idx`` pointing to + the next unused sub-state. + """ + #words = constraint.split() + words = [] + for w in constraint.split(): + words.extend(self._constraint2tokens[w]) + #TODO: set max_words_per_constraint + #assert len(words) <= self._max_words_per_constraint + if len(words) > self._max_words_per_constraint: + words = words[:self._max_words_per_constraint] + connection_stride = 2 ** (n - 1) + + from_state = 0 + while from_state < self._num_main_states: + for _ in range(connection_stride): + word_from_state = from_state + for i, word in enumerate(words): + # fmt: off + # Connect to a sub-state for all tokens in multi-word constraint except last. + if i != len(words) - 1: + fsm = self._connect( + fsm, word_from_state, substate_idx, word, reset_state=from_state + ) + word_from_state = substate_idx + substate_idx += 1 + else: + fsm = self._connect( + fsm, word_from_state, from_state + connection_stride, word, + reset_state=from_state, + ) + # fmt: on + from_state += 1 + from_state += connection_stride + return fsm, substate_idx + + def _connect( + self, fsm: torch.Tensor, from_state: int, to_state: int, word: str, reset_state: int = None + ): + r""" + Add a connection between two states for a particular word (and all its word-forms). This + means removing self-loop from ``from_state`` for all word-forms of ``word`` and connecting + them to ``to_state``. + + Extended Summary + ---------------- + In case of multi-word constraints, we return back to the ``reset_state`` for any utterance + other than ``word``, to satisfy a multi-word constraint if all words are decoded + consecutively. For example: for "fire hydrant" as a constraint between Q0 and Q1, we reach + a sub-state "Q8" on decoding "fire". Go back to main state "Q1" on decoding "hydrant" + immediately after, else we reset back to main state "Q0". + + Parameters + ---------- + fsm: torch.Tensor + A tensor of shape ``(num_total_states, num_total_states, vocab_size)`` representing an + FSM under construction. + from_state: int + Origin state to make a state transition. + to_state: int + Destination state to make a state transition. + word: str + The word which serves as a constraint for transition between given two states. 
+ reset_state: int, optional (default = None) + State to reset otherwise. This is only valid if ``from_state`` is a sub-state. + + Returns + ------- + torch.Tensor + FSM with the added connection. + """ + wordforms = self._wordforms.get(word, [word]) + #wordform_indices = [self._vocabulary.get_token_index(w) for w in wordforms] + wordform_indices = self._tokenizer.convert_tokens_to_ids(wordforms) + + for wordform_index in wordform_indices: + fsm[from_state, to_state, wordform_index] = 1 + fsm[from_state, from_state, wordform_index] = 0 + + if reset_state is not None: + fsm[from_state, from_state, :] = 0 + fsm[from_state, reset_state, :] = 1 + for wordform_index in wordform_indices: + fsm[from_state, reset_state, wordform_index] = 0 + + return fsm + diff --git a/oscar/utils/logger.py b/oscar/utils/logger.py new file mode 100644 index 0000000..9deee1c --- /dev/null +++ b/oscar/utils/logger.py @@ -0,0 +1,102 @@ +# Copyright (c) 2020 Microsoft Corporation. Licensed under the MIT license. + +import logging +from logging import StreamHandler, Handler, getLevelName +import os +import sys + + +# this class is a copy of logging.FileHandler except we end self.close() +# at the end of each emit. While closing file and reopening file after each +# write is not efficient, it allows us to see partial logs when writing to +# fused Azure blobs, which is very convenient +class FileHandler(StreamHandler): + """ + A handler class which writes formatted logging records to disk files. + """ + def __init__(self, filename, mode='a', encoding=None, delay=False): + """ + Open the specified file and use it as the stream for logging. + """ + # Issue #27493: add support for Path objects to be passed in + filename = os.fspath(filename) + #keep the absolute path, otherwise derived classes which use this + #may come a cropper when the current directory changes + self.baseFilename = os.path.abspath(filename) + self.mode = mode + self.encoding = encoding + self.delay = delay + if delay: + #We don't open the stream, but we still need to call the + #Handler constructor to set level, formatter, lock etc. + Handler.__init__(self) + self.stream = None + else: + StreamHandler.__init__(self, self._open()) + + def close(self): + """ + Closes the stream. + """ + self.acquire() + try: + try: + if self.stream: + try: + self.flush() + finally: + stream = self.stream + self.stream = None + if hasattr(stream, "close"): + stream.close() + finally: + # Issue #19523: call unconditionally to + # prevent a handler leak when delay is set + StreamHandler.close(self) + finally: + self.release() + + def _open(self): + """ + Open the current base file with the (original) mode and encoding. + Return the resulting stream. + """ + return open(self.baseFilename, self.mode, encoding=self.encoding) + + def emit(self, record): + """ + Emit a record. + + If the stream was not opened because 'delay' was specified in the + constructor, open it before calling the superclass's emit. 
+ """ + if self.stream is None: + self.stream = self._open() + StreamHandler.emit(self, record) + self.close() + + def __repr__(self): + level = getLevelName(self.level) + return '<%s %s (%s)>' % (self.__class__.__name__, self.baseFilename, level) + + +def setup_logger(name, save_dir, distributed_rank, filename="log.txt"): + logger = logging.getLogger(name) + logger.setLevel(logging.DEBUG) + # don't log results for the non-master process + if distributed_rank > 0: + return logger + ch = logging.StreamHandler(stream=sys.stdout) + ch.setLevel(logging.DEBUG) + formatter = logging.Formatter("%(asctime)s %(name)s %(levelname)s: %(message)s") + ch.setFormatter(formatter) + logger.addHandler(ch) + + if save_dir: + fh = FileHandler(os.path.join(save_dir, filename)) + fh.setLevel(logging.DEBUG) + fh.setFormatter(formatter) + logger.addHandler(fh) + + return logger + diff --git a/oscar/utils/misc.py b/oscar/utils/misc.py new file mode 100644 index 0000000..6b6df2e --- /dev/null +++ b/oscar/utils/misc.py @@ -0,0 +1,46 @@ +# Copyright (c) 2020 Microsoft Corporation. Licensed under the MIT license. + +import errno +import os +import os.path as op +import yaml +import random +import torch +import numpy as np + + +def mkdir(path): + # if it is the current folder, skip. + if path == '': + return + try: + os.makedirs(path) + except OSError as e: + if e.errno != errno.EEXIST: + raise + + +def set_seed(seed, n_gpu): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if n_gpu > 0: + torch.cuda.manual_seed_all(seed) + + +def load_from_yaml_file(yaml_file): + with open(yaml_file, 'r') as fp: + return yaml.load(fp) + + +def find_file_path_in_yaml(fname, root): + if fname is not None: + if op.isfile(fname): + return fname + elif op.isfile(op.join(root, fname)): + return op.join(root, fname) + else: + raise FileNotFoundError( + errno.ENOENT, os.strerror(errno.ENOENT), op.join(root, fname) + ) + diff --git a/oscar/utils/task_utils.py b/oscar/utils/task_utils.py new file mode 100644 index 0000000..67b510f --- /dev/null +++ b/oscar/utils/task_utils.py @@ -0,0 +1,442 @@ +# Copyright (c) 2020 Microsoft Corporation. Licensed under the MIT license. + +from __future__ import absolute_import, division, print_function + +import csv, json +import logging +import os +import sys +from io import open +import _pickle as cPickle +import torch + +logger = logging.getLogger(__name__) + + +class InputInstance(object): + """A single training/test example for simple sequence classification.""" + + def __init__(self, guid, text_a, text_b=None, label=None, score=None, img_key=None, q_id=None): + """Constructs a InputExample. + + Args: + guid: Unique id for the example. + text_a: string. The untokenized text of the first sequence. For single + sequence tasks, only this sequence must be specified. + text_b: (Optional) string. The untokenized text of the second sequence. + Only must be specified for sequence pair tasks. + label: (Optional) string. The label of the example. This should be + specified for train and dev examples, but not for test examples. 
+ """ + + self.guid = guid + self.text_a = text_a + self.text_b = text_b + self.label = label + self.score = score + self.img_key = img_key + self.q_id = q_id + + +class InputFeat(object): + """A single set of features of data.""" + + def __init__(self, input_ids, input_mask, segment_ids, label_id, score, img_feat): + self.input_ids = input_ids + self.input_mask = input_mask + self.segment_ids = segment_ids + self.label_id = label_id + self.score = score + self.img_feat = img_feat + + +class DataProcessor(object): + """Base class for data converters for sequence classification data sets.""" + + def get_train_examples(self, data_dir): + """Gets a collection of `InputExample`s for the train set.""" + raise NotImplementedError() + + def get_dev_examples(self, data_dir): + """Gets a collection of `InputExample`s for the dev set.""" + raise NotImplementedError() + + def get_labels(self): + """Gets the list of labels for this data set.""" + raise NotImplementedError() + + @classmethod + def _read_tsv(cls, input_file, quotechar=None): + """Reads a tab separated value file.""" + with open(input_file, "r", encoding="utf-8-sig") as f: + reader = csv.reader(f, delimiter="\t", quotechar=quotechar) + lines = [] + for line in reader: + if sys.version_info[0] == 2: + line = list(unicode(cell, 'utf-8') for cell in line) + lines.append(line) + return lines + + +class VQATextProcessor(DataProcessor): + """ Processor for the VQA Text data set. """ + + def get_train_examples(self, data_dir, file_name='train2014_qla.json'): + """ See base class.""" + + lines = json.load(open(os.path.join(data_dir, file_name))) + return self._create_examples(lines, "train") + + #return self._create_examples(self._read_tsv(os.path.join(data_dir, "train2014_qla.tsv")), "train") + + def get_dev_examples(self, data_dir, file_name='val2014_qla.json'): + """ See base class.""" + + lines = json.load(open(os.path.join(data_dir, file_name))) + return self._create_examples(lines, "dev") + + #return self._create_examples(self._read_tsv(os.path.join(data_dir, "val2014_qla.tsv")), "dev") + + def get_test_examples(self, data_dir, file_name='test2015_qla.json'): + """ See base class.""" + + lines = json.load(open(os.path.join(data_dir, file_name))) + return self._create_examples(lines, "test") + + def get_labels(self, label_file): + """ See base class.""" + + ans2label = cPickle.load(open(label_file, 'rb')) + return list(ans2label.values()) + #return ["entailment", "not_entailment"] + + def _create_examples(self, lines, set_type): + """Creates examples for the training and dev sets.""" + + examples = [] + for (i, line) in enumerate(lines): + if set_type!='test' and len(line['an']) == 0: continue + + guid = "%s-%s" % (set_type, str(i)) + text_a = line['q'] + text_b = line['o'].replace(';', ' ').strip() #line['o'] + label = None if set_type.startswith('test') else line['an'] + score = None if set_type.startswith('test') else line['s'] + img_key = line['img_id'] + q_id = int(line['q_id']) if set_type.startswith('test') else 0 + examples.append(InputInstance(guid=guid, text_a=text_a, text_b=text_b, label=label, score=score, img_key=img_key, q_id=q_id)) + return examples + +class VQATextAProcessor(DataProcessor): + """ Processor for the VQA Text data set. 
""" + + def get_train_examples(self, data_dir, file_name='train2014_qla.json'): + """ See base class.""" + + lines = json.load(open(os.path.join(data_dir, file_name))) + return self._create_examples(lines, "train") + + #return self._create_examples(self._read_tsv(os.path.join(data_dir, "train2014_qla.tsv")), "train") + + def get_dev_examples(self, data_dir, file_name='val2014_qla.json'): + """ See base class.""" + + lines = json.load(open(os.path.join(data_dir, file_name))) + return self._create_examples(lines, "dev") + + #return self._create_examples(self._read_tsv(os.path.join(data_dir, "val2014_qla.tsv")), "dev") + + def get_test_examples(self, data_dir, file_name='test2015_qla.json'): + """ See base class.""" + + lines = json.load(open(os.path.join(data_dir, file_name))) + return self._create_examples(lines, "test") + + def get_labels(self, label_file): + """ See base class.""" + + ans2label = cPickle.load(open(label_file, 'rb')) + return list(ans2label.values()) + + def _create_examples(self, lines, set_type): + """Creates examples for the training and dev sets.""" + + examples = [] + for (i, line) in enumerate(lines): + if set_type!='test' and len(line['an']) == 0: continue + + guid = "%s-%s" % (set_type, str(i)) + text_a = line['q'] + text_b = None # line['o'] # or None + label = None if set_type.startswith('test') else line['an'] + score = None if set_type.startswith('test') else line['s'] + img_key = line['img_id'] + q_id = int(line['q_id']) if set_type.startswith('test') else 0 + examples.append(InputInstance(guid=guid, text_a=text_a, text_b=text_b, label=label, score=score, img_key=img_key, q_id=q_id)) + return examples + +class GQAProcessor(DataProcessor): + """ Processor for the GQA data set. """ + + def get_train_examples(self, data_dir, file_name='train2014_qla.json'): + """ See base class.""" + + lines = json.load(open(os.path.join(data_dir, file_name))) + return self._create_examples(lines, "train") + + #return self._create_examples(self._read_tsv(os.path.join(data_dir, "train2014_qla.tsv")), "train") + + def get_dev_examples(self, data_dir, file_name='val2014_qla.json'): + """ See base class.""" + + lines = json.load(open(os.path.join(data_dir, file_name))) + return self._create_examples(lines, "dev") + + #return self._create_examples(self._read_tsv(os.path.join(data_dir, "val2014_qla.tsv")), "dev") + + def get_test_examples(self, data_dir, file_name='test2015_qla.json'): + """ See base class.""" + + lines = json.load(open(os.path.join(data_dir, file_name))) + return self._create_examples(lines, "test") + + def get_labels(self, label_file='trainval_testdev_all_ans2label.pkl'): + """ See base class.""" + + ans2label = cPickle.load(open(label_file, 'rb')) + return list(ans2label.values()) + + def _create_examples(self, lines, set_type): + """Creates examples for the training and dev sets.""" + + examples = [] + for (i, line) in enumerate(lines): + if set_type!='test' and len(line['an']) == 0: continue + + guid = "%s-%s" % (set_type, str(i)) + text_a = line['q'] + text_b = line['o'] # or None + label = None if set_type.startswith('test') else line['an'] + score = 0 + img_key = line['img_id'] + q_id = int(line['q_id']) if set_type.startswith('test') else 0 + examples.append(InputInstance(guid=guid, text_a=text_a, text_b=text_b, label=label, score=score, img_key=img_key, q_id=q_id)) + return examples + +class NLVRProcessor(DataProcessor): + """ Processor for the NLVR data set. 
""" + + def get_train_examples(self, data_dir, use_label_seq=True, file_name='nlvr2_train.json'): + """ See base class.""" + + lines = json.load(open(os.path.join(data_dir, file_name))) + return self._create_examples(lines, "train", use_label_seq) + + #return self._create_examples(self._read_tsv(os.path.join(data_dir, "train2014_qla.tsv")), "train") + + def get_dev_examples(self, data_dir, use_label_seq=True, file_name='nlvr2_dev.json'): + """ See base class.""" + + lines = json.load(open(os.path.join(data_dir, file_name))) + return self._create_examples(lines, "dev", use_label_seq) + + #return self._create_examples(self._read_tsv(os.path.join(data_dir, "val2014_qla.tsv")), "dev") + + def get_test_examples(self, data_dir, use_label_seq=True, file_name='nlvr2_test1.json'): + """ See base class.""" + + lines = json.load(open(os.path.join(data_dir, file_name))) + return self._create_examples(lines, "test", use_label_seq) + + def get_labels(self, label_file=None): + """ See base class.""" + + #ans2label = cPickle.load(open(label_file, 'rb')) + #return list(ans2label.values()) + return [0, 1] + + def _create_examples(self, lines, set_type, use_label_seq=True): + """ Creates examples for the training and dev sets. """ + + examples = [] + for (i, line) in enumerate(lines): + guid = "%s-%s" % (set_type, str(i)) + text_a = line['q'] + text_b = line['o'] if use_label_seq else None + label = line['label'] #None if set_type.startswith('test') else line['label'] + score = 0 + img_key = line['img_id'] #[line['img_left'], line['img_left']] + q_id = 0 #int(line['q_id']) if set_type.startswith('test') else 0 + examples.append(InputInstance(guid=guid, text_a=text_a, text_b=text_b, label=label, score=score, img_key=img_key, q_id=q_id)) + return examples + + +def convert_examples_to_features_vqa(examples, img_feats, label_list, max_img_seq_length, max_seq_length, + tokenizer, output_mode, + cls_token_at_end=False, pad_on_left=False, + cls_token='[CLS]', sep_token='[SEP]', pad_token=0, + sequence_a_segment_id=0, sequence_b_segment_id=1, + cls_token_segment_id=1, pad_token_segment_id=0, + mask_padding_with_zero=True): + """ Loads a data file into a list of `InputBatch`s + `cls_token_at_end` define the location of the CLS token: + - False (Default, BERT/XLM pattern): [CLS] + A + [SEP] + B + [SEP] + - True (XLNet/GPT pattern): A + [SEP] + B + [SEP] + [CLS] + `cls_token_segment_id` define the segment id associated to the CLS token (0 for BERT, 2 for XLNet) + """ + + label_map = {label:i for i, label in enumerate(label_list)} + + features = [] + #debug: + debug_size = 500 + + for (ex_index, example) in enumerate(examples[0: ]): + if len(example.label) == 0: continue + if ex_index % 10000 == 0: + logger.info("Writing example %d of %d" % (ex_index, len(examples))) + + tokens_a = tokenizer.tokenize(example.text_a) + + tokens_b = None + if example.text_b: + tokens_b = tokenizer.tokenize(example.text_b) + # Modifies `tokens_a` and `tokens_b` in place so that the total + # length is less than the specified length. + # Account for [CLS], [SEP], [SEP] with "- 3" + _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3) + else: + # Account for [CLS] and [SEP] with "- 2" + if len(tokens_a) > max_seq_length - 2: + tokens_a = tokens_a[:(max_seq_length - 2)] + + # The convention in BERT is: + # (a) For sequence pairs: + # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP] + # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1 + # (b) For single sequences: + # tokens: [CLS] the dog is hairy . 
[SEP] + # type_ids: 0 0 0 0 0 0 0 + # + # Where "type_ids" are used to indicate whether this is the first + # sequence or the second sequence. The embedding vectors for `type=0` and + # `type=1` were learned during pre-training and are added to the wordpiece + # embedding vector (and position vector). This is not *strictly* necessary + # since the [SEP] token unambiguously separates the sequences, but it makes + # it easier for the model to learn the concept of sequences. + # + # For classification tasks, the first vector (corresponding to [CLS]) is + # used as the "sentence vector". Note that this only makes sense because + # the entire model is fine-tuned. + tokens = tokens_a + [sep_token] + segment_ids = [sequence_a_segment_id] * len(tokens) + + if tokens_b: + tokens += tokens_b + [sep_token] + segment_ids += [sequence_b_segment_id] * (len(tokens_b) + 1) + + if cls_token_at_end: + tokens = tokens + [cls_token] + segment_ids = segment_ids + [cls_token_segment_id] + else: + tokens = [cls_token] + tokens + segment_ids = [cls_token_segment_id] + segment_ids + + input_ids = tokenizer.convert_tokens_to_ids(tokens) + + # The mask has 1 for real tokens and 0 for padding tokens. Only real + # tokens are attended to. + input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids) + + # Zero-pad up to the sequence length. + padding_length = max_seq_length - len(input_ids) + if pad_on_left: + input_ids = ([pad_token] * padding_length) + input_ids + input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask + segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids + else: + input_ids = input_ids + ([pad_token] * padding_length) + input_mask = input_mask + ([0 if mask_padding_with_zero else 1] * padding_length) + segment_ids = segment_ids + ([pad_token_segment_id] * padding_length) + + assert len(input_ids) == max_seq_length + assert len(input_mask) == max_seq_length + assert len(segment_ids) == max_seq_length + + # image features + #img_feat = img_feats[example.img_key] # torch + img_feat = img_feats.item().get(example.img_key) # numpy + if img_feat.shape[0] > max_img_seq_length: + img_feat = img_feat[0:max_img_seq_length, ] + if max_img_seq_length > 0: + input_mask = input_mask + [1 if mask_padding_with_zero else 0] * img_feat.shape[0] + #segment_ids += [sequence_b_segment_id] * img_feat.shape[0] + else: + if max_img_seq_length > 0: + input_mask = input_mask + [1 if mask_padding_with_zero else 0] * img_feat.shape[0] + #segment_ids = segment_ids + [sequence_b_segment_id] * img_feat.shape[0] + padding_matrix = torch.zeros((max_img_seq_length - img_feat.shape[0], img_feat.shape[1])) + img_feat = torch.cat((img_feat, padding_matrix), 0) + if max_img_seq_length > 0: + input_mask = input_mask + ([0 if mask_padding_with_zero else 1] * padding_matrix.shape[0]) + #segment_ids = segment_ids + [pad_token_segment_id] * padding_matrix.shape[0] + + if output_mode == "classification": + label_id = [label_map[l] for l in example.label] + score = example.score + elif output_mode == "regression": + label_id = float(example.label) + else: + raise KeyError(output_mode) + + if ex_index < 5: + logger.info("*** Example ***") + logger.info("guid: %s" % (example.guid)) + logger.info("tokens: %s" % " ".join([str(x) for x in tokens])) + logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids])) + logger.info("input_mask: %s" % " ".join([str(x) for x in input_mask])) + logger.info("segment_ids: %s" % " ".join([str(x) for x in segment_ids])) + logger.info("label: %s (id = 
%s)" % (example.label, label_id)) + logger.info("score: %s (score = %s)" % (example.score, score)) + + features.append(InputFeat(input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids, label_id=label_id, score=score, img_feat=img_feat)) + return features + + +def _truncate_seq_pair(tokens_a, tokens_b, max_length): + """Truncates a sequence pair in place to the maximum length.""" + + # This is a simple heuristic which will always truncate the longer sequence + # one token at a time. This makes more sense than truncating an equal percent + # of tokens from each, since if one sequence is very short then each token + # that's truncated likely contains more information than a longer sequence. + while True: + total_length = len(tokens_a) + len(tokens_b) + if total_length <= max_length: + break + if len(tokens_a) > len(tokens_b): + tokens_a.pop() + else: + tokens_b.pop() + + +processors = { + "vqa_text": VQATextProcessor, + "vqa_text_a": VQATextAProcessor, + "gqa": GQAProcessor, + "nlvr": NLVRProcessor +} + +output_modes = { + "vqa_text": "classification", + "vqa_text_a": "classification", + "gqa": "classification", + "nlvr": "classification" +} + +GLUE_TASKS_NUM_LABELS = { + "vqa_text": 3129, + "vqa_text_a": 3129, + "gqa": 1853, + "nlvr": 2 +} diff --git a/oscar/utils/tsv_file.py b/oscar/utils/tsv_file.py new file mode 100644 index 0000000..3563bb1 --- /dev/null +++ b/oscar/utils/tsv_file.py @@ -0,0 +1,85 @@ +# Copyright (c) 2020 Microsoft Corporation. Licensed under the MIT license. + +import logging +import os +import os.path as op + + +def generate_lineidx(filein, idxout): + idxout_tmp = idxout + '.tmp' + with open(filein, 'r') as tsvin, open(idxout_tmp,'w') as tsvout: + fsize = os.fstat(tsvin.fileno()).st_size + fpos = 0 + while fpos!=fsize: + tsvout.write(str(fpos)+"\n") + tsvin.readline() + fpos = tsvin.tell() + os.rename(idxout_tmp, idxout) + + +class TSVFile(object): + def __init__(self, tsv_file, generate_lineidx=False): + self.tsv_file = tsv_file + self.lineidx = op.splitext(tsv_file)[0] + '.lineidx' + self._fp = None + self._lineidx = None + # the process always keeps the process which opens the file. + # If the pid is not equal to the currrent pid, we will re-open the file. 
+ self.pid = None + # generate the lineidx file if it does not exist + if not op.isfile(self.lineidx) and generate_lineidx: + _generate_lineidx(self.tsv_file, self.lineidx) + + def __del__(self): + if self._fp: + self._fp.close() + + def __str__(self): + return "TSVFile(tsv_file='{}')".format(self.tsv_file) + + def __repr__(self): + return str(self) + + def num_rows(self): + self._ensure_lineidx_loaded() + return len(self._lineidx) + + def seek(self, idx): + self._ensure_tsv_opened() + self._ensure_lineidx_loaded() + try: + pos = self._lineidx[idx] + except: + logging.info('{}-{}'.format(self.tsv_file, idx)) + raise + self._fp.seek(pos) + return [s.strip() for s in self._fp.readline().split('\t')] + + def seek_first_column(self, idx): + self._ensure_tsv_opened() + self._ensure_lineidx_loaded() + pos = self._lineidx[idx] + self._fp.seek(pos) + return read_to_character(self._fp, '\t') + + def __getitem__(self, index): + return self.seek(index) + + def __len__(self): + return self.num_rows() + + def _ensure_lineidx_loaded(self): + if self._lineidx is None: + logging.info('loading lineidx: {}'.format(self.lineidx)) + with open(self.lineidx, 'r') as fp: + self._lineidx = [int(i.strip()) for i in fp.readlines()] + + def _ensure_tsv_opened(self): + if self._fp is None: + self._fp = open(self.tsv_file, 'r') + self.pid = os.getpid() + + if self.pid != os.getpid(): + logging.info('re-open {} because the process id changed'.format(self.tsv_file)) + self._fp = open(self.tsv_file, 'r') + self.pid = os.getpid() diff --git a/oscar/utils/tsv_file_ops.py b/oscar/utils/tsv_file_ops.py new file mode 100644 index 0000000..f520aef --- /dev/null +++ b/oscar/utils/tsv_file_ops.py @@ -0,0 +1,18 @@ +# Copyright (c) 2020 Microsoft Corporation. Licensed under the MIT license. + +import os +from .misc import mkdir + + +def tsv_writer(values, tsv_file_name, sep='\t'): + mkdir(os.path.dirname(tsv_file_name)) + tsv_file_name_tmp = tsv_file_name + '.tmp' + with open(tsv_file_name_tmp, 'wb') as fp: + assert values is not None + for value in values: + assert value is not None + v = sep.join(map(lambda v: v.decode() if type(v) == bytes else str(v), value)) + '\n' + v = v.encode() + fp.write(v) + os.rename(tsv_file_name_tmp, tsv_file_name) + diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000..7bd1d73 --- /dev/null +++ b/requirements.txt @@ -0,0 +1,8 @@ +tqdm +pyyaml +matplotlib +requests +scikit-image +anytree +regex +boto3 diff --git a/setup.py b/setup.py new file mode 100644 index 0000000..b9105f5 --- /dev/null +++ b/setup.py @@ -0,0 +1,47 @@ +#!/usr/bin/python + +from __future__ import print_function +import os +import sys +import re +import os.path as op +from setuptools import find_packages, setup + +# change directory to this module path +try: + this_file = __file__ +except NameError: + this_file = sys.argv[0] +this_file = os.path.abspath(this_file) +if op.dirname(this_file): + os.chdir(op.dirname(this_file)) +script_dir = os.getcwd() + +def readme(fname): + """Read text out of a file in the same directory as setup.py. 
+ """ + return open(op.join(script_dir, fname)).read() + + +def find_version(fname): + version_file = readme(fname) + version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", + version_file, re.M) + if version_match: + return version_match.group(1) + raise RuntimeError("Unable to find version string.") + + +setup( + name="oscar", + version=find_version("oscar/__init__.py"), + url='https://github.com/xjli/Oscar', + description="Oscar for vision and language tasks", + long_description=readme('README.md'), + packages=find_packages(), + classifiers=[ + 'Intended Audience :: Developers', + "Programming Language :: Python", + 'Topic :: Software Development', + ] +) diff --git a/transformers b/transformers new file mode 160000 index 0000000..067923d --- /dev/null +++ b/transformers @@ -0,0 +1 @@ +Subproject commit 067923d3267325f525f4e46f357360c191ba562e