# general settings
name: DefocusDeblur_DualPixel_16bit_Restormer
model_type: ImageCleanModel
scale: 1
num_gpu: 8  # set num_gpu: 0 for cpu mode
manual_seed: 100

# dataset and data loader settings
datasets:
  train:
    name: TrainSet
    type: Dataset_DefocusDeblur_DualPixel_16bit
    dataroot_gt: ./Defocus_Deblurring/Datasets/train/DPDD/target_crops
    dataroot_lqL: ./Defocus_Deblurring/Datasets/train/DPDD/inputL_crops
    dataroot_lqR: ./Defocus_Deblurring/Datasets/train/DPDD/inputR_crops
    geometric_augs: true
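    # Note (assumption): geometric_augs typically applies random flips and
    # 90-degree rotations to the training crops; disable it if crop orientation
    # must be preserved.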

    filename_tmpl: '{}'
    io_backend:
      type: disk

    # data loader
    use_shuffle: true
    num_worker_per_gpu: 8
    batch_size_per_gpu: 8
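    # With num_gpu: 8 and batch_size_per_gpu: 8, the nominal global batch is 64.
    # Assumption: during progressive training the effective per-GPU batch is
    # taken from mini_batch_sizes below rather than batch_size_per_gpu.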

    ### -------------Progressive training--------------------------
    mini_batch_sizes: [8,5,4,2,1,1]   # Batch size per gpu
    iters: [92000,64000,48000,36000,36000,24000]
    gt_size: 384   # Max patch size for progressive training
    gt_sizes: [128,160,192,256,320,384]   # Patch sizes for progressive training.
    ### ------------------------------------------------------------
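    # Stage i trains for iters[i] iterations on gt_sizes[i] x gt_sizes[i] crops
    # with mini_batch_sizes[i] samples per GPU; larger patches are paired with
    # smaller batches to keep memory use roughly constant. The stage lengths
    # sum to 92k + 64k + 48k + 36k + 36k + 24k = 300k, matching total_iter below.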

    ### ------- Training on single fixed-patch size 128x128---------
    # mini_batch_sizes: [8]
    # iters: [300000]
    # gt_size: 128
    # gt_sizes: [128]
    ### ------------------------------------------------------------

    dataset_enlarge_ratio: 1
    prefetch_mode: ~

  val:
    name: ValSet
    type: Dataset_DefocusDeblur_DualPixel_16bit
    dataroot_gt: ./Defocus_Deblurring/Datasets/val/DPDD/target_crops
    dataroot_lqL: ./Defocus_Deblurring/Datasets/val/DPDD/inputL_crops
    dataroot_lqR: ./Defocus_Deblurring/Datasets/val/DPDD/inputR_crops
    io_backend:
      type: disk

# network structures
network_g:
  type: Restormer
  inp_channels: 6
  out_channels: 3
  dim: 48
  num_blocks: [4,6,6,8]
  num_refinement_blocks: 4
  heads: [1,2,4,8]
  ffn_expansion_factor: 2.66
  bias: false
  LayerNorm_type: WithBias
  dual_pixel_task: true
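  # Note: inp_channels is 6 because the dual-pixel left and right RGB views
  # (dataroot_lqL / dataroot_lqR) are concatenated along the channel dimension,
  # while the restored output is a single 3-channel image.
  # Assumption: dual_pixel_task: true enables the extra skip connection that
  # Restormer uses when the input and output channel counts differ.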


# path
path:
  pretrain_network_g: ~
  strict_load_g: true
  resume_state: ~

# training settings
train:
  total_iter: 300000
  warmup_iter: -1 # no warm up
  use_grad_clip: true

  # Split 300k iterations into two cycles.
  # 1st cycle: fixed 3e-4 LR for 92k iters.
  # 2nd cycle: cosine annealing (3e-4 to 1e-6) for 208k iters.
  scheduler:
    type: CosineAnnealingRestartCyclicLR
    periods: [92000, 208000]
    restart_weights: [1,1]
    eta_mins: [0.0003,0.000001]
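  # The two periods sum to 92k + 208k = 300k, matching total_iter. Because the
  # first cycle's eta_min (3e-4) equals the base learning rate, the LR stays
  # flat for the first 92k iterations before the cosine decay to 1e-6 begins.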

  mixing_augs:
    mixup: false
    mixup_beta: 1.2
    use_identity: true
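  # Assumption: when mixup is enabled, pairs of training samples are blended
  # with weights drawn from Beta(mixup_beta, mixup_beta); use_identity keeps a
  # fraction of samples unmixed. Mixup is disabled in this config.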

  optim_g:
    type: AdamW
    lr: !!float 3e-4
    weight_decay: !!float 1e-4
    betas: [0.9, 0.999]

  # losses
  pixel_opt:
    type: L1Loss
    loss_weight: 1
    reduction: mean

# validation settings
val:
  window_size: 8
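  # Assumption: window_size: 8 pads validation images so height and width are
  # multiples of 8, matching the network's three downsampling stages (2^3 = 8);
  # the padded region is cropped away before metrics are computed.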
  val_freq: !!float 4e3
  save_img: false
  rgb2bgr: true
  use_image: false
  max_minibatch: 8

  metrics:
    psnr: # metric name, can be arbitrary
      type: calculate_psnr
      crop_border: 0
      test_y_channel: false

# logging settings
logger:
  print_freq: 1000
  save_checkpoint_freq: !!float 4e3
  use_tb_logger: true
  wandb:
    project: ~
    resume_id: ~

# dist training settings
dist_params:
  backend: nccl
  port: 29500