From 3cc8aef589087156f5c25af71bbbde2c35c38086 Mon Sep 17 00:00:00 2001
From: sw-song
Date: Sun, 27 Jun 2021 15:12:46 +0900
Subject: [PATCH 01/11] add custom python package

---
 README.md |  13 ++++
 custom.py | 192 ++++++++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 205 insertions(+)
 create mode 100644 custom.py

diff --git a/README.md b/README.md
index 092b52bb4..f2108e686 100755
--- a/README.md
+++ b/README.md
@@ -1,3 +1,16 @@
+## StyleGAN2-ADA Custom implementation
+
+- **INPUT Image : sample B(Before), sample A(After), target B(Before)**
+- **OUTPUT W : 'get_w.pt' (extracted Style)**
+- **OUTPUT Image : target A(After)**
+
+```.bash
+python custom.py(--sample_before 's_b.png' --sample_after 's_a.png' --target_before 't_b.png' --target_after 't_a.png')
+```
+
+
+---
+
 ## StyleGAN2-ADA — Official PyTorch implementation
 
 ![Teaser image](./docs/stylegan2-ada-teaser-1024x252.png)
diff --git a/custom.py b/custom.py
new file mode 100644
index 000000000..5307ad73f
--- /dev/null
+++ b/custom.py
@@ -0,0 +1,192 @@
+import torch
+import torch.nn.functional as F
+import torchvision
+import torchvision.transforms as transforms
+import dnnlib
+import legacy
+
+import PIL
+from PIL import Image
+
+import numpy as np
+
+import argparse
+import copy
+import pickle
+import matplotlib.pyplot as plt
+
+
+def parse_command_line_args():
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--sample_before', required=True, help='image that already has the W')
+    parser.add_argument('--sample_after', required=True, help='image that does not include the W')
+    parser.add_argument('--target_before', required=True, help='image that you want to apply the W to')
+    parser.add_argument('--target_after', required=True, help='path for saving the result')
+
+    return vars(parser.parse_args())
+
+def run(**kwargs):
+    sample_before = kwargs.sample_before
+    sample_after = kwargs.sample_after
+    target_before = kwargs.target_before
+    target_after = kwargs.target_after
+
+
+    with dnnlib.util.open_url(network_pkl) as f:
+        G = legacy.load_network_pkl(f)['G_ema'].to(device)
+
+    # install plug-in: the first forward pass compiles the custom CUDA ops
+    z = torch.randn([1, G.z_dim]).cuda()
+    c = None
+    img = G(z,c)
+
+
+    before = sample_before
+    after = sample_after
+
+    w_b = projection(before)
+    w_a = projection(after)
+
+    w = w_b - w_a # age vector
+    torch.save(w, 'get_w.pt')
+
+    target = target_before
+
+    w_t_b = projection(target)
+    w_t_a = w_t_b - w # minus-age
+    gen_target_after = generation(w_t_a,G)
+    img = Image.fromarray(gen_target_after)
+    img.save(target_after)
+
+
+def projection(img_path):
+    img = Image.open(img_path).convert('RGB')
+    w, h = img.size
+    s = min(w,h)
+
+    #------------------------------#
+    with dnnlib.util.open_url(network_pkl) as f:
+        G = legacy.load_network_pkl(f)['G_ema'].to(device)
+    #------------------------------#
+    img = img.crop(((w - s) // 2, (h - s) // 2, (w + s) // 2, (h + s) // 2))
+    img = img.resize((G.img_resolution, G.img_resolution), PIL.Image.LANCZOS)
+    img_uint8 = np.array(img, dtype=np.uint8)
+
+    G_eval = copy.deepcopy(G).eval().requires_grad_(False).to(device)
+
+    # Compute w stats
+    z_samples = np.random.randn(10000, G_eval.z_dim) # G_eval.z_dim == 512, (10000,512)
+    w_samples = G_eval.mapping(torch.from_numpy(z_samples).to(device), None)
+    w_samples = w_samples[:,:1,:].cpu().numpy().astype(np.float32)
+    w_avg = np.mean(w_samples, axis=0, keepdims=True)
+    w_std = (np.sum((w_samples - w_avg)**2)/10000)**0.5
+
+    # Setup noise inputs
+    noise_bufs = { name: buf for (name, buf) in G.synthesis.named_buffers() if 'noise_const' in name }
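The core idea the patch above implements is plain vector arithmetic in the generator's W space. A minimal sketch with stand-in tensors (real latents would come from `projection()` and have shape `[num_ws, 512]`):

```python
import torch

# Stand-ins for projection(sample_before) / projection(sample_after);
# real values are optimized latents of shape [num_ws, 512].
w_b = torch.randn(16, 512)
w_a = torch.randn(16, 512)

w = w_b - w_a                  # the "style direction" saved as get_w.pt
w_t_b = torch.randn(16, 512)   # stand-in for projection(target_before)
w_t_a = w_t_b - w              # shift the target along the direction

# decoding would then be G.synthesis(w_t_a.unsqueeze(0), noise_mode='const'),
# exactly as generation() does in custom.py
```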
+
+    # Load VGG16 feature detector
+    url = 'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/metrics/vgg16.pt'
+    with dnnlib.util.open_url(url) as f:
+        vgg16 = torch.jit.load(f).eval().to(device)
+
+    # Extract features for target image
+
+    img_tensor = torch.tensor(img_uint8.transpose([2,0,1]), device=device)
+    img_tensor = img_tensor.unsqueeze(0).to(device).to(torch.float32)
+    if img_tensor.shape[2] > 256:
+        img_tensor = F.interpolate(img_tensor, size=(256,256), mode='area') # Resize to pass through the vgg16 network.
+    img_features = vgg16(img_tensor, resize_images=False, return_lpips=True)
+    # Set up the optimizer and initialize noise
+    num_steps = 1000
+    initial_learning_rate = 0.1
+    # ========================================= #
+
+    w_opt = torch.tensor(w_avg, dtype=torch.float32, device=device, requires_grad=True) # pylint: disable=not-callable
+    w_out = torch.zeros([num_steps] + list(w_opt.shape[1:]), dtype=torch.float32, device=device)
+    optimizer = torch.optim.Adam([w_opt] + list(noise_bufs.values()), betas=(0.9, 0.999), lr=initial_learning_rate)
+    # Init noise.
+    for buf in noise_bufs.values():
+        buf[:] = torch.randn_like(buf)
+        buf.requires_grad = True
+
+    # projection
+    num_steps = 1000
+    lr_rampdown_length = 0.25
+    lr_rampup_length = 0.05
+    initial_noise_factor = 0.05
+    noise_ramp_length = 0.75
+    regularize_noise_weight = 1e5
+    # ========================================= #
+
+    for step in range(num_steps):
+        # Learning rate schedule.
+        t = step / num_steps
+        w_noise_scale = w_std * initial_noise_factor * max(0.0, 1.0 - t / noise_ramp_length) ** 2
+        lr_ramp = min(1.0, (1.0 - t) / lr_rampdown_length)
+        lr_ramp = 0.5 - 0.5 * np.cos(lr_ramp * np.pi)
+        lr_ramp = lr_ramp * min(1.0, t / lr_rampup_length)
+        lr = initial_learning_rate * lr_ramp
+        for param_group in optimizer.param_groups:
+            param_group['lr'] = lr
+
+        # Synthesize image from opt_w
+        w_noise = torch.randn_like(w_opt) * w_noise_scale
+        ws = (w_opt + w_noise).repeat([1, G.mapping.num_ws, 1])
+        synth_images = G.synthesis(ws, noise_mode='const')
+
+        # Downsample image to 256x256 if it's larger than that. VGG was built for 224x224 images.
+        synth_images = (synth_images + 1) * (255/2)
+        if synth_images.shape[2] > 256:
+            synth_images = F.interpolate(synth_images, size=(256, 256), mode='area')
+
+        # Features for synth images.
+        synth_features = vgg16(synth_images, resize_images=False, return_lpips=True)
+        dist = (img_features - synth_features).square().sum() # Calculate the difference between two feature maps (target vs synth) generated through vgg.
+                                                              # This is the point of projection.
+        # Noise regularization.
+        reg_loss = 0.0
+        for v in noise_bufs.values():
+            noise = v[None,None,:,:] # must be [1,1,H,W] for F.avg_pool2d()
+            while True:
+                reg_loss += (noise*torch.roll(noise, shifts=1, dims=3)).mean()**2
+                reg_loss += (noise*torch.roll(noise, shifts=1, dims=2)).mean()**2
+                if noise.shape[2] <= 8:
+                    break
+                noise = F.avg_pool2d(noise, kernel_size=2)
+        loss = dist + reg_loss * regularize_noise_weight
+
+        # Step
+        optimizer.zero_grad(set_to_none=True)
+        loss.backward()
+        optimizer.step()
+        # if (step+1)%100 == 0:
+        #     print(f'step {step+1:>4d}/{num_steps}: dist {dist:<4.2f} loss {float(loss):<5.2f}')
+
+        # Save projected W for each optimization step.
+        w_out[step] = w_opt.detach()[0]
+
+        # Normalize noise.
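The fix in the next patch is needed because `vars(parser.parse_args())` returns a plain `dict`, and `**kwargs` re-packs it as a `dict` inside `run()`; dictionaries support subscripting, not attribute access. A quick self-contained illustration:

```python
def run(**kwargs):
    # kwargs is a dict here, so attribute access raises AttributeError
    try:
        kwargs.sample_before
    except AttributeError as e:
        print(e)                      # 'dict' object has no attribute 'sample_before'
    return kwargs['sample_before']    # the corrected, subscript access

print(run(sample_before='s_b.png'))   # -> s_b.png
```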
+        with torch.no_grad():
+            for buf in noise_bufs.values():
+                buf -= buf.mean()
+                buf *= buf.square().mean().rsqrt()
+
+    projected_w_steps = w_out.repeat([1, G.mapping.num_ws, 1])
+    projected_w = projected_w_steps[-1]
+    return projected_w
+
+def generation(w,G):
+    synth_image = G.synthesis(w.unsqueeze(0), noise_mode='const')
+    synth_image = (synth_image + 1) * (255/2)
+    synth_image = synth_image.permute(0, 2, 3, 1).clamp(0, 255).to(torch.uint8)[0].cpu().numpy()
+    return synth_image
+
+
+
+if __name__ == '__main__':
+    args = parse_command_line_args()
+
+    network_pkl = 'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/afhqdog.pkl'
+    device = torch.device('cuda')
+
+    run(**args)
\ No newline at end of file

From 92b50ed95d6ae5faefbf96f9735df207af5806a0 Mon Sep 17 00:00:00 2001
From: sw-song
Date: Sun, 27 Jun 2021 15:21:07 +0900
Subject: [PATCH 02/11] argparse error -

---
 custom.py | 9 ++++-----
 1 file changed, 4 insertions(+), 5 deletions(-)

diff --git a/custom.py b/custom.py
index 5307ad73f..750e88384 100644
--- a/custom.py
+++ b/custom.py
@@ -26,10 +26,10 @@ def parse_command_line_args():
     return vars(parser.parse_args())
 
 def run(**kwargs):
-    sample_before = kwargs.sample_before
-    sample_after = kwargs.sample_after
-    target_before = kwargs.target_before
-    target_after = kwargs.target_after
+    sample_before = kwargs['sample_before']
+    sample_after = kwargs['sample_after']
+    target_before = kwargs['target_before']
+    target_after = kwargs['target_after']
 
 
     with dnnlib.util.open_url(network_pkl) as f:
@@ -188,5 +188,4 @@ def generation(w,G):
 
     network_pkl = 'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/afhqdog.pkl'
     device = torch.device('cuda')
-
     run(**args)
\ No newline at end of file
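One caveat with the progress reporting added in the next patch: plain `print()` calls inside a `tqdm` loop interleave with the bar's single-line redraw and garble the console output. `tqdm.write()` is the usual workaround (patches 08 and 09 below end up dropping `tqdm` instead); a minimal sketch:

```python
from tqdm import tqdm

for step in tqdm(range(1000)):
    if (step + 1) % 100 == 0:
        tqdm.write(f'step {step+1:>4d}/1000')  # prints above the bar without breaking it
```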
From 56f7496d815c8921935341df094a6b8bc5a5eac7 Mon Sep 17 00:00:00 2001
From: sw-song
Date: Sun, 27 Jun 2021 15:52:22 +0900
Subject: [PATCH 03/11] custom file edit (can show progress)

---
 custom.py | 23 +++++++++++++++--------
 1 file changed, 15 insertions(+), 8 deletions(-)

diff --git a/custom.py b/custom.py
index 750e88384..f685df8ce 100644
--- a/custom.py
+++ b/custom.py
@@ -14,7 +14,7 @@ import argparse
 import copy
 import pickle
 import matplotlib.pyplot as plt
-
+from tqdm import tqdm
 
 def parse_command_line_args():
     parser = argparse.ArgumentParser()
@@ -44,22 +44,23 @@ def run(**kwargs):
     before = sample_before
     after = sample_after
 
-    w_b = projection(before)
-    w_a = projection(after)
+    w_b = projection(before, 'Type-SB(Sample|Before)')
+    w_a = projection(after, 'Type-SA(Sample|After)')
 
     w = w_b - w_a # age vector
     torch.save(w, 'get_w.pt')
 
     target = target_before
 
-    w_t_b = projection(target)
+    w_t_b = projection(target, 'Type-TB(Target|Before)')
     w_t_a = w_t_b - w # minus-age
     gen_target_after = generation(w_t_a,G)
     img = Image.fromarray(gen_target_after)
     img.save(target_after)
 
 
-def projection(img_path):
+def projection(img_path, id):
+    id = id
     img = Image.open(img_path).convert('RGB')
     w, h = img.size
     s = min(w,h)
@@ -118,7 +119,7 @@ def projection(img_path):
     regularize_noise_weight = 1e5
     # ========================================= #
 
-    for step in range(num_steps):
+    for step in tqdm(range(num_steps)):
         # Learning rate schedule.
         t = step / num_steps
         w_noise_scale = w_std * initial_noise_factor * max(0.0, 1.0 - t / noise_ramp_length) ** 2
@@ -159,8 +160,13 @@ def projection(img_path):
         optimizer.zero_grad(set_to_none=True)
         loss.backward()
         optimizer.step()
-        # if (step+1)%100 == 0:
-        #     print(f'step {step+1:>4d}/{num_steps}: dist {dist:<4.2f} loss {float(loss):<5.2f}')
+
+        if step==0:
+            print('[{}] projection start - Reproducing the image.. '.format(id))
+        elif (step+1)%100 == 0 and (step+1) != num_steps:
+            print(f'step {step+1:>4d}/{num_steps}: dist {dist:<4.2f} loss {float(loss):<5.2f}')
+        elif (step+1) == num_steps:
+            print('projection clear')
 
         # Save projected W for each optimization step.
         w_out[step] = w_opt.detach()[0]
@@ -179,6 +185,7 @@ def generation(w,G):
     synth_image = G.synthesis(w.unsqueeze(0), noise_mode='const')
     synth_image = (synth_image + 1) * (255/2)
     synth_image = synth_image.permute(0, 2, 3, 1).clamp(0, 255).to(torch.uint8)[0].cpu().numpy()
+    print('Finished creating a new image with the style(W) applied')
     return synth_image
 

From ef4a41db3974f5968e85bf171181b12a35bc13e1 Mon Sep 17 00:00:00 2001
From: sw-song
Date: Sun, 27 Jun 2021 16:08:28 +0900
Subject: [PATCH 04/11] readme update

---
 README.md | 14 ++++++++++++++
 1 file changed, 14 insertions(+)

diff --git a/README.md b/README.md
index f2108e686..85dc28dee 100755
--- a/README.md
+++ b/README.md
@@ -8,7 +8,21 @@
 python custom.py(--sample_before 's_b.png' --sample_after 's_a.png' --target_before 't_b.png' --target_after 't_a.png')
 ```
 
+> If you don't have a personal GPU, you can easily use it in colab
+```
+!git clone https://github.com/sw-song/stylegan2-ada-pytorch.git
+
+# we need this package in colab
+!pip install ninja
+
+# move to the folder that we cloned
+%cd stylegan2-ada-pytorch/
+
+# run python command
+!python custom.py --sample_before 'sample_before.png' --sample_after 'sample_after.png' --target_before 'target_before.png' --target_after 'target_after.png'
+```
 
+## Below is the readme from [the official repository](https://github.com/NVlabs/stylegan2-ada-pytorch)
 ---
 
 ## StyleGAN2-ADA — Official PyTorch implementation
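Since `custom.py` hard-codes `torch.device('cuda')`, it is worth confirming the Colab runtime actually has a GPU before running the commands above; a minimal check:

```python
import torch

# In Colab: Runtime > Change runtime type > GPU, then:
assert torch.cuda.is_available(), 'No GPU runtime detected'
print(torch.cuda.get_device_name(0))
```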
From 2563ed4dc9359db2cd961080bb59b9a49bcfa6f4 Mon Sep 17 00:00:00 2001
From: sw-song
Date: Sun, 27 Jun 2021 20:04:13 +0900
Subject: [PATCH 05/11] image conversion+

---
 README.md                  | 45 +++++++++++++++-----------------
 custom.py => conversion.py |  5 ++---
 2 files changed, 20 insertions(+), 30 deletions(-)
 rename custom.py => conversion.py (98%)

diff --git a/README.md b/README.md
index 85dc28dee..eb68e8160 100755
--- a/README.md
+++ b/README.md
@@ -1,30 +1,3 @@
-## StyleGAN2-ADA Custom implementation
-
-- **INPUT Image : sample B(Before), sample A(After), target B(Before)**
-- **OUTPUT W : 'get_w.pt' (extracted Style)**
-- **OUTPUT Image : target A(After)**
-
-```.bash
-python custom.py(--sample_before 's_b.png' --sample_after 's_a.png' --target_before 't_b.png' --target_after 't_a.png')
-```
-
-> If you don't have a personal GPU, you can easily use it in colab
-```
-!git clone https://github.com/sw-song/stylegan2-ada-pytorch.git
-
-# we need this package in colab
-!pip install ninja
-
-# move to the folder that we cloned
-%cd stylegan2-ada-pytorch/
-
-# run python command
-!python custom.py --sample_before 'sample_before.png' --sample_after 'sample_after.png' --target_before 'target_before.png' --target_after 'target_after.png'
-```
-
-## Below is the readme from [the official repository](https://github.com/NVlabs/stylegan2-ada-pytorch)
----
-
 ## StyleGAN2-ADA — Official PyTorch implementation
 
 ![Teaser image](./docs/stylegan2-ada-teaser-1024x252.png)
@@ -155,6 +128,24 @@ python generate.py --outdir=out --projected_w=out/projected_w.npz \
     --network=https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/ffhq.pkl
 ```
 
+## Image Conversion
+To convert an image, we need a target image to convert and a `W` that contains the style information.
+
+First, we extract `W` from 2 sample images. One is an image expressing a specific style (e.g. smile, skin, age),
+The other image doesn't have that style (the more identical the other features are, the better).
+
+- input image : sample B(Before), sample A(After), target B(Before)
+- output `W` : 'get_w.pt' (extracted Style)
+- output image : target A(After)
+
+```.bash
+python custom.py(--sample_before 's_b.png' --sample_after 's_a.png' --target_before 't_b.png' --target_after 't_a.png' --network 'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/afhqdog.pkl')
+```
+
+
+## Below is the readme from [the official repository](https://github.com/NVlabs/stylegan2-ada-pytorch)
+---
+
 ## Using networks from Python
 
 You can use pre-trained networks in your own Python code as follows:

diff --git a/custom.py b/conversion.py
similarity index 98%
rename from custom.py
rename to conversion.py
index f685df8ce..4ef96ac98 100644
--- a/custom.py
+++ b/conversion.py
@@ -22,7 +22,7 @@ def parse_command_line_args():
     parser.add_argument('--sample_after', required=True, help='image that does not include the W')
     parser.add_argument('--target_before', required=True, help='image that you want to apply the W to')
     parser.add_argument('--target_after', required=True, help='path for saving the result')
-
+    parser.add_argument('--network', required=True, help='URL of the network pickle')
     return vars(parser.parse_args())
 
 def run(**kwargs):
@@ -192,7 +192,6 @@ def generation(w,G):
 
 if __name__ == '__main__':
     args = parse_command_line_args()
-
-    network_pkl = 'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/afhqdog.pkl'
+    network_pkl = args.network
     device = torch.device('cuda')
     run(**args)
\ No newline at end of file
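The new `--network` flag accepts any StyleGAN2-ADA network pickle, not just the previously hard-coded dog model. For reference, a few pretrained pickles hosted on the same NVIDIA CDN (names and URLs as listed in the upstream README; pick one whose domain matches your images):

```python
# Known pretrained pickles on the official CDN; resolutions per the upstream docs.
BASE = 'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained'
NETWORKS = {
    'ffhq':     f'{BASE}/ffhq.pkl',      # human faces, 1024x1024
    'metfaces': f'{BASE}/metfaces.pkl',  # painted portraits, 1024x1024
    'afhqcat':  f'{BASE}/afhqcat.pkl',   # cat faces, 512x512
    'afhqdog':  f'{BASE}/afhqdog.pkl',   # dog faces, 512x512 (the old default)
    'afhqwild': f'{BASE}/afhqwild.pkl',  # wild animal faces, 512x512
}
```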
From 554f73503e95f3a359d3906a76e078a078330ff1 Mon Sep 17 00:00:00 2001
From: sw-song
Date: Sun, 27 Jun 2021 20:18:54 +0900
Subject: [PATCH 06/11] readme update

---
 README.md     | 13 +++++++------
 conversion.py | 12 ++++++------
 2 files changed, 13 insertions(+), 12 deletions(-)

diff --git a/README.md b/README.md
index eb68e8160..72e29b891 100755
--- a/README.md
+++ b/README.md
@@ -131,15 +131,16 @@ python generate.py --outdir=out --projected_w=out/projected_w.npz \
     --network=https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/ffhq.pkl
 ```
 
 ## Image Conversion
 To convert an image, we need a target image to convert and a `W` that contains the style information.
 
-First, we extract `W` from 2 sample images. One is an image expressing a specific style (e.g. smile, skin, age),
-The other image doesn't have that style (the more identical the other features are, the better).
+First, we extract `W` from 2 sample images. One (`sample_after`) is an image expressing a specific style (e.g. smile, skin, age),
+The other (`sample_before`) doesn't have that style (the more identical the other features are, the better).
 
-- input image : sample B(Before), sample A(After), target B(Before)
-- output `W` : 'get_w.pt' (extracted Style)
-- output image : target A(After)
+- input image : `sample before`, `sample after`, `target before`
+- output `W` : 'get_w.pt' (extracted Style
+by subtracting `sample_before` from `sample_after`)
+- output image : `target after`
 
 ```.bash
-python custom.py(--sample_before 's_b.png' --sample_after 's_a.png' --target_before 't_b.png' --target_after 't_a.png' --network 'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/afhqdog.pkl')
+python conversion.py(--sample_before 's_b.png' --sample_after 's_a.png' --target_before 't_b.png' --target_after 't_a.png' --network 'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/afhqdog.pkl')
 ```

diff --git a/conversion.py b/conversion.py
index 4ef96ac98..64a97c801 100644
--- a/conversion.py
+++ b/conversion.py
@@ -44,17 +44,17 @@ def run(**kwargs):
     before = sample_before
     after = sample_after
 
-    w_b = projection(before, 'Type-SB(Sample|Before)')
-    w_a = projection(after, 'Type-SA(Sample|After)')
+    w_before = projection(before, 'Type-SB(Sample|Before)')
+    w_after = projection(after, 'Type-SA(Sample|After)')
 
-    w = w_b - w_a # age vector
+    w = w_after - w_before # age vector
     torch.save(w, 'get_w.pt')
 
     target = target_before
 
-    w_t_b = projection(target, 'Type-TB(Target|Before)')
-    w_t_a = w_t_b - w # minus-age
-    gen_target_after = generation(w_t_a,G)
+    w_target_before = projection(target, 'Type-TB(Target|Before)')
+    w_target_after = w_target_before + w
+    gen_target_after = generation(w_target_after,G)
     img = Image.fromarray(gen_target_after)
     img.save(target_after)
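Note that the sign convention also flips in this patch: `w` is now `w_after - w_before`, so the style is applied to the target by addition rather than subtraction. The resulting latent is the same as before, since `w_t_b - (w_b - w_a) == w_t_b + (w_a - w_b)`; a toy check:

```python
import torch

w_before = torch.tensor([0.0, 1.0])
w_after = torch.tensor([1.0, 3.0])
target = torch.tensor([0.5, 0.5])

old = target - (w_before - w_after)   # patch 01 convention
new = target + (w_after - w_before)   # patch 06 convention
assert torch.equal(old, new)          # identical result, clearer naming
print(new)
```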
From d81d5156fc66d3abc6d93614c5424bc29b69c862 Mon Sep 17 00:00:00 2001
From: sw-song
Date: Sun, 27 Jun 2021 20:22:14 +0900
Subject: [PATCH 07/11] argparse error catch

---
 conversion.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/conversion.py b/conversion.py
index 64a97c801..ec766d4e7 100644
--- a/conversion.py
+++ b/conversion.py
@@ -192,6 +192,6 @@ def generation(w,G):
 
 if __name__ == '__main__':
     args = parse_command_line_args()
-    network_pkl = args.network
+    network_pkl = args['network']
     device = torch.device('cuda')
     run(**args)
\ No newline at end of file

From 72eb0ea1268ed91e1a649e17f0741ae0cab2bafb Mon Sep 17 00:00:00 2001
From: sw-song
Date: Sun, 27 Jun 2021 20:32:34 +0900
Subject: [PATCH 08/11] test-progress

---
 conversion.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/conversion.py b/conversion.py
index ec766d4e7..f314f0436 100644
--- a/conversion.py
+++ b/conversion.py
@@ -163,10 +163,10 @@ def projection(img_path, id):
 
         if step==0:
             print('[{}] projection start - Reproducing the image.. '.format(id))
-        elif (step+1)%100 == 0 and (step+1) != num_steps:
-            print(f'step {step+1:>4d}/{num_steps}: dist {dist:<4.2f} loss {float(loss):<5.2f}')
-        elif (step+1) == num_steps:
-            print('projection clear')
+        # elif (step+1)%100 == 0 and (step+1) != num_steps:
+        #     print(f'step {step+1:>4d}/{num_steps}: dist {dist:<4.2f} loss {float(loss):<5.2f}')
+        # elif (step+1) == num_steps:
+        #     print('projection clear')
 
         # Save projected W for each optimization step.
         w_out[step] = w_opt.detach()[0]

From c5ce4514430ceb2f747ae54b388b0e2ca776d367 Mon Sep 17 00:00:00 2001
From: sw-song
Date: Sun, 27 Jun 2021 20:40:26 +0900
Subject: [PATCH 09/11] test-progress - no tqdm

---
 conversion.py | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/conversion.py b/conversion.py
index f314f0436..a0ce9cd68 100644
--- a/conversion.py
+++ b/conversion.py
@@ -14,7 +14,7 @@ import argparse
 import copy
 import pickle
 import matplotlib.pyplot as plt
-from tqdm import tqdm
+# from tqdm import tqdm
 
 def parse_command_line_args():
     parser = argparse.ArgumentParser()
@@ -119,7 +119,7 @@ def projection(img_path, id):
     regularize_noise_weight = 1e5
     # ========================================= #
 
-    for step in tqdm(range(num_steps)):
+    for step in range(num_steps):
         # Learning rate schedule.
         t = step / num_steps
         w_noise_scale = w_std * initial_noise_factor * max(0.0, 1.0 - t / noise_ramp_length) ** 2
@@ -163,10 +163,10 @@ def projection(img_path, id):
 
         if step==0:
             print('[{}] projection start - Reproducing the image.. '.format(id))
-        # elif (step+1)%100 == 0 and (step+1) != num_steps:
-        #     print(f'step {step+1:>4d}/{num_steps}: dist {dist:<4.2f} loss {float(loss):<5.2f}')
-        # elif (step+1) == num_steps:
-        #     print('projection clear')
+        elif (step+1)%100 == 0 and (step+1) != num_steps:
+            print(f'step {step+1:>4d}/{num_steps}: dist {dist:<4.2f} loss {float(loss):<5.2f}')
+        elif (step+1) == num_steps:
+            print('projection clear')
 
         # Save projected W for each optimization step.
         w_out[step] = w_opt.detach()[0]

From ed3fea22d895fb58698fc2980e5a37d5c7437cdd Mon Sep 17 00:00:00 2001
From: sw-song
Date: Sun, 27 Jun 2021 20:53:30 +0900
Subject: [PATCH 10/11] readme edit

---
 README.md | 4 ----
 1 file changed, 4 deletions(-)

diff --git a/README.md b/README.md
index 72e29b891..bac728534 100755
--- a/README.md
+++ b/README.md
@@ -143,10 +143,6 @@ by subtracting `sample_before` from `sample_after`)
 python conversion.py(--sample_before 's_b.png' --sample_after 's_a.png' --target_before 't_b.png' --target_after 't_a.png' --network 'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/afhqdog.pkl')
 ```
 
-
-## Below is the readme from [the official repository](https://github.com/NVlabs/stylegan2-ada-pytorch)
----
-
 ## Using networks from Python

From ac799c7ca39248ec37c82b6044d6d5d0a2bc3c40 Mon Sep 17 00:00:00 2001
From: sw-song
Date: Sun, 27 Jun 2021 20:57:08 +0900
Subject: [PATCH 11/11] readme - conversion guide edit

---
 README.md | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/README.md b/README.md
index bac728534..6accd1539 100755
--- a/README.md
+++ b/README.md
@@ -140,7 +140,9 @@ by subtracting `sample_before` from `sample_after`)
 
 ```.bash
-python conversion.py(--sample_before 's_b.png' --sample_after 's_a.png' --target_before 't_b.png' --target_after 't_a.png' --network 'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/afhqdog.pkl')
+python conversion.py --sample_before s_b.png --sample_after s_a.png \
+    --target_before t_b.png --target_after t_a.png \
+    --network https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/afhqdog.pkl
 ```
 
 ## Using networks from Python
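A possible follow-up once a run has completed: the saved 'get_w.pt' can be reused on further targets without re-projecting the two sample images. A sketch against the final conversion.py, with hypothetical file names; note that `network_pkl` and `device` are module globals normally set under `__main__`, so they have to be injected when importing:

```python
import torch
import dnnlib
import legacy
from PIL import Image

import conversion

# projection()/generation() read these module-level globals.
conversion.network_pkl = 'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/afhqdog.pkl'
conversion.device = torch.device('cuda')

with dnnlib.util.open_url(conversion.network_pkl) as f:
    G = legacy.load_network_pkl(f)['G_ema'].to(conversion.device)

w = torch.load('get_w.pt')  # style vector from an earlier run (same network!)
w_target_before = conversion.projection('new_target.png', 'Type-TB(Target|Before)')
img = Image.fromarray(conversion.generation(w_target_before + w, G))
img.save('new_target_after.png')
```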