From 885f05d7c687f53f5689b50599408723f731d0c7 Mon Sep 17 00:00:00 2001
From: YeonwooSung
Date: Mon, 18 Jul 2022 23:25:39 +0900
Subject: [PATCH] Add Deepfake

---
 GenerativeModels/Deepfake/README.md           |   7 +
 .../Deepfake/src/DeepFake_example.ipynb       | 222 ++++++++++++++++++
 GenerativeModels/README.md                    |   3 +
 3 files changed, 232 insertions(+)
 create mode 100644 GenerativeModels/Deepfake/README.md
 create mode 100644 GenerativeModels/Deepfake/src/DeepFake_example.ipynb

diff --git a/GenerativeModels/Deepfake/README.md b/GenerativeModels/Deepfake/README.md
new file mode 100644
index 0000000..f58354e
--- /dev/null
+++ b/GenerativeModels/Deepfake/README.md
@@ -0,0 +1,7 @@
+# DeepFake
+
+Deepfakes (a portmanteau of "deep learning" and "fake") are synthetic media in which a person in an existing image or video is replaced with someone else's likeness. While the act of creating fake content is not new, deepfakes leverage powerful techniques from machine learning and artificial intelligence to manipulate or generate visual and audio content that can more easily deceive. The main machine learning methods used to create deepfakes are based on deep learning and involve training generative neural network architectures such as autoencoders or generative adversarial networks (GANs).
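+
+The notebook linked below uses a pretrained motion-transfer model, but as a rough sketch of the classic face-swap idea (one shared encoder, one decoder per identity), something like the following hypothetical PyTorch snippet captures the architecture; all layer sizes and names here are illustrative assumptions, not a production implementation:
+
+```python
+# Sketch of the shared-encoder / per-identity-decoder autoencoder behind
+# many face-swap deepfakes. Sizes and training details are illustrative.
+import torch
+import torch.nn as nn
+
+class Encoder(nn.Module):
+    def __init__(self):
+        super().__init__()
+        self.net = nn.Sequential(
+            nn.Conv2d(3, 64, 4, stride=2, padding=1), nn.ReLU(),
+            nn.Conv2d(64, 128, 4, stride=2, padding=1), nn.ReLU(),
+        )
+
+    def forward(self, x):
+        return self.net(x)
+
+class Decoder(nn.Module):
+    def __init__(self):
+        super().__init__()
+        self.net = nn.Sequential(
+            nn.ConvTranspose2d(128, 64, 4, stride=2, padding=1), nn.ReLU(),
+            nn.ConvTranspose2d(64, 3, 4, stride=2, padding=1), nn.Sigmoid(),
+        )
+
+    def forward(self, z):
+        return self.net(z)
+
+# One shared encoder; each decoder is trained to reconstruct one identity.
+# At swap time, a face of identity A is encoded and decoded with B's
+# decoder, keeping A's pose and expression but rendering B's likeness.
+encoder = Encoder()
+decoder_a, decoder_b = Decoder(), Decoder()
+face_a = torch.rand(1, 3, 256, 256)
+swapped = decoder_b(encoder(face_a))  # shape: (1, 3, 256, 256)
+```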
+
+Deepfakes have garnered widespread attention for their uses in creating child sexual abuse material, celebrity pornographic videos, revenge porn, fake news, hoaxes, bullying, and financial fraud. This has elicited responses from both industry and government to detect and limit their use.
+
+[Simple example notebook for deepfakes](./src/DeepFake_example.ipynb)
diff --git a/GenerativeModels/Deepfake/src/DeepFake_example.ipynb b/GenerativeModels/Deepfake/src/DeepFake_example.ipynb
new file mode 100644
index 0000000..ee8c567
--- /dev/null
+++ b/GenerativeModels/Deepfake/src/DeepFake_example.ipynb
@@ -0,0 +1,222 @@
+{
+  "nbformat": 4,
+  "nbformat_minor": 0,
+  "metadata": {
+    "colab": {
+      "name": "DeepFake.ipynb",
+      "provenance": []
+    },
+    "kernelspec": {
+      "name": "python3",
+      "display_name": "Python 3"
+    },
+    "accelerator": "GPU"
+  },
+  "cells": [
+    {
+      "cell_type": "markdown",
+      "metadata": {
+        "id": "3AV51kYB_ZCO"
+      },
+      "source": [
+        "# Deepfake Google Colab\n",
+        "\n",
+        "Note:\n",
+        "\n",
+        "Run each code cell one at a time, in sequential order.\n",
+        "\n",
+        "Before running the code cells, go to Runtime > Change runtime type and select GPU.\n",
+        "\n",
+        "If you want to do this again, just choose Factory reset runtime from the Runtime menu."
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {
+        "id": "2kxTqI2V_omw"
+      },
+      "source": [
+        "**Install dependencies and clone the deepfake GitHub repository**"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "metadata": {
+        "id": "6J2Lcdby8xTK"
+      },
+      "source": [
+        "!pip install PyYAML==5.3.1\n",
+        "!git clone https://github.com/AliaksandrSiarohin/first-order-model"
+      ],
+      "execution_count": null,
+      "outputs": []
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {
+        "id": "uYX02-oKDcXc"
+      },
+      "source": [
+        "**Change the working directory to the cloned repository**"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "metadata": {
+        "id": "fkD87_rvCEMZ"
+      },
+      "source": [
+        "%cd first-order-model"
+      ],
+      "execution_count": null,
+      "outputs": []
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {
+        "id": "gh707Dj7_unR"
+      },
+      "source": [
+        "**Mount Google Drive**\n",
+        "\n",
+        "You'll get a link in the output below. Click it and choose which account's Drive you want to mount. It'll show an authentication code that you can copy and paste into the output window to mount your Google Drive.\n",
+        "\n",
+        "Notes:\n",
+        "1. Make a folder in your Google Drive named \"first-order-motion-model\".\n",
+        "\n",
+        "2. Upload the following files to that folder:\n",
+        "   https://drive.google.com/file/d/1xRD9rVjTizmqBnr6F4nOId5M08sCMKS9/view?usp=sharing\n",
+        "\n",
+        "   https://drive.google.com/file/d/1RdFq0hdT-Lnr8k6iw7e4JMRzo4I5f5YN/view?usp=sharing\n",
+        "\n",
+        "3. Make sure the image you want to deepfake is named \"02\" and is in PNG format, so the filename is \"02.png\".\n",
+        "\n",
+        "4. The deepfake template video should be named \"04\" and be in MP4 format, so the filename is \"04.mp4\".\n",
+        "\n",
+        "5. The deepfake template for Dame Da Ne / Baka Mitai: https://drive.google.com/file/d/1-_K5D6gr7-tte0c_9VRXETAt6MQ9iSVd/view?usp=drivesdk"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "metadata": {
+        "id": "fWN2PGgAAsHd"
+      },
+      "source": [
+        "from google.colab import drive\n",
+        "drive.mount('/content/gdrive')"
+      ],
+      "execution_count": null,
+      "outputs": []
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {
+        "id": "7bIhTjstA7pW"
+      },
+      "source": [
+        "**Load the driving video and the source image**"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "metadata": {
+        "id": "gPt6ckGxBDIF"
+      },
+      "source": [
+        "import imageio\n",
+        "import numpy as np\n",
+        "import matplotlib.pyplot as plt\n",
+        "import matplotlib.animation as animation\n",
+        "from skimage.transform import resize\n",
+        "from IPython.display import HTML\n",
+        "import warnings\n",
+        "warnings.filterwarnings(\"ignore\")\n",
+        "\n",
+        "source_image = imageio.imread('/content/gdrive/My Drive/first-order-motion-model/02.png')\n",
+        "driving_video = imageio.mimread('/content/gdrive/My Drive/first-order-motion-model/04.mp4')\n",
+        "\n",
+        "# Resize the image and every video frame to 256x256 and drop any alpha channel\n",
+        "source_image = resize(source_image, (256, 256))[..., :3]\n",
+        "driving_video = [resize(frame, (256, 256))[..., :3] for frame in driving_video]\n",
+        "\n",
+        "def display(source, driving, generated=None):\n",
+        "    # Show source | driving | generated side by side, one frame per driving frame\n",
+        "    fig = plt.figure(figsize=(8 + 4 * (generated is not None), 6))\n",
+        "\n",
+        "    ims = []\n",
+        "    for i in range(len(driving)):\n",
+        "        cols = [source, driving[i]]\n",
+        "        if generated is not None:\n",
+        "            cols.append(generated[i])\n",
+        "        im = plt.imshow(np.concatenate(cols, axis=1), animated=True)\n",
+        "        plt.axis('off')\n",
+        "        ims.append([im])\n",
+        "\n",
+        "    ani = animation.ArtistAnimation(fig, ims, interval=50, repeat_delay=1000)\n",
+        "    plt.close()\n",
+        "    return ani\n",
+        "\n",
+        "HTML(display(source_image, driving_video).to_html5_video())"
+      ],
+      "execution_count": null,
+      "outputs": []
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {
+        "id": "cBSTTKwIBKWs"
+      },
+      "source": [
+        "**Create the model, load the pretrained checkpoint, and perform the image animation**"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "metadata": {
+        "id": "rmeZtrGPBVEw"
+      },
+      "source": [
+        "from demo import load_checkpoints\n",
+        "\n",
+        "# Build the generator and keypoint detector from the pretrained checkpoint\n",
+        "generator, kp_detector = load_checkpoints(config_path='config/vox-256.yaml',\n",
+        "                                          checkpoint_path='/content/gdrive/My Drive/first-order-motion-model/vox-cpk.pth.tar')\n",
+        "\n",
+        "print('Proceed to next cell')"
+      ],
+      "execution_count": null,
+      "outputs": []
+    },
+    {
+      "cell_type": "code",
+      "metadata": {
+        "id": "2Y_BRB-zELm4"
+      },
+      "source": [
+        "from demo import make_animation\n",
+        "from skimage import img_as_ubyte\n",
+        "\n",
+        "predictions = make_animation(source_image, driving_video, generator, kp_detector, relative=True)\n",
+        "\n",
+        "# Save the resulting video; it can be downloaded from the /content folder\n",
+        "imageio.mimsave('../generated.mp4', [img_as_ubyte(frame) for frame in predictions])\n",
+        "\n",
+        "HTML(display(source_image, driving_video, predictions).to_html5_video())"
+      ],
+      "execution_count": null,
+      "outputs": []
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {
+        "id": "F5xc1luVGXzG"
+      },
+      "source": [
+        "# Steps for editing the generated video\n",
+        "\n",
+        "You'll need to speed up the generated video 3x so that it syncs with the music. You can use Kapwing or another video editor for this. Kapwing doesn't have a 3x speed option, so speed the video up 2x first and then speed up the result again by 1.5x. Alternatively, see the ffmpeg sketch in the next cell."
+      ]
+    },
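+    {
+      "cell_type": "markdown",
+      "metadata": {},
+      "source": [
+        "**Optional: speed up the video with ffmpeg instead**\n",
+        "\n",
+        "A minimal sketch, assuming `ffmpeg` is available in the Colab runtime (it normally is) and that the video was saved as `generated.mp4` by the earlier cell. `setpts=PTS/3` plays the frames three times faster; `-an` drops the (empty) audio track."
+      ]
+    },
+    {
+      "cell_type": "code",
+      "metadata": {},
+      "source": [
+        "# Triple the playback speed of the generated video in one step\n",
+        "!ffmpeg -y -i ../generated.mp4 -filter:v \"setpts=PTS/3\" -an ../generated_3x.mp4"
+      ],
+      "execution_count": null,
+      "outputs": []
+    }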
+  ]
+}
\ No newline at end of file
diff --git a/GenerativeModels/README.md b/GenerativeModels/README.md
index c222c8c..e5f3f9c 100644
--- a/GenerativeModels/README.md
+++ b/GenerativeModels/README.md
@@ -1 +1,4 @@
 # Generative Models
+
+- [Deepfake](./Deepfake/)
+- [GAN](./GAN/)