+# deepdespeckling.denoiser
+import logging
+import torch
+import numpy as np
+
+
+class Denoiser:
+    """Class holding parameters shared across denoising functions
+ """
+
+ def __init__(self):
+ self.device = self.get_device()
+
+ def get_device(self) -> str:
+ """Get torch device to use depending on gpu's availability
+
+ Returns:
+ device (str): device to be used by torch
+ """
+ if torch.backends.mps.is_available() and torch.backends.mps.is_built():
+ device = "mps"
+ elif torch.cuda.is_available():
+ device = "cuda:0"
+ else:
+ device = "cpu"
+ logging.info(f"{device} device is used by torch")
+
+ return device
+
+ def initialize_axis_range(self, image_axis_dim: int, patch_size: int, stride_size: int) -> list:
+ """Initialize the convolution range for x or y axis
+
+ Args:
+ image_axis_dim (int): axis size
+ patch_size (int): patch size
+ stride_size (int): stride size
+
+ Returns:
+ axis_range (list) : pixel borders of each convolution
+ """
+ if image_axis_dim == patch_size:
+            axis_range = [0]
+ else:
+ axis_range = list(
+ range(0, image_axis_dim - patch_size, stride_size))
+            if (axis_range[-1] + patch_size) < image_axis_dim:
+                axis_range.append(image_axis_dim - patch_size)
+
+ return axis_range
+
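+    # Example (assumed values): tiling a 512-pixel axis with 256-pixel patches
+    # and a 128-pixel stride gives range(0, 256, 128) == [0, 128]; since
+    # 128 + 256 < 512, the start 512 - 256 = 256 is appended so the final
+    # patch ends flush with the border:
+    # Denoiser().initialize_axis_range(512, 256, 128) -> [0, 128, 256]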
+
+
+# deepdespeckling.despeckling (module name assumed from the package layout)
+import logging
+import os
+from glob import glob
+from deepdespeckling.denoiser import Denoiser
+
+from deepdespeckling.merlin.merlin_denoiser import MerlinDenoiser
+from deepdespeckling.sar2sar.sar2sar_denoiser import Sar2SarDenoiser
+from deepdespeckling.utils.constants import PATCH_SIZE, STRIDE_SIZE
+from deepdespeckling.utils.utils import (crop_image, get_cropping_coordinates, load_sar_image, preprocess_and_store_sar_images_from_coordinates,
+ create_empty_folder_in_directory, preprocess_and_store_sar_images)
+
+
+logging.basicConfig(level=logging.INFO)
+
+
+def get_denoiser(model_name: str, symetrise: bool = True) -> Denoiser:
+ """Get the right denoiser object from the model name
+
+ Args:
+        model_name (str): model name to be used for despeckling
+        symetrise (bool): when using the spotlight or stripmap model, whether to symmetrise
+            the real and imaginary parts of the noisy image. Defaults to True
+
+ Returns:
+ denoiser (Denoiser): the right denoiser, Sar2SarDenoiser or MerlinDenoiser
+ """
+ if model_name in ["spotlight", "stripmap"]:
+ denoiser = MerlinDenoiser(model_name=model_name, symetrise=symetrise)
+ elif model_name == "sar2sar":
+ denoiser = Sar2SarDenoiser()
+ else:
+        raise ValueError(
+            f"Unknown model name '{model_name}'; expected 'spotlight', 'stripmap' or 'sar2sar'")
+
+ return denoiser
+
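+# e.g. get_denoiser("spotlight") -> MerlinDenoiser with spotlight weights;
+# get_denoiser("sar2sar") -> Sar2SarDenoiser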
+
+
+def despeckle(sar_images_path: str, destination_directory_path: str, model_name: str = "spotlight",
+ patch_size: int = PATCH_SIZE, stride_size: int = STRIDE_SIZE, symetrise: bool = True):
+ """Despeckle coSAR images using trained MERLIN (spotlight or stripmap weights) or SAR2SAR
+
+ Args:
+ sar_images_path (str): path of sar images
+ destination_directory_path (str): path of folder in which results will be stored
+        model_name (str): model name, either "spotlight" or "stripmap" to select the MERLIN
+            model matching the coSAR image format, or "sar2sar" for the SAR2SAR model. Defaults to "spotlight"
+ patch_size (int): patch size. Defaults to constant PATCH_SIZE.
+ stride_size (int): stride size. Defaults to constant STRIDE_SIZE.
+        symetrise (bool): when using the spotlight or stripmap model, whether to symmetrise
+            the real and imaginary parts of the noisy image. Defaults to True
+ """
+
+ logging.info(
+ f"""Despeckling entire images using {model_name} weights""")
+
+ processed_images_path = create_empty_folder_in_directory(destination_directory_path=destination_directory_path,
+ folder_name="processed_images")
+ preprocess_and_store_sar_images(
+ sar_images_path=sar_images_path, processed_images_path=processed_images_path, model_name=model_name)
+
+ logging.info(
+        f"Starting inference... Collecting data from {sar_images_path} and storing test results in {destination_directory_path}")
+
+ denoiser = get_denoiser(model_name=model_name, symetrise=symetrise)
+ denoiser.denoise_images(images_to_denoise_path=processed_images_path, save_dir=destination_directory_path,
+ patch_size=patch_size, stride_size=stride_size)
+
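+# Usage sketch (hypothetical paths):
+# despeckle("/data/cosar_images", "/data/results", model_name="spotlight")
+# Intermediate numpy arrays land in /data/results/processed_images and the
+# despeckled outputs in /data/results.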
+
+def despeckle_from_coordinates(sar_images_path: str, coordinates_dict: dict, destination_directory_path: str, model_name: str = "spotlight",
+ patch_size: int = PATCH_SIZE, stride_size: int = STRIDE_SIZE, symetrise: bool = True):
+ """Despeckle specified area with coordinates in coSAR images using trained MERLIN (spotlight or stripmap weights)
+
+ Args:
+ sar_images_path (str): path of sar images
+ coordinates_dict (dict): dictionary containing pixel boundaries of the area to despeckle (x_start, x_end, y_start, y_end)
+ destination_directory_path (str): path of folder in which results will be stored
+        model_name (str): model name, either "spotlight" or "stripmap" to select the MERLIN
+            model matching the coSAR image format, or "sar2sar" for the SAR2SAR model. Defaults to "spotlight"
+ patch_size (int): patch size. Defaults to constant PATCH_SIZE.
+ stride_size (int): stride size. Defaults to constant STRIDE_SIZE.
+        symetrise (bool): when using the spotlight or stripmap model, whether to symmetrise
+            the real and imaginary parts of the noisy image. Defaults to True
+ """
+
+ logging.info(
+ f"""Despeckling images from coordinates using {model_name} weights""")
+
+ processed_images_path = create_empty_folder_in_directory(destination_directory_path=destination_directory_path,
+ folder_name="processed_images")
+ preprocess_and_store_sar_images_from_coordinates(sar_images_path=sar_images_path, processed_images_path=processed_images_path,
+ coordinates_dict=coordinates_dict, model_name=model_name)
+
+ logging.info(
+        f"Starting inference... Collecting data from {sar_images_path} and storing test results in {destination_directory_path}")
+
+ denoiser = get_denoiser(model_name=model_name, symetrise=symetrise)
+ denoiser.denoise_images(images_to_denoise_path=processed_images_path, save_dir=destination_directory_path,
+ patch_size=patch_size, stride_size=stride_size)
+
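+# Usage sketch (hypothetical paths and pixel bounds):
+# coordinates = {"x_start": 0, "x_end": 1000, "y_start": 0, "y_end": 1000}
+# despeckle_from_coordinates("/data/cosar_images", coordinates, "/data/results")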
+
+def despeckle_from_crop(sar_images_path: str, destination_directory_path: str, model_name: str = "spotlight",
+ patch_size: int = PATCH_SIZE, stride_size: int = STRIDE_SIZE, fixed: bool = True, symetrise: bool = True):
+ """Despeckle specified area with an integrated cropping tool (made with OpenCV) in coSAR images using trained MERLIN (spotlight or stripmap weights)
+
+ Args:
+ sar_images_path (str): path of sar images
+ destination_directory_path (str): path of folder in which results will be stored
+        model_name (str): model name, either "spotlight" or "stripmap" to select the MERLIN
+            model matching the coSAR image format, or "sar2sar" for the SAR2SAR model. Defaults to "spotlight"
+        patch_size (int): patch size. Defaults to constant PATCH_SIZE.
+        stride_size (int): stride size. Defaults to constant STRIDE_SIZE.
+        fixed (bool): if True, the crop size is fixed to 256x256 pixels. Defaults to True
+        symetrise (bool): when using the spotlight or stripmap model, whether to symmetrise
+            the real and imaginary parts of the noisy image. Defaults to True
+ """
+
+ logging.info(
+ f"""Cropping and despeckling images using {model_name} weights""")
+
+ processed_images_path = create_empty_folder_in_directory(destination_directory_path=destination_directory_path,
+ folder_name="processed_images")
+
+ ext = "cos" if model_name in ["spotlight", "stripmap"] else "tiff"
+ images_paths = glob(os.path.join(sar_images_path, f"*.{ext}")) + \
+ glob(os.path.join(sar_images_path, "*.npy"))
+
+ for i, image_path in enumerate(images_paths):
+ # Load image for cropping
+ image = load_sar_image(image_path)
+
+ # Get cropping coordinates from the first image of the list of images to crop and despeckle
+ if i == 0:
+ cropping_coordinates = get_cropping_coordinates(
+ image=image, fixed=fixed, destination_directory_path=destination_directory_path, model_name=model_name)
+
+ # Crop image using stored cropping coordinates and store it in processed_images_path
+ crop_image(image, image_path, cropping_coordinates, model_name,
+ processed_images_path)
+
+ logging.info(
+        f"Starting inference... Collecting data from {sar_images_path} and storing results in {destination_directory_path}")
+
+ denoiser = get_denoiser(model_name=model_name, symetrise=symetrise)
+ denoiser.denoise_images(images_to_denoise_path=processed_images_path, save_dir=destination_directory_path,
+ patch_size=patch_size, stride_size=stride_size)
+
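+# Usage sketch (hypothetical paths): an OpenCV window opens on the first
+# image; drag to select an area, then press 'q' to close the crop tool.
+# despeckle_from_crop("/data/cosar_images", "/data/results", fixed=True)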
+
+# deepdespeckling.merlin.merlin_denoiser
+from glob import glob
+import logging
+from pathlib import Path
+import torch
+import os
+import numpy as np
+from tqdm import tqdm
+
+from deepdespeckling.denoiser import Denoiser
+from deepdespeckling.model import Model
+from deepdespeckling.utils.constants import M, m
+from deepdespeckling.utils.utils import (denormalize_sar_image, load_sar_image, save_image_to_npy_and_png,
+ symetrise_real_and_imaginary_parts, create_empty_folder_in_directory)
+
+current_dir = os.path.dirname(__file__)
+
+
+class MerlinDenoiser(Denoiser):
+    """Denoiser using the MERLIN model (spotlight or stripmap weights)
+ """
+
+ def __init__(self, model_name, symetrise, **params):
+ """Initialize MerlinDenoiser class
+
+        Args:
+            model_name (str): name of the model to use, either "spotlight" or "stripmap"
+            symetrise (bool): whether to symmetrise the real and imaginary parts of the noisy image
+        """
+ super().__init__(**params)
+ self.model_name = model_name
+ self.symetrise = symetrise
+ self.weights_path = self.init_model_weights_path()
+
+ def init_model_weights_path(self) -> str:
+ """Get model weights path from model name
+
+ Returns:
+ model_weights_path (str): the path of the weights of the specified model
+ """
+ if self.model_name == "spotlight":
+ model_weights_path = os.path.join(
+ current_dir, "saved_models/spotlight.pth")
+ elif self.model_name == "stripmap":
+ model_weights_path = os.path.join(
+ current_dir, "saved_models/stripmap.pth")
+ else:
+            raise ValueError(
+                f"Unknown model name '{self.model_name}'; expected 'spotlight' or 'stripmap'")
+
+ return model_weights_path
+
+ def load_model(self, patch_size: int) -> Model:
+ """Load model with given weights
+
+ Args:
+            patch_size (int): patch size
+
+ Returns:
+ model (Model): model loaded with stored weights
+ """
+ model = Model(torch.device(self.device),
+ height=patch_size, width=patch_size)
+ model.load_state_dict(torch.load(
+ self.weights_path, map_location=torch.device("cpu")))
+
+ return model
+
+
+ def save_despeckled_images(self, despeckled_images: dict, image_name: str, save_dir: str):
+        """Save the full, real and imaginary parts of noisy and denoised images stored in a dictionary as npy and png files in a given folder
+
+ Args:
+ despeckled_images (dict): dictionary containing full, real and imaginary parts of noisy and denoised image
+ image_name (str): name of the image
+ save_dir (str): path to the folder where to save the png images
+ """
+ threshold = np.mean(
+ despeckled_images["noisy"]["full"]) + 3 * np.std(despeckled_images["noisy"]["full"])
+ image_name = image_name.split('\\')[-1]
+
+ for key in despeckled_images:
+ create_empty_folder_in_directory(save_dir, key)
+ for key2 in despeckled_images[key]:
+ save_image_to_npy_and_png(
+ despeckled_images[key][key2], save_dir, f"/{key}/{key}_{key2}_", image_name, threshold)
+
+ def denoise_image_kernel(self, noisy_image: torch.tensor, denoised_image: np.array, x: int, y: int, patch_size: int,
+ model: Model, normalisation_kernel: bool = False) -> np.array:
+ """Denoise a subpart of a given symetrised noisy image delimited by x, y and patch_size using a given model
+
+ Args:
+ noisy_image (torch tensor): symetrised noisy image to denoise
+ denoised_image (numpy array): symetrised partially denoised image
+ x (int): x coordinate of current kernel to denoise
+ y (int): y coordinate of current kernel to denoise
+ patch_size (int): patch size
+ model (Model): trained model with loaded weights
+            normalisation_kernel (bool, optional): if True, accumulate ones instead of the model
+                output, to build the normalisation (count) image. Defaults to False.
+
+ Returns:
+ denoised_image (numpy array): image denoised in the given coordinates and the ones already iterated
+ """
+ if not normalisation_kernel:
+
+ if self.device != 'cpu':
+ tmp_clean_image = model.forward(
+ noisy_image).cpu().detach().numpy()
+ else:
+ tmp_clean_image = model.forward(
+ noisy_image).detach().numpy()
+
+            tmp_clean_image = np.moveaxis(tmp_clean_image, 1, -1)
+            denoised_image[:, x:x + patch_size, y:y + patch_size, :] += tmp_clean_image
+        else:
+            denoised_image[:, x:x + patch_size, y:y + patch_size, :] += np.ones(
+                (1, patch_size, patch_size, 1))
+ return denoised_image
+
+ def preprocess_noisy_image(self, noisy_image: np.array) -> tuple[np.array, np.array, np.array]:
+        """Preprocess a given noisy image and generate its real and imaginary parts
+
+ Args:
+ noisy_image (numpy array): noisy image
+
+ Returns:
+ noisy_image, noisy_image_real_part, noisy_image_imaginary_part (numpy array, numpy array, numpy array):
+ preprocessed noisy image, real part of noisy image, imaginary part of noisy image
+ """
+ noisy_image_real_part = (noisy_image[:, :, :, 0]).reshape(noisy_image.shape[0], noisy_image.shape[1],
+ noisy_image.shape[2], 1)
+ noisy_image_imaginary_part = (noisy_image[:, :, :, 1]).reshape(noisy_image.shape[0], noisy_image.shape[1],
+ noisy_image.shape[2], 1)
+ noisy_image = np.squeeze(
+ np.sqrt(noisy_image_real_part ** 2 + noisy_image_imaginary_part ** 2))
+
+ return noisy_image, noisy_image_real_part, noisy_image_imaginary_part
+
+ def preprocess_denoised_image(self, denoised_image_real_part: np.array, denoised_image_imaginary_part: np.array, count_image: np.array) -> tuple[np.array, np.array, np.array]:
+        """Denormalise the denoised real and imaginary parts of an image and build the full denoised image
+
+ Args:
+ denoised_image_real_part (numpy array): real part of a denoised image
+ denoised_image_imaginary_part (numpy array): imaginary part of a denoised image
+ count_image (numpy array): normalisation image used for denormalisation
+
+ Returns:
+ denoised_image, denoised_image_real_part, denoised_image_imaginary_part (numpy array, numpy array, numpy array):
+ processed denoised full image, processed denoised image real part, processed denoised image imaginary part
+ """
+ denoised_image_real_part = denormalize_sar_image(
+ denoised_image_real_part / count_image)
+ denoised_image_imaginary_part = denormalize_sar_image(
+ denoised_image_imaginary_part / count_image)
+
+        # combine the two estimates
+ output_clean_image = 0.5 * (np.square(
+ denoised_image_real_part) + np.square(denoised_image_imaginary_part))
+
+ denoised_image = np.sqrt(np.squeeze(output_clean_image))
+
+ return denoised_image, denoised_image_real_part, denoised_image_imaginary_part
+
+ def denoise_image(self, noisy_image: np.array, patch_size: int, stride_size: int) -> dict:
+ """Preprocess and denoise a coSAR image using given model weights
+
+ Args:
+ noisy_image (numpy array): numpy array containing the noisy image to despeckle
+ patch_size (int): size of the patch of the convolution
+ stride_size (int): number of pixels between one convolution to the next
+
+ Returns:
+ despeckled_image (dict): noisy and denoised images
+ """
+ noisy_image = np.array(noisy_image).reshape(
+ 1, np.size(noisy_image, 0), np.size(noisy_image, 1), 2)
+
+        # Get image dimensions
+ image_height = np.size(noisy_image, 1)
+ image_width = np.size(noisy_image, 2)
+
+ noisy_image, noisy_image_real_part, noisy_image_imaginary_part = self.preprocess_noisy_image(
+ noisy_image)
+
+ model = self.load_model(patch_size=patch_size)
+
+ count_image = np.zeros(noisy_image_real_part.shape)
+ denoised_image_real_part = np.zeros(noisy_image_real_part.shape)
+ denoised_image_imaginary_part = np.zeros(noisy_image_real_part.shape)
+
+ x_range = self.initialize_axis_range(
+ image_height, patch_size, stride_size)
+ y_range = self.initialize_axis_range(
+ image_width, patch_size, stride_size)
+
+ for x in tqdm(x_range):
+ for y in y_range:
+ real_to_denoise = noisy_image_real_part[:,
+ x:x + patch_size, y:y + patch_size, :]
+ imag_to_denoise = noisy_image_imaginary_part[:,
+ x:x + patch_size, y:y + patch_size, :]
+ if self.symetrise:
+ real_to_denoise, imag_to_denoise = symetrise_real_and_imaginary_parts(
+ real_to_denoise, imag_to_denoise)
+
+ real_to_denoise = torch.tensor(
+ real_to_denoise, device=self.device, dtype=torch.float32)
+ imag_to_denoise = torch.tensor(
+ imag_to_denoise, device=self.device, dtype=torch.float32)
+
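+                # Map amplitudes to the model's log-domain input using the
+                # dataset constants m and M (log-amplitude bounds)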
+ real_to_denoise = (torch.log(torch.square(
+ real_to_denoise)+1e-3)-2*m)/(2*(M-m))
+ imag_to_denoise = (torch.log(torch.square(
+ imag_to_denoise)+1e-3)-2*m)/(2*(M-m))
+
+ denoised_image_real_part = self.denoise_image_kernel(
+ real_to_denoise, denoised_image_real_part, x, y, patch_size, model)
+ denoised_image_imaginary_part = self.denoise_image_kernel(
+ imag_to_denoise, denoised_image_imaginary_part, x, y, patch_size, model)
+ count_image = self.denoise_image_kernel(
+ imag_to_denoise, count_image, x, y, patch_size, model, normalisation_kernel=True)
+
+ denoised_image, denoised_image_real_part, denoised_image_imaginary_part = self.preprocess_denoised_image(
+ denoised_image_real_part, denoised_image_imaginary_part, count_image)
+
+ despeckled_image = {"noisy": {"full": noisy_image,
+ "real": np.squeeze(noisy_image_real_part),
+ "imaginary": np.squeeze(noisy_image_imaginary_part)
+ },
+ "denoised": {"full": denoised_image,
+ "from_real": denoised_image_real_part,
+ "from_imaginary": denoised_image_imaginary_part
+ }
+ }
+
+ return despeckled_image
+
+    def denoise_images(self, images_to_denoise_path: str, save_dir: str, patch_size: int,
+ stride_size: int):
+ """Iterate over a directory of coSAR images and store the denoised images in a directory
+
+ Args:
+            images_to_denoise_path (str): path of the folder containing the npy images to denoise
+            save_dir (str): directory in which to save denoised, real and noisy images
+ patch_size (int): size of the patch of the convolution
+ stride_size (int): number of pixels between one convolution to the next
+ """
+
+        images_to_denoise_paths = glob(os.path.join(images_to_denoise_path, "*.npy"))
+
+        if not images_to_denoise_paths:
+            raise FileNotFoundError(
+                f"No npy images found in {images_to_denoise_path}")
+
+ logging.info(f"Starting denoising images in {images_to_denoise_paths}")
+
+        for image_path in images_to_denoise_paths:
+            image_name = Path(image_path).name
+            logging.info(f"Despeckling {image_name}")
+
+            noisy_image = load_sar_image(image_path).astype(np.float32)
+            despeckled_images = self.denoise_image(
+                noisy_image, patch_size, stride_size)
+
+ logging.info(
+ f"Saving despeckled images in {save_dir}")
+ self.save_despeckled_images(
+ despeckled_images, image_name, save_dir)
+
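+# Usage sketch (hypothetical paths; the folder is expected to contain
+# preprocessed .npy images):
+# denoiser = MerlinDenoiser(model_name="spotlight", symetrise=True)
+# denoiser.denoise_images("/data/processed_images", "/data/results",
+#                         patch_size=256, stride_size=64)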
+
+
+# deepdespeckling.model
+import torch
+import numpy as np
+
+
+class Model(torch.nn.Module):
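+    """U-Net-style encoder-decoder used by the MERLIN and SAR2SAR denoisers
+    to predict the noise component of a SAR image patch
+    """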
+
+    def __init__(self, device: torch.device, height: int, width: int):
+ super().__init__()
+
+ self.device = device
+
+ self.height = height
+ self.width = width
+
+ self.pool = torch.nn.MaxPool2d(kernel_size=2, stride=2)
+ self.leaky = torch.nn.LeakyReLU(0.1)
+
+ self.enc0 = torch.nn.Conv2d(in_channels=1, out_channels=48, kernel_size=(3, 3), stride=(1, 1),
+ padding='same', device=self.device)
+ self.enc1 = torch.nn.Conv2d(in_channels=48, out_channels=48, kernel_size=(3, 3), stride=(1, 1),
+ padding='same', device=self.device)
+ self.enc2 = torch.nn.Conv2d(in_channels=48, out_channels=48, kernel_size=(3, 3), stride=(1, 1),
+ padding='same', device=self.device)
+ self.enc3 = torch.nn.Conv2d(in_channels=48, out_channels=48, kernel_size=(3, 3), stride=(1, 1),
+ padding='same', device=self.device)
+ self.enc4 = torch.nn.Conv2d(in_channels=48, out_channels=48, kernel_size=(3, 3), stride=(1, 1),
+ padding='same', device=self.device)
+ self.enc5 = torch.nn.Conv2d(in_channels=48, out_channels=48, kernel_size=(3, 3), stride=(1, 1),
+ padding='same', device=self.device)
+ self.enc6 = torch.nn.Conv2d(in_channels=48, out_channels=48, kernel_size=(3, 3), stride=(1, 1),
+ padding='same', device=self.device)
+
+ self.dec5 = torch.nn.Conv2d(in_channels=96, out_channels=96, kernel_size=(3, 3), stride=(1, 1),
+ padding='same', device=self.device)
+ self.dec5b = torch.nn.Conv2d(in_channels=96, out_channels=96, kernel_size=(3, 3), stride=(1, 1),
+ padding='same', device=self.device)
+ self.dec4 = torch.nn.Conv2d(in_channels=144, out_channels=96, kernel_size=(3, 3), stride=(1, 1),
+ padding='same', device=self.device)
+ self.dec4b = torch.nn.Conv2d(in_channels=96, out_channels=96, kernel_size=(3, 3), stride=(1, 1),
+ padding='same', device=self.device)
+ self.dec3 = torch.nn.Conv2d(in_channels=144, out_channels=96, kernel_size=(3, 3), stride=(1, 1),
+ padding='same', device=self.device)
+ self.dec3b = torch.nn.Conv2d(in_channels=96, out_channels=96, kernel_size=(3, 3), stride=(1, 1),
+ padding='same', device=self.device)
+ self.dec2 = torch.nn.Conv2d(in_channels=144, out_channels=96, kernel_size=(3, 3), stride=(1, 1),
+ padding='same', device=self.device)
+ self.dec2b = torch.nn.Conv2d(in_channels=96, out_channels=96, kernel_size=(3, 3), stride=(1, 1),
+ padding='same', device=self.device)
+ self.dec1a = torch.nn.Conv2d(in_channels=97, out_channels=64, kernel_size=(3, 3), stride=(1, 1),
+ padding='same', device=self.device)
+ self.dec1b = torch.nn.Conv2d(in_channels=64, out_channels=32, kernel_size=(3, 3), stride=(1, 1),
+ padding='same', device=self.device)
+ self.dec1 = torch.nn.Conv2d(in_channels=32, out_channels=1, kernel_size=(3, 3), stride=(1, 1),
+ padding='same', device=self.device)
+
+ self.upscale2d = torch.nn.UpsamplingNearest2d(scale_factor=2)
+
+    def forward(self, x: torch.Tensor) -> torch.Tensor:
+        """Forward pass: predict the noise component n of the input patch x
+        and return the residual x - n
+
+        The network is a U-Net-style encoder-decoder: the encoder compresses
+        the input through convolutions and max-pooling, and the decoder
+        upsamples it back, concatenating the matching encoder feature maps
+        (skip connections) at each scale.
+
+        Parameters
+        ----------
+        x : torch.Tensor
+            a tensor containing the image patch
+
+        Returns
+        ----------
+        x - n : torch.Tensor
+            the denoised patch, i.e. the input minus the predicted noise
+
+        """
+ x = torch.reshape(x, [1, 1, self.height, self.width])
+ # x = torch.permute(x, (0, 3, 1, 2))
+ skips = [x]
+
+ n = x
+
+ # ENCODER
+ n = self.leaky(self.enc0(n))
+ n = self.leaky(self.enc1(n))
+ n = self.pool(n)
+ skips.append(n)
+
+ n = self.leaky(self.enc2(n))
+ n = self.pool(n)
+ skips.append(n)
+
+ n = self.leaky(self.enc3(n))
+ n = self.pool(n)
+ skips.append(n)
+
+ n = self.leaky(self.enc4(n))
+ n = self.pool(n)
+ skips.append(n)
+
+ n = self.leaky(self.enc5(n))
+ n = self.pool(n)
+ n = self.leaky(self.enc6(n))
+
+ # DECODER
+ n = self.upscale2d(n)
+ n = torch.cat((n, skips.pop()), dim=1)
+ n = self.leaky(self.dec5(n))
+ n = self.leaky(self.dec5b(n))
+
+ n = self.upscale2d(n)
+ n = torch.cat((n, skips.pop()), dim=1)
+ n = self.leaky(self.dec4(n))
+ n = self.leaky(self.dec4b(n))
+
+ n = self.upscale2d(n)
+ n = torch.cat((n, skips.pop()), dim=1)
+ n = self.leaky(self.dec3(n))
+ n = self.leaky(self.dec3b(n))
+
+ n = self.upscale2d(n)
+ n = torch.cat((n, skips.pop()), dim=1)
+ n = self.leaky(self.dec2(n))
+ n = self.leaky(self.dec2b(n))
+
+ n = self.upscale2d(n)
+ n = torch.cat((n, skips.pop()), dim=1)
+ n = self.leaky(self.dec1a(n))
+ n = self.leaky(self.dec1b(n))
+
+ n = self.dec1(n)
+
+ return x - n
+
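+# Shape sanity check (assumed values; the patch dimensions must be divisible
+# by 32 to survive the five pooling/upsampling stages):
+# model = Model(torch.device("cpu"), height=256, width=256)
+# model.forward(torch.rand(1, 1, 256, 256)).shape -> torch.Size([1, 1, 256, 256])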
+
+
+# deepdespeckling.sar2sar.sar2sar_denoiser
+from glob import glob
+import logging
+import os
+from pathlib import Path
+import torch
+import numpy as np
+from tqdm import tqdm
+
+from deepdespeckling.denoiser import Denoiser
+from deepdespeckling.model import Model
+from deepdespeckling.utils.constants import M, m
+from deepdespeckling.utils.utils import (denormalize_sar_image, load_sar_image, normalize_sar_image, save_image_to_npy_and_png,
+ create_empty_folder_in_directory)
+
+current_dir = os.path.dirname(__file__)
+
+
+class Sar2SarDenoiser(Denoiser):
+    """Denoiser using the SAR2SAR model
+ """
+
+ def __init__(self, **params):
+ super().__init__(**params)
+ self.weights_path = os.path.join(
+ current_dir, "saved_model/sar2sar.pth")
+        logging.info(f"Using model weights stored in {self.weights_path}")
+
+ def load_model(self, patch_size: int) -> Model:
+ """Load model with given weights
+
+ Args:
+            patch_size (int): patch size
+
+ Returns:
+ model (Model): model loaded with stored weights
+ """
+ model = Model(torch.device(self.device),
+ height=patch_size, width=patch_size)
+ model.load_state_dict(torch.load(
+ self.weights_path, map_location=torch.device("cpu"))['model_state_dict'])
+
+ return model
+
+
+ def save_despeckled_images(self, despeckled_images: dict, image_name: str, save_dir: str):
+        """Save the noisy and denoised images stored in a dictionary as npy and png files in a given folder
+
+ Args:
+ despeckled_images (dict): dictionary containing noisy and denoised image
+ image_name (str): name of the image
+ save_dir (str): path to the folder where to save the png images
+ """
+ threshold = np.mean(
+ despeckled_images["noisy"]) + 3 * np.std(despeckled_images["noisy"])
+ image_name = image_name.split('\\')[-1]
+
+ for key in despeckled_images:
+ create_empty_folder_in_directory(save_dir, key)
+ save_image_to_npy_and_png(
+ despeckled_images[key], save_dir, f"/{key}/{key}_", image_name, threshold)
+
+ def denoise_image_kernel(self, noisy_image_kernel: torch.tensor, denoised_image_kernel: np.array, x: int, y: int, patch_size: int, model: Model, normalisation_kernel: bool = False) -> np.array:
+        """Denoise a subpart of a given noisy image delimited by x, y and patch_size using a given model
+
+ Args:
+ noisy_image_kernel (torch tensor): part of the noisy image to denoise
+ denoised_image_kernel (numpy array): part of the partially denoised image
+ x (int): x coordinate of current kernel to denoise
+ y (int): y coordinate of current kernel to denoise
+ patch_size (int): patch size
+ model (Model): trained model with loaded weights
+            normalisation_kernel (bool, optional): if True, accumulate ones instead of the model
+                output, to build the normalisation (count) image. Defaults to False.
+
+ Returns:
+ denoised_image_kernel (numpy array): image denoised in the given coordinates and the ones already iterated
+ """
+ if not normalisation_kernel:
+
+ with torch.no_grad():
+ if self.device != 'cpu':
+ tmp_clean_image = model.forward(
+ noisy_image_kernel).cpu().numpy()
+ else:
+ tmp_clean_image = model.forward(
+ noisy_image_kernel).numpy()
+
+ tmp_clean_image = denormalize_sar_image(np.squeeze(
+ np.asarray(tmp_clean_image)))
+
+            denoised_image_kernel[x:x + patch_size, y:y + patch_size] += tmp_clean_image
+        else:
+            denoised_image_kernel[x:x + patch_size, y:y + patch_size] += np.ones(
+                (patch_size, patch_size))
+
+ return denoised_image_kernel
+
+ def denormalize_sar_image(self, image: np.array) -> np.array:
+ """Denormalize a sar image stored in a numpy array
+
+ Args:
+ image (numpy array): a sar image
+
+ Raises:
+ TypeError: raise an error if the image file is not a numpy array
+
+ Returns:
+ (numpy array): the image denormalized
+ """
+ if not isinstance(image, np.ndarray):
+ raise TypeError('Please provide a numpy array')
+ return np.exp((np.clip(np.squeeze(image), 0, image.max()))*(M-m)+m)
+
+ def denoise_image(self, noisy_image: np.array, patch_size: int, stride_size: int) -> dict:
+        """Preprocess and denoise a SAR image using given model weights
+
+ Args:
+ noisy_image (numpy array): numpy array containing the noisy image to despeckle
+ patch_size (int): size of the patch of the convolution
+ stride_size (int): number of pixels between one convolution to the next
+
+ Returns:
+            despeckled_image (dict): dictionary containing the noisy and denoised images
+ """
+ noisy_image = np.array(noisy_image).reshape(
+ 1, np.size(noisy_image, 0), np.size(noisy_image, 1), 1).astype(np.float32)
+
+ noisy_image = normalize_sar_image(noisy_image)
+
+ noisy_image = torch.tensor(
+ noisy_image, dtype=torch.float)
+
+        # Get image dimensions
+ image_height = noisy_image.size(dim=1)
+ image_width = noisy_image.size(dim=2)
+
+ model = self.load_model(patch_size=patch_size)
+
+ count_image = np.zeros((image_height, image_width))
+ denoised_image = np.zeros((image_height, image_width))
+
+ x_range = self.initialize_axis_range(
+ image_height, patch_size, stride_size)
+ y_range = self.initialize_axis_range(
+ image_width, patch_size, stride_size)
+
+ for x in tqdm(x_range):
+ for y in y_range:
+ noisy_image_kernel = noisy_image[:,
+ x:x + patch_size, y:y + patch_size, :]
+ noisy_image_kernel = noisy_image_kernel.to(self.device)
+
+ denoised_image = self.denoise_image_kernel(
+ noisy_image_kernel, denoised_image, x, y, patch_size, model)
+ count_image = self.denoise_image_kernel(
+ noisy_image_kernel, count_image, x, y, patch_size, model, normalisation_kernel=True)
+
+ denoised_image = denoised_image / count_image
+
+ noisy_image_denormalized = self.denormalize_sar_image(
+ np.squeeze(np.asarray(noisy_image.cpu().numpy())))
+
+ despeckled_image = {"noisy": noisy_image_denormalized,
+ "denoised": denoised_image
+ }
+
+ return despeckled_image
+
+    def denoise_images(self, images_to_denoise_path: str, save_dir: str, patch_size: int,
+ stride_size: int):
+        """Iterate over a directory of SAR images and store the denoised images in a directory
+
+ Args:
+            images_to_denoise_path (str): path of the folder containing the npy images to denoise
+            save_dir (str): directory in which to save noisy and denoised images
+ patch_size (int): size of the patch of the convolution
+ stride_size (int): number of pixels between one convolution to the next
+ """
+
+        images_to_denoise_paths = glob(os.path.join(images_to_denoise_path, "*.npy"))
+
+        if not images_to_denoise_paths:
+            raise FileNotFoundError(
+                f"No npy images found in {images_to_denoise_path}")
+
+ logging.info(f"Starting denoising images in {images_to_denoise_paths}")
+
+        for image_path in images_to_denoise_paths:
+            image_name = Path(image_path).name
+            logging.info(f"Despeckling {image_name}")
+
+            noisy_image = load_sar_image(image_path).astype(np.float32)
+            despeckled_images = self.denoise_image(
+                noisy_image, patch_size, stride_size)
+
+ logging.info(
+ f"Saving despeckled images in {save_dir}")
+ self.save_despeckled_images(
+ despeckled_images, image_name, save_dir)
+
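+# Usage sketch (hypothetical paths; the folder is expected to contain
+# preprocessed .npy images):
+# denoiser = Sar2SarDenoiser()
+# denoiser.denoise_images("/data/processed_images", "/data/results",
+#                         patch_size=256, stride_size=64)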
+
+# deepdespeckling.utils.load_cosar
+import struct
+from osgeo import gdal
+import numpy as np
+
+
+def cos2mat(path_to_cosar_image: str) -> np.array:
+    """Convert a CoSAR image to a numpy array of size [ncolumns, nlines, 2]
+
+ Args:
+ path_to_cosar_image (str): path to the image which is a cos file
+
+ Returns:
+ numpy array : the image in a numpy array
+ """
+
+ print('Converting CoSAR to numpy array of size [ncolumns,nlines,2]')
+
+ try:
+ fin = open(path_to_cosar_image, 'rb')
+ except IOError:
+        print(path_to_cosar_image + ' cannot be opened, failed to call cos2mat')
+ return 0, 0, 0, 0
+
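+    # Read the big-endian int32 fields of the COSAR header; only the
+    # rangeline byte count (irtnb) and the azimuth sample count (ias)
+    # are used below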
+ ibib = struct.unpack(">i", fin.read(4))[0]
+ irsri = struct.unpack(">i", fin.read(4))[0]
+ irs = struct.unpack(">i", fin.read(4))[0]
+ ias = struct.unpack(">i", fin.read(4))[0]
+ ibi = struct.unpack(">i", fin.read(4))[0]
+ irtnb = struct.unpack(">i", fin.read(4))[0]
+ itnl = struct.unpack(">i", fin.read(4))[0]
+
+ nlig = struct.unpack(">i", fin.read(4))[0]
+ ncoltot = int(irtnb / 4)
+ ncol = ncoltot - 2
+ nlig = ias
+
+    print('Reading image in CoSAR format. ncolumns=%d nlines=%d' % (ncol, nlig))
+
+    firm = np.zeros(4 * ncoltot, dtype=np.byte)
+    imgcxs = np.empty([nlig, ncol], dtype=np.complex64)
+
+ fin.seek(0)
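+    # The first four rangelines hold annotation data rather than image
+    # samples, so read and discard them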
+ firm = fin.read(4 * ncoltot)
+ firm = fin.read(4 * ncoltot)
+ firm = fin.read(4 * ncoltot)
+ firm = fin.read(4 * ncoltot)
+
+ for iut in range(nlig):
+ firm = fin.read(4 * ncoltot)
+ imgligne = np.ndarray(2 * ncoltot, '>h', firm)
+ imgcxs[iut, :] = imgligne[4:2 * ncoltot:2] + \
+ 1j * imgligne[5:2 * ncoltot:2]
+
+ print('[:,:,0] contains the real part of the SLC image data')
+ print('[:,:,1] contains the imaginary part of the SLC image data')
+
+ return np.stack((np.real(imgcxs), np.imag(imgcxs)), axis=2)
+
+
+def load_tiff_image(path_to_tiff_image: str) -> np.array:
+ """Load a tiff image in a numpy array using gdal library
+
+ Args:
+ path_to_tiff_image (str): path to a tiff image
+
+ Returns:
+ (numpy array): numpy array containing the tiff image
+ """
+ dataset = gdal.Open(path_to_tiff_image)
+
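+    # NB: if the tiff has several bands, each iteration overwrites `array`,
+    # so only the last band is returned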
+ for x in range(1, dataset.RasterCount + 1):
+ band = dataset.GetRasterBand(x)
+ array = band.ReadAsArray()
+
+ return array.astype(np.float64)
+
+# deepdespeckling.utils.utils
+import ast
+import numpy as np
+import cv2
+import os
+from PIL import Image
+from scipy import signal
+from pathlib import Path
+from glob import glob
+
+from deepdespeckling.utils.load_cosar import cos2mat, load_tiff_image
+from deepdespeckling.utils.constants import M, m
+
+
+def normalize_sar_image(image: np.array) -> np.array:
+    """Normalize a SAR image stored in a numpy array
+
+ Args:
+ image (numpy array): the image to be normalized
+
+ Returns:
+ (numpy array): normalized image
+ """
+ return ((np.log(np.clip(image, 0, image.max())+1e-6)-m)/(M-m)).astype(np.float32)
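+# Round-trip sanity check (up to the 1e-6 stabiliser in normalize_sar_image):
+# denormalize_sar_image(normalize_sar_image(img)) ~= img for img > 0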
+
+
+def denormalize_sar_image(image: np.array) -> np.array:
+    """Denormalize a SAR image stored in a numpy array
+
+ Args:
+ image (numpy array): a sar image
+
+ Raises:
+ TypeError: raise an error if the image file is not a numpy array
+
+ Returns:
+ (numpy array): the image denormalized
+ """
+ if not isinstance(image, np.ndarray):
+ raise TypeError('Please provide a numpy array')
+ return np.exp((M - m) * (np.squeeze(image)).astype('float32') + m)
+
+
+def denormalize_sar_image_sar2sar(image: np.array) -> np.array:
+    """Denormalize a SAR image stored in a numpy array (SAR2SAR variant)
+
+ Args:
+ image (numpy array): a sar image
+
+ Raises:
+ TypeError: raise an error if the image file is not a numpy array
+
+ Returns:
+ (numpy array): the image denormalized
+ """
+ if not isinstance(image, np.ndarray):
+ raise TypeError('Please provide a numpy array')
+ return np.exp((np.clip(np.squeeze(image), 0, image.max()))*(M-m)+m)
+
+
+def load_sar_image(image_path: str) -> np.array:
+ """Load a SAR image in a numpy array, use cos2mat function if the file is a cos file,
+ load_tiff_image if the file is a tiff file
+
+ Args:
+        image_path (str) : absolute path to a SAR image (cos, npy or tiff file)
+
+ Returns:
+ image (numpy array) : the image of dimension [ncolumns,nlines,2]
+ """
+ if Path(image_path).suffix == ".npy":
+ image = np.load(image_path)
+ elif Path(image_path).suffix == ".cos":
+ image = cos2mat(image_path)
+ elif Path(image_path).suffix == ".tiff":
+ image = load_tiff_image(image_path)
+ else:
+ raise ValueError("the image should be a cos, npy or tiff file")
+ return image
+
+
+
+def load_sar_images(file_list):
+    """ Description
+    ----------
+    Load one npy file, or a list of npy files, and reshape each image to
+    [1, height, width, 2]
+
+    Parameters
+    ----------
+    file_list : a single path or a list of paths to npy images
+
+    Returns
+    ----------
+    A single reshaped image, or a list of reshaped images
+
+    """
+ if not isinstance(file_list, list):
+ image = np.load(file_list)
+ image = np.array(image).reshape(
+ 1, np.size(image, 0), np.size(image, 1), 2)
+ return image
+ data = []
+ for file in file_list:
+ image = np.load(file)
+ data.append(np.array(image).reshape(
+ 1, np.size(image, 0), np.size(image, 1), 2))
+ return data
+
+
+
+def create_empty_folder_in_directory(destination_directory_path: str, folder_name: str = "processed_images") -> str:
+ """Create an empty folder in a given directory
+
+ Args:
+        destination_directory_path (str): path of the directory in which an empty folder is created if it does not exist yet
+ folder_name (str, optional): name of the folder to create. Defaults to "processed_images".
+
+ Returns:
+ processed_images_path: path of the created empty folder
+ """
+ processed_images_path = destination_directory_path + f'/{folder_name}'
+ if not os.path.exists(processed_images_path):
+ os.mkdir(processed_images_path)
+ return processed_images_path
+
+
+
+def preprocess_and_store_sar_images(sar_images_path: str, processed_images_path: str, model_name: str = "spotlight"):
+    """Convert coSAR images to numpy arrays and store them in a specified path
+
+ Args:
+ sar_images_path (str): path of a folder containing coSAR images to be converted in numpy array
+ processed_images_path (str): path of the folder where converted images are stored
+        model_name (str): model name to be used for despeckling
+ """
+ ext = "cos" if model_name in ["spotlight", "stripmap"] else "tiff"
+ images_paths = glob(os.path.join(sar_images_path, "*.npy")) + \
+ glob(os.path.join(sar_images_path, f"*.{ext}"))
+ for image_path in images_paths:
+ imagename = image_path.split('/')[-1].split('.')[0]
+ if not os.path.exists(processed_images_path + '/' + imagename + '.npy'):
+ image = load_sar_image(image_path)
+ np.save(processed_images_path + '/' + imagename + '.npy', image)
+
+
+
+def preprocess_and_store_sar_images_from_coordinates(sar_images_path: str, processed_images_path: str, coordinates_dict: dict, model_name: str = "spotlight"):
+    """Convert specified areas of coSAR images to numpy arrays and store them in a specified path
+
+ Args:
+ sar_images_path (str): path of a folder containing coSAR images to be converted in numpy array
+ processed_images_path (str): path of the folder where converted images are stored
+ coordinates_dict (dict): dictionary containing pixel boundaries of the area to despeckle (x_start, x_end, y_start, y_end)
+        model_name (str): model name to be used for despeckling. Defaults to "spotlight"
+ """
+ x_start = coordinates_dict["x_start"]
+ x_end = coordinates_dict["x_end"]
+ y_start = coordinates_dict["y_start"]
+ y_end = coordinates_dict["y_end"]
+
+ ext = "cos" if model_name in ["spotlight", "stripmap"] else "tiff"
+
+ images_paths = glob(os.path.join(sar_images_path, "*.npy")) + \
+ glob(os.path.join(sar_images_path, f"*.{ext}"))
+ for image_path in images_paths:
+ imagename = image_path.split('/')[-1].split('.')[0]
+ if not os.path.exists(processed_images_path + '/' + imagename + '.npy'):
+ image = load_sar_image(image_path)
+ if ext == "cos":
+ np.save(processed_images_path + '/' + imagename +
+ '.npy', image[x_start:x_end, y_start:y_end, :])
+ else:
+ np.save(processed_images_path + '/' + imagename +
+ '.npy', image[x_start:x_end, y_start:y_end])
+
+
+
+def get_maximum_patch_size(kernel_size: int, patch_bound: int) -> int:
+    """Get the largest power-of-two multiple of the kernel size below a bound
+
+ Args:
+ kernel_size (int): the kernel size of the trained model
+ patch_bound (int): the maximum bound of the kernel size
+
+ Returns:
+ maximum_patch_size (int) : the maximum patch size
+ """
+ k = 1
+
+ while kernel_size * k < patch_bound:
+ k = k * 2
+
+ maximum_patch_size = int(kernel_size * (k/2))
+
+ return maximum_patch_size
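+# e.g. (assumed values) kernel_size=256, patch_bound=1000: k doubles
+# 1 -> 2 -> 4 (since 256 * 4 >= 1000), so the function returns 256 * (4/2) = 512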
+
+
+
+def get_maximum_patch_size_from_image_dimensions(kernel_size: int, height: int, width: int) -> int:
+    """Get the maximum patch size from the width, the height and the kernel size of the model in use
+
+ Args:
+ kernel_size (int): the kernel size of the trained model
+        height (int): the height of the image
+ width (int): the width of the image
+
+ Returns:
+ maximum_patch_size (int) : the maximum patch size to use for despeckling
+ """
+ patch_bound = min(height, width)
+
+ if patch_bound <= kernel_size:
+ maximum_patch_size = kernel_size
+ else:
+ maximum_patch_size = get_maximum_patch_size(
+ kernel_size=kernel_size, patch_bound=patch_bound)
+
+ return maximum_patch_size
+
+
+
+def symetrise_real_and_imaginary_parts(real_part: np.array, imag_part: np.array) -> tuple[np.array, np.array]:
+ """Symetrise given real and imaginary parts to ensure MERLIN properties
+
+ Args:
+ real_part (numpy array): real part of the noisy image to symetrise
+ imag_part (numpy array): imaginary part of the noisy image to symetrise
+
+ Returns:
+ np.real(ima2), np.imag(ima2) (numpy array, numpy array): symetrised real and imaginary parts of a noisy image
+ """
+ S = np.fft.fftshift(np.fft.fft2(
+ real_part[0, :, :, 0] + 1j * imag_part[0, :, :, 0]))
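+    # Estimate the sub-spectrum shift by correlating the mean azimuth (then
+    # range) spectral profile with its mirror image, and recentre the
+    # spectrum accordingly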
+ p = np.zeros((S.shape[0])) # azimut (ncol)
+ for i in range(S.shape[0]):
+ p[i] = np.mean(np.abs(S[i, :]))
+ sp = p[::-1]
+ c = np.real(np.fft.ifft(np.fft.fft(p) * np.conjugate(np.fft.fft(sp))))
+    d1 = int(c.argmax())
+ shift_az_1 = int(round(-(d1 - 1) / 2)) % p.shape[0] + int(p.shape[0] / 2)
+ p2_1 = np.roll(p, shift_az_1)
+ shift_az_2 = int(
+ round(-(d1 - 1 - p.shape[0]) / 2)) % p.shape[0] + int(p.shape[0] / 2)
+ p2_2 = np.roll(p, shift_az_2)
+ window = signal.gaussian(p.shape[0], std=0.2 * p.shape[0])
+ test_1 = np.sum(window * p2_1)
+ test_2 = np.sum(window * p2_2)
+    # make sure the spectrum is symmetrised and zero-Doppler centred
+ if test_1 >= test_2:
+ p2 = p2_1
+ shift_az = shift_az_1 / p.shape[0]
+ else:
+ p2 = p2_2
+ shift_az = shift_az_2 / p.shape[0]
+ S2 = np.roll(S, int(shift_az * p.shape[0]), axis=0)
+
+ q = np.zeros((S.shape[1])) # range (nlin)
+ for j in range(S.shape[1]):
+ q[j] = np.mean(np.abs(S[:, j]))
+ sq = q[::-1]
+ # correlation
+ cq = np.real(np.fft.ifft(np.fft.fft(q) * np.conjugate(np.fft.fft(sq))))
+    d2 = int(cq.argmax())
+ shift_range_1 = int(round(-(d2 - 1) / 2)
+ ) % q.shape[0] + int(q.shape[0] / 2)
+ q2_1 = np.roll(q, shift_range_1)
+ shift_range_2 = int(
+ round(-(d2 - 1 - q.shape[0]) / 2)) % q.shape[0] + int(q.shape[0] / 2)
+ q2_2 = np.roll(q, shift_range_2)
+ window_r = signal.gaussian(q.shape[0], std=0.2 * q.shape[0])
+ test_1 = np.sum(window_r * q2_1)
+ test_2 = np.sum(window_r * q2_2)
+ if test_1 >= test_2:
+ q2 = q2_1
+ shift_range = shift_range_1 / q.shape[0]
+ else:
+ q2 = q2_2
+ shift_range = shift_range_2 / q.shape[0]
+
+ Sf = np.roll(S2, int(shift_range * q.shape[0]), axis=1)
+ ima2 = np.fft.ifft2(np.fft.ifftshift(Sf))
+
+ return np.real(ima2), np.imag(ima2)
+
+
+
+def preprocess_image(image: np.array, threshold: float) -> Image.Image:
+ """Preprocess image by limiting pixel values with a threshold
+
+ Args:
+ image (numpy array): image to preprocess
+ threshold (float): pixel value threshold
+
+ Returns:
+        image (PIL Image): 8-bit grayscale image ready to be saved
+ """
+ image = np.clip(image, 0, threshold)
+ image = image / threshold * 255
+ image = Image.fromarray(image.astype('float64')).convert('L')
+
+ return image
+
+
+
+def save_image_to_png(image: np.array, threshold: float, image_full_path: str):
+ """Save a given image to a png file in a given folder
+
+ Args:
+ image (numpy array): image to save
+ threshold (float): threshold of pixel values of the image to be saved in png
+ image_full_path (str): full path of the image
+
+ Raises:
+ TypeError: if the image is not a numpy array
+ """
+ if not isinstance(image, np.ndarray):
+ raise TypeError('Please provide a numpy array')
+
+ image = preprocess_image(image, threshold=threshold)
+ image.save(image_full_path.replace('npy', 'png'))
+
+
+
+def save_image_to_npy_and_png(image: np.array, save_dir: str, prefix: str, image_name: str, threshold: float):
+ """Save a given image to npy and png in a given folder
+
+ Args:
+ image (numpy array): image to save
+ save_dir (str): path to the folder where to save the image
+ prefix (str): prefix of the image file name
+ image_name (str): name of the image file
+ threshold (float): threshold of image pixel values used for png conversion
+ """
+ image_full_path = save_dir + prefix + image_name
+
+ # Save image to npy file
+ np.save(image_full_path, image)
+
+ # Save image to png file
+ save_image_to_png(image, threshold, image_full_path)
+
+
+
+def compute_psnr(Shat: np.array, S: np.array) -> float:
+ """Compute Peak Signal to Noise Ratio
+
+ Args:
+ Shat (numpy array): a SAR amplitude image
+ S (numpy array): a reference SAR image
+
+ Returns:
+ res (float): psnr value
+ """
+ P = np.quantile(S, 0.99)
+ res = 10 * np.log10((P ** 2) / np.mean(np.abs(Shat - S) ** 2))
+ return res
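+# Quick check (synthetic arrays): for S uniform in [0, 1] and Shat = S + 0.01,
+# P ~= 0.99 and the MSE is 1e-4, so compute_psnr(Shat, S) ~= 40 dB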
+
+
+def get_cropping_coordinates(image: np.array, destination_directory_path: str, model_name: str, fixed: bool = True):
+ """Launch the crop tool to enable the user to select the subpart of the image to be despeckled
+
+ Args:
+ image (numpy aray): full image to be cropped
+ destination_directory_path (str): path of a folder to store the results
+        model_name (str): model name to be used for despeckling
+        fixed (bool, optional): whether the selection area has a fixed size or not. Defaults to True.
+
+    Returns:
+        cropping_coordinates (list): the selected coordinates, as read back from the file written by the crop tool
+    """
+ image = preprocess_sar_image_for_cropping(image, model_name)
+ full_image = image.copy()
+ cropping = False
+ x_start, y_start, x_end, y_end = 0, 0, 0, 0
+
+ # CV2 CROPPING IN WINDOW
+ def mouse_crop(event, x, y, flags, param):
+ """ The callback function of crop() to deal with user's events
+ """
+        # these live in the enclosing function's scope, so nonlocal (not
+        # global) is needed for assignments here to be visible outside
+        nonlocal x_start, y_start, x_end, y_end, cropping
+ cropping = False
+
+ if event == cv2.EVENT_LBUTTONDOWN:
+ x_start, y_start, x_end, y_end = x, y, x, y
+ cropping = True
+
+ # Mouse is Moving
+ elif event == cv2.EVENT_MOUSEMOVE:
+ x_end, y_end = x, y
+
+ # if the left mouse button was released
+ elif event == cv2.EVENT_LBUTTONUP:
+ # record the ending (x, y) coordinates
+ x_end, y_end = x, y
+ if fixed:
+ if x_start > x_end and y_start > y_end:
+ tempxstart = x_start
+ tempystart = y_start
+
+ x_start = tempxstart - 32
+ x_end = tempxstart
+
+ y_start = tempystart - 32
+ y_end = tempystart
+
+ elif x_start > x_end and y_start < y_end:
+ tempxstart = x_start
+ tempystart = y_start
+
+ x_start = tempxstart - 32
+ y_start = tempystart
+
+ x_end = tempxstart
+ y_end = tempystart + 32
+
+ elif x_start < x_end and y_start > y_end:
+ tempxstart = x_start
+ tempystart = y_start
+
+ x_start = tempxstart
+ y_start = tempystart - 32
+ x_end = tempxstart + 32
+ y_end = tempystart
+
+ else:
+ x_end = x_start + 32
+ y_end = y_start + 32
+ else:
+ if x_start > x_end and y_start > y_end:
+ tempx = x_start
+ x_start = x_end
+ x_end = tempx
+
+ tempy = y_start
+ y_start = y_end
+ y_end = tempy
+
+ elif x_start > x_end and y_start < y_end:
+ tempxstart = x_start
+ tempystart = y_start
+ tempxend = x_end
+ tempyend = y_end
+
+ x_start = tempxend
+ y_start = tempystart
+ x_end = tempxstart
+ y_end = tempyend
+
+ elif x_start < x_end and y_start > y_end:
+ tempxstart = x_start
+ tempystart = y_start
+ tempxend = x_end
+ tempyend = y_end
+
+ x_start = tempxstart
+ y_start = tempyend
+ x_end = tempxend
+ y_end = tempystart
+
+ # cropping is finished
+ cv2.rectangle(image, (x_start, y_start),
+ (x_end, y_end), (255, 0, 0), 2)
+ cropping = False
+
+ refPoint = [(x_start, y_start), (x_end, y_end)]
+
+ if len(refPoint) == 2: # when two points were found
+ cropped_image = full_image[refPoint[0][1] * 8:refPoint[1][1]
+ * 8, refPoint[0][0] * 8:refPoint[1][0] * 8]
+ if fixed:
+ cropped_image = cv2.resize(cropped_image, (256, 256))
+ else:
+ cropped_image = cv2.resize(
+ cropped_image, (8 * (x_end - x_start), 8 * (y_end - y_start)))
+ cv2.imshow("Cropped", cropped_image)
+
+ with open(destination_directory_path+'/cropping_coordinates.txt', 'w') as filehandle:
+ for listitem in refPoint:
+ filehandle.write(f'{listitem}\n')
+
+    h, w, _ = image.shape
+    # display the image at 1/8 scale; crop coordinates are scaled back by 8 later
+    image = cv2.resize(image, (int(w / 8), int(h / 8)))
+ cv2.namedWindow("image")
+ cv2.setMouseCallback("image", mouse_crop)
+
+ while True:
+ i = image.copy()
+ if not cropping:
+ cv2.imshow("image", image)
+ elif cropping:
+ cv2.imshow("image", i)
+ if not fixed:
+ cv2.rectangle(i, (x_start, y_start),
+ (x_end, y_end), (255, 0, 0), 2)
+ key = cv2.waitKey(10)
+ if key == ord('q'):
+ cv2.destroyAllWindows()
+ return get_cropping_coordinates_from_file(destination_directory_path=destination_directory_path)
+
+
+def get_cropping_coordinates_from_file(destination_directory_path: str) -> list:
+ """Get cropping coordinates from a file where it's stored
+
+ Args:
+ destination_directory_path (str): path of the file in which the cropping coordinates are stored
+
+ Returns:
+ cropping_coordinates (list): list containing cropping coordinates
+ """
+ cropping_coordinates = []
+
+ with open(destination_directory_path+'/cropping_coordinates.txt', 'r') as filehandle:
+ for line in filehandle:
+            # Parse the "(x, y)" tuple on each line, dropping the trailing linebreak
+            curr_place = ast.literal_eval(line.strip())
+ # Add item to the list
+ cropping_coordinates.append(curr_place)
+
+ return cropping_coordinates
+
+
+def crop_image(image: np.array, image_path: str, cropping_coordinates: list, model_name: str, processed_images_path: str):
+ """Crop an image using given cropping coordinates and store the result in a given folder
+
+ Args:
+ image (numpy array): image to be cropped
+ image_path (str): path of the image
+ cropping_coordinates (list): list of coordinates of cropping, format [(x1, y1), (x2, y2)]
+ model_name (str): name of the model (stripmap, spotlight or sar2sar)
+ processed_images_path (str): path of the folder where to store the cropped image in npy format
+ """
+ if model_name in ["spotlight", "stripmap"]:
+ image_real_part = image[:, :, 0]
+ image_imaginary_part = image[:, :, 1]
+
+ cropped_image_real_part = image_real_part[cropping_coordinates[0][1] * 8:cropping_coordinates[1][1] * 8,
+ cropping_coordinates[0][0] * 8:cropping_coordinates[1][0] * 8]
+ cropped_image_imaginary_part = image_imaginary_part[cropping_coordinates[0][1] * 8:cropping_coordinates[1][1] * 8,
+ cropping_coordinates[0][0] * 8:cropping_coordinates[1][0] * 8]
+
+ cropped_image_real_part = cropped_image_real_part.reshape(cropped_image_real_part.shape[0],
+ cropped_image_real_part.shape[1], 1)
+ cropped_image_imaginary_part = cropped_image_imaginary_part.reshape(cropped_image_imaginary_part.shape[0],
+ cropped_image_imaginary_part.shape[1], 1)
+
+ cropped_image = np.concatenate(
+ (cropped_image_real_part, cropped_image_imaginary_part), axis=2)
+ else:
+ cropped_image = image[cropping_coordinates[0][1] * 8:cropping_coordinates[1][1] * 8,
+ cropping_coordinates[0][0] * 8:cropping_coordinates[1][0] * 8]
+
+ cropped_image = cropped_image.reshape(
+ cropped_image.shape[0], cropped_image.shape[1], 1)
+
+ image_path_name = Path(image_path)
+ np.save(processed_images_path + '/' + image_path_name.stem +
+ '_cropped_to_denoise', cropped_image)
+
+
+def preprocess_sar_image_for_cropping(image: np.array, model_name: str) -> np.array:
+ """Preprocess image to use the cropping tool
+
+ Args:
+ image (numpy array): image from which we get cropping coordinates by using the cropping tool
+ model_name (str): name of the model (stripmap, spotlight or sar2sar)
+
+ Returns:
+ image (cv2 image): image preprocessed for cropping
+ """
+ if model_name in ["spotlight", "stripmap"]:
+ image_data_real = image[:, :, 0]
+ image_data_imag = image[:, :, 1]
+ image = np.squeeze(
+ np.sqrt(np.square(image_data_real) + np.square(image_data_imag)))
+
+ threshold = np.mean(image) + 3 * np.std(image)
+
+ image = np.clip(image, 0, threshold)
+ image = image / threshold * 255
+
+ image = Image.fromarray(image.astype('float64')).convert('L')
+    image = cv2.cvtColor(np.array(image), cv2.COLOR_GRAY2BGR)
+
+ return image
+
+