From 685f31b05203558fdd7e973639ebb84605aca76f Mon Sep 17 00:00:00 2001
From: devaletanmay
Date: Wed, 25 Aug 2021 17:22:39 +0530
Subject: [PATCH 1/3] Visual Perturbation temp

---
 code_soup/common/vision/perturbations.py | 47 ++++++++++++++++++++----
 1 file changed, 40 insertions(+), 7 deletions(-)

diff --git a/code_soup/common/vision/perturbations.py b/code_soup/common/vision/perturbations.py
index c68bc3c..0ac5671 100644
--- a/code_soup/common/vision/perturbations.py
+++ b/code_soup/common/vision/perturbations.py
@@ -3,13 +3,19 @@
 import numpy as np
 import torch
+import torch.nn as nn
+
+from math import log10
 
 from code_soup.common.perturbation import Perturbation
 
 
 class VisualPerturbation(Perturbation):
     """
-    Docstring for VisualPerturbations
+    An abstract method for various Visual Perturbation Metrics
+    Methods
+    __init__(self, original : Union[np.ndarray, torch.Tensor], perturbed: Union[np.ndarray, torch.Tensor])
+        - init method
     """
 
     def __init__(
         self,
@@ -21,15 +27,42 @@ def __init__(
         Docstring
         #Automatically cast to Tensor using the torch.from_numpy() in the __init__ using if
         """
-        raise NotImplementedError
 
-    def calculate_LPNorm(self, p: Union[int, str]):
-        raise NotImplementedError
+        if type(original) == torch.Tensor:
+            self.original = original
+        else:
+            self.original = torch.from_numpy(original)
+        print(self.original.shape)
 
-    def calculate_PSNR(self):
-        raise NotImplementedError
+        if type(perturbed) == torch.Tensor:
+            self.perturbed = perturbed
+        else:
+            self.perturbed = torch.from_numpy(perturbed)
+
+    def flatten(self, array : torch.tensor) -> torch.Tensor:
+        return array.flatten()
+
+    def totensor(self, array : np.ndarray) -> torch.Tensor:
+        return torch.from_numpy(array)
+
+    def subtract(self,original : torch.Tensor, perturbed : torch.Tensor) -> torch.Tensor:
+        return torch.sub(original, perturbed)
+
+    def calculate_LPNorm(self, p: Union[int, str]) -> float:
+        if p == 'inf':
+            return torch.linalg.vector_norm(self.flatten(self.subtract(self.original,self.perturbed)), ord = float('inf')).item()
+        elif p == 'fro':
+            return self.calculate_LPNorm(2)
+        else:
+            return torch.linalg.norm(self.flatten(self.subtract(self.original,self.perturbed)), ord = p).item()
+
+    def calculate_PSNR(self) -> float:
+        return 20 * log10(255.0/self.calculate_RMSE())
 
-    def calculate_RMSE(self):
+    def calculate_RMSE(self) -> float:
+        # return torch.sqrt(torch.mean(self.subtract(self.flatten(self.original), self.flatten(self.perturbed))**2)).item()
+        # loss = nn.MSELoss()
+        # return (loss(self.original, self.perturbed)**0.5).item()
         raise NotImplementedError
 
     def calculate_SAM(self):

From b4cbca484d5db76dcade9827baf5d51d80755ce3 Mon Sep 17 00:00:00 2001
From: devaletanmay <56354045+devaletanmay@users.noreply.github.com>
Date: Mon, 30 Aug 2021 19:31:37 +0530
Subject: [PATCH 2/3] Changed RMSE and PSNR

---
 code_soup/common/vision/perturbations.py | 8 +++-----
 1 file changed, 3 insertions(+), 5 deletions(-)

diff --git a/code_soup/common/vision/perturbations.py b/code_soup/common/vision/perturbations.py
index 0ac5671..bdc85cf 100644
--- a/code_soup/common/vision/perturbations.py
+++ b/code_soup/common/vision/perturbations.py
@@ -57,13 +57,11 @@ def calculate_LPNorm(self, p: Union[int, str]) -> float:
             return torch.linalg.norm(self.flatten(self.subtract(self.original,self.perturbed)), ord = p).item()
 
     def calculate_PSNR(self) -> float:
-        return 20 * log10(255.0/self.calculate_RMSE())
+        return 20 * log10(1.0/self.calculate_RMSE())
 
     def calculate_RMSE(self) -> float:
-        # return torch.sqrt(torch.mean(self.subtract(self.flatten(self.original), self.flatten(self.perturbed))**2)).item()
-        # loss = nn.MSELoss()
-        # return (loss(self.original, self.perturbed)**0.5).item()
-        raise NotImplementedError
+        loss = nn.MSELoss()
+        return (loss(self.original, self.perturbed)**0.5).item()
 
     def calculate_SAM(self):
         raise NotImplementedError

From 7aa48e78c7c710d521237fbcfb9512076408fe4a Mon Sep 17 00:00:00 2001
From: abhi-glitchhg <72816663+abhi-glitchhg@users.noreply.github.com>
Date: Mon, 30 Aug 2021 20:08:43 +0530
Subject: [PATCH 3/3] Updated gan.py (#74)

* Update gan.py

added zero_grad() method on discriminator; and now step method returns Generator loss

* Update gan.py
---
 code_soup/ch5/models/gan.py | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/code_soup/ch5/models/gan.py b/code_soup/ch5/models/gan.py
index 71e62c0..d8236fb 100644
--- a/code_soup/ch5/models/gan.py
+++ b/code_soup/ch5/models/gan.py
@@ -169,6 +169,8 @@ def step(self, data: torch.Tensor) -> Tuple:
                 Discriminator loss
             D_G_z2:
                 Average discriminator outputs for the all fake batch after updating discriminator
+            errG:
+                Generator loss
         """
         real_image, _ = data
         real_image = real_image.to(self.device)
@@ -176,6 +178,7 @@ def step(self, data: torch.Tensor) -> Tuple:
         label = torch.full(
             (batch_size,), self.real_label, dtype=torch.float, device=self.device
        )
+        self.discriminator.zero_grad()
         # Forward pass real batch through D
         output = self.discriminator(real_image).view(-1)
         # Calculate loss on all-real batch
@@ -211,4 +214,4 @@ def step(self, data: torch.Tensor) -> Tuple:
         D_G_z2 = output.mean().item()
         # Update G
         self.generator.optimizer.step()
-        return D_x, D_G_z1, errD, D_G_z2
+        return D_x, D_G_z1, errD, D_G_z2, errG
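A minimal usage sketch of the metrics introduced by the first two patches (not part of the patch series): it assumes the final state of code_soup/common/vision/perturbations.py after PATCH 2/3, that VisualPerturbation can be instantiated directly, and that images are scaled to [0, 1], the peak value implied by the updated PSNR formula. The input arrays and noise level below are made up for illustration.

    import numpy as np

    from code_soup.common.vision.perturbations import VisualPerturbation

    # Hypothetical inputs: a clean image batch and a lightly perturbed copy, both in [0, 1].
    rng = np.random.default_rng(0)
    original = rng.random((1, 3, 32, 32), dtype=np.float32)
    noise = rng.normal(0.0, 0.01, size=original.shape).astype(np.float32)
    perturbed = np.clip(original + noise, 0.0, 1.0)

    metric = VisualPerturbation(original, perturbed)  # ndarrays are cast to tensors in __init__
    print(metric.calculate_LPNorm(2))      # L2 norm of the flattened difference
    print(metric.calculate_LPNorm('inf'))  # L-infinity norm
    print(metric.calculate_RMSE())         # square root of nn.MSELoss over the two tensors
    print(metric.calculate_PSNR())         # 20 * log10(1.0 / RMSE)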
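PATCH 3/3 makes two changes to the GAN's step() method: the discriminator's gradients are zeroed before the real-batch forward pass, and the generator loss errG is returned alongside the existing values, so callers would now unpack five items (for example, D_x, D_G_z1, errD, D_G_z2, errG = gan.step(data)). The sketch below uses stand-in modules rather than the repository's GAN classes to illustrate why the added zero_grad() call matters: without it, gradients from the previous update would accumulate into the next one.

    import torch
    import torch.nn as nn

    # Stand-in discriminator and data; the real classes live in code_soup/ch5/models/gan.py.
    disc = nn.Linear(4, 1)
    criterion = nn.BCEWithLogitsLoss()
    x, y = torch.randn(8, 4), torch.ones(8, 1)

    for _ in range(2):
        disc.zero_grad()              # the call PATCH 3/3 adds before the real-batch forward pass
        loss = criterion(disc(x), y)
        loss.backward()               # .grad now reflects only the current batch
        # an optimizer.step() for the discriminator would follow here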