import os
import sys

import numpy as np
import matplotlib.pyplot as plt
import torch
from scipy.optimize import minimize

# Make the project root importable before loading the local modules
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))

from src import data_preparation
from src import utils
from models.small_cnn import SmallCNN
import config

dataset = data_preparation.load_dataset()

# Load the model
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = SmallCNN().to(device)
model.load_state_dict(torch.load(
    os.path.join(os.path.dirname(__file__), '..', 'checkpoints', 'smallcnn_regular', 'model-nn-epoch10.pt'),
    map_location=device
))
model.eval()

def C(x):
    """Return the model logits for a single 28x28 image as a (1, 10) numpy array."""
    with torch.no_grad():
        x = x.to(device)
        x_reshaped = x.view(1, 1, 28, 28)
        return model(x_reshaped).cpu().numpy()

def g(x_k):
    """Constraint map g: R^n -> R^10 with g_i(x) = C_i(x) - C_j(x) for target class j.

    Every component of g(x) is <= 0 exactly when the target class has the largest logit.
    """
    I = torch.eye(10, device=device)       # (10, 10) identity matrix
    ones = torch.ones(10, device=device)   # all-ones vector, shape (10,)
    ej = torch.zeros(10, device=device)    # one-hot vector for the target class, shape (10,)
    ej[config.target_class] = 1
    C_xk = model(x_k.view(1, 1, 28, 28).to(device))   # logits, shape (1, 10)
    g = (I - torch.outer(ones, ej)) @ C_xk.T           # g(x_k), shape (10, 1)
    return g.cpu().detach()
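
# A minimal sanity-check sketch of the constraint encoding: since g_i(x) = C_i(x) - C_j(x),
# every component of g(x) is non-positive exactly when the target class j attains the
# largest logit. `x_sample` is assumed here to be a 28x28 torch tensor (an assumption,
# not something the script enforces).
def check_constraint(x_sample):
    sample_logits = C(x_sample)
    constraint_satisfied = bool((g(x_sample) <= 0).all())
    print("g(x) <= 0 everywhere:", constraint_satisfied,
          "| argmax logit is target class:", int(np.argmax(sample_logits)) == config.target_class)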

def objective(x, x_k, g_val, tau):
    """Penalty objective: distance term f(x, x_k) plus tau * sum(max(0, g)^2)."""
    x_torch = torch.tensor(x, dtype=torch.float32, device=device).view(1, 28, 28)
    x_k_torch = torch.tensor(x_k, dtype=torch.float32, device=device).view(1, 28, 28)
    g_val_torch = torch.tensor(g_val, dtype=torch.float32, device=device)

    termf = utils.f(x_torch, x_k_torch)
    termp = tau * torch.sum(torch.max(torch.zeros_like(g_val_torch), g_val_torch) ** 2)
    total = termf + termp
    # total_gradient = objective_gradient_fn(x_torch, x_k_torch, g_val_torch, tau)
    return total.cpu().item()

def objective_gradient_fn(x, x_k, g_val, tau):
    # Analytic gradient of the penalty objective (currently unused; see the
    # commented-out jac=True call in spm). Note that g() detaches its output, so
    # torch.autograd.functional.jacobian needs a non-detaching variant of g to work.
    f_grad = utils.f_gradient(x, x_k)
    g_grad = torch.autograd.functional.jacobian(g, x)   # shape: (10, 1) + x.shape
    # Gradient of tau * sum(max(0, g)^2) is 2 * tau * J(g)^T max(0, g)
    p_grad = 2 * tau * (g_grad.view(10, -1).T @ torch.clamp(g_val.view(-1), min=0)).view_as(x)
    return f_grad + p_grad
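
# A minimal sketch of a value-and-gradient objective for SciPy's jac=True option (see the
# commented-out minimize call in spm below). It assumes utils.f returns a scalar torch
# tensor that is differentiable with respect to its first argument; the constraint term is
# rebuilt with autograd enabled instead of reusing the precomputed, detached g_val.
def objective_with_grad(x, x_k, tau):
    x_leaf = torch.tensor(x, dtype=torch.float32, device=device, requires_grad=True)
    x_torch = x_leaf.view(1, 28, 28)
    x_k_torch = torch.tensor(x_k, dtype=torch.float32, device=device).view(1, 28, 28)

    # Recompute the constraint values with gradient tracking
    ej = torch.zeros(10, device=device)
    ej[config.target_class] = 1
    logits = model(x_torch.view(1, 1, 28, 28))
    g_live = (torch.eye(10, device=device) - torch.outer(torch.ones(10, device=device), ej)) @ logits.T

    total = utils.f(x_torch, x_k_torch) + tau * torch.sum(torch.clamp(g_live, min=0) ** 2)
    total.backward()
    # SciPy expects the objective value and a flat float64 gradient
    return total.item(), x_leaf.grad.detach().cpu().numpy().astype(np.float64)
# Usage would mirror the existing call:
#   minimize(objective_with_grad, x_adv_np, args=(x_origin_np, tau), method='L-BFGS-B', jac=True)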

def spm(x_origin, target_class, tau=1, tau_increase_factor=10, max_iter=100):
    """Sequential penalty method: search for an adversarial example classified as target_class."""
    x_adv = x_origin.clone().detach().to(device)
    x_origin_np = x_origin.clone().detach().cpu().numpy().flatten()
    
    for iteration in range(max_iter):
        logits = C(x_adv)
        pred_class = np.argmax(logits)
        print(f"Predicted class: {pred_class}")
        # Print the raw logits
        print(logits)
        if pred_class == target_class:
            print(f"Success: Adversarial example found after {iteration + 1} iterations.")
            break

        # Compute g(x_k)
        g_val = g(x_adv).numpy()

        if iteration % 10 == 0:
            # Show the target class, the original image and the current adversarial image
            print(f"Target class: {target_class}")
            print("Original image:")
            utils.show_image(x_origin.cpu())  # move the tensor to the CPU for visualization
            # utils.plot_tensor(x_origin)

            print("Adversarial image:")
            utils.show_image(x_adv.cpu())  # move the tensor to the CPU for visualization
            # utils.plot_tensor(x_adv)

        # Run the inner minimization to update the adversarial candidate
        x_adv_np = x_adv.clone().detach().cpu().numpy().flatten()
        res = minimize(objective, x_adv_np, args=(x_origin_np, g_val, tau), method='L-BFGS-B')
        # res = minimize(objective, x_adv_np, args=(x_origin_np, g_val, tau), method='L-BFGS-B', jac=True)
        x_adv = torch.tensor(res.x, dtype=torch.float32, device=device).view(28, 28)

        # Increase the penalty parameter
        tau *= tau_increase_factor
        print(f"Tau increased to {tau}")

    if pred_class != target_class:
        print("Failed to find an adversarial example within the maximum number of iterations.")
    
    return x_adv
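
# Example usage (a minimal sketch: data_preparation.load_dataset() is assumed to return an
# indexable collection of (image, label) pairs with 28x28 images; adjust the indexing and
# conversion to the actual format used in this project).
if __name__ == "__main__":
    x_sample, y_sample = dataset[0]
    x_sample = torch.as_tensor(x_sample, dtype=torch.float32)
    print(f"Original label: {y_sample}, target class: {config.target_class}")
    x_adversarial = spm(x_sample, config.target_class)
    utils.show_image(x_adversarial.cpu())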