# Reproductions of several adversarial attack methods (FGSM, BIM, C&W, least-likely-class)

import torch
from torch import nn
import matplotlib.pyplot as plt

def clip(x, x_, eps):
    """Project the perturbed tensor ``x_`` onto the eps-ball around ``x``,
    intersected with the valid pixel box [0, 1].

    Element-wise this computes: min(1, x + eps, max(0, x - eps, x_)).
    """
    zeros = torch.zeros_like(x)
    ones = torch.ones_like(x)
    lower_bounded = torch.maximum(torch.maximum(zeros, x - eps), x_)
    return torch.minimum(torch.minimum(ones, x + eps), lower_bounded)


def train_adv_examples(
        model: nn.Module, loss_fct: callable, adv_examples: torch.Tensor, adv_targets: torch.Tensor,
        epochs: int = 10, alpha: float = 1.0, clip_eps: float = (1 / 255) * 8, do_clip: bool = False, minimize: bool = False
):
    """Iteratively craft adversarial examples via signed-gradient steps.

    Args:
        model: classifier under attack (set to eval mode here).
        loss_fct: callable mapping (model output, adv_targets) to a scalar loss.
        adv_examples: starting inputs; not modified in place.
        adv_targets: targets fed to ``loss_fct``.
        epochs: number of signed-gradient steps.
        alpha: step size per iteration.
        clip_eps: radius of the eps-ball used when ``do_clip`` is set.
        do_clip: project each iterate back onto the eps-ball around the
            original input, intersected with [0, 1] (BIM-style).
        minimize: descend the loss instead of ascending it
            (used by the least-likely-class attack).

    Returns:
        The adversarial examples after ``epochs`` steps (detached tensor).
    """
    model.eval()

    # Work on a detached copy: the original code flipped `requires_grad` on the
    # caller's tensor, which mutates caller state and raises for non-leaf inputs.
    adv_examples = adv_examples.detach().clone()
    # BUG FIX: BIM must clip against the *original* input; the original code
    # clipped against the previous iterate, letting the total perturbation
    # grow by `clip_eps` every step.
    orig_examples = adv_examples.clone()

    direction = -1.0 if minimize else 1.0  # loop-invariant; hoisted

    for _ in range(epochs):
        adv_examples.requires_grad = True
        model.zero_grad()  # keep parameter grads clean; only input grads are used

        adv_out = model(adv_examples)
        loss = loss_fct(adv_out, adv_targets)
        loss.backward()

        adv_grad = adv_examples.grad
        adv_examples = adv_examples.detach()

        stepped = adv_examples + direction * alpha * adv_grad.sign()

        adv_examples = clip(orig_examples, stepped, clip_eps) if do_clip else stepped

    return adv_examples


def train_adv_fgsm(
        model: nn.Module, loss_fct: callable, adv_examples: torch.Tensor, adv_targets: torch.Tensor,
        epochs: int = 10, alpha: float = 0.1
):
    """FGSM-style attack: unclipped, loss-maximizing signed-gradient steps."""
    options = dict(epochs=epochs, alpha=alpha, do_clip=False, minimize=False)
    return train_adv_examples(model, loss_fct, adv_examples, adv_targets, **options)


def train_adv_bim(
        model: nn.Module, loss_fct: callable, adv_examples: torch.Tensor, adv_targets: torch.Tensor,
        epochs: int = 10, alpha: float = 1.0, clip_eps: float = (1 / 255) * 8
):
    """Basic Iterative Method: signed-gradient ascent with eps-ball clipping."""
    options = dict(epochs=epochs, alpha=alpha, clip_eps=clip_eps, do_clip=True, minimize=False)
    return train_adv_examples(model, loss_fct, adv_examples, adv_targets, **options)

def train_adv_cw(
        model: nn.Module, adv_examples: torch.Tensor, adv_target: int = 3, iteration: int = 5000, lr: float = 0.01, c: float = 1
):
    """Carlini & Wagner L2 targeted attack via gradient descent in tanh space.

    Args:
        model: classifier returning per-class scores.
        adv_examples: input batch, assumed to have values in [0, 1]
            (box_min/box_max below) — confirm against the caller's data.
        adv_target: index of the class the attack should force.
        iteration: number of optimization steps on the modifier.
        lr: step size for the modifier update.
        c: weight of the misclassification loss relative to the L2 distance.

    Returns:
        The adversarial images, constrained to [0, 1] by the tanh reparametrization.

    Side effects: shows three matplotlib diagnostic plots of the loss curves.
    """
    def create_single_adv(model, adv_examples, adv_target, iteration, lr, c):
        box_max = 1
        box_min = 0
        box_mul = (box_max - box_min) / 2
        box_plus = (box_min + box_max) / 2
        modifier = torch.zeros_like(adv_examples, requires_grad=True)
        l2dist_list = []
        loss2_list = []
        loss_list = []
        model.eval()
        # Change of variables: optimize w with x_adv = tanh(arctanh(x') + w),
        # which keeps x_adv inside the box. The 0.99999 factor avoids
        # arctanh(+-1) = inf on extreme pixel values.
        adv_examples_c = torch.arctanh((adv_examples - box_plus) / box_mul * 0.99999)
        for i in range(iteration):
            # BUG FIX: the original used `adv_examples` (the raw image) here
            # instead of its arctanh image `adv_examples_c`, breaking the
            # change-of-variables box constraint.
            new_example = torch.tanh(adv_examples_c + modifier) * box_mul + box_plus
            l2dist = torch.dist(new_example, adv_examples, p=2)
            output = model(new_example)
            # Attack objective: the target-class score must beat the best
            # other class by at least the 0.01 confidence margin.
            onehot = torch.zeros_like(output)
            onehot[:, adv_target] = 1
            others = torch.max((1 - onehot) * output, dim=1).values
            real = torch.sum(output * onehot, dim=1)
            loss2 = torch.sum(torch.maximum(torch.zeros_like(others) - 0.01, others - real))
            loss = l2dist + c * loss2

            # Detach before logging so the lists do not pin autograd graphs.
            l2dist_list.append(l2dist.detach())
            loss2_list.append(loss2.detach())
            loss_list.append(loss.detach())

            if modifier.grad is not None:
                modifier.grad.zero_()
            loss.backward()

            # Plain gradient-descent step; rebuild a fresh leaf each iteration.
            modifier = (modifier - modifier.grad * lr).detach()
            modifier.requires_grad = True

        def plot_loss(values, loss_name):
            # Diagnostic curve; values were detached when logged.
            plt.figure()
            plt.plot(range(len(values)), [v.numpy() for v in values])
            plt.xlabel('iteration times')
            plt.ylabel(loss_name)
            plt.show()

        plot_loss(l2dist_list, 'l2 distance loss')
        plot_loss(loss2_list, 'category loss')
        plot_loss(loss_list, 'all loss')
        # BUG FIX: also use the arctanh image when reconstructing the result.
        return torch.tanh(adv_examples_c + modifier) * box_mul + box_plus

    # BUG FIX: `c` was silently dropped in the original call (default 1 was
    # always used, ignoring the caller's value).
    return create_single_adv(model, adv_examples, adv_target, iteration, lr, c)

def train_adv_least_likely(
        model: nn.Module, loss_fct: callable, adv_examples: torch.Tensor,
        epochs: int = 10, alpha: float = 0.1, clip_eps: float = (1 / 255) * 8
):
    """Least-likely-class attack: minimize the loss toward the class the
    model currently rates lowest, with eps-ball clipping."""
    model.eval()
    # Pick, per sample, the class the model considers least probable.
    logits = model(adv_examples)
    least_likely = logits.argmin(dim=1).detach()
    return train_adv_examples(
        model,
        loss_fct,
        adv_examples,
        least_likely,
        epochs=epochs,
        alpha=alpha,
        do_clip=True,
        clip_eps=clip_eps,
        minimize=True,
    )

# (removed: CSDN blog paywall/comment boilerplate accidentally captured with the source;
#  it was not Python and broke the module.)