diff --git a/pix2pixHD_attack/models/pix2pixHD_model.py b/pix2pixHD_attack/models/pix2pixHD_model.py
index dd99e48..daa2d62 100755
--- a/pix2pixHD_attack/models/pix2pixHD_model.py
+++ b/pix2pixHD_attack/models/pix2pixHD_model.py
@@ -241,7 +241,7 @@ class Pix2PixHDModel(BaseModel):
         else:
             input_concat = input_label
 
-        input_adv = torch.clamp(input_concat + perturb * 1000, min=-1, max=1)
+        input_adv = torch.clamp(input_concat + perturb, min=-1, max=1)
 
         with torch.no_grad():
             fake_image = self.netG.forward(input_adv)
diff --git a/pix2pixHD_attack/test.py b/pix2pixHD_attack/test.py
index 6c5296c..198a538 100755
--- a/pix2pixHD_attack/test.py
+++ b/pix2pixHD_attack/test.py
@@ -52,14 +52,15 @@ for i, data in enumerate(dataset):
         exit(0)
     minibatch = 1 
-    if i == 0:
-        adv_image, perturb = model.attack(data['label'], data['inst'], data['image'])
+    # if i == 0:
+    #     adv_image, perturb = model.attack(data['label'], data['inst'], data['image'])
 
     if opt.engine:
         generated = run_trt_engine(opt.engine, minibatch, [data['label'], data['inst']])
     elif opt.onnx:
         generated = run_onnx(opt.onnx, opt.data_type, minibatch, [data['label'], data['inst']])
     else:
         # generated = model.inference(data['label'], data['inst'], data['image'])
+        adv_image, perturb = model.attack(data['label'], data['inst'], data['image'])
         generated, adv_img = model.inference_attack(data['label'], data['inst'], data['image'], perturb)
 
     visuals = OrderedDict([('input_label', util.tensor2label(adv_img.data[0], opt.label_nc)),
diff --git a/pix2pixHD_attack/util/attacks.py b/pix2pixHD_attack/util/attacks.py
index 54db657..b6296ff 100644
--- a/pix2pixHD_attack/util/attacks.py
+++ b/pix2pixHD_attack/util/attacks.py
@@ -7,7 +7,7 @@ import torch
 import torch.nn as nn
 
 class LinfPGDAttack(object):
-    def __init__(self, model=None, epsilon=0.2, k=1, a=0.01):
+    def __init__(self, model=None, epsilon=0.05, k=1, a=0.05):
         self.model = model
         self.epsilon = epsilon
         self.k = k