diff --git a/experiments.ipynb b/experiments.ipynb
index 4d2f493..0a9489a 100644
--- a/experiments.ipynb
+++ b/experiments.ipynb
@@ -430,7 +430,7 @@
     }
    ],
    "source": [
-    "attacker = NoAttacker(model, eps=0.05) # no attack\n",
+    "attacker = NoAttacker(model=model, eps=0.05) # no attack\n",
     "defender = NoDefenceDefender(model) # no defense\n",
     "acc = accuracy(attacker, defender, step_size=1)\n",
     "print(f'Accuracy: {acc:.4f}')"
@@ -878,7 +878,26 @@
     "unprotected_acc = []\n",
     "defender = NoDefenceDefender(model)\n",
     "for eps in eps_space:\n",
-    "    attacker = FGSMAttacker(model, eps=eps)\n",
+    "    attacker = FGSMAttacker(model=model, eps=eps)\n",
+    "    # Noise attack:\n",
+    "    #attacker = NoiseAttacker(model=model, eps=eps)\n",
+    "    # PGD attack:\n",
+    "    #attacker = PGDAttacker(model=model, eps=eps, num_steps=10)\n",
+    "    # Carlini and Wagner attack:\n",
+    "    #attacker = CarliniWagnerAttacker(model=model, eps=eps, num_steps=5, lr=0.01)\n",
+    "    # DeepFool attack:\n",
+    "    #attacker = DeepFoolAttacker(model=model, eps=eps, num_steps=10)\n",
+    "    # Distillation black-box attack:\n",
+    "    #student = MLP(\n",
+    "    #    window_size=window_size,\n",
+    "    #    step_size=1,\n",
+    "    #    device='cuda',\n",
+    "    #    hidden_dim=512,\n",
+    "    #    num_epochs=20,\n",
+    "    #    batch_size=512,\n",
+    "    #    lr=0.001,\n",
+    "    #    )\n",
+    "    #attacker = DistillationBlackBoxAttacker(model=model, eps=eps, student=student)\n",
     "    unprotected_acc.append(accuracy(attacker, defender, step_size=10))"
    ]
   },
@@ -1157,7 +1176,17 @@
    ],
    "source": [
     "# Adversarial training on quantized data:\n",
-    "defender = ATQDefender(model)"
+    "defender = ATQDefender(model=model, qbit=8)\n",
+    "# Adversarial training:\n",
+    "#defender = AdversarialTrainingDefender(model=model, attacker=None, lambd=1)\n",
+    "# Defense by quantization:\n",
+    "#defender = QuantizationDefender(model=model, qbit=5)\n",
+    "# Defense by distillation:\n",
+    "#defender = DistillationDefender(model=model, temp=100)\n",
+    "# Defense by regularization:\n",
+    "#defender = RegularizationDefender(model=model, regularization='l2', lambd=1., h=0.01)\n",
+    "# Defense by autoencoder:\n",
+    "#defender = AutoEncoderDefender(model=model, lr=0.001, training_attacker=None, adv_coeff=1, num_epochs=10, bottleneck_size=3)"
    ]
   },
  {
diff --git a/fdd_defense/defenders/atonquant.py b/fdd_defense/defenders/atonquant.py
index a5e6cc4..16c6944 100644
--- a/fdd_defense/defenders/atonquant.py
+++ b/fdd_defense/defenders/atonquant.py
@@ -1,7 +1,7 @@
 import numpy as np
 import random
 from fdd_defense.defenders.base import BaseDefender
-from fdd_defense.attackers import FGSMAttacker
+from fdd_defense.attackers import FGSMAttacker, PGDAttacker
 from fdd_defense.utils import weight_reset
 from tqdm.auto import trange, tqdm
 import torch
@@ -25,7 +25,7 @@ def __init__(self, model, qbit=8):
         losses = []
         for ts, _, label in tqdm(self.model.dataloader, desc='Steps ...', leave=False):
             epsilon = random.choice(self.eps)
-            attacker = FGSMAttacker(model, eps=epsilon)
+            attacker = PGDAttacker(model, eps=epsilon)
             batch_size = ts.shape[0]
             adv_ts = attacker.attack(ts, label)
             label = torch.LongTensor(label).to(self.model.device)
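
Note for reviewers: a minimal sketch of how the pieces touched by this diff fit together, for reproducing the epsilon sweep outside the notebook. It assumes a trained `model` and the notebook's `accuracy(attacker, defender, step_size=...)` helper are in scope, and that `ATQDefender` and `NoDefenceDefender` are re-exported from `fdd_defense.defenders` (only `fdd_defense.defenders.base` and `fdd_defense.defenders.atonquant` appear in the diff, so that import path is an assumption); the epsilon grid is illustrative.

    import numpy as np
    from fdd_defense.attackers import PGDAttacker
    from fdd_defense.defenders import ATQDefender, NoDefenceDefender  # assumed re-export path

    # Compare the undefended model against adversarial training on quantized
    # data (ATQDefender), mirroring the notebook cells changed above.
    # Per atonquant.py, ATQDefender runs its (now PGD-based) adversarial-training
    # loop inside __init__, so constructing it is the expensive step.
    eps_space = np.linspace(0.0, 0.2, 5)  # illustrative attack budgets
    defenders = {
        'no defense': NoDefenceDefender(model),
        'ATQ (qbit=8)': ATQDefender(model=model, qbit=8),
    }
    for name, defender in defenders.items():
        for eps in eps_space:
            attacker = PGDAttacker(model=model, eps=eps, num_steps=10)
            acc = accuracy(attacker, defender, step_size=10)
            print(f'{name}, eps={eps:.3f}: accuracy={acc:.4f}')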