Skip to content

Commit

Permalink
Merge pull request #8 from AIRI-Institute/notebook
Browse files Browse the repository at this point in the history
Notebook
  • Loading branch information
KovalenkoAE authored Aug 7, 2024
2 parents 74a7228 + 0666026 commit a55bc71
Show file tree
Hide file tree
Showing 2 changed files with 34 additions and 5 deletions.
35 changes: 32 additions & 3 deletions experiments.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -430,7 +430,7 @@
}
],
"source": [
"attacker = NoAttacker(model, eps=0.05) # no attack\n",
"attacker = NoAttacker(model=model, eps=0.05) # no attack\n",
"defender = NoDefenceDefender(model) # no defense\n",
"acc = accuracy(attacker, defender, step_size=1)\n",
"print(f'Accuracy: {acc:.4f}')"
Expand Down Expand Up @@ -878,7 +878,26 @@
"unprotected_acc = []\n",
"defender = NoDefenceDefender(model)\n",
"for eps in eps_space:\n",
" attacker = FGSMAttacker(model, eps=eps)\n",
" attacker = FGSMAttacker(model=model, eps=eps)\n",
" # Noise attack:\n",
" #attacker = NoiseAttacker(model=model, eps=eps)\n",
" # PGD attack:\n",
" #attacker = PGDAttacker(model=model, eps=eps, num_steps=10)\n",
" # Carlini and Wagner attack:\n",
"    #attacker = CarliniWagnerAttacker(model=model, eps=eps, num_steps=5, lr=0.01)\n",
" # Deep Fool attack:\n",
" #attacker = DeepFoolAttacker(model=model, eps=eps, num_steps=10)\n",
" # Distillation black box attack:\n",
" #student = MLP(\n",
" # window_size=window_size,\n",
" # step_size=1,\n",
" # device='cuda',\n",
" # hidden_dim=512,\n",
" # num_epochs=20,\n",
" # batch_size=512,\n",
" # lr=0.001,\n",
" # )\n",
" #attacker = DistillationBlackBoxAttacker(model=model, eps=eps, student=student)\n",
" unprotected_acc.append(accuracy(attacker, defender, step_size=10))"
]
},
Expand Down Expand Up @@ -1157,7 +1176,17 @@
],
"source": [
"# Adversarial training on quantized data:\n",
"defender = ATQDefender(model)"
"defender = ATQDefender(model=model, qbit=8)\n",
"# Adversarial training:\n",
"#defender = AdversarialTrainingDefender(model=model, attacker=None, lambd=1)\n",
"# Defense by quantization:\n",
"#defender = QuantizationDefender(model=model, qbit=5)\n",
"# Defense by distillation:\n",
"#defender = DistillationDefender(model=model, temp=100)\n",
"# Defense by regularization:\n",
"#defender = RegularizationDefender(model=model, regularization='l2', lambd=1., h=0.01)\n",
"# Defense by autoencoder:\n",
"#defender = AutoEncoderDefender(model=model, lr=0.001, training_attacker=None, adv_coeff=1, num_epochs=10, bottleneck_size=3)"
]
},
{
Expand Down
4 changes: 2 additions & 2 deletions fdd_defense/defenders/atonquant.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
import numpy as np
import random
from fdd_defense.defenders.base import BaseDefender
from fdd_defense.attackers import FGSMAttacker
from fdd_defense.attackers import FGSMAttacker, PGDAttacker
from fdd_defense.utils import weight_reset
from tqdm.auto import trange, tqdm
import torch
Expand All @@ -25,7 +25,7 @@ def __init__(self, model, qbit=8):
losses = []
for ts, _, label in tqdm(self.model.dataloader, desc='Steps ...', leave=False):
epsilon = random.choice(self.eps)
attacker = FGSMAttacker(model, eps=epsilon)
attacker = PGDAttacker(model, eps=epsilon)
batch_size = ts.shape[0]
adv_ts = attacker.attack(ts, label)
label = torch.LongTensor(label).to(self.model.device)
Expand Down

0 comments on commit a55bc71

Please sign in to comment.