# into the implementation.
#

- from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import matplotlib.pyplot as plt

- # NOTE: This is a hack to get around "User-agent" limitations when downloading MNIST datasets
- # see, https://github.com/pytorch/vision/issues/3497 for more information
- from six.moves import urllib
- opener = urllib.request.build_opener()
- opener.addheaders = [('User-agent', 'Mozilla/5.0')]
- urllib.request.install_opener(opener)
-

######################################################################
# Implementation
epsilons = [0, .05, .1, .15, .2, .25, .3]
pretrained_model = "data/lenet_mnist_model.pth"
use_cuda = True
+ # Set random seed for reproducibility
+ torch.manual_seed(42)


######################################################################
@@ -179,18 +173,18 @@ def forward(self, x):
test_loader = torch.utils.data.DataLoader(
    datasets.MNIST('../data', train=False, download=True, transform=transforms.Compose([
            transforms.ToTensor(),
-             ])),
+             ])),
        batch_size=1, shuffle=True)

# Define what device we are using
print("CUDA Available: ", torch.cuda.is_available())
- device = torch.device("cuda" if (use_cuda and torch.cuda.is_available()) else "cpu")
+ device = torch.device("cuda" if use_cuda and torch.cuda.is_available() else "cpu")

# Initialize the network
model = Net().to(device)

# Load the pretrained model
- model.load_state_dict(torch.load(pretrained_model, map_location='cpu'))
+ model.load_state_dict(torch.load(pretrained_model, weights_only=True, map_location='cpu'))

# Set the model in evaluation mode. In this case this is for the Dropout layers
model.eval()
@@ -290,7 +284,7 @@ def test( model, device, test_loader, epsilon ):
        if final_pred.item() == target.item():
            correct += 1
            # Special case for saving 0 epsilon examples
-             if (epsilon == 0) and (len(adv_examples) < 5):
+             if epsilon == 0 and len(adv_examples) < 5:
                adv_ex = perturbed_data.squeeze().detach().cpu().numpy()
                adv_examples.append((init_pred.item(), final_pred.item(), adv_ex))
        else:
@@ -301,7 +295,7 @@ def test( model, device, test_loader, epsilon ):

    # Calculate final accuracy for this epsilon
    final_acc = correct / float(len(test_loader))
-     print("Epsilon: {}\tTest Accuracy = {} / {} = {}".format(epsilon, correct, len(test_loader), final_acc))
+     print(f"Epsilon: {epsilon}\tTest Accuracy = {correct} / {len(test_loader)} = {final_acc}")

    # Return the accuracy and an adversarial example
    return final_acc, adv_examples
@@ -387,9 +381,9 @@ def test( model, device, test_loader, epsilon ):
        plt.xticks([], [])
        plt.yticks([], [])
        if j == 0:
-             plt.ylabel("Eps: {}".format(epsilons[i]), fontsize=14)
+             plt.ylabel(f"Eps: {epsilons[i]}", fontsize=14)
        orig, adv, ex = examples[i][j]
-         plt.title("{} -> {}".format(orig, adv))
+         plt.title(f"{orig} -> {adv}")
        plt.imshow(ex, cmap="gray")
plt.tight_layout()
plt.show()
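
# As context for the weights_only=True change above, the snippet below is a
# minimal, self-contained sketch of that loading pattern. It is not part of
# this commit: it assumes PyTorch >= 1.13 (where the flag is available) and
# uses a hypothetical checkpoint.pth plus a throwaway model in place of the
# tutorial's LeNet.

import torch
import torch.nn as nn

# Throwaway stand-in for the tutorial's Net; any nn.Module works here.
model = nn.Sequential(nn.Flatten(), nn.Linear(28 * 28, 10))

# Save a plain state dict, the same kind of artifact as lenet_mnist_model.pth.
torch.save(model.state_dict(), "checkpoint.pth")

# weights_only=True restricts unpickling to tensors and basic containers,
# so loading an untrusted checkpoint cannot execute arbitrary pickle code.
state_dict = torch.load("checkpoint.pth", map_location="cpu", weights_only=True)
model.load_state_dict(state_dict)
model.eval()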