
Commit 6b087cf

onurtore and Svetlana Karslioglu authored
Issue 2338 (#2458)
* Fix: amp_recipe.py fix
* Fix: amp_recipe fixed
* Fix: beginner/examples_autograd/polynomial_autograd.py
* Fix tuning_guide
* Fix nestedtensor
* Fix polynomial tensor
* Fix neural-style tutorial
* Fix cpp_extension.rst
* Fix nested style

---------

Signed-off-by: Onur Berk Töre <onurberk_t@hotmail.com>
Co-authored-by: Svetlana Karslioglu <svekars@fb.com>
1 parent 0ef9a65 · commit 6b087cf

4 files changed (+24, -20 lines)

advanced_source/neural_style_tutorial.py (+7, -6)

@@ -14,7 +14,7 @@
 developed by Leon A. Gatys, Alexander S. Ecker and Matthias Bethge.
 Neural-Style, or Neural-Transfer, allows you to take an image and
 reproduce it with a new artistic style. The algorithm takes three images,
-an input image, a content-image, and a style-image, and changes the input
+an input image, a content-image, and a style-image, and changes the input
 to resemble the content of the content-image and the artistic style of the style-image.
 
 
@@ -70,6 +70,7 @@
 # method is used to move tensors or modules to a desired device.
 
 device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+torch.set_default_device(device)
 
 ######################################################################
 # Loading the Images
@@ -261,7 +262,7 @@ def forward(self, input):
 # network to evaluation mode using ``.eval()``.
 #
 
-cnn = models.vgg19(pretrained=True).features.to(device).eval()
+cnn = models.vgg19(pretrained=True).features.eval()
 
 
 
@@ -271,8 +272,8 @@ def forward(self, input):
 # We will use them to normalize the image before sending it into the network.
 #
 
-cnn_normalization_mean = torch.tensor([0.485, 0.456, 0.406]).to(device)
-cnn_normalization_std = torch.tensor([0.229, 0.224, 0.225]).to(device)
+cnn_normalization_mean = torch.tensor([0.485, 0.456, 0.406])
+cnn_normalization_std = torch.tensor([0.229, 0.224, 0.225])
 
 # create a module to normalize input image so we can easily put it in a
 # ``nn.Sequential``
@@ -308,7 +309,7 @@ def get_style_model_and_losses(cnn, normalization_mean, normalization_std,
                                content_layers=content_layers_default,
                                style_layers=style_layers_default):
     # normalization module
-    normalization = Normalization(normalization_mean, normalization_std).to(device)
+    normalization = Normalization(normalization_mean, normalization_std)
 
     # just in order to have an iterable access to or list of content/style
     # losses
@@ -373,7 +374,7 @@ def get_style_model_and_losses(cnn, normalization_mean, normalization_std,
 #
 # ::
 #
-#    input_img = torch.randn(content_img.data.size(), device=device)
+#    input_img = torch.randn(content_img.data.size())
 
 # add the original input image to the figure:
 plt.figure()
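
Note: the pattern in this file (and the ones below) is to set a process-wide default device once, instead of threading `device=` and `.to(device)` through every tensor and module. A minimal sketch of how that behaves (assumes PyTorch 2.0+, where `torch.set_default_device` was introduced; the tensor and module names are illustrative):

    import torch

    # Pick an accelerator when available, otherwise fall back to the CPU.
    device = "cuda" if torch.cuda.is_available() else "cpu"
    torch.set_default_device(device)

    # Factory functions now allocate on `device` without an explicit argument,
    # which is why the diff can drop the trailing .to(device) calls.
    mean = torch.tensor([0.485, 0.456, 0.406])
    assert mean.device.type == device

    # Parameters of modules constructed after the call land on `device` too,
    # so Normalization(...) no longer needs an explicit move.
    layer = torch.nn.Linear(3, 3)
    assert layer.weight.device.type == device

One trade-off worth noting: `torch.set_default_device` is global state, so it suits tutorial scripts better than library code, where passing `device=` explicitly remains the safer convention.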

beginner_source/examples_autograd/polynomial_autograd.py (+7, -7)

@@ -18,23 +18,23 @@
 import math
 
 dtype = torch.float
-device = torch.device("cpu")
-# device = torch.device("cuda:0")  # Uncomment this to run on GPU
+device = "cuda" if torch.cuda.is_available() else "cpu"
+torch.set_default_device(device)
 
 # Create Tensors to hold input and outputs.
 # By default, requires_grad=False, which indicates that we do not need to
 # compute gradients with respect to these Tensors during the backward pass.
-x = torch.linspace(-math.pi, math.pi, 2000, device=device, dtype=dtype)
+x = torch.linspace(-math.pi, math.pi, 2000, dtype=dtype)
 y = torch.sin(x)
 
 # Create random Tensors for weights. For a third order polynomial, we need
 # 4 weights: y = a + b x + c x^2 + d x^3
 # Setting requires_grad=True indicates that we want to compute gradients with
 # respect to these Tensors during the backward pass.
-a = torch.randn((), device=device, dtype=dtype, requires_grad=True)
-b = torch.randn((), device=device, dtype=dtype, requires_grad=True)
-c = torch.randn((), device=device, dtype=dtype, requires_grad=True)
-d = torch.randn((), device=device, dtype=dtype, requires_grad=True)
+a = torch.randn((), dtype=dtype, requires_grad=True)
+b = torch.randn((), dtype=dtype, requires_grad=True)
+c = torch.randn((), dtype=dtype, requires_grad=True)
+d = torch.randn((), dtype=dtype, requires_grad=True)
 
 learning_rate = 1e-6
 for t in range(2000):
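
For context, the weights above feed the tutorial's third-order polynomial fit. A condensed, self-contained sketch of one optimization step under the same default-device setup (the loop body is paraphrased from the rest of the file, which this diff does not show):

    import math
    import torch

    torch.set_default_device("cuda" if torch.cuda.is_available() else "cpu")

    x = torch.linspace(-math.pi, math.pi, 2000)
    y = torch.sin(x)
    a, b, c, d = (torch.randn((), requires_grad=True) for _ in range(4))

    y_pred = a + b * x + c * x ** 2 + d * x ** 3  # forward pass
    loss = (y_pred - y).pow(2).sum()              # scalar loss
    loss.backward()                               # fills a.grad ... d.grad
    with torch.no_grad():                         # update outside autograd
        for w in (a, b, c, d):
            w -= 1e-6 * w.grad
            w.grad = None

Because every tensor here inherits the default device, the same script runs unchanged on CPU and GPU.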

recipes_source/recipes/amp_recipe.py (+9, -6)

@@ -76,11 +76,14 @@ def make_model(in_size, out_size, num_layers):
 num_batches = 50
 epochs = 3
 
+device = 'cuda' if torch.cuda.is_available() else 'cpu'
+torch.set_default_device(device)
+
 # Creates data in default precision.
 # The same data is used for both default and mixed precision trials below.
 # You don't need to manually change inputs' ``dtype`` when enabling mixed precision.
-data = [torch.randn(batch_size, in_size, device="cuda") for _ in range(num_batches)]
-targets = [torch.randn(batch_size, out_size, device="cuda") for _ in range(num_batches)]
+data = [torch.randn(batch_size, in_size) for _ in range(num_batches)]
+targets = [torch.randn(batch_size, out_size) for _ in range(num_batches)]
 
 loss_fn = torch.nn.MSELoss().cuda()
 
@@ -116,7 +119,7 @@ def make_model(in_size, out_size, num_layers):
 for epoch in range(0): # 0 epochs, this section is for illustration only
     for input, target in zip(data, targets):
         # Runs the forward pass under ``autocast``.
-        with torch.autocast(device_type='cuda', dtype=torch.float16):
+        with torch.autocast(device_type=device, dtype=torch.float16):
             output = net(input)
             # output is float16 because linear layers ``autocast`` to float16.
             assert output.dtype is torch.float16
@@ -151,7 +154,7 @@ def make_model(in_size, out_size, num_layers):
 
 for epoch in range(0): # 0 epochs, this section is for illustration only
     for input, target in zip(data, targets):
-        with torch.autocast(device_type='cuda', dtype=torch.float16):
+        with torch.autocast(device_type=device, dtype=torch.float16):
             output = net(input)
             loss = loss_fn(output, target)
 
@@ -184,7 +187,7 @@ def make_model(in_size, out_size, num_layers):
 start_timer()
 for epoch in range(epochs):
     for input, target in zip(data, targets):
-        with torch.autocast(device_type='cuda', dtype=torch.float16, enabled=use_amp):
+        with torch.autocast(device_type=device, dtype=torch.float16, enabled=use_amp):
             output = net(input)
             loss = loss_fn(output, target)
         scaler.scale(loss).backward()
@@ -202,7 +205,7 @@ def make_model(in_size, out_size, num_layers):
 
 for epoch in range(0): # 0 epochs, this section is for illustration only
     for input, target in zip(data, targets):
-        with torch.autocast(device_type='cuda', dtype=torch.float16):
+        with torch.autocast(device_type=device, dtype=torch.float16):
             output = net(input)
             loss = loss_fn(output, target)
         scaler.scale(loss).backward()
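
One portability caveat with `device_type=device` (an observation about the surrounding recipe, not part of the diff): CPU autocast is primarily specified for `torch.bfloat16`, and `torch.cuda.amp.GradScaler` is CUDA-only. A hedged sketch that keeps an autocast region runnable on both backends by picking the dtype per device (the module and sizes are illustrative):

    import torch

    device = "cuda" if torch.cuda.is_available() else "cpu"
    # float16 is the usual autocast dtype on CUDA; bfloat16 is the
    # commonly supported reduced precision on CPU.
    amp_dtype = torch.float16 if device == "cuda" else torch.bfloat16

    net = torch.nn.Linear(8, 8).to(device)
    x = torch.randn(4, 8, device=device)

    with torch.autocast(device_type=device, dtype=amp_dtype):
        out = net(x)  # linear layers run in the reduced dtype
    print(out.dtype)  # torch.float16 on CUDA, torch.bfloat16 on CPU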

recipes_source/recipes/tuning_guide.py (+1, -1)

@@ -357,7 +357,7 @@ def fused_gelu(x):
 # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 # Instead of calling ``torch.rand(size).cuda()`` to generate a random tensor,
 # produce the output directly on the target device:
-# ``torch.rand(size, device=torch.device('cuda'))``.
+# ``torch.rand(size, device='cuda')``.
 #
 # This is applicable to all functions which create new tensors and accept
 # ``device`` argument:
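
The one-line change above generalizes to any factory function that accepts a `device` argument. A small sketch of the difference (assumes a CUDA device is available; `size` is illustrative):

    import torch

    size = (4096, 4096)

    # Allocates on the CPU first, then copies: an extra allocation + transfer.
    slow = torch.rand(size).cuda()

    # Allocates directly on the GPU, skipping the host round-trip.
    fast = torch.rand(size, device="cuda")

    # The same keyword works across factory functions.
    zeros = torch.zeros(size, device="cuda")
    ones = torch.ones(size, device="cuda")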
