Commit 3aa5d06

Remove unnecessary changes
1 parent 4c3dd3e commit 3aa5d06

1 file changed

intermediate_source/tensorboard_profiler_tutorial.py (+15 -18)
@@ -57,19 +57,18 @@
 # Transform it to the desired format and use ``DataLoader`` to load each batch.
 
 transform = T.Compose(
-    [T.Resize(224), T.ToTensor(), T.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]
-)
-train_set = torchvision.datasets.CIFAR10(
-    root="./data", train=True, download=True, transform=transform
-)
+    [T.Resize(224),
+     T.ToTensor(),
+     T.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
+train_set = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transform)
 train_loader = torch.utils.data.DataLoader(train_set, batch_size=32, shuffle=True)
 
 ######################################################################
 # Next, create Resnet model, loss function, and optimizer objects.
 # To run on GPU, move model and loss to GPU device.
 
 device = torch.device("cuda:0")
-model = torchvision.models.resnet18(weights="IMAGENET1K_V1").cuda(device)
+model = torchvision.models.resnet18(weights='IMAGENET1K_V1').cuda(device)
 criterion = torch.nn.CrossEntropyLoss().cuda(device)
 optimizer = torch.optim.SGD(model.parameters(), lr=0.001, momentum=0.9)
 model.train()
@@ -78,7 +77,6 @@
 ######################################################################
 # Define the training step for each batch of input data.
 
-
 def train(data):
     inputs, labels = data[0].to(device=device), data[1].to(device=device)
     outputs = model(inputs)
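
The diff context cuts ``train`` off after the forward pass. For orientation only (not part of this commit), a minimal sketch of how the rest of the step is typically written with the ``criterion`` and ``optimizer`` objects created above:

def train(data):
    inputs, labels = data[0].to(device=device), data[1].to(device=device)
    outputs = model(inputs)
    loss = criterion(outputs, labels)  # cross-entropy loss on this batch
    optimizer.zero_grad()              # clear gradients from the previous step
    loss.backward()                    # backpropagate
    optimizer.step()                   # apply the SGD update
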
@@ -122,11 +120,11 @@ def train(data):
 # clicking a stack frame will navigate to the specific code line.
 
 with torch.profiler.profile(
-    schedule=torch.profiler.schedule(wait=1, warmup=1, active=3, repeat=1),
-    on_trace_ready=torch.profiler.tensorboard_trace_handler("./log/resnet18"),
-    record_shapes=True,
-    profile_memory=True,
-    with_stack=True,
+        schedule=torch.profiler.schedule(wait=1, warmup=1, active=3, repeat=1),
+        on_trace_ready=torch.profiler.tensorboard_trace_handler('./log/resnet18'),
+        record_shapes=True,
+        profile_memory=True,
+        with_stack=True
 ) as prof:
     for step, batch_data in enumerate(train_loader):
         prof.step()  # Need to call this at each step to notify profiler of steps' boundary.
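
The schedule used here means the profiler idles for one step (``wait=1``), warms up for one (``warmup=1``), records three (``active=3``), and does this once (``repeat=1``); each ``prof.step()`` call advances that cycle, and ``tensorboard_trace_handler`` writes a trace into ``./log/resnet18`` when the active window completes. The loop body is truncated by the diff context; a minimal sketch of how the loop inside the ``with`` block is usually finished (the bound and ``break`` are illustrative, not part of this commit):

    for step, batch_data in enumerate(train_loader):
        prof.step()            # advance the profiler schedule at each step boundary
        if step >= 1 + 1 + 3:  # wait + warmup + active steps have all been covered
            break
        train(batch_data)      # run one training step on this batch

The resulting traces can then be opened in TensorBoard, assuming the ``torch_tb_profiler`` plugin is installed, e.g. with ``tensorboard --logdir=./log``.
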
@@ -137,11 +135,10 @@ def train(data):
 ######################################################################
 # Alternatively, the following non-context manager start/stop is supported as well.
 prof = torch.profiler.profile(
-    schedule=torch.profiler.schedule(wait=1, warmup=1, active=3, repeat=1),
-    on_trace_ready=torch.profiler.tensorboard_trace_handler("./log/resnet18"),
-    record_shapes=True,
-    with_stack=True,
-)
+        schedule=torch.profiler.schedule(wait=1, warmup=1, active=3, repeat=1),
+        on_trace_ready=torch.profiler.tensorboard_trace_handler('./log/resnet18'),
+        record_shapes=True,
+        with_stack=True)
 prof.start()
 for step, batch_data in enumerate(train_loader):
     prof.step()
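
In this non-context-manager form the profiler must be stopped explicitly, which the hunk cuts off before showing; a minimal sketch of the complete start/step/stop pattern (the loop bound is illustrative):

prof.start()
for step, batch_data in enumerate(train_loader):
    prof.step()            # advance the profiler schedule
    if step >= 1 + 1 + 3:
        break
    train(batch_data)
prof.stop()                # stop profiling and flush any pending trace
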
@@ -359,7 +356,7 @@ def train(data):
 # ``aten::empty`` to allocate memory. For example, ``aten::ones`` is implemented as ``aten::empty`` followed by an
 # ``aten::fill_``. Solely display the operator name as ``aten::empty`` is of little help. It will be shown as
 # ``aten::ones (aten::empty)`` in this special case. The "Allocation Time", "Release Time" and "Duration"
-# columns' data might be missing if the event occurs outside of the time range.
+# columns' data might be missing if the event occurs outside of the time range.
 #
 # In the memory statistics table, the "Size Increase" column sums up all allocation size and minus all the memory
 # release size, that is, the net increase of memory usage after this operator. The "Self Size Increase" column is
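
The memory view described in these comments lives in the TensorBoard plugin, but per-operator memory counters can also be read straight from the ``profile`` object; a minimal sketch, assuming the profiler was run with ``profile_memory=True`` as in the earlier hunk:

# Aggregate events per operator and sort by the memory each operator itself allocated.
print(prof.key_averages().table(sort_by="self_cuda_memory_usage", row_limit=10))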
