
Commit cdef251

Author: Svetlana Karslioglu
Merge branch 'main' into patch-3
2 parents 9a04ceb + e1ec4bd

10 files changed (+52, -35)

.github/PULL_REQUEST_TEMPLATE.md (+1, -1)

@@ -8,4 +8,4 @@ Fixes #ISSUE_NUMBER
 - [ ] The issue that is being fixed is referred in the description (see above "Fixes #ISSUE_NUMBER")
 - [ ] Only one issue is addressed in this pull request
 - [ ] Labels from the issue that this PR is fixing are added to this pull request
-- [ ] No unnessessary issues are included into this pull request.
+- [ ] No unnecessary issues are included into this pull request.

.github/scripts/docathon-label-sync.py (+3)

@@ -14,6 +14,9 @@ def main():
     repo = g.get_repo(f'{repo_owner}/{repo_name}')
     pull_request = repo.get_pull(pull_request_number)
     pull_request_body = pull_request.body
+    # PR without description
+    if pull_request_body is None:
+        return

     # get issue number from the PR body
     if not re.search(r'#\d{1,5}', pull_request_body):
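
The new guard matters because PyGithub returns None, not an empty string, for a pull request that has no description, and re.search raises a TypeError when handed None. A minimal sketch of the guarded flow, assuming PyGithub; the repo name and PR number are illustrative stand-ins for the values the real script reads from its environment:

    import os
    import re

    from github import Github  # PyGithub

    def main():
        g = Github(os.environ["GITHUB_TOKEN"])
        repo = g.get_repo("pytorch/tutorials")   # illustrative
        pull_request = repo.get_pull(42)         # illustrative PR number
        pull_request_body = pull_request.body

        # PR without description: body is None (not ""), so return
        # before re.search can raise a TypeError.
        if pull_request_body is None:
            return

        # get issue number from the PR body
        if not re.search(r'#\d{1,5}', pull_request_body):
            print("No linked issue found in the PR body.")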
(new file) (+10)

@@ -0,0 +1,10 @@
+Finetuning Torchvision Models
+=============================
+
+This tutorial has been moved to https://pytorch.org/tutorials/intermediate/torchvision_tutorial.html
+
+It will redirect in 3 seconds.
+
+.. raw:: html
+
+   <meta http-equiv="Refresh" content="3; url='https://pytorch.org/tutorials/intermediate/torchvision_tutorial.html'" />

beginner_source/introyt/tensorboardyt_tutorial.py (+7)

@@ -64,6 +64,13 @@
 # PyTorch TensorBoard support
 from torch.utils.tensorboard import SummaryWriter

+# In case you are using an environment that has TensorFlow installed,
+# such as Google Colab, uncomment the following code to avoid
+# a bug with saving embeddings to your TensorBoard directory
+
+# import tensorflow as tf
+# import tensorboard as tb
+# tf.io.gfile = tb.compat.tensorflow_stub.io.gfile

 ######################################################################
 # Showing Images in TensorBoard
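
The workaround monkey-patches tf.io.gfile with TensorBoard's pure-Python stub so that writing embeddings no longer routes through TensorFlow's file I/O, which is what breaks when TensorFlow is present. A hedged sketch of where the patch sits relative to an embedding call; the log directory and data are made up:

    import torch
    from torch.utils.tensorboard import SummaryWriter

    # Uncomment when TensorFlow is installed (e.g. on Google Colab):
    # import tensorflow as tf
    # import tensorboard as tb
    # tf.io.gfile = tb.compat.tensorflow_stub.io.gfile

    writer = SummaryWriter("runs/embedding_demo")
    features = torch.randn(100, 16)             # 100 fake 16-d vectors
    labels = [str(i % 10) for i in range(100)]  # fake class labels
    writer.add_embedding(features, metadata=labels)
    writer.close()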

beginner_source/nn_tutorial.py (+1, -2)

@@ -795,8 +795,7 @@ def __len__(self):
         return len(self.dl)

     def __iter__(self):
-        batches = iter(self.dl)
-        for b in batches:
+        for b in self.dl:
             yield (self.func(*b))

 train_dl, valid_dl = get_data(train_ds, valid_ds, bs)
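
The simplification is safe because a for statement already calls iter() on its operand, so binding iter(self.dl) to a local first added nothing. For context, a sketch of the wrapper class this __iter__ belongs to, reconstructed from the surrounding diff (comments are mine):

    class WrappedDataLoader:
        def __init__(self, dl, func):
            self.dl = dl      # the underlying DataLoader
            self.func = func  # preprocessing applied to each batch

        def __len__(self):
            return len(self.dl)

        def __iter__(self):
            # `for` calls iter(self.dl) implicitly; no explicit iter() needed
            for b in self.dl:
                yield (self.func(*b))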

beginner_source/transformer_tutorial.py (+1, -1)

@@ -149,7 +149,7 @@ def forward(self, x: Tensor) -> Tensor:
 # into ``batch_size`` columns. If the data does not divide evenly into
 # ``batch_size`` columns, then the data is trimmed to fit. For instance, with
 # the alphabet as the data (total length of 26) and ``batch_size=4``, we would
-# divide the alphabet into 4 sequences of length 6:
+# divide the alphabet into sequences of length 6, resulting in 4 such sequences.
 #
 # .. math::
 #   \begin{bmatrix}
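
The corrected sentence matches the arithmetic: 26 // 4 = 6, so 24 letters are kept (2 are trimmed) and reshaped into 4 sequences of length 6. A small sketch in the spirit of the tutorial's batchify; the function body here is an assumption, not the tutorial's exact code:

    import torch

    def batchify(data: torch.Tensor, bsz: int) -> torch.Tensor:
        seq_len = data.size(0) // bsz   # 26 // 4 = 6
        data = data[:seq_len * bsz]     # trim the 2 leftover elements
        # bsz rows of length seq_len, transposed to shape [seq_len, bsz]
        return data.view(bsz, seq_len).t().contiguous()

    alphabet = torch.arange(26)         # stand-in for A..Z
    print(batchify(alphabet, 4).shape)  # torch.Size([6, 4])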

intermediate_source/mario_rl_tutorial.py (+6, -5)

@@ -711,17 +711,18 @@ def record(self, episode, epsilon, step):
             f"{datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S'):>20}\n"
         )

-        for metric in ["ep_rewards", "ep_lengths", "ep_avg_losses", "ep_avg_qs"]:
-            plt.plot(getattr(self, f"moving_avg_{metric}"))
-            plt.savefig(getattr(self, f"{metric}_plot"))
+        for metric in ["ep_lengths", "ep_avg_losses", "ep_avg_qs", "ep_rewards"]:
             plt.clf()
+            plt.plot(getattr(self, f"moving_avg_{metric}"), label=f"moving_avg_{metric}")
+            plt.legend()
+            plt.savefig(getattr(self, f"{metric}_plot"))


 ######################################################################
 # Let’s play!
 # """""""""""""""
 #
-# In this example we run the training loop for 10 episodes, but for Mario to truly learn the ways of
+# In this example we run the training loop for 40 episodes, but for Mario to truly learn the ways of
 # his world, we suggest running the loop for at least 40,000 episodes!
 #
 use_cuda = torch.cuda.is_available()

@@ -735,7 +736,7 @@ def record(self, episode, epsilon, step):

 logger = MetricLogger(save_dir)

-episodes = 10
+episodes = 40
 for e in range(episodes):

     state = env.reset()
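
The reordering matters because pyplot draws onto the current figure: without plt.clf() before each plt.plot, every curve plotted so far would accumulate into each saved image. A standalone illustration with made-up metric data:

    import matplotlib.pyplot as plt

    # Hypothetical stand-ins for the logger's moving-average lists.
    metrics = {
        "ep_lengths": [10, 12, 15, 14],
        "ep_avg_losses": [0.9, 0.7, 0.6, 0.55],
    }

    for name, values in metrics.items():
        plt.clf()                     # start each metric on a blank figure
        plt.plot(values, label=name)  # the label feeds the legend
        plt.legend()
        plt.savefig(f"{name}_plot.jpg")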

intermediate_source/tensorboard_profiler_tutorial.py (+7, -7)

@@ -18,7 +18,7 @@
 -----
 To install ``torch`` and ``torchvision`` use the following command:

-::
+.. code-block::

   pip install torch torchvision

@@ -160,23 +160,23 @@ def train(data):
 #
 # Install PyTorch Profiler TensorBoard Plugin.
 #
-# ::
+# .. code-block::
 #
 #     pip install torch_tb_profiler
 #

 ######################################################################
 # Launch the TensorBoard.
 #
-# ::
+# .. code-block::
 #
 #     tensorboard --logdir=./log
 #

 ######################################################################
 # Open the TensorBoard profile URL in Google Chrome browser or Microsoft Edge browser.
 #
-# ::
+# .. code-block::
 #
 #     http://localhost:6006/#pytorch_profiler
 #

@@ -287,7 +287,7 @@ def train(data):
 # In this example, we follow the "Performance Recommendation" and set ``num_workers`` as below,
 # pass a different name such as ``./log/resnet18_4workers`` to ``tensorboard_trace_handler``, and run it again.
 #
-# ::
+# .. code-block::
 #
 #     train_loader = torch.utils.data.DataLoader(train_set, batch_size=32, shuffle=True, num_workers=4)
 #

@@ -316,7 +316,7 @@ def train(data):
 #
 # You can try it by using existing example on Azure
 #
-# ::
+# .. code-block::
 #
 #     pip install azure-storage-blob
 #     tensorboard --logdir=https://torchtbprofiler.blob.core.windows.net/torchtbprofiler/demo/memory_demo_1_10

@@ -366,7 +366,7 @@ def train(data):
 #
 # You can try it by using existing example on Azure:
 #
-# ::
+# .. code-block::
 #
 #     pip install azure-storage-blob
 #     tensorboard --logdir=https://torchtbprofiler.blob.core.windows.net/torchtbprofiler/demo/distributed_bert
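
Beyond the directive cleanup, the substantive change this file points at is the "Performance Recommendation" of raising num_workers. A hedged sketch of how that DataLoader plugs into a profiling loop like the tutorial's; the dataset setup, the train step, and the schedule values are assumptions:

    import torch
    import torch.profiler
    import torchvision
    import torchvision.transforms as T

    transform = T.Compose([T.Resize(224), T.ToTensor()])
    train_set = torchvision.datasets.CIFAR10(
        root="./data", train=True, download=True, transform=transform)
    train_loader = torch.utils.data.DataLoader(
        train_set, batch_size=32, shuffle=True, num_workers=4)

    def train(data):
        pass  # stand-in for the tutorial's forward/backward step

    with torch.profiler.profile(
        schedule=torch.profiler.schedule(wait=1, warmup=1, active=3, repeat=1),
        on_trace_ready=torch.profiler.tensorboard_trace_handler("./log/resnet18_4workers"),
        record_shapes=True,
    ) as prof:
        for step, batch_data in enumerate(train_loader):
            if step >= 5:  # wait + warmup + active steps
                break
            train(batch_data)
            prof.step()  # tell the profiler a step boundary was reached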

prototype_source/README.txt (+4, -4)

@@ -1,8 +1,8 @@
 Prototype Tutorials
 ------------------
 1. distributed_rpc_profiling.rst
-     Profiling PyTorch RPC-Based Workloads
-     https://github.com/pytorch/tutorials/blob/release/1.6/prototype_source/distributed_rpc_profiling.rst
+   Profiling PyTorch RPC-Based Workloads
+   https://github.com/pytorch/tutorials/blob/main/prototype_source/distributed_rpc_profiling.rst

 2. graph_mode_static_quantization_tutorial.py
    Graph Mode Post Training Static Quantization in PyTorch

@@ -21,8 +21,8 @@ Prototype Tutorials
    https://github.com/pytorch/tutorials/blob/main/prototype_source/torchscript_freezing.py

 6. vulkan_workflow.rst
-     Vulkan Backend User Workflow
-     https://pytorch.org/tutorials/intermediate/vulkan_workflow.html
+   Vulkan Backend User Workflow
+   https://pytorch.org/tutorials/intermediate/vulkan_workflow.html

 7. fx_graph_mode_ptq_static.rst
    FX Graph Mode Post Training Static Quantization

prototype_source/fx_graph_mode_quant_guide.rst (+12, -15)

@@ -4,7 +4,7 @@
 **Author**: `Jerry Zhang <https://github.com/jerryzh168>`_

 FX Graph Mode Quantization requires a symbolically traceable model.
-We use the FX framework (TODO: link) to convert a symbolically traceable nn.Module instance to IR,
+We use the FX framework to convert a symbolically traceable nn.Module instance to IR,
 and we operate on the IR to execute the quantization passes.
 Please post your question about symbolically tracing your model in `PyTorch Discussion Forum <https://discuss.pytorch.org/c/quantization/17>`_

@@ -22,16 +22,19 @@ You can use any combination of these options:
 b. Write your own observed and quantized submodule


-####################################################################
 If the code that is not symbolically traceable does not need to be quantized, we have the following two options
 to run FX Graph Mode Quantization:
-1.a. Symbolically trace only the code that needs to be quantized
+
+
+Symbolically trace only the code that needs to be quantized
 -----------------------------------------------------------------
 When the whole model is not symbolically traceable but the submodule we want to quantize is
 symbolically traceable, we can run quantization only on that submodule.
+
 before:

 .. code:: python
+
 class M(nn.Module):
     def forward(self, x):
         x = non_traceable_code_1(x)

@@ -42,6 +45,7 @@ before:
 after:

 .. code:: python
+
 class FP32Traceable(nn.Module):
     def forward(self, x):
         x = traceable_code(x)

@@ -69,8 +73,7 @@ Note if original model needs to be preserved, you will have to
 copy it yourself before calling the quantization APIs.


-#####################################################
-1.b. Skip symbolically trace the non-traceable code
+Skip symbolically tracing the non-traceable code
 ---------------------------------------------------
 When we have some non-traceable code in the module, and this part of code doesn’t need to be quantized,
 we can factor out this part of the code into a submodule and skip symbolically trace that submodule.

@@ -134,8 +137,7 @@ quantization code:

 If the code that is not symbolically traceable needs to be quantized, we have the following two options:

-##########################################################
-2.a Refactor your code to make it symbolically traceable
+Refactor your code to make it symbolically traceable
 --------------------------------------------------------
 If it is easy to refactor the code and make the code symbolically traceable,
 we can refactor the code and remove the use of non-traceable constructs in python.

@@ -167,15 +169,10 @@ after:
     return x.permute(0, 2, 1, 3)


-quantization code:
-
 This can be combined with other approaches and the quantization code
 depends on the model.

-
-
-#######################################################
-2.b. Write your own observed and quantized submodule
+Write your own observed and quantized submodule
 -----------------------------------------------------

 If the non-traceable code can’t be refactored to be symbolically traceable,

@@ -207,8 +204,8 @@ non-traceable logic, wrapped in a module
 class FP32NonTraceable:
     ...

-
-2. Define observed version of FP32NonTraceable
+2. Define observed version of
+FP32NonTraceable

 .. code:: python

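Where the guide says to run quantization only on the traceable submodule, the flow with the torch.ao.quantization.quantize_fx API looks roughly like the sketch below; the model, shapes, backend string, and the data-dependent branch that makes the parent non-traceable are all made up:

    import torch
    import torch.nn as nn
    from torch.ao.quantization import get_default_qconfig_mapping
    from torch.ao.quantization.quantize_fx import convert_fx, prepare_fx

    class FP32Traceable(nn.Module):
        # Symbolically traceable part we want to quantize.
        def __init__(self):
            super().__init__()
            self.linear = nn.Linear(8, 8)

        def forward(self, x):
            return self.linear(x)

    class M(nn.Module):
        # Parent model; the data-dependent branch makes it non-traceable.
        def __init__(self):
            super().__init__()
            self.traceable_submodule = FP32Traceable()

        def forward(self, x):
            if x.sum() > 0:  # data-dependent control flow
                x = x * 2
            return self.traceable_submodule(x)

    model = M().eval()
    example_inputs = (torch.randn(1, 8),)

    # Prepare/convert only the submodule; the rest of M is untouched.
    model.traceable_submodule = prepare_fx(
        model.traceable_submodule,
        get_default_qconfig_mapping("fbgemm"),
        example_inputs,
    )
    with torch.no_grad():
        model(torch.randn(4, 8))  # calibration pass through the parent
    model.traceable_submodule = convert_fx(model.traceable_submodule)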