"""
How to use TensorBoard with PyTorch
===================================
TensorBoard is a visualization toolkit for machine learning experimentation.
TensorBoard allows tracking and visualizing metrics such as loss and accuracy,
visualizing the model graph, viewing histograms, displaying images and much more.
In this tutorial we are going to cover TensorBoard installation,
basic usage with PyTorch, and how to visualize data you logged in TensorBoard UI.
Installation
----------------------
PyTorch should be installed to log models and metrics into TensorBoard log
directory. The following command will install PyTorch 1.4+ via
Anaconda (recommended):
::
$ conda install pytorch torchvision -c pytorch
or pip
::
$ pip install torch torchvision
"""
######################################################################
# Using TensorBoard in PyTorch
# ----------------------------
#
# Let’s now try using TensorBoard with PyTorch! Before logging anything,
# we need to create a ``SummaryWriter`` instance.
#
import torch
from torch.utils.tensorboard import SummaryWriter

writer = SummaryWriter()
######################################################################
# The writer will output to the ``./runs/`` directory by default.
#
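# If you would rather write to a different location, ``SummaryWriter``
# accepts a ``log_dir`` argument. A minimal sketch (the directory name
# below is only an illustrative choice):
#
# .. code-block:: python
#
#    writer = SummaryWriter(log_dir="runs/my_experiment")
#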
######################################################################
# Log scalars
# -----------
#
# In machine learning, it’s important to understand key metrics such as
# loss and how they change during training. Logging scalars lets you save
# the loss value at each training step, or the accuracy after each epoch.
#
# To log a scalar value, use
# ``add_scalar(tag, scalar_value, global_step=None, walltime=None)``.
# For example, let’s create a simple linear regression training loop and
# log the loss value using ``add_scalar``.
#
# simple synthetic data: y = -5x plus Gaussian noise
x = torch.arange(-5, 5, 0.1).view(-1, 1)
y = -5 * x + 0.1 * torch.randn(x.size())

model = torch.nn.Linear(1, 1)
criterion = torch.nn.MSELoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)


def train_model(iter):
    for epoch in range(iter):
        y1 = model(x)
        loss = criterion(y1, y)
        # log the training loss for this epoch
        writer.add_scalar("Loss/train", loss, epoch)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()


train_model(10)
writer.flush()
######################################################################
# Call the ``flush()`` method to make sure that all pending events
# have been written to disk.
#
# See the `torch.utils.tensorboard <https://pytorch.org/docs/stable/tensorboard.html>`_
# docs to find more TensorBoard visualization types you can log.
#
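# As a quick, illustrative sketch (the image tensor below is a random
# placeholder, not data from the training run above), a few other entry
# types follow the same pattern:
#
# .. code-block:: python
#
#    # log a histogram of the layer's weights at a given step
#    writer.add_histogram("weights/linear", model.weight, global_step=0)
#
#    # log a single image tensor of shape (C, H, W)
#    writer.add_image("images/example", torch.rand(3, 64, 64), global_step=0)
#
#    # log the model graph, traced with an example input
#    writer.add_graph(model, x)
#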
# If you do not need the summary writer anymore, call the ``close()`` method.
#
writer.close()
######################################################################
# Run TensorBoard
# ---------------
#
# Install TensorBoard through the command line to visualize the data you logged:
#
# .. code-block:: sh
#
#    pip install tensorboard
#
#
# Now, start TensorBoard, specifying the root log directory you used above.
# The ``--logdir`` argument points to the directory where TensorBoard will
# look for event files that it can display. TensorBoard will recursively walk
# the directory structure rooted at ``logdir``, looking for ``.*tfevents.*`` files.
#
# .. code-block:: sh
#
#    tensorboard --logdir=runs
#
# Go to the URL it provides, or to `http://localhost:6006/ <http://localhost:6006/>`_.
#
# .. image:: ../../_static/img/thumbnails/tensorboard_scalars.png
# :scale: 40 %
#
# This dashboard shows how the loss and accuracy change with every epoch.
# You can also use it to track training speed, learning rate, and other
# scalar values. It’s helpful to compare these metrics across different
# training runs to improve your model.
#
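# To compare runs side by side in the dashboard, a common pattern is to give
# every run its own subdirectory under ``runs/``. A minimal sketch (the
# learning rates and directory names below are only illustrative):
#
# .. code-block:: python
#
#    for lr in (0.1, 0.01):
#        run_writer = SummaryWriter(log_dir=f"runs/lr_{lr}")
#        run_model = torch.nn.Linear(1, 1)
#        run_optimizer = torch.optim.SGD(run_model.parameters(), lr=lr)
#        for epoch in range(10):
#            loss = criterion(run_model(x), y)
#            run_writer.add_scalar("Loss/train", loss, epoch)
#            run_optimizer.zero_grad()
#            loss.backward()
#            run_optimizer.step()
#        run_writer.close()
#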
######################################################################
# Share TensorBoard dashboards
# ----------------------------
#
# `TensorBoard.dev <https://tensorboard.dev/>`_ lets you upload and share
# your ML experiment results with anyone. Use ``TensorBoard.dev`` to host,
# track, and share your TensorBoard dashboards.
#
# Install the latest version of TensorBoard to use the ``uploader``.
#
# .. code-block:: sh
#
#    pip install tensorboard --upgrade
#
# Use a simple command to upload and share your TensorBoard.
#
# .. code-block:: sh
#
#    tensorboard dev upload --logdir runs \
#        --name "My latest experiment" \
#        --description "Simple comparison of several hyperparameters"
#
# The ``--name`` and ``--description`` flags are optional.
#
# For help, run ``$ tensorboard dev --help``.
#
# **Note:** Uploaded TensorBoards are public and visible to everyone.
# Do not upload sensitive data.
#
# View your TensorBoard live at the URL provided in your terminal.
# For example: `https://tensorboard.dev/experiment/AdYd1TgeTlaLWXx6I8JUbA <https://tensorboard.dev/experiment/AdYd1TgeTlaLWXx6I8JUbA>`_
#
#
# .. image:: ../../_static/img/thumbnails/tensorboard_dev.png
# :scale: 40 %
#
#
# .. note::
# ``TensorBoard.dev`` currently supports scalars, graphs, histograms, distributions, ``hparams``, and text dashboards.
########################################################################
# Learn More
# ----------------------------
#
# - `torch.utils.tensorboard <https://pytorch.org/docs/stable/tensorboard.html>`_ docs
# - `Visualizing models, data, and training with TensorBoard <https://pytorch.org/tutorials/intermediate/tensorboard_tutorial.html>`_ tutorial
#