Commit 3066693

update pytorch samples
1 parent 79d0b5a commit 3066693

10 files changed: +588 -0 lines

cython/benchmark.py

+15
@@ -0,0 +1,15 @@
import cpython_demo, python_demo, time

to = 100000000
start = time.time()
python_demo.sum(to)
end = time.time()
py_time = end - start
print("Python time = {}".format(py_time))

start = time.time()
cpython_demo.sum(to)
end = time.time()
cy_time = end - start
print("Cython time = {}".format(cy_time))
print("Speedup = {}".format(py_time / cy_time))

cython/cpython_demo.py

+5
@@ -0,0 +1,5 @@
def sum(to):
    ans = 0
    for i in range(to):
        ans += i
    return ans

cython/python_demo.py

+5
@@ -0,0 +1,5 @@
def sum(to):
    ans = 0
    for i in range(to):
        ans += i
    return ans

cython/setup.py

+3
@@ -0,0 +1,3 @@
from distutils.core import setup
from Cython.Build import cythonize
setup(ext_modules=cythonize('cpython_demo.py'))
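Note: cpython_demo.py and python_demo.py are deliberately identical, so the benchmark isolates the gain from Cython compilation alone. To reproduce it, build the extension in place and then run the benchmark:

    python setup.py build_ext --inplace
    python benchmark.py

Cython's larger wins usually come from static typing. A sketch of a typed variant (a hypothetical cpython_demo_typed.pyx, not part of this commit):

# Hypothetical typed variant; cdef declarations let the loop run as plain C.
def sum(long to):
    cdef long ans = 0
    cdef long i
    for i in range(to):
        ans += i
    return ans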

pytorch/pytorch_basic.md

Whitespace-only changes.

pytorch notebook (.ipynb; file name not shown)

+289
@@ -0,0 +1,289 @@
In [8]:
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim

from torchvision import datasets
import torchvision.transforms as transforms
import mmcv
from itertools import product
In [9]:
torch.manual_seed(7)
device = 'cuda:0'
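Note: the notebook assumes a GPU at cuda:0. A guarded variant keeps it runnable on CPU-only machines (a sketch, not in the committed cell):

# Hypothetical fallback: use the GPU only when CUDA is actually available.
device = 'cuda:0' if torch.cuda.is_available() else 'cpu'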
30+
{
31+
"cell_type": "code",
32+
"execution_count": 10,
33+
"metadata": {},
34+
"outputs": [],
35+
"source": [
36+
"class Network(nn.Module):\n",
37+
" def __init__(self):\n",
38+
" super(Network, self).__init__()\n",
39+
" self.conv1 = nn.Conv2d(in_channels=1, out_channels=6, kernel_size=5)\n",
40+
" self.conv2 = nn.Conv2d(in_channels=6, out_channels=12, kernel_size=5)\n",
41+
" \n",
42+
" self.fc1 = nn.Linear(in_features=12*4*4, out_features=120)\n",
43+
" self.fc2 = nn.Linear(in_features=120, out_features=60)\n",
44+
" self.out = nn.Linear(in_features=60, out_features=10)\n",
45+
" \n",
46+
" def forward(self, x):\n",
47+
" x = self.conv1(x)\n",
48+
" x = F.relu(x)\n",
49+
" x = F.max_pool2d(x, kernel_size=2, stride=2)\n",
50+
"\n",
51+
" x = self.conv2(x)\n",
52+
" x = F.relu(x)\n",
53+
" x = F.max_pool2d(x, kernel_size=2, stride=2)\n",
54+
"\n",
55+
" x = torch.flatten(x, start_dim=1)\n",
56+
" x = self.fc1(x)\n",
57+
" x = self.fc2(x)\n",
58+
" x = self.out(x)\n",
59+
"\n",
60+
" return x"
61+
]
62+
},
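Note: fc1's in_features=12*4*4 follows from the tensor shapes: a 28x28 FashionMNIST image becomes 24x24 after the first 5x5 conv, 12x12 after pooling, 8x8 after the second conv, and 4x4 after the second pool, with 12 output channels. A quick check (hypothetical snippet, not in the notebook):

# Each 5x5 conv trims 4 pixels from the side; each 2x2/stride-2 pool halves it.
side = 28
side = (side - 4) // 2   # conv1 + pool -> 12
side = (side - 4) // 2   # conv2 + pool -> 4
print(12 * side * side)  # 192 == 12*4*4

Note also that forward applies no activation between fc1, fc2, and out, so the three linear layers compose into a single affine map on top of the conv features.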
In [11]:
train_set = datasets.FashionMNIST(root='./data', train=True, download=True, transform=transforms.Compose([transforms.ToTensor()]))
val_set = datasets.FashionMNIST(root='./data', train=False, download=True, transform=transforms.Compose([transforms.ToTensor()]))
In [12]:
# enable tensorboard
from torch.utils.tensorboard import SummaryWriter

parameters = dict(
    batch_size_list = [512, 1024, 1024*8],
    lr_list = [.01, .001, .0001, .00001],
    shuffle = [True, False]
)
param_values = [v for v in parameters.values()]
param_values

Out[12]:
[[512, 1024, 8192], [0.01, 0.001, 0.0001, 1e-05], [True, False]]
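Note: product(*param_values) in the next cell expands the three lists into their Cartesian product, 3 x 4 x 2 = 24 hyperparameter combinations, which is why 24 progress bars appear below. A standalone illustration (hypothetical, not in the notebook):

from itertools import product

grid = list(product([512, 1024, 8192], [.01, .001, .0001, .00001], [True, False]))
print(len(grid))   # 24
print(grid[0])     # (512, 0.01, True)
print(grid[-1])    # (8192, 1e-05, False) -- the last model trained below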
In [13]:
epochs = 10

for batch_size, lr, shuffle in product(*param_values):
    model = Network().to(device)

    train_loader = torch.utils.data.DataLoader(train_set, batch_size=batch_size, shuffle=shuffle)
    val_loader = torch.utils.data.DataLoader(val_set, batch_size=batch_size)
    optimizer = optim.Adam(model.parameters(), lr=lr)

    comment = f'_batch_size={batch_size}_lr={lr}_shuffle={shuffle}'
    writer = SummaryWriter(comment=comment)

    for epoch in mmcv.track_iter_progress(range(epochs)):
        correct_train, loss_train = 0., 0.
        for images, labels in train_loader:
            images, labels = images.to(device), labels.to(device)
            preds = model(images)
            loss = F.cross_entropy(preds, labels)
            loss_train += loss.item()
            correct_train += (preds.argmax(dim=1) == labels).sum()

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

        correct_val, loss_val = 0., 0.
        with torch.no_grad():
            for images, labels in val_loader:
                images, labels = images.to(device), labels.to(device)
                preds = model(images)
                loss = F.cross_entropy(preds, labels)
                loss_val += loss.item()
                correct_val += (preds.argmax(dim=1) == labels).sum()

        acc_train = correct_train/len(train_set)
        acc_val = correct_val/len(val_set)

        writer.add_scalar('Loss/train', loss_train, epoch)
        writer.add_scalar('Loss/test', loss_val, epoch)
        writer.add_scalar('Accuracy/train', acc_train, epoch)
        writer.add_scalar('Accuracy/test', acc_val, epoch)

    writer.close()

[>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>] 10/10, 0.2 task/s, elapsed: 58s, ETA: 0s
[>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>] 10/10, 0.2 task/s, elapsed: 59s, ETA: 0s
[>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>] 10/10, 0.2 task/s, elapsed: 59s, ETA: 0s
[>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>] 10/10, 0.2 task/s, elapsed: 59s, ETA: 0s
[>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>] 10/10, 0.2 task/s, elapsed: 59s, ETA: 0s
[>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>] 10/10, 0.2 task/s, elapsed: 59s, ETA: 0s
[>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>] 10/10, 0.2 task/s, elapsed: 60s, ETA: 0s
[>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>] 10/10, 0.2 task/s, elapsed: 59s, ETA: 0s
[>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>] 10/10, 0.2 task/s, elapsed: 56s, ETA: 0s
[>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>] 10/10, 0.2 task/s, elapsed: 56s, ETA: 0s
[>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>] 10/10, 0.2 task/s, elapsed: 55s, ETA: 0s
[>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>] 10/10, 0.2 task/s, elapsed: 55s, ETA: 0s
[>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>] 10/10, 0.2 task/s, elapsed: 55s, ETA: 0s
[>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>] 10/10, 0.2 task/s, elapsed: 55s, ETA: 0s
[>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>] 10/10, 0.2 task/s, elapsed: 55s, ETA: 0s
[>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>] 10/10, 0.2 task/s, elapsed: 55s, ETA: 0s
[>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>] 10/10, 0.2 task/s, elapsed: 53s, ETA: 0s
[>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>] 10/10, 0.2 task/s, elapsed: 53s, ETA: 0s
[>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>] 10/10, 0.2 task/s, elapsed: 52s, ETA: 0s
[>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>] 10/10, 0.2 task/s, elapsed: 53s, ETA: 0s
[>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>] 10/10, 0.2 task/s, elapsed: 53s, ETA: 0s
[>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>] 10/10, 0.2 task/s, elapsed: 53s, ETA: 0s
[>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>] 10/10, 0.2 task/s, elapsed: 53s, ETA: 0s
[>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>] 10/10, 0.2 task/s, elapsed: 53s, ETA: 0s
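Note: each SummaryWriter gets a per-run comment suffix, so all 24 runs show up as separate curves. With SummaryWriter's default settings the event files land under ./runs, so (assuming TensorBoard is installed) the comparison opens with:

    tensorboard --logdir runs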
In [16]:
val_preds = torch.tensor([], dtype=torch.long).to(device)
val_labels = torch.tensor([], dtype=torch.long).to(device)

with torch.no_grad():
    for images, labels in val_loader:
        images, labels = images.to(device), labels.to(device)
        preds = model(images).argmax(dim=1)
        val_preds = torch.cat((val_preds, preds.type(torch.long)), dim=0)
        val_labels = torch.cat((val_labels, labels.type(torch.long)), dim=0)
In [17]:
val_preds = val_preds.cpu()
val_labels = val_labels.cpu()
In [18]:
def confusion_matrix(preds, labels):
    # Rows index the true label, columns the prediction.
    stacked = torch.stack((labels, preds), dim=1)

    cmt = torch.zeros(10, 10, dtype=torch.int64)
    for p in stacked:
        j, k = p.tolist()
        cmt[j, k] += 1
    return cmt

cmt = confusion_matrix(val_preds, val_labels)
print(cmt)

tensor([[  0, 736,   0,   0,   0, 264,   0,   0,   0,   0],
        [  0, 747,   0,   0,   0, 253,   0,   0,   0,   0],
        [  0, 633,   0,   0,   0, 367,   0,   0,   0,   0],
        [  0, 780,   0,   0,   0, 220,   0,   0,   0,   0],
        [  0, 836,   0,   0,   0, 164,   0,   0,   0,   0],
        [  0,  48,   0,   0,   0, 952,   0,   0,   0,   0],
        [  0, 613,   0,   0,   0, 387,   0,   0,   0,   0],
        [  0, 269,   0,   0,   0, 731,   0,   0,   0,   0],
        [  0, 545,   0,   0,   0, 455,   0,   0,   0,   0],
        [  0,  98,   0,   0,   0, 902,   0,   0,   0,   0]])
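Note: every prediction falls into columns 1 (Trouser) and 5 (Sandal). That is consistent with which model these cells evaluate: the hyperparameter loop leaves model and val_loader bound to the last grid combination, batch_size=8192 with lr=1e-05 and shuffle=False, which has barely moved off its initialization after 10 epochs. Evaluating a stronger configuration would mean re-training it, or keeping a reference to the best model, before running these cells.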
In [31]:
from plot_confusion_matrix import plot_confusion_matrix
In [ ]:
names = ('T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat', 'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot')

plot_confusion_matrix(cmt, names, normalize=True)
Notebook metadata: kernel open-mmlab (Python 3.7.5), nbformat 4.4.