Skip to content

Commit 51f1c93

Browse files
MorvanZhou (Morvan Zhou)
authored and
Morvan Zhou
committed
update
1 parent dcc4e7c commit 51f1c93

7 files changed

+32
-79
lines changed

tutorial-contents/401_CNN.py

+4-14
Original file line numberDiff line numberDiff line change
@@ -85,26 +85,16 @@ def forward(self, x):
8585

8686
# following function (plot_with_labels) is for visualization, can be ignored if not interested
8787
from matplotlib import cm
88-
try:
89-
from sklearn.manifold import TSNE
90-
HAS_SK = True
91-
except:
92-
HAS_SK = False
93-
print('Please install sklearn for layer visualization')
88+
try: from sklearn.manifold import TSNE; HAS_SK = True
89+
except: HAS_SK = False; print('Please install sklearn for layer visualization')
9490
def plot_with_labels(lowDWeights, labels):
9591
plt.cla()
9692
X, Y = lowDWeights[:, 0], lowDWeights[:, 1]
9793
for x, y, s in zip(X, Y, labels):
98-
c = cm.rainbow(int(255 * s / 9))
99-
plt.text(x, y, s, backgroundcolor=c, fontsize=9)
100-
plt.xlim(X.min(), X.max())
101-
plt.ylim(Y.min(), Y.max())
102-
plt.title('Visualize last layer')
103-
plt.show()
104-
plt.pause(0.01)
94+
c = cm.rainbow(int(255 * s / 9)); plt.text(x, y, s, backgroundcolor=c, fontsize=9)
95+
plt.xlim(X.min(), X.max()); plt.ylim(Y.min(), Y.max()); plt.title('Visualize last layer'); plt.show(); plt.pause(0.01)
10596

10697
plt.ion()
107-
10898
# training and testing
10999
for epoch in range(EPOCH):
110100
for step, (x, y) in enumerate(train_loader): # gives batch data, normalize x when iterate train_loader

tutorial-contents/403_RNN_regressor.py

+1-2
Original file line numberDiff line numberDiff line change
@@ -87,8 +87,7 @@ def forward(self, x, h_state):
8787
# plotting
8888
plt.plot(steps, y_np.flatten(), 'r-')
8989
plt.plot(steps, prediction.data.numpy().flatten(), 'b-')
90-
plt.draw()
91-
plt.pause(0.05)
90+
plt.draw(); plt.pause(0.05)
9291

9392
plt.ioff()
9493
plt.show()

tutorial-contents/404_autoencoder.py

+7-17
Original file line numberDiff line numberDiff line change
@@ -89,9 +89,7 @@ def forward(self, x):
8989
# original data (first row) for viewing
9090
view_data = Variable(train_data.train_data[:N_TEST_IMG].view(-1, 28*28).type(torch.FloatTensor)/255.)
9191
for i in range(N_TEST_IMG):
92-
a[0][i].imshow(np.reshape(view_data.data.numpy()[i], (28, 28)), cmap='gray')
93-
a[0][i].set_xticks(())
94-
a[0][i].set_yticks(())
92+
a[0][i].imshow(np.reshape(view_data.data.numpy()[i], (28, 28)), cmap='gray'); a[0][i].set_xticks(()); a[0][i].set_yticks(())
9593

9694
for epoch in range(EPOCH):
9795
for step, (x, y) in enumerate(train_loader):
@@ -114,27 +112,19 @@ def forward(self, x):
114112
for i in range(N_TEST_IMG):
115113
a[1][i].clear()
116114
a[1][i].imshow(np.reshape(decoded_data.data.numpy()[i], (28, 28)), cmap='gray')
117-
a[1][i].set_xticks(())
118-
a[1][i].set_yticks(())
119-
plt.draw()
120-
plt.pause(0.05)
115+
a[1][i].set_xticks(()); a[1][i].set_yticks(())
116+
plt.draw(); plt.pause(0.05)
121117

122118
plt.ioff()
123119
plt.show()
124120

125121
# visualize in 3D plot
126122
view_data = Variable(train_data.train_data[:200].view(-1, 28*28).type(torch.FloatTensor)/255.)
127123
encoded_data, _ = autoencoder(view_data)
128-
fig = plt.figure(2)
129-
ax = Axes3D(fig)
130-
X = encoded_data.data[:, 0].numpy()
131-
Y = encoded_data.data[:, 1].numpy()
132-
Z = encoded_data.data[:, 2].numpy()
124+
fig = plt.figure(2); ax = Axes3D(fig)
125+
X, Y, Z = encoded_data.data[:, 0].numpy(), encoded_data.data[:, 1].numpy(), encoded_data.data[:, 2].numpy()
133126
values = train_data.train_labels[:200].numpy()
134127
for x, y, z, s in zip(X, Y, Z, values):
135-
c = cm.rainbow(int(255*s/9))
136-
ax.text(x, y, z, s, backgroundcolor=c)
137-
ax.set_xlim(X.min(), X.max())
138-
ax.set_ylim(Y.min(), Y.max())
139-
ax.set_zlim(Z.min(), Z.max())
128+
c = cm.rainbow(int(255*s/9)); ax.text(x, y, z, s, backgroundcolor=c)
129+
ax.set_xlim(X.min(), X.max()); ax.set_ylim(Y.min(), Y.max()); ax.set_zlim(Z.min(), Z.max())
140130
plt.show()

tutorial-contents/406_GAN.py

+5-8
Original file line numberDiff line numberDiff line change
@@ -25,10 +25,10 @@
2525
PAINT_POINTS = np.vstack([np.linspace(-1, 1, ART_COMPONENTS) for _ in range(BATCH_SIZE)])
2626

2727
# show our beautiful painting range
28-
plt.plot(PAINT_POINTS[0], 2 * np.power(PAINT_POINTS[0], 2) + 1, c='#74BCFF', lw=3, label='upper bound')
29-
plt.plot(PAINT_POINTS[0], 1 * np.power(PAINT_POINTS[0], 2) + 0, c='#FF9359', lw=3, label='lower bound')
30-
plt.legend(loc='upper right')
31-
plt.show()
28+
# plt.plot(PAINT_POINTS[0], 2 * np.power(PAINT_POINTS[0], 2) + 1, c='#74BCFF', lw=3, label='upper bound')
29+
# plt.plot(PAINT_POINTS[0], 1 * np.power(PAINT_POINTS[0], 2) + 0, c='#FF9359', lw=3, label='lower bound')
30+
# plt.legend(loc='upper right')
31+
# plt.show()
3232

3333

3434
def artist_works(): # painting from the famous artist (real target)
@@ -81,10 +81,7 @@ def artist_works(): # painting from the famous artist (real target)
8181
plt.plot(PAINT_POINTS[0], 1 * np.power(PAINT_POINTS[0], 2) + 0, c='#FF9359', lw=3, label='lower bound')
8282
plt.text(-.5, 2.3, 'D accuracy=%.2f (0.5 for D to converge)' % prob_artist0.data.numpy().mean(), fontdict={'size': 15})
8383
plt.text(-.5, 2, 'D score= %.2f (-1.38 for G to converge)' % -D_loss.data.numpy(), fontdict={'size': 15})
84-
plt.ylim((0, 3))
85-
plt.legend(loc='upper right', fontsize=12)
86-
plt.draw()
87-
plt.pause(0.01)
84+
plt.ylim((0, 3));plt.legend(loc='upper right', fontsize=12);plt.draw();plt.pause(0.01)
8885

8986
plt.ioff()
9087
plt.show()

tutorial-contents/406_conditional_GAN.py

+6-11
Original file line numberDiff line numberDiff line change
@@ -25,10 +25,10 @@
2525
PAINT_POINTS = np.vstack([np.linspace(-1, 1, ART_COMPONENTS) for _ in range(BATCH_SIZE)])
2626

2727
# show our beautiful painting range
28-
plt.plot(PAINT_POINTS[0], 2 * np.power(PAINT_POINTS[0], 2) + 1, c='#74BCFF', lw=3, label='upper bound')
29-
plt.plot(PAINT_POINTS[0], 1 * np.power(PAINT_POINTS[0], 2) + 0, c='#FF9359', lw=3, label='lower bound')
30-
plt.legend(loc='upper right')
31-
plt.show()
28+
# plt.plot(PAINT_POINTS[0], 2 * np.power(PAINT_POINTS[0], 2) + 1, c='#74BCFF', lw=3, label='upper bound')
29+
# plt.plot(PAINT_POINTS[0], 1 * np.power(PAINT_POINTS[0], 2) + 0, c='#FF9359', lw=3, label='lower bound')
30+
# plt.legend(loc='upper right')
31+
# plt.show()
3232

3333

3434
def artist_works_with_labels(): # painting from the famous artist (real target)
@@ -91,10 +91,7 @@ def artist_works_with_labels(): # painting from the famous artist (real targ
9191
plt.text(-.5, 2.3, 'D accuracy=%.2f (0.5 for D to converge)' % prob_artist0.data.numpy().mean(), fontdict={'size': 15})
9292
plt.text(-.5, 2, 'D score= %.2f (-1.38 for G to converge)' % -D_loss.data.numpy(), fontdict={'size': 15})
9393
plt.text(-.5, 1.7, 'Class = %i' % int(labels.data[0, 0]), fontdict={'size': 15})
94-
plt.ylim((0, 3))
95-
plt.legend(loc='upper right', fontsize=12)
96-
plt.draw()
97-
plt.pause(0.1)
94+
plt.ylim((0, 3));plt.legend(loc='upper right', fontsize=12);plt.draw();plt.pause(0.1)
9895

9996
plt.ioff()
10097
plt.show()
@@ -107,6 +104,4 @@ def artist_works_with_labels(): # painting from the famous artist (real targ
107104
plt.plot(PAINT_POINTS[0], G_paintings.data.numpy()[0], c='#4AD631', lw=3, label='G painting for upper class',)
108105
plt.plot(PAINT_POINTS[0], 2 * np.power(PAINT_POINTS[0], 2) + bound[1], c='#74BCFF', lw=3, label='upper bound (class 1)')
109106
plt.plot(PAINT_POINTS[0], 1 * np.power(PAINT_POINTS[0], 2) + bound[0], c='#FF9359', lw=3, label='lower bound (class 1)')
110-
plt.ylim((0, 3))
111-
plt.legend(loc='upper right', fontsize=12)
112-
plt.show()
107+
plt.ylim((0, 3));plt.legend(loc='upper right', fontsize=12);plt.show()

tutorial-contents/503_dropout.py

+1-3
Original file line numberDiff line numberDiff line change
@@ -87,9 +87,7 @@
8787
plt.plot(test_x.data.numpy(), test_pred_drop.data.numpy(), 'b--', lw=3, label='dropout(50%)')
8888
plt.text(0, -1.2, 'overfitting loss=%.4f' % loss_func(test_pred_ofit, test_y).data[0], fontdict={'size': 20, 'color': 'red'})
8989
plt.text(0, -1.5, 'dropout loss=%.4f' % loss_func(test_pred_drop, test_y).data[0], fontdict={'size': 20, 'color': 'blue'})
90-
plt.legend(loc='upper left')
91-
plt.ylim((-2.5, 2.5))
92-
plt.pause(0.1)
90+
plt.legend(loc='upper left'); plt.ylim((-2.5, 2.5));plt.pause(0.1)
9391

9492
# change back to train mode
9593
net_overfitting.train()

tutorial-contents/504_batch_normalization.py

+8-24
Original file line numberDiff line numberDiff line change
@@ -99,30 +99,17 @@ def forward(self, x):
9999
f, axs = plt.subplots(4, N_HIDDEN+1, figsize=(10, 5))
100100
plt.ion() # something about plotting
101101
plt.show()
102-
103102
def plot_histogram(l_in, l_in_bn, pre_ac, pre_ac_bn):
104103
for i, (ax_pa, ax_pa_bn, ax, ax_bn) in enumerate(zip(axs[0, :], axs[1, :], axs[2, :], axs[3, :])):
105104
[a.clear() for a in [ax_pa, ax_pa_bn, ax, ax_bn]]
106-
if i == 0:
107-
p_range = (-7, 10)
108-
the_range = (-7, 10)
109-
else:
110-
p_range = (-4, 4)
111-
the_range = (-1, 1)
105+
if i == 0: p_range = (-7, 10);the_range = (-7, 10)
106+
else:p_range = (-4, 4);the_range = (-1, 1)
112107
ax_pa.set_title('L' + str(i))
113-
ax_pa.hist(pre_ac[i].data.numpy().ravel(), bins=10, range=p_range, color='#FF9359', alpha=0.5)
114-
ax_pa_bn.hist(pre_ac_bn[i].data.numpy().ravel(), bins=10, range=p_range, color='#74BCFF', alpha=0.5)
115-
ax.hist(l_in[i].data.numpy().ravel(), bins=10, range=the_range, color='#FF9359')
116-
ax_bn.hist(l_in_bn[i].data.numpy().ravel(), bins=10, range=the_range, color='#74BCFF')
117-
for a in [ax_pa, ax, ax_pa_bn, ax_bn]:
118-
a.set_yticks(())
119-
a.set_xticks(())
120-
ax_pa_bn.set_xticks(p_range)
121-
ax_bn.set_xticks(the_range)
122-
axs[0, 0].set_ylabel('PreAct')
123-
axs[1, 0].set_ylabel('BN PreAct')
124-
axs[2, 0].set_ylabel('Act')
125-
axs[3, 0].set_ylabel('BN Act')
108+
ax_pa.hist(pre_ac[i].data.numpy().ravel(), bins=10, range=p_range, color='#FF9359', alpha=0.5);ax_pa_bn.hist(pre_ac_bn[i].data.numpy().ravel(), bins=10, range=p_range, color='#74BCFF', alpha=0.5)
109+
ax.hist(l_in[i].data.numpy().ravel(), bins=10, range=the_range, color='#FF9359');ax_bn.hist(l_in_bn[i].data.numpy().ravel(), bins=10, range=the_range, color='#74BCFF')
110+
for a in [ax_pa, ax, ax_pa_bn, ax_bn]: a.set_yticks(());a.set_xticks(())
111+
ax_pa_bn.set_xticks(p_range);ax_bn.set_xticks(the_range)
112+
axs[0, 0].set_ylabel('PreAct');axs[1, 0].set_ylabel('BN PreAct');axs[2, 0].set_ylabel('Act');axs[3, 0].set_ylabel('BN Act')
126113
plt.pause(0.01)
127114

128115
# training
@@ -155,10 +142,7 @@ def plot_histogram(l_in, l_in_bn, pre_ac, pre_ac_bn):
155142
plt.figure(2)
156143
plt.plot(losses[0], c='#FF9359', lw=3, label='Original')
157144
plt.plot(losses[1], c='#74BCFF', lw=3, label='Batch Normalization')
158-
plt.xlabel('step')
159-
plt.ylabel('test loss')
160-
plt.ylim((0, 2000))
161-
plt.legend(loc='best')
145+
plt.xlabel('step');plt.ylabel('test loss');plt.ylim((0, 2000));plt.legend(loc='best')
162146

163147
# evaluation
164148
# set net to eval mode to freeze the parameters in batch normalization layers

0 commit comments

Comments (0)