Skip to content

Commit 9fc5ae3

Browse files
committed
Change to TensorFlow 2 from version 1
Adjust to changes in sklearn, theano, keras
1 parent 1ebc7f5 commit 9fc5ae3

35 files changed

+171
-127
lines changed

n0_network.py

+3-1
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,6 @@
1-
import tensorflow as tf
1+
#import tensorflow as tf
2+
from tensorflow.compat import v1 as tf
3+
tf.compat.v1.disable_eager_execution()
24
import numpy as np
35
from collections import namedtuple
46
import datetime

n1_2cnv1fc.py

+5-2
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,6 @@
1-
import tensorflow as tf
1+
#import tensorflow as tf
2+
from tensorflow.compat import v1 as tf
3+
tf.compat.v1.disable_eager_execution()
24
import numpy as np
35
from collections import namedtuple
46
import datetime
@@ -43,6 +45,7 @@ def __init__(self, truthed_features, dtype=np.float32):
4345
nm = 'x_'+nm
4446
if i>1:
4547
extra_features_width += truthed_features.feature_width[i]
48+
4649
lst.append(tf.placeholder(dtype, shape=[None, truthed_features.feature_width[i]], name=nm))
4750

4851
# ph is a named tuple with key names like 'image', 'm_label', and values that
@@ -218,7 +221,7 @@ def computeSize(s,tens):
218221
tShape = tens.get_shape()
219222
nDims = len(tShape)
220223
for i in range(nDims):
221-
sumC *= tShape[i].value
224+
sumC *= tShape[i]
222225
print ('\t{}\t{}'.format(s,sumC),flush=True)
223226
return sumC
224227

n1_2cnv2fc.py

+4-2
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,6 @@
1-
import tensorflow as tf
1+
#import tensorflow as tf
2+
from tensorflow.compat import v1 as tf
3+
tf.compat.v1.disable_eager_execution()
24
import numpy as np
35
from collections import namedtuple
46
import datetime
@@ -250,7 +252,7 @@ def computeSize(s,tens):
250252
tShape = tens.get_shape()
251253
nDims = len(tShape)
252254
for i in range(nDims):
253-
sumC *= tShape[i].value
255+
sumC *= tShape[i]
254256
print ('\t{}\t{}'.format(s,sumC),flush=True)
255257
return sumC
256258

n1_baseTensorNN.py

+3-1
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,6 @@
1-
import tensorflow as tf
1+
from tensorflow.compat import v1 as tf
2+
tf.compat.v1.disable_eager_execution()
3+
#import tf
24
import numpy as np
35
from collections import namedtuple
46
import datetime

n1_image_to_image.py

+3-2
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,5 @@
1-
import tensorflow as tf
1+
from tensorflow.compat import v1 as tf
2+
#import tf
23
import numpy as np
34
from collections import namedtuple
45
import datetime
@@ -258,7 +259,7 @@ def computeSize(s,tens):
258259
tShape = tens.get_shape()
259260
nDims = len(tShape)
260261
for i in range(nDims):
261-
sumC *= tShape[i].value
262+
sumC *= tShape[i]
262263
print ('\t{}\t{}'.format(s,sumC),flush=True)
263264
return sumC
264265

n1_residual3x4.py

+3-3
Original file line numberDiff line numberDiff line change
@@ -7,7 +7,7 @@
77
88
99
'''
10-
import tensorflow as tf
10+
from tensorflow.compat import v1 as tf
1111
import numpy as np
1212
from collections import namedtuple
1313
import datetime
@@ -91,7 +91,7 @@ def bias_variable(shape, dtype):
9191
return tf.Variable(initial)
9292

9393
def shapeOuts(n):
94-
print ('n={}, hin={},w={}, b={} ,hout={}\n'.format(n, h[n]._shape, w[n]._variable._shape, b[n]._variable._shape, h[n+1]._shape))
94+
print ('n={}, hin={},w={}, b={} ,hout={}\n'.format(n, h[n].shape, w[n].shape, b[n].shape, h[n+1]._shape))
9595

9696
def section(n):
9797
with tf.name_scope('section_'+str(n)+'_0') as scope:
@@ -122,7 +122,7 @@ def computeSize(s,tens):
122122
tShape = tens.get_shape()
123123
nDims = len(tShape)
124124
for i in range(nDims):
125-
sumC *= tShape[i].value
125+
sumC *= tShape[i]
126126
print ('\t{}\t{}'.format(s,sumC),flush=True)
127127
return sumC
128128

o1_top_secret_cnn.py

+8-4
Original file line numberDiff line numberDiff line change
@@ -44,6 +44,10 @@
4444
#import n1_2cnv1fc as nnetwork
4545
#import n1_residual3x4 as nnetwork
4646
import n1_2cnv2fc as nnetwork
47+
import skimage.transform as af
48+
from bitarray import bitarray
49+
50+
4751
input_filters_dict = {'m_label': list(range(48,58))+list(range(65,91))}
4852
output_feature_list = ['orientation_one_hot','image']
4953
dtype = np.float32
@@ -70,7 +74,7 @@
7074

7175

7276
# pick up the base characters from training_image_file
73-
# produce some skeared versions
77+
# produce some sheared versions
7478
# make into a training set
7579
# place in a ocr_utils TruthedCharacters class so we can use the
7680
# one hot and batch functions
@@ -94,7 +98,7 @@
9498
orientation=[]
9599
recognized_label =[]
96100

97-
import skimage.transform as af
101+
98102

99103
for j in range(shp[0]):
100104
for i,skew in enumerate(skewRange):
@@ -141,10 +145,10 @@
141145
image_file_jpg = image_file+'.jpg'
142146
df,t1 = ocr_utils.file_to_df(image_file,character_size, title = 'unencrypted file',white_space=white_space)
143147

144-
from bitarray import bitarray
148+
145149
secret_message = "top secret"
146150
a = bitarray()
147-
a.fromstring(secret_message)
151+
a.frombytes(secret_message.encode('utf_8'))
148152

149153
index = 0
150154
encoded_skews=[]

o2_top_secret_lda-tesseract.py

+2-2
Original file line numberDiff line numberDiff line change
@@ -12,6 +12,7 @@
1212
from sklearn.linear_model import LogisticRegression
1313
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
1414
from sklearn.metrics import accuracy_score
15+
from ruamel_yaml.compat import utf8
1516

1617
inputs = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghiklnopqrstuvwxyz'
1718
inputs_list = list(ord(x) for x in inputs)
@@ -70,8 +71,7 @@ def encode_and_save_file(input_base, output_base, character_size, white_space, s
7071

7172
from bitarray import bitarray
7273
a = bitarray()
73-
a.fromstring(secret_message)
74-
74+
a.frombytes(secret_message.encode('utf-8') )
7575
index = 0
7676

7777
def convert_to_shear(a):

o3_top_secret_python_box.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -73,7 +73,7 @@
7373
#from sklearn.model_selection
7474
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
7575
from sklearn.linear_model import LogisticRegression
76-
from sklearn.cross_validation import train_test_split
76+
from sklearn.model_selection import train_test_split
7777

7878
# input_filters_dict = {'m_label': list(range(48,58))+list(range(65,91))}
7979
# output_feature_list = ['orientation_one_hot','image']

o4_image_to_image.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -30,7 +30,7 @@
3030
import pandas as pd
3131
import n1_image_to_image as nnetwork
3232
#import n1_residual3x4 as nnetwork
33-
import tensorflow as tf
33+
from tensorflow.compat import v1 as tf
3434
dtype = np.float32
3535
#with tf.device('/GPU:0'):
3636
#with tf.device('/cpu:0'):

ocr_utils.py

+8-3
Original file line numberDiff line numberDiff line change
@@ -38,7 +38,8 @@
3838
import numpy as np
3939
import pandas as pd
4040
import math
41-
from pandas.io.common import ZipFile
41+
#from pandas.io.common import ZipFile
42+
from zipfile import ZipFile
4243
from matplotlib.colors import ListedColormap
4344
import matplotlib.pyplot as plt
4445
import sys
@@ -1064,9 +1065,13 @@ def plot_decision_regions(X=None, y=None, classifier=None, resolution = .005, te
10641065
plt.ylim(xx2.min()-d, xx2.max()+d)
10651066

10661067
# plot class samples
1068+
10671069
for idx, cl in enumerate(np.unique(y)):
1068-
plt.scatter(X[y == cl, 0], X[y == cl, 1],
1069-
alpha=0.8, c=cmap(idx),
1070+
xs = X[y == cl, 0]
1071+
ys = X[y == cl, 1]
1072+
c =cmap(idx)
1073+
plt.scatter(xs, ys,
1074+
alpha=0.8, color=c,
10701075
marker=markers[idx%len(markers)], label=cl)
10711076

10721077
# highlight test samples

p115_l1_l2_regularization.py

+3-3
Original file line numberDiff line numberDiff line change
@@ -69,7 +69,7 @@
6969

7070
from sklearn.linear_model import LogisticRegression
7171

72-
lr = LogisticRegression(penalty='l1', C=0.1, random_state=0)
72+
lr = LogisticRegression(penalty='l1', C=0.1, random_state=0, solver='liblinear',multi_class='auto')
7373
lr.fit(X_train_std, y_train)
7474
print('Training accuracy-l1 regularization:', lr.score(X_train_std, y_train))
7575
print('Test accuracy-l1 regularization:', lr.score(X_test_std, y_test))
@@ -79,7 +79,7 @@
7979
print('\t{}'.format(lr.coef_))
8080

8181

82-
lr = LogisticRegression(penalty='l2', C=0.1, random_state=0)
82+
lr = LogisticRegression(penalty='l2', C=0.1, random_state=0, solver='liblinear',multi_class='auto')
8383
lr.fit(X_train_std, y_train)
8484
print('Training accuracy-l2 regularization:', lr.score(X_train_std, y_train))
8585
print('Test accuracy-l2 regularization:', lr.score(X_test_std, y_test))
@@ -101,7 +101,7 @@
101101
def weight_graph(regularization = 'l1'):
102102
weights, params = [], []
103103
for c in np.arange(0, 6):
104-
lr = LogisticRegression(penalty=regularization, C=10**c, random_state=0)
104+
lr = LogisticRegression(penalty=regularization, C=10**c, random_state=0, solver='liblinear',multi_class='auto')
105105
lr.fit(X_train_std, y_train)
106106
weights.append(lr.coef_[1])
107107
params.append(10**c)

p119_squential_backward_selection.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -49,7 +49,7 @@
4949
y, X, y_test, X_test, labels = ocr_utils.load_E13B(chars_to_train = (48,49,50) , columns=range(0,20), nChars=1000, random_state=0)
5050

5151

52-
from sklearn.cross_validation import train_test_split
52+
from sklearn.model_selection import train_test_split
5353

5454
X_train, X_test, y_train, y_test = train_test_split(
5555
X, y, test_size=0.3, random_state=0)

p131_principal_component_analysis.py

+3-3
Original file line numberDiff line numberDiff line change
@@ -130,7 +130,7 @@
130130
X_train_pca = pca.fit_transform(X_train_image)
131131
X_test_pca = pca.transform(X_test_image)
132132

133-
lr = LogisticRegression()
133+
lr = LogisticRegression(solver='liblinear',multi_class='auto')
134134
logistic_fitted =lr.fit(X_train_pca, y_train)
135135

136136
print('\nPCA Train Accuracy: {:4.6f}, n_components={}'.format(accuracy_score(y_train, logistic_fitted.predict(X_train_pca)),pca.n_components))
@@ -149,7 +149,7 @@
149149
X_train_pca = pca.fit_transform(X_train_image)
150150
X_test_pca = pca.transform(X_test_image)
151151

152-
lr = LogisticRegression()
152+
lr = LogisticRegression(solver='liblinear',multi_class='auto')
153153
logistic_fitted = lr.fit(X_train_pca, y_train)
154154

155155
y_train_pred = logistic_fitted.predict(X_train_pca)
@@ -191,7 +191,7 @@
191191
X_train_pca = pca.fit_transform(X_train_image)
192192
X_test_pca = pca.transform(X_test_image)
193193

194-
lr = LogisticRegression()
194+
lr = LogisticRegression(solver='liblinear',multi_class='auto')
195195
logistic_fitted=lr.fit(X_train_pca, y_train)
196196
y_train_pred = logistic_fitted.predict(X_train_pca)
197197
y_test_pred = logistic_fitted.predict(X_test_pca)

p141_linear_descriminant_analsys.py

+7-6
Original file line numberDiff line numberDiff line change
@@ -61,6 +61,7 @@
6161
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
6262
print_limit = 20
6363
chars_to_train = range(48,58)
64+
n_classes = len(chars_to_train)
6465
columnsXY=range(0,20)
6566
column_str = 'column_sum{}'.format(list(columnsXY))
6667

@@ -107,7 +108,7 @@
107108
S_W = np.zeros((d, d))
108109
for label, mv in zip(unique_labels, mean_vecs):
109110
class_scatter = np.zeros((d, d))
110-
for row in X_train_std[[y_train == label]]:
111+
for row in X_train_std[y_train == label]:
111112
row, mv = row.reshape(d, 1), mv.reshape(d, 1)
112113
class_scatter += (row-mv).dot((row-mv).T)
113114
S_W += class_scatter
@@ -195,7 +196,7 @@
195196
X_test_lda = lda.transform(X_test_std)
196197

197198
from sklearn.linear_model import LogisticRegression
198-
lr = LogisticRegression()
199+
lr = LogisticRegression(solver='liblinear', multi_class='auto')
199200
lr = lr.fit(X_train_lda, y_train)
200201

201202
title = 'Linear Descriminant Analysis Training Set'
@@ -208,13 +209,13 @@
208209

209210
###############################################################################
210211
n_components = 10
211-
lda = LDA(n_components=n_components)
212+
lda = LDA(n_components=min(n_components,n_classes-1))
212213
X_train_lda = lda.fit_transform(X_train_std, y_train)
213214
X_test_lda = lda.transform(X_test_std)
214215

215216
print ('n_components={}'.format(lda.n_components))
216217

217-
lr = LogisticRegression()
218+
lr = LogisticRegression(solver='liblinear', multi_class='auto')
218219
logistic_fitted = lr.fit(X_train_lda, y_train)
219220

220221
from sklearn.metrics import accuracy_score
@@ -233,13 +234,13 @@
233234

234235
###############################################################################
235236
n_components = 10
236-
lda = LDA(n_components=n_components, solver='eigen')
237+
lda = LDA(n_components=n_components-1, solver='eigen')
237238
X_train_lda = lda.fit_transform(X_train_std, y_train)
238239
X_test_lda = lda.transform(X_test_std)
239240

240241
print ('n_components={}'.format(lda.n_components))
241242

242-
lr = LogisticRegression()
243+
lr = LogisticRegression(solver='liblinear', multi_class='auto')
243244
logistic_fitted = lr.fit(X_train_lda, y_train)
244245

245246
from sklearn.metrics import accuracy_score

p154_pca_nonlinear_mapings.py

+2-3
Original file line numberDiff line numberDiff line change
@@ -83,8 +83,7 @@ def rbf_kernel_pca1(X, gamma, n_components):
8383
eigvals, eigvecs = eigh(K)
8484

8585
# Collect the top k eigenvectors (projected samples)
86-
X_pc = np.column_stack((eigvecs[:, -i]
87-
for i in range(1, n_components + 1)))
86+
X_pc = np.column_stack([eigvecs[:, -i] for i in range(1, n_components + 1)])
8887

8988
return X_pc
9089

@@ -269,7 +268,7 @@ def rbf_kernel_pca(X, gamma, n_components):
269268
eigvals, eigvecs = eigh(K)
270269

271270
# Collect the top k eigenvectors (projected samples)
272-
alphas = np.column_stack((eigvecs[:,-i] for i in range(1,n_components+1)))
271+
alphas = np.column_stack([eigvecs[:,-i] for i in range(1,n_components+1)])
273272

274273
# Collect the corresponding eigenvalues
275274
lambdas = [eigvals[-i] for i in range(1,n_components+1)]

0 commit comments

Comments
 (0)