|
@@ -61,6 +61,7 @@
 from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
 print_limit = 20
 chars_to_train = range(48,58)
+n_classes = len(chars_to_train)
 columnsXY=range(0,20)
 column_str = 'column_sum{}'.format(list(columnsXY))
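Context for the added n_classes line: chars_to_train = range(48, 58) covers ASCII codes 48-57, i.e. the digit characters '0' through '9', so the new variable works out to 10 and is used further down to cap the LDA component count. A minimal sketch of what it evaluates to, reusing the script's names:

    chars_to_train = range(48, 58)    # ASCII 48..57 -> characters '0'..'9'
    n_classes = len(chars_to_train)   # 10 digit classes
    print(n_classes)                  # -> 10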
|
|
@@ -107,7 +108,7 @@
 S_W = np.zeros((d, d))
 for label, mv in zip(unique_labels, mean_vecs):
     class_scatter = np.zeros((d, d))
-    for row in X_train_std[[y_train == label]]:
+    for row in X_train_std[y_train == label]:
         row, mv = row.reshape(d, 1), mv.reshape(d, 1)
         class_scatter += (row-mv).dot((row-mv).T)
     S_W += class_scatter
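Why the indexing fix matters: X_train_std[[y_train == label]] wraps the boolean mask in a list, a form of non-tuple sequence indexing that NumPy deprecated and newer releases reject, whereas the plain mask selects just the rows of the current class. As a cross-check, a minimal sketch that computes the same within-class scatter, assuming mean_vecs holds the per-class means of X_train_std:

    import numpy as np

    S_W_check = np.zeros((d, d))                 # d, unique_labels, X_train_std,
    for label in unique_labels:                  # y_train as defined in the script
        X_c = X_train_std[y_train == label]      # rows belonging to this class
        diff = X_c - X_c.mean(axis=0)            # center within the class
        S_W_check += diff.T.dot(diff)            # sum of (row - mean) outer products
    # np.allclose(S_W_check, S_W) should hold if mean_vecs are the class means.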
|
|
@@ -195,7 +196,7 @@
 X_test_lda = lda.transform(X_test_std)

 from sklearn.linear_model import LogisticRegression
-lr = LogisticRegression()
+lr = LogisticRegression(solver='liblinear', multi_class='auto')
 lr = lr.fit(X_train_lda, y_train)

 title = 'Linear Discriminant Analysis Training Set'
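A note on the LogisticRegression change (it recurs in the two hunks below): scikit-learn 0.20/0.21 emit FutureWarnings because the defaults for solver and multi_class change in 0.22 (to 'lbfgs' and 'auto'); passing both explicitly silences the warnings and pins the original one-vs-rest behaviour (on much newer releases multi_class is itself being deprecated, so this mainly targets the 0.20-0.22 transition). A minimal usage sketch under that assumption, with y_test assumed to be defined alongside y_train:

    from sklearn.linear_model import LogisticRegression

    # liblinear with multi_class='auto' resolves to one-vs-rest for >2 classes
    lr = LogisticRegression(solver='liblinear', multi_class='auto')
    lr.fit(X_train_lda, y_train)
    print(lr.score(X_test_lda, y_test))   # y_test assumed, mirroring y_train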
|
|
@@ -208,13 +209,13 @@

 ###############################################################################
 n_components = 10
-lda = LDA(n_components=n_components)
+lda = LDA(n_components=min(n_components,n_classes-1))
 X_train_lda = lda.fit_transform(X_train_std, y_train)
 X_test_lda = lda.transform(X_test_std)

 print ('n_components={}'.format(lda.n_components))

-lr = LogisticRegression()
+lr = LogisticRegression(solver='liblinear', multi_class='auto')
 logistic_fitted = lr.fit(X_train_lda, y_train)

 from sklearn.metrics import accuracy_score
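The min(n_components, n_classes - 1) cap reflects a property of LDA: the between-class scatter matrix has rank at most n_classes - 1, so scikit-learn's LDA can return at most min(n_features, n_classes - 1) discriminant axes, and depending on the version a larger request either raises an error or is silently clipped. A small sketch of the bound, assuming the 20 columnsXY columns are the feature set:

    # Sketch of the component bound, using names from the script.
    n_features = len(columnsXY)                          # 20 columns here
    max_lda_components = min(n_features, n_classes - 1)
    print(max_lda_components)                            # -> 9 for the 10 digit classes
    # After fitting, X_train_lda.shape[1] cannot exceed max_lda_components.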
|
|
@@ -233,13 +234,13 @@

 ###############################################################################
 n_components = 10
-lda = LDA(n_components=n_components, solver='eigen')
+lda = LDA(n_components=n_components-1, solver='eigen')
 X_train_lda = lda.fit_transform(X_train_std, y_train)
 X_test_lda = lda.transform(X_test_std)

 print ('n_components={}'.format(lda.n_components))

-lr = LogisticRegression()
+lr = LogisticRegression(solver='liblinear', multi_class='auto')
 logistic_fitted = lr.fit(X_train_lda, y_train)

 from sklearn.metrics import accuracy_score
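In this last hunk n_components - 1 evaluates to 9 only because n_components (10) happens to match the number of classes; a form consistent with the cap used in the earlier hunk, and still valid if n_components is later changed, would be (sketch, same names as above):

    lda = LDA(n_components=min(n_components, n_classes - 1), solver='eigen')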
|
|