
Commit ec5029a

Remove random printouts so run-to-run output is the same.
1 parent 89f18a8 commit ec5029a

6 files changed (+11, -13 lines)

p115_l1_l2_regularization.py  (+2, -2)

@@ -69,7 +69,7 @@
 
 from sklearn.linear_model import LogisticRegression
 
-lr = LogisticRegression(penalty='l1', C=0.1)
+lr = LogisticRegression(penalty='l1', C=0.1, random_state=0)
 lr.fit(X_train_std, y_train)
 print('Training accuracy-l1 regularization:', lr.score(X_train_std, y_train))
 print('Test accuracy-l1 regularization:', lr.score(X_test_std, y_test))
@@ -79,7 +79,7 @@
 print('\t{}'.format(lr.coef_))
 
 
-lr = LogisticRegression(penalty='l2', C=0.1)
+lr = LogisticRegression(penalty='l2', C=0.1, random_state=0)
 lr.fit(X_train_std, y_train)
 print('Training accuracy-l2 regularization:', lr.score(X_train_std, y_train))
 print('Test accuracy-l2 regularization:', lr.score(X_test_std, y_test))
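A minimal sketch (not part of the commit) of what fixing random_state buys: with the seed pinned, two independent fits of the same L1-penalized model land on identical coefficients, so repeated runs print the same accuracies. The synthetic data and the explicit solver='liblinear' are assumptions for newer scikit-learn versions; the repo's own data comes from ocr_utils.

# Illustrative sketch only; not the repo's code.
import numpy as np
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression

X, y = make_classification(n_samples=200, n_features=10, random_state=0)

def fit_coefs():
    # solver='liblinear' supports the L1 penalty; newer scikit-learn
    # versions no longer default to it, so it is named explicitly here.
    lr = LogisticRegression(penalty='l1', C=0.1,
                            solver='liblinear', random_state=0)
    lr.fit(X, y)
    return lr.coef_

# With the seed fixed, two independent fits give identical coefficients.
assert np.array_equal(fit_coefs(), fit_coefs())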

p186_grid_search.py  (+1, -1)

@@ -61,7 +61,7 @@
 gs = gs.fit(X_train, y_train)
 
 print('Support Vector Machine Grid Search best score: {}'.format(gs.best_score_))
-print('Support Vector Machine Grid Search best params: {}'.format(gs.best_params_))
+print('Support Vector Machine Grid Search best params: {}'.format(sorted(gs.best_params_.items())))
 
 clf = gs.best_estimator_
 clf.fit(X_train, y_train)
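The sorted(...) wrapper addresses the other source of run-to-run noise: gs.best_params_ is a plain dict, and printing a dict verbatim depends on its key order, which older interpreters (and string hash randomization) do not guarantee to be stable between runs. A small illustrative sketch, with hypothetical parameter names:

# Illustrative sketch only; the parameter names are made up.
best_params = {'clf__C': 100.0, 'clf__kernel': 'rbf', 'clf__gamma': 0.001}

print('params: {}'.format(best_params))                  # order can vary on older interpreters
print('params: {}'.format(sorted(best_params.items())))  # always prints in one canonical order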

p189_nested_cross_validation.py  (+1, -2)

@@ -85,8 +85,7 @@
 
 gs = gs.fit(X_train, y_train)
 print('Support Vector Machine Grid Search best score: {}'.format(gs.best_score_))
-print('Support Vector Machine Grid Search best params: {}\n'.format(gs.best_params_))
-
+print('Support Vector Machine Grid Search best params: {}'.format(sorted(gs.best_params_.items())))
 from sklearn.tree import DecisionTreeClassifier
 gs = GridSearchCV(estimator=DecisionTreeClassifier(random_state=0),
                   param_grid=[{'max_depth': [1, 2, 3, 4, 5, 6, 7, None]}],

p193_model_precision_recall.py  (+1, -2)

@@ -93,6 +93,5 @@
 n_jobs=-1)
 gs = gs.fit(X_train, y_train)
 print('\nGrid Search f1 scoring best score: {}'.format(gs.best_score_))
-print('Grid Search f1 scoring best params: {}'.format(gs.best_params_))
-
+print('Grid Search f1 scoring best params: {}'.format(sorted(gs.best_params_.items())))
 print ('\n########################### No Errors ####################################')

p206_majority_vote_classifier.py  (+5, -5)

@@ -48,7 +48,7 @@
 from sklearn.base import clone
 from sklearn.pipeline import _name_estimators
 import numpy as np
-import ocr_utils
+import ocr_utils
 from sklearn.cross_validation import train_test_split
 from sklearn.cross_validation import cross_val_score
 from sklearn.linear_model import LogisticRegression
@@ -326,9 +326,9 @@ def get_params(self, deep=True):
 grid.fit(X_train, y_train)
 
 for params, mean_score, scores in grid.grid_scores_:
-    print("%0.3f+/-%0.2f %r"
-          % (mean_score, scores.std() / 2, params))
-print('\nBest parameters: %s' % grid.best_params_)
-print('Best Accuracy: %.2f' % grid.best_score_)
+    print("%0.6f+/-%0.6f %r"
+          % (mean_score, scores.std() / 2, sorted(params.items())))
+print('\nBest parameters: %s' % sorted(grid.best_params_.items()))
+print('Best Accuracy: %.6f' % grid.best_score_)
 
 print ('\n########################### No Errors ####################################')

p44_adaline_sgd.py  (+1, -1)

@@ -163,7 +163,7 @@ def predict(self, X):
 X_std = np.copy(X)
 X_std[:,0] = (X[:,0] - X[:,0].mean()) / X[:,0].std()
 X_std[:,1] = (X[:,1] - X[:,1].mean()) / X[:,1].std()
-ada = AdalineSGD(n_iter=15, eta=0.01)
+ada = AdalineSGD(n_iter=15, eta=0.01, random_state = 1)
 ada.fit(X_std, y)
 
 ocr_utils.plot_decision_regions(X=X_std,
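The AdalineSGD change follows the same idea as the LogisticRegression one: a stochastic learner shuffles the training data each epoch, and seeding that shuffle makes the sequence of weight updates, and hence the final weights, identical from run to run. A minimal sketch of the pattern, assuming the class draws its permutation from a NumPy RNG seeded by random_state (the repo's actual implementation is not shown in this diff):

# Illustrative sketch of a seeded, shuffle-based Adaline SGD; not the repo's class.
import numpy as np

class ShuffledSGDSketch:
    def __init__(self, n_iter=15, eta=0.01, random_state=1):
        self.n_iter = n_iter
        self.eta = eta
        self.rng = np.random.RandomState(random_state)  # fixed seed => reproducible shuffles

    def fit(self, X, y):
        self.w_ = np.zeros(X.shape[1] + 1)
        for _ in range(self.n_iter):
            order = self.rng.permutation(len(y))         # same permutation sequence every run
            for xi, target in zip(X[order], y[order]):
                error = target - (np.dot(xi, self.w_[1:]) + self.w_[0])
                self.w_[1:] += self.eta * xi * error     # Adaline weight update
                self.w_[0] += self.eta * error
        return self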
