Skip to content

Commit 08c6802

Browse files
committed
Remove extra lines and fix lint errors
1 parent 7f759a6 commit 08c6802

File tree

3 files changed

+10
-12
lines changed

3 files changed

+10
-12
lines changed

lib/node_modules/@stdlib/ml/online-binary-classification/test/test.validate.js

-1
Original file line numberDiff line numberDiff line change
@@ -37,7 +37,6 @@ tape( 'the function returns an error if provided an options argument which is no
3737
t.end();
3838
});
3939

40-
4140
tape( 'the function returns an error if provided a `learningRate` option which is not a string', function test( t ) {
4241
var values;
4342
var err;

lib/node_modules/@stdlib/ml/online-sgd-regression/test/test.loss.huber.js

+3-3
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,4 @@
1+
/* eslint-disable no-underscore-dangle */
12
'use strict';
23

34
// MODULES //
@@ -27,7 +28,7 @@ tape( 'the sub-gradient of the squared-error loss times the learning rate is add
2728
lambda = 0.0;
2829

2930
weights = new WeightVector( 3, false );
30-
weights.add( [1.0,2.0,3.0] );
31+
weights.add( [ 1.0, 2.0, 3.0 ] );
3132
epsilon = 0.1;
3233
eta = 0.02;
3334

@@ -43,7 +44,6 @@ tape( 'the sub-gradient of the squared-error loss times the learning rate is add
4344
t.end();
4445
});
4546

46-
4747
tape( 'the sub-gradient of the linear loss times the learning rate is added to the weights for absolute errors greater or equal than epsilon (no regularization)', function test( t ) {
4848
var expected;
4949
var weights;
@@ -56,7 +56,7 @@ tape( 'the sub-gradient of the linear loss times the learning rate is added to t
5656
lambda = 0.0;
5757

5858
weights = new WeightVector( 3, false );
59-
weights.add( [1.0,2.0,3.0] );
59+
weights.add( [ 1.0, 2.0, 3.0 ] );
6060
epsilon = 0.1;
6161
eta = 0.02;
6262

lib/node_modules/@stdlib/ml/online-sgd-regression/test/test.validate.js

+7-8
Original file line numberDiff line numberDiff line change
@@ -26,7 +26,7 @@ tape( 'the function returns an error if provided an options argument which is no
2626
undefined,
2727
null,
2828
NaN,
29-
function(){},
29+
function noop() {},
3030
[]
3131
];
3232

@@ -37,7 +37,6 @@ tape( 'the function returns an error if provided an options argument which is no
3737
t.end();
3838
});
3939

40-
4140
tape( 'the function returns an error if provided a `learningRate` option which is not a string', function test( t ) {
4241
var values;
4342
var err;
@@ -50,7 +49,7 @@ tape( 'the function returns an error if provided a `learningRate` option which i
5049
NaN,
5150
[],
5251
{},
53-
function(){}
52+
function noop() {}
5453
];
5554

5655
for ( i = 0; i < values.length; i++ ) {
@@ -74,7 +73,7 @@ tape( 'the function returns an error if provided a `loss` option which is not a
7473
NaN,
7574
[],
7675
{},
77-
function(){}
76+
function noop() {}
7877
];
7978

8079
for ( i = 0; i < values.length; i++ ) {
@@ -99,7 +98,7 @@ tape( 'the function returns an error if provided an `intercept` option which is
9998
NaN,
10099
[],
101100
{},
102-
function(){}
101+
function noop() {}
103102
];
104103

105104
for ( i = 0; i < values.length; i++ ) {
@@ -125,7 +124,7 @@ tape( 'the function returns an error if provided an `eta0` option which is not a
125124
NaN,
126125
[],
127126
{},
128-
function(){}
127+
function noop() {}
129128
];
130129

131130
for ( i = 0; i < values.length; i++ ) {
@@ -151,7 +150,7 @@ tape( 'the function returns an error if provided an `epsilon` option which is no
151150
NaN,
152151
[],
153152
{},
154-
function(){}
153+
function noop() {}
155154
];
156155

157156
for ( i = 0; i < values.length; i++ ) {
@@ -176,7 +175,7 @@ tape( 'the function returns an error if provided a `lambda` option which is not
176175
NaN,
177176
[],
178177
{},
179-
function(){}
178+
function noop() {}
180179
];
181180

182181
for ( i = 0; i < values.length; i++ ) {

0 commit comments

Comments (0)