
Commit 8580a3e

Add numerical tests for optimizers (#732)
Add test(SGD|RMSProp|Adam|AdaDelta)Numerical
Parent: 12e281d

1 file changed: Tests/TensorFlowTests/OptimizerTests.swift (+101, −0)
The commit appends a shared fixture and four numerical tests to `OptimizerTests.swift`, after the existing `convergenceTest` cases. The fixture provides a small parameter tensor and a fixed gradient:

```swift
struct ModelNumerical: Differentiable, KeyPathIterable {
    var tensor = Tensor<Float>([0, 1, 2])
    static let grad = ModelNumerical.TangentVector(tensor: [0.0, 0.1, 0.2])
}
```
`testSGDNumerical` takes one optimizer step, checks the parameters against values produced by Keras, then takes ten more steps and checks again:

```swift
func testSGDNumerical() {
    // The expected values were computed using the following Python code:
    // ```
    // import tensorflow as tf
    // var = tf.Variable([0, 1, 2], dtype=tf.float32)
    // grad = tf.Variable([0, 0.1, 0.2], dtype=tf.float32)
    // optimizer = tf.keras.optimizers.SGD()
    // optimizer.apply_gradients(list(zip([grad], [var])))
    // print(var.read_value())
    // for i in range(10):
    //     optimizer.apply_gradients(list(zip([grad], [var])))
    // print(var.read_value())
    // ```
    var model = ModelNumerical()
    let opt = SGD(for: model)
    opt.update(&model, along: ModelNumerical.grad)
    XCTAssertEqual(model.tensor, [0, 0.999, 1.998])
    for _ in 0..<10 {
        opt.update(&model, along: ModelNumerical.grad)
    }
    XCTAssertEqual(model.tensor, [0, 0.98900014, 1.9780003])
}
```
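A quick hand check of the first assertion (not part of the commit; assuming SGD's default learning rate η = 0.01, which matches Keras): the update is elementwise

$$\theta_1 = \theta_0 - \eta g = [0, 1, 2] - 0.01 \cdot [0, 0.1, 0.2] = [0, 0.999, 1.998].$$

After all 11 steps, θ = [0, 1 − 11·0.001, 2 − 11·0.002] = [0, 0.989, 1.978], which the second assertion matches up to float32 rounding.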
`testRMSPropNumerical` does the same for RMSProp, passing ε = 1e-7 to match the Keras default:

```swift
func testRMSPropNumerical() {
    // The expected values were computed using the following Python code:
    // ```
    // import tensorflow as tf
    // var = tf.Variable([0, 1, 2], dtype=tf.float32)
    // grad = tf.Variable([0, 0.1, 0.2], dtype=tf.float32)
    // optimizer = tf.keras.optimizers.RMSprop()
    // optimizer.apply_gradients(list(zip([grad], [var])))
    // print(var.read_value())
    // for i in range(10):
    //     optimizer.apply_gradients(list(zip([grad], [var])))
    // print(var.read_value())
    // ```
    var model = ModelNumerical()
    let opt = RMSProp(for: model, epsilon: 1e-7)
    opt.update(&model, along: ModelNumerical.grad)
    XCTAssertEqual(model.tensor, [0, 0.99683774, 1.9968377])
    for _ in 0..<10 {
        opt.update(&model, along: ModelNumerical.grad)
    }
    XCTAssertEqual(model.tensor, [0, 0.9814604, 1.9814601])
}
```
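Hand check for the first step (not part of the commit; assuming Keras defaults η = 0.001, ρ = 0.9): the squared-gradient accumulator starts at zero, so after one step v = (1 − ρ)g² = 0.1g², and with ε negligible here

$$\Delta\theta = \frac{\eta\, g}{\sqrt{0.1\, g^2} + \varepsilon} \approx \frac{0.001}{\sqrt{0.1}} \approx 0.0031623$$

for every nonzero component, independent of the gradient's magnitude. That gives [0, 0.9968377, 1.9968377], matching the first assertion.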
`testAdamNumerical` repeats the pattern for Adam:

```swift
func testAdamNumerical() {
    // The expected values were computed using the following Python code:
    // ```
    // import tensorflow as tf
    // var = tf.Variable([0, 1, 2], dtype=tf.float32)
    // grad = tf.Variable([0, 0.1, 0.2], dtype=tf.float32)
    // optimizer = tf.keras.optimizers.Adam()
    // optimizer.apply_gradients(list(zip([grad], [var])))
    // print(var.read_value())
    // for i in range(10):
    //     optimizer.apply_gradients(list(zip([grad], [var])))
    // print(var.read_value())
    // ```
    var model = ModelNumerical()
    let opt = Adam(for: model, epsilon: 1e-7)
    opt.update(&model, along: ModelNumerical.grad)
    XCTAssertEqual(model.tensor, [0, 0.999, 1.9990001])
    for _ in 0..<10 {
        opt.update(&model, along: ModelNumerical.grad)
    }
    XCTAssertEqual(model.tensor, [0, 0.98900014, 1.9889997])
}
```
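Hand check for the first step (not part of the commit; assuming Keras defaults η = 0.001, β₁ = 0.9, β₂ = 0.999): after one step the bias-corrected moments are m̂ = g and v̂ = g², so

$$\Delta\theta = \frac{\eta\, \hat{m}}{\sqrt{\hat{v}} + \varepsilon} = \frac{\eta\, g}{|g| + \varepsilon} \approx \eta \cdot \operatorname{sign}(g) = 0.001$$

per nonzero component, giving [0, 0.999, 1.999] as asserted (up to float32 rounding).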
`testAdaDeltaNumerical` overrides both the learning rate and ε so the Swift optimizer lines up with Keras's Adadelta defaults:

```swift
func testAdaDeltaNumerical() {
    // The expected values were computed using the following Python code:
    // ```
    // import tensorflow as tf
    // var = tf.Variable([0, 1, 2], dtype=tf.float32)
    // grad = tf.Variable([0, 0.1, 0.2], dtype=tf.float32)
    // optimizer = tf.keras.optimizers.Adadelta()
    // optimizer.apply_gradients(list(zip([grad], [var])))
    // print(var.read_value())
    // for i in range(10):
    //     optimizer.apply_gradients(list(zip([grad], [var])))
    // print(var.read_value())
    // ```
    var model = ModelNumerical()
    let opt = AdaDelta(for: model, learningRate: 1e-3, epsilon: 1e-7)
    opt.update(&model, along: ModelNumerical.grad)
    XCTAssertEqual(model.tensor, [0, 0.99999857, 1.9999986])
    for _ in 0..<10 {
        opt.update(&model, along: ModelNumerical.grad)
    }
    XCTAssertEqual(model.tensor, [0, 0.99998385, 1.9999841])
}
```
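Hand check for the first step (not part of the commit; assuming Keras defaults ρ = 0.95, η = 0.001): with both accumulators starting at zero, v = (1 − ρ)g² = 0.05g² and u = 0, so

$$\Delta\theta = \eta\, \frac{\sqrt{u + \varepsilon}}{\sqrt{v + \varepsilon}}\, g \approx \eta\, \sqrt{\frac{\varepsilon}{0.05\, g^2}}\, g = \eta\, \sqrt{\varepsilon / 0.05} \cdot \operatorname{sign}(g) \approx 1.414 \times 10^{-6},$$

which explains both the tiny first-step movement (0.99999857 ≈ 1 − 1.4e-6) and why the two nonzero components move by the same amount.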
Finally, the four new tests are registered in the class's `allTests` manifest:

```swift
static var allTests = [
    ("testSGD", testSGD),
    ("testRMSProp", testRMSProp),
    // … existing entries elided by the diff …
    ("testAdaMax", testAdaMax),
    ("testAMSGrad", testAMSGrad),
    ("testRAdam", testRAdam),
    ("testSGDNumerical", testSGDNumerical),
    ("testRMSPropNumerical", testRMSPropNumerical),
    ("testAdamNumerical", testAdamNumerical),
    ("testAdaDeltaNumerical", testAdaDeltaNumerical),
]
```
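The `allTests` manifest is what XCTest typically uses for test discovery on Linux; with SwiftPM, these cases can be run directly via `swift test --filter OptimizerTests`.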
