
Commit 19c2fca

neild0 authored and rxwei committed
Updated technical documents to use the new Layer protocol. (#189)
1 parent 3f4f2de commit 19c2fca

3 files changed: +15 -15 lines changed

docs/DifferentiableFunctions.md (+4 -4)
@@ -231,7 +231,7 @@ specify the same attribute. This enables generic code using differentiation
 defined in terms of protocol requirements.
 
 Here is an example of a neural network `Layer` protocol that defines a
-`@differentiable` method called `applied(to:)`. As shown, the `applied(to:)`
+`@differentiable` method called `call(_:)`. As shown, the `call(_:)`
 method can be differentiated in a `Layer` protocol extension, even though it is
 not a concrete method.
 
@@ -246,7 +246,7 @@ protocol Layer: Differentiable {
     associatedtype Output: Differentiable
     /// Returns the output obtained from applying the layer to the given input.
     @differentiable
-    func applied(to input: Input) -> Output
+    func call(_ input: Input) -> Output
 }
 
 extension Layer {
@@ -262,7 +262,7 @@ extension Layer {
         backpropagator: (_ direction: Output.CotangentVector)
             -> (layerGradient: CotangentVector, inputGradient: Input.CotangentVector)) {
         let (out, pullback) = valueWithPullback(at: input) { layer, input in
-            return layer.applied(to: input)
+            return layer(input)
         }
         return (out, pullback)
     }
@@ -274,7 +274,7 @@ struct DenseLayer: Layer {
     var bias: Tensor<Float>
 
     @differentiable
-    func applied(to input: Tensor<Float>) -> Tensor<Float> {
+    func call(_ input: Tensor<Float>) -> Tensor<Float> {
        return matmul(input, weight) + bias
     }
 }
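Below is a hedged sketch of how the renamed requirement reads at a use site. It assumes the Swift for TensorFlow toolchain this commit targets, where declaring a method named `call` lets a conforming value be applied with function-call syntax (`layer(input)`). The generic helper `prediction(from:for:)` is hypothetical and only illustrates that the `@differentiable` requirement still composes with generic code; `Layer` and `DenseLayer` are the definitions from the diff above.

```swift
// Sketch only: relies on the `Layer` and `DenseLayer` definitions shown above,
// plus a toolchain where a `call(_:)` method makes the conforming type callable.
func prediction<L: Layer>(from layer: L, for input: L.Input) -> L.Output {
    // Invokes the protocol requirement `call(_:)` through function-call syntax.
    return layer(input)
}

// Differentiating through the renamed requirement, mirroring the
// `gradient(at:in:)` example in docs/DifferentiableTypes.md.
let dense = DenseLayer(weight: [[1, 1], [1, 1]], bias: [0, 0])
let 𝛁dense = gradient(at: dense) { dense in dense([[3, 3]]).sum() }
```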

docs/DifferentiableTypes.md (+2 -2)
@@ -77,14 +77,14 @@ struct DenseLayer: Differentiable {
     // The compiler synthesizes all `Differentiable` protocol requirements, adding only properties
     // not marked with `@noDerivative` to associated tangent space types.
 
-    func applied(to input: Tensor<Float>) -> Tensor<Float> {
+    func call(_ input: Tensor<Float>) -> Tensor<Float> {
         return matmul(input, weight) + bias
     }
 }
 
 // Differential operators like `gradient(at:in:)` just work!
 let dense = DenseLayer(weight: [[1, 1], [1, 1]], bias: [0, 0])
-let 𝛁dense = gradient(at: dense) { dense in dense.applied(to: [[3, 3]]).sum() }
+let 𝛁dense = gradient(at: dense) { dense in dense([[3, 3]]).sum() }
 
 dump(𝛁dense)
 // ▿ DenseLayer.AllDifferentiableVariables

docs/ParameterOptimization.md (+9 -9)
@@ -20,15 +20,15 @@ struct MyMLModel {
     var weight1, weight2: Tensor<Float>
     var bias1, bias2: Tensor<Float>
 
-    func applied(to input: Tensor<Float>) {
+    func call(_ input: Tensor<Float>) {
         let h = relu(input • weight1 + bias1)
         return sigmoid(h • weight2 + bias2)
     }
 }
 
 let model = MyMLModel(...)
 let input = Tensor<Float>([0.2, 0.4])
-print(model.applied(to: input))
+print(model(input))
 ```
 
 Here are some additional rules about models and parameters:
@@ -187,7 +187,7 @@ struct DenseLayer: KeyPathIterable {
     var bias: Tensor<Float>
     var activation: (Tensor<Float>) -> (Tensor<Float>) = relu
 
-    func applied(to input: Tensor<Float>) -> Tensor<Float> {
+    func call(_ input: Tensor<Float>) -> Tensor<Float> {
         return activation(matmul(input, weight) + bias)
     }
 
@@ -325,7 +325,7 @@ struct DenseLayer: KeyPathIterable, Differentiable {
     @noDerivative var activation: @differentiable (Tensor<Float>) -> Tensor<Float> = relu
 
     @differentiable
-    func applied(to input: Tensor<Float>) -> Tensor<Float> {
+    func call(_ input: Tensor<Float>) -> Tensor<Float> {
         return activation(matmul(input, weight) + bias)
     }
 }
@@ -368,7 +368,7 @@ class SGD<Model, Scalar: TensorFlowFloatingPoint>
 // Example optimizer usage.
 var dense = DenseLayer(weight: [[1, 1], [1, 1]], bias: [1, 1])
 let input = Tensor<Float>(ones: [2, 2])
-let 𝛁dense = dense.gradient { dense in dense.applied(to: input) }
+let 𝛁dense = dense.gradient { dense in dense(input) }
 
 let optimizer = SGD<DenseLayer, Float>()
 optimizer.update(&dense.allDifferentiableVariables, with: 𝛁dense)
@@ -472,9 +472,9 @@ struct Classifier: Layer {
         l2 = Dense<Float>(inputSize: hiddenSize, outputSize: 1, activation: relu)
     }
     @differentiable
-    func applied(to input: Tensor<Float>) -> Tensor<Float> {
-        let h1 = l1.applied(to: input)
-        return l2.applied(to: h1)
+    func call(_ input: Tensor<Float>) -> Tensor<Float> {
+        let h1 = l1(input)
+        return l2(h1)
     }
 }
 var classifier = Classifier(hiddenSize: 4)
@@ -484,7 +484,7 @@ let y: Tensor<Float> = [[0], [1], [1], [0]]
 
 for _ in 0..<3000 {
     let 𝛁model = classifier.gradient { classifier -> Tensor<Float> in
-        let ŷ = classifier.applied(to: x)
+        let ŷ = classifier(x)
         return meanSquaredError(predicted: ŷ, expected: y)
     }
     // Parameter optimization here!
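Reading the last two hunks together, here is a hedged sketch of one complete optimization step under the new spelling. `Classifier`, `SGD`, `meanSquaredError(predicted:expected:)`, and the inputs `x` and `y` are the ones used in docs/ParameterOptimization.md; pairing `SGD` with `Classifier` and updating `classifier.allDifferentiableVariables` are assumptions made by analogy with the `DenseLayer` optimizer example above, not code from the document.

```swift
// Sketch: gradient computation plus parameter update for the Classifier above,
// applying the model as `classifier(x)` rather than `classifier.applied(to: x)`.
var classifier = Classifier(hiddenSize: 4)
let optimizer = SGD<Classifier, Float>()   // assumed by analogy with SGD<DenseLayer, Float>

for _ in 0..<3000 {
    // Differentiate the mean squared error with respect to the model's parameters.
    let 𝛁model = classifier.gradient { classifier -> Tensor<Float> in
        let ŷ = classifier(x)
        return meanSquaredError(predicted: ŷ, expected: y)
    }
    // Apply the gradients in place (assumed, by analogy with the DenseLayer example).
    optimizer.update(&classifier.allDifferentiableVariables, with: 𝛁model)
}
```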
