@@ -20,15 +20,15 @@ struct MyMLModel {
   var weight1, weight2: Tensor<Float>
   var bias1, bias2: Tensor<Float>

-  func applied(to input: Tensor<Float>) -> Tensor<Float> {
+  func call(_ input: Tensor<Float>) -> Tensor<Float> {
     let h = relu(input • weight1 + bias1)
     return sigmoid(h • weight2 + bias2)
   }
 }

 let model = MyMLModel(...)
 let input = Tensor<Float>([0.2, 0.4])
-print(model.applied(to: input))
+print(model(input))
 ```

 Here are some additional rules about models and parameters:
@@ -187,7 +187,7 @@ struct DenseLayer: KeyPathIterable {
   var bias: Tensor<Float>
   var activation: (Tensor<Float>) -> (Tensor<Float>) = relu

-  func applied(to input: Tensor<Float>) -> Tensor<Float> {
+  func call(_ input: Tensor<Float>) -> Tensor<Float> {
     return activation(matmul(input, weight) + bias)
   }
 }
@@ -325,7 +325,7 @@ struct DenseLayer: KeyPathIterable, Differentiable {
   @noDerivative var activation: @differentiable (Tensor<Float>) -> Tensor<Float> = relu

   @differentiable
-  func applied(to input: Tensor<Float>) -> Tensor<Float> {
+  func call(_ input: Tensor<Float>) -> Tensor<Float> {
     return activation(matmul(input, weight) + bias)
   }
 }
@@ -368,7 +368,7 @@ class SGD<Model, Scalar: TensorFlowFloatingPoint>
 // Example optimizer usage.
 var dense = DenseLayer(weight: [[1, 1], [1, 1]], bias: [1, 1])
 let input = Tensor<Float>(ones: [2, 2])
-let 𝛁dense = dense.gradient { dense in dense.applied(to: input) }
+let 𝛁dense = dense.gradient { dense in dense(input) }

 let optimizer = SGD<DenseLayer, Float>()
 optimizer.update(&dense.allDifferentiableVariables, with: 𝛁dense)
@@ -472,9 +472,9 @@ struct Classifier: Layer {
     l2 = Dense<Float>(inputSize: hiddenSize, outputSize: 1, activation: relu)
   }
   @differentiable
-  func applied(to input: Tensor<Float>) -> Tensor<Float> {
-    let h1 = l1.applied(to: input)
-    return l2.applied(to: h1)
+  func call(_ input: Tensor<Float>) -> Tensor<Float> {
+    let h1 = l1(input)
+    return l2(h1)
   }
 }
 var classifier = Classifier(hiddenSize: 4)
@@ -484,7 +484,7 @@ let y: Tensor<Float> = [[0], [1], [1], [0]]

 for _ in 0..<3000 {
   let 𝛁model = classifier.gradient { classifier -> Tensor<Float> in
-    let ŷ = classifier.applied(to: x)
+    let ŷ = classifier(x)
     return meanSquaredError(predicted: ŷ, expected: y)
   }
   // Parameter optimization here!
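
For context on the rename above: a type that declares a suitable `call` method can be applied with function-call syntax, which is why `model.applied(to: input)` becomes `model(input)`. Below is a minimal sketch of that callable pattern; the `Doubler` type is purely illustrative, and it uses stock Swift's `callAsFunction` spelling (standardized in SE-0253) rather than the `call` spelling assumed by the Swift for TensorFlow toolchain this change targets.

```swift
// Minimal sketch of the callable pattern; `Doubler` is a hypothetical example type.
// Stock Swift (5.2+) spells the hook `callAsFunction`; the toolchain targeted by
// this change spells the same hook `call`.
struct Doubler {
    var factor: Double

    // Invoked when an instance is used with function-call syntax, e.g. `twice(3)`.
    func callAsFunction(_ x: Double) -> Double {
        return factor * x
    }
}

let twice = Doubler(factor: 2)
print(twice(3))  // prints 6.0
```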