Skip to content
This repository was archived by the owner on Jul 1, 2023. It is now read-only.

Commit da8c8d6

Browse files
authored
Fix deprecation warnings for method-style differential operators. (#577)
Missing backslash in multiline string.
1 parent efb4596 commit da8c8d6

File tree

5 files changed

+18
-18
lines changed

5 files changed

+18
-18
lines changed

Sources/TensorFlow/Core/DifferentialOperators.swift

+4-4
Original file line number | Diff line number | Diff line change
@@ -18,7 +18,7 @@
1818

1919
public extension Differentiable {
2020
@available(*, deprecated, message: """
21-
Method-style differential operators are deprecated and will be removed; use top-level
21+
Method-style differential operators are deprecated and will be removed; use top-level \
2222
function 'TensorFlow.gradient(at:in:)' instead
2323
""")
2424
@inlinable
@@ -29,7 +29,7 @@ public extension Differentiable {
2929
}
3030

3131
@available(*, deprecated, message: """
32-
Method-style differential operators are deprecated and will be removed; use top-level
32+
Method-style differential operators are deprecated and will be removed; use top-level \
3333
function 'TensorFlow.valueWithGradient(at:in:)' instead
3434
""")
3535
@inlinable
@@ -45,7 +45,7 @@ public extension Differentiable {
4545
}
4646

4747
@available(*, deprecated, message: """
48-
Method-style differential operators are deprecated and will be removed; use top-level
48+
Method-style differential operators are deprecated and will be removed; use top-level \
4949
function 'TensorFlow.gradient(at:_:in:)' instead
5050
""")
5151
@inlinable
@@ -57,7 +57,7 @@ public extension Differentiable {
5757
}
5858

5959
@available(*, deprecated, message: """
60-
Method-style differential operators are deprecated and will be removed; use top-level
60+
Method-style differential operators are deprecated and will be removed; use top-level \
6161
function 'TensorFlow.valueWithGradient(at:_:in:)' instead
6262
""")
6363
@inlinable

Tests/TensorFlowTests/LayerTests.swift

+4-4
Original file line number | Diff line number | Diff line change
@@ -48,7 +48,7 @@ final class LayerTests: XCTestCase {
4848
expected: y)
4949
withTensorLeakChecking {
5050
for _ in 0..<10 {
51-
let 𝛁model = model.gradient { model -> Tensor<Float> in
51+
let 𝛁model = gradient(at: model) { model -> Tensor<Float> in
5252
meanSquaredError(
5353
predicted: model(x).squeezingShape(at: 1),
5454
expected: y)
@@ -1142,7 +1142,7 @@ final class LayerTests: XCTestCase {
11421142
let rnn = RNN(SimpleRNNCell<Float>(inputSize: 4, hiddenSize: 4,
11431143
seed: (0xFeed, 0xBeef)))
11441144
withTensorLeakChecking {
1145-
let (outputs, _) = rnn.valueWithPullback(at: inputs) { rnn, inputs in
1145+
let (outputs, _) = valueWithPullback(at: rnn, inputs) { rnn, inputs in
11461146
return rnn(inputs)
11471147
}
11481148
assertEqual(
@@ -1173,7 +1173,7 @@ final class LayerTests: XCTestCase {
11731173
let inputs: [Tensor<Float>] = Array(repeating: x, count: 4)
11741174
let rnn = RNN(LSTMCell<Float>(inputSize: 4, hiddenSize: 4))
11751175
withTensorLeakChecking {
1176-
let (outputs, _) = rnn.valueWithPullback(at: inputs) { rnn, inputs in
1176+
let (outputs, _) = valueWithPullback(at: rnn, inputs) { rnn, inputs in
11771177
return rnn(inputs)
11781178
}
11791179
assertEqual(
@@ -1204,7 +1204,7 @@ final class LayerTests: XCTestCase {
12041204
biasInitializer: zeros())
12051205
)
12061206
withTensorLeakChecking {
1207-
let (outputs, _) = rnn.valueWithPullback(at: inputs) { rnn, inputs in
1207+
let (outputs, _) = valueWithPullback(at: rnn, inputs) { rnn, inputs in
12081208
return rnn(inputs)
12091209
}
12101210
XCTAssertEqual(outputs.map { $0.hidden },

Tests/TensorFlowTests/OperatorTests/BasicTests.swift

+8-8
Original file line number | Diff line number | Diff line change
@@ -76,8 +76,8 @@ final class BasicOperatorTests: XCTestCase {
7676
func testVJPPadded() {
7777
let x = Tensor<Float>(ones: [3, 2])
7878
let target = Tensor<Float>([[2, 2], [2, 2], [2, 2]])
79-
let grads = x.gradient { a -> Tensor<Float> in
80-
let paddedTensor = a.padded(forSizes: [(1, 0), (0, 1)], with: 3.0)
79+
let grads = gradient(at: x) { x -> Tensor<Float> in
80+
let paddedTensor = x.padded(forSizes: [(1, 0), (0, 1)], with: 3.0)
8181
return (paddedTensor * paddedTensor).sum()
8282
}
8383
XCTAssertEqual(grads, target)
@@ -86,8 +86,8 @@ final class BasicOperatorTests: XCTestCase {
8686
func testVJPPaddedConstant() {
8787
let x = Tensor<Float>(ones: [3, 2])
8888
let target = Tensor<Float>([[2, 2], [2, 2], [2, 2]])
89-
let grads = x.gradient { a -> Tensor<Float> in
90-
let paddedTensor = a.padded(forSizes: [(1, 0), (0, 1)], mode: .constant(3.0))
89+
let grads = gradient(at: x) { x -> Tensor<Float> in
90+
let paddedTensor = x.padded(forSizes: [(1, 0), (0, 1)], mode: .constant(3.0))
9191
return (paddedTensor * paddedTensor).sum()
9292
}
9393
XCTAssertEqual(grads, target)
@@ -96,8 +96,8 @@ final class BasicOperatorTests: XCTestCase {
9696
func testVJPPaddedReflect() {
9797
let x = Tensor<Float>([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
9898
let target = Tensor<Float>([[4, 8, 6], [32, 40, 24], [56, 64, 36]])
99-
let grads = x.gradient { a -> Tensor<Float> in
100-
let paddedTensor = a.padded(forSizes: [(2, 0), (0, 2)], mode: .reflect)
99+
let grads = gradient(at: x) { x -> Tensor<Float> in
100+
let paddedTensor = x.padded(forSizes: [(2, 0), (0, 2)], mode: .reflect)
101101
return (paddedTensor * paddedTensor).sum()
102102
}
103103
XCTAssertEqual(grads, target)
@@ -106,8 +106,8 @@ final class BasicOperatorTests: XCTestCase {
106106
func testVJPPaddedSymmetric() {
107107
let x = Tensor<Float>([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
108108
let target = Tensor<Float>([[4, 16, 24], [16, 40, 48], [14, 32, 36]])
109-
let grads = x.gradient { a -> Tensor<Float> in
110-
let paddedTensor = a.padded(forSizes: [(2, 0), (0, 2)], mode: .symmetric)
109+
let grads = gradient(at: x) { x -> Tensor<Float> in
110+
let paddedTensor = x.padded(forSizes: [(2, 0), (0, 2)], mode: .symmetric)
111111
return (paddedTensor * paddedTensor).sum()
112112
}
113113
XCTAssertEqual(grads, target)

Tests/TensorFlowTests/SequentialTests.swift

+1-1
Original file line number | Diff line number | Diff line change
@@ -48,7 +48,7 @@ final class SequentialTests: XCTestCase {
4848
Context.local.learningPhase = .training
4949
withTensorLeakChecking {
5050
for _ in 0..<1000 {
51-
let 𝛁model = model.gradient { model -> Tensor<Float> in
51+
let 𝛁model = gradient(at: model) { model -> Tensor<Float> in
5252
let ŷ = model(x)
5353
return meanSquaredError(predicted: ŷ, expected: y)
5454
}

Tests/TensorFlowTests/TrivialModelTests.swift

+1-1
Original file line number | Diff line number | Diff line change
@@ -45,7 +45,7 @@ final class TrivialModelTests: XCTestCase {
4545
Context.local.learningPhase = .training
4646
withTensorLeakChecking {
4747
for _ in 0..<3000 {
48-
let 𝛁model = classifier.gradient { classifier -> Tensor<Float> in
48+
let 𝛁model = gradient(at: classifier) { classifier -> Tensor<Float> in
4949
let ŷ = classifier(x)
5050
return meanSquaredError(predicted: ŷ, expected: y)
5151
}

0 commit comments

Comments
 (0)