This repository was archived by the owner on Jul 1, 2023. It is now read-only.

Commit bb62115

Auto-format via swift-format (#870)

asuhan and dan-zheng authored
Co-authored-by: Dan Zheng <danielzheng@google.com>
1 parent 38608ef

31 files changed: +404 -384 lines
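Changes like these come from running the swift-format tool over the package sources. A typical invocation (hypothetical; the exact command and configuration used for this commit are not recorded on this page) would be:

  swift-format --in-place --recursive Sources/ Tests/

swift-format rewrites each file in place according to the repository's formatting configuration, which is what produces the mechanical edits below: attribute-spacing fixes, re-indentation, and argument-list reflowing.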

Package.swift (+1 -1)

@@ -20,7 +20,7 @@ import PackageDescription
 let package = Package(
   name: "TensorFlow",
   platforms: [
-    .macOS(.v10_13),
+    .macOS(.v10_13)
   ],
   products: [
     .library(

Sources/TensorFlow/Core/MixedPrecision.swift (+1 -1)

@@ -157,7 +157,7 @@ extension Tensor {
   }

   /// Promotes a scalar to a tensor with the same device and precision as the given tensor.
-  @differentiable( where Scalar: TensorFlowFloatingPoint)
+  @differentiable(where Scalar: TensorFlowFloatingPoint)
   public init(_ value: Scalar, deviceAndPrecisionLike tensor: Tensor) {
     let device = tensor.device
     let tmp = Tensor(value, on: device)
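The same fix, dropping the stray space after `@differentiable(`, appears in most of the files below. A minimal sketch of the formatted style (a hypothetical method, not code from this repository):

  import TensorFlow

  extension Tensor {
    // swift-format style: no space between `(` and `where`.
    @differentiable(where Scalar: TensorFlowFloatingPoint)
    func doubled() -> Tensor {
      self + self
    }
  }

The `where` clause limits differentiability to floating-point scalar types, since derivatives are meaningless for integer or boolean tensors.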

Sources/TensorFlow/Core/Runtime.swift (+21 -21; indentation-only changes)

@@ -347,33 +347,33 @@ extension _ExecutionContext {
   }
 }

-  internal func _trace<In: TensorGroup, Out: TensorGroup>(_ fn: (In) -> Out) -> TFFunction {
-    let useLazyTensor = _ThreadLocalState.useLazyTensor
-    defer { _ThreadLocalState.useLazyTensor = useLazyTensor }
-    _ThreadLocalState.useLazyTensor = true
-    let trace = LazyTensorTraceBuilder.trace(fn)
-    return TFFunction(trace: trace)
-  }
+internal func _trace<In: TensorGroup, Out: TensorGroup>(_ fn: (In) -> Out) -> TFFunction {
+  let useLazyTensor = _ThreadLocalState.useLazyTensor
+  defer { _ThreadLocalState.useLazyTensor = useLazyTensor }
+  _ThreadLocalState.useLazyTensor = true
+  let trace = LazyTensorTraceBuilder.trace(fn)
+  return TFFunction(trace: trace)
+}

-  // Trace the given function to generate a TF graph and return a closure that can be used to launch
-  // the graph.
-  public func _graph<In: TensorGroup, Out: TensorGroup>(
-    _ fn: (In) -> Out,
-    useXLA: Bool = false
-  ) -> (In) -> Out {
-    let tffunc = _trace(fn)
-    return { input in
-      let inputHandles = input._tensorHandles.map { $0._tfeTensorHandle }
-      let outputHandles = tffunc.execute(inputHandles, usingXLA: useXLA)
-      return Out(_handles: outputHandles)
-    }
+// Trace the given function to generate a TF graph and return a closure that can be used to launch
+// the graph.
+public func _graph<In: TensorGroup, Out: TensorGroup>(
+  _ fn: (In) -> Out,
+  useXLA: Bool = false
+) -> (In) -> Out {
+  let tffunc = _trace(fn)
+  return { input in
+    let inputHandles = input._tensorHandles.map { $0._tfeTensorHandle }
+    let outputHandles = tffunc.execute(inputHandles, usingXLA: useXLA)
+    return Out(_handles: outputHandles)
   }
+}

 /// Trace the given function and return the name of the corresponding `TF_Function: In -> Out` that
 /// was created.
 public func _tffunc<In: TensorGroup, Out: TensorGroup>(_ fn: (In) -> Out) -> String {
-    let tffunc = _trace(fn)
-    return tffunc.name
+  let tffunc = _trace(fn)
+  return tffunc.name
 }

 extension _ExecutionContext {
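Given the `_graph` signature above, a traced function is used like an ordinary closure: the trace is built once and the cached TF graph is launched on each call. A hedged usage sketch (the closure and values are illustrative only):

  import TensorFlow

  // Trace the closure once into a TF_Function, then launch the cached graph.
  let addOne = _graph { (x: Tensor<Float>) -> Tensor<Float> in x + 1 }
  let y = addOne(Tensor<Float>([1, 2, 3]))  // executes the traced graph, not the Swift closure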

Sources/TensorFlow/Core/Tensor.swift (+7 -7)

@@ -131,7 +131,7 @@ extension Tensor {
   /// Reshape to scalar.
   /// - Precondition: The tensor has exactly one scalar.
   @inlinable
-  @differentiable( where Scalar: TensorFlowFloatingPoint)
+  @differentiable(where Scalar: TensorFlowFloatingPoint)
   public func scalarized() -> Scalar {
     precondition(
       shape.contiguousSize == 1,
@@ -174,7 +174,7 @@ extension Tensor {
     return handle.makeHostCopy()
   }

-  @differentiable( where Scalar: TensorFlowFloatingPoint)
+  @differentiable(where Scalar: TensorFlowFloatingPoint)
   public var scalars: [Scalar] {
     #if USING_X10_BACKEND
       if handle.backend == .XLA {
@@ -205,7 +205,7 @@ extension Tensor where Scalar: TensorFlowFloatingPoint {

 extension Tensor {
   /// Creates a 0-D tensor from a scalar value.
-  @differentiable( where Scalar: TensorFlowFloatingPoint)
+  @differentiable(where Scalar: TensorFlowFloatingPoint)
   public init(_ value: Scalar, on device: Device = .default) {
     #if USING_X10_BACKEND
       switch device.backend {
@@ -233,7 +233,7 @@ extension Tensor where Scalar: TensorFlowFloatingPoint {
 extension Tensor {
   /// Creates a 1D tensor from scalars.
   @inlinable
-  @differentiable( where Scalar: TensorFlowFloatingPoint)
+  @differentiable(where Scalar: TensorFlowFloatingPoint)
   public init(_ scalars: [Scalar], on device: Device = .default) {
     self.init(shape: [scalars.count], scalars: scalars, on: device)
   }
@@ -266,7 +266,7 @@ extension Tensor {
   ///   - scalars: The scalar contents of the tensor.
   /// - Precondition: The product of the dimensions of the shape must equal the number of scalars.
   @inlinable
-  @differentiable( where Scalar: TensorFlowFloatingPoint)
+  @differentiable(where Scalar: TensorFlowFloatingPoint)
   public init(shape: TensorShape, scalars: [Scalar], on device: Device = .default) {
     precondition(
       shape.contiguousSize == scalars.count,
@@ -677,15 +677,15 @@ extension Tensor: AdditiveArithmetic where Scalar: Numeric {
   /// Adds two tensors and produces their sum.
   /// - Note: `+` supports broadcasting.
   @inlinable
-  @differentiable( where Scalar: TensorFlowFloatingPoint)
+  @differentiable(where Scalar: TensorFlowFloatingPoint)
   public static func + (lhs: Tensor, rhs: Tensor) -> Tensor {
     _Raw.addV2(lhs, rhs)
   }

   /// Subtracts one tensor from another and produces their difference.
   /// - Note: `-` supports broadcasting.
   @inlinable
-  @differentiable( where Scalar: TensorFlowFloatingPoint)
+  @differentiable(where Scalar: TensorFlowFloatingPoint)
   public static func - (lhs: Tensor, rhs: Tensor) -> Tensor {
     _Raw.sub(lhs, rhs)
   }
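As the doc comments note, `+` and `-` broadcast their operands. A small illustration (values are hypothetical):

  import TensorFlow

  let matrix = Tensor<Float>(shape: [2, 2], scalars: [1, 2, 3, 4])
  let row = Tensor<Float>([10, 20])
  let sum = matrix + row  // `row` broadcasts across rows: [[11, 22], [13, 24]]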

Sources/TensorFlow/Core/TensorHandle.swift (+7 -7; indentation-only changes)

@@ -82,7 +82,7 @@ public class TFETensorHandle: _AnyTensorHandle {
     }
   }

-    public var backend: Device.Backend { .TF_EAGER }
+  public var backend: Device.Backend { .TF_EAGER }
 }

 /// `TensorHandle` is the type used by ops. It includes a `Scalar` type, which
@@ -173,12 +173,12 @@ extension TensorHandle {
     get { handle.shape }
   }

-    /// The backend used to dispatch ops.
-    @inlinable
-    public var backend: Device.Backend {
-      @_semantics("autodiff.nonvarying")
-      get { handle.backend }
-    }
+  /// The backend used to dispatch ops.
+  @inlinable
+  public var backend: Device.Backend {
+    @_semantics("autodiff.nonvarying")
+    get { handle.backend }
+  }
 }

 extension TensorHandle {

Sources/TensorFlow/Initializers.swift (+5 -5)

@@ -34,7 +34,7 @@ extension Tensor {
   ///   - repeatedValue: The scalar value to repeat.
   ///   - shape: The dimensions of the tensor.
   @inlinable
-  @differentiable( where Scalar: TensorFlowFloatingPoint)
+  @differentiable(where Scalar: TensorFlowFloatingPoint)
   public init(
     repeating repeatedValue: Scalar, shape: TensorShape,
     on device: Device = .default
@@ -91,7 +91,7 @@ extension Tensor where Scalar: Numeric {

   /// Perform an element-wise conversion from another `Tensor`.
   @inlinable
-  @differentiable( where Scalar: TensorFlowFloatingPoint, OtherScalar: TensorFlowFloatingPoint)
+  @differentiable(where Scalar: TensorFlowFloatingPoint, OtherScalar: TensorFlowFloatingPoint)
   public init<OtherScalar: Numeric>(_ other: Tensor<OtherScalar>) {
     self = _Raw.cast(other)
   }
@@ -114,7 +114,7 @@ extension Tensor where Scalar: TensorFlowFloatingPoint {
 extension Tensor {
   /// Creates a tensor from an array of tensors (which may themselves be scalars).
   @inlinable
-  @differentiable( where Scalar: TensorFlowFloatingPoint)
+  @differentiable(where Scalar: TensorFlowFloatingPoint)
   public init(_ elements: [Tensor]) {
     self = _Raw.pack(elements)
   }
@@ -148,7 +148,7 @@ extension Tensor {
   ///
   /// - Returns: The stacked tensor.
   @inlinable
-  @differentiable( where Scalar: TensorFlowFloatingPoint)
+  @differentiable(where Scalar: TensorFlowFloatingPoint)
   public init(stacking tensors: [Tensor], alongAxis axis: Int = 0) {
     self = _Raw.pack(tensors, axis: Int64(axis))
   }
@@ -186,7 +186,7 @@ extension Tensor {
   ///
   /// - Returns: The concatenated tensor.
   @inlinable
-  @differentiable( where Scalar: TensorFlowFloatingPoint)
+  @differentiable(where Scalar: TensorFlowFloatingPoint)
   public init(concatenating tensors: [Tensor], alongAxis axis: Int = 0) {
     precondition(tensors.count > 0)
     self = _Raw.concatV2(tensors, axis: Tensor<Int32>(Int32(axis), on: tensors.first!.device))

Sources/TensorFlow/Layers/Dropout.swift (+3 -3; whitespace-only changes)

@@ -85,7 +85,7 @@ public struct Dropout<Scalar: TensorFlowFloatingPoint>: ParameterlessLayer {
 @frozen
 public struct AlphaDropout<Scalar: TensorFlowFloatingPoint>: ParameterlessLayer {
   @noDerivative public let probability: Double
-
+
   /// Initializes an `AlphaDropout` layer with a configurable `probability`.
   ///
   /// - Parameter probability: The probability of a node dropping out.
@@ -113,11 +113,11 @@ public struct AlphaDropout<Scalar: TensorFlowFloatingPoint>: ParameterlessLayer
       let b = -a * alpha_p * probability

       // Apply mask
-      var x = input * Tensor(noise)
+      var x = input * Tensor(noise)
       x = x + Scalar(alpha_p) * (1 - Tensor(noise))

       // Do affine transformation
-      return Scalar(a) * x + Scalar(b)
+      return Scalar(a) * x + Scalar(b)
     case .inference:
       return input
     }

Sources/TensorFlow/Layers/Pooling.swift (+9 -5)

@@ -444,8 +444,10 @@ public struct FractionalMaxPool2D<Scalar: TensorFlowFloatingPoint>: Parameterles
   /// A second seed to avoid seed collision
   @noDerivative public let seed2: Int64
   /// Initializes a `FractionalMaxPool` layer with configurable `poolingRatio`.
-  public init(poolingRatio: (Double, Double, Double, Double), pseudoRandom: Bool = false,
-    overlapping: Bool = false, deterministic: Bool = false, seed: Int64 = 0, seed2: Int64 = 0) {
+  public init(
+    poolingRatio: (Double, Double, Double, Double), pseudoRandom: Bool = false,
+    overlapping: Bool = false, deterministic: Bool = false, seed: Int64 = 0, seed2: Int64 = 0
+  ) {
     precondition(
       poolingRatio.0 == 1.0 && poolingRatio.3 == 1.0,
       "Pooling on batch and channels dimensions not supported.")
@@ -468,7 +470,7 @@ public struct FractionalMaxPool2D<Scalar: TensorFlowFloatingPoint>: Parameterles
   public func callAsFunction(_ input: Tensor<Scalar>) -> Tensor<Scalar> {
     fractionalMaxPool2D(
       input,
-      poolingRatio: poolingRatio,
+      poolingRatio: poolingRatio,
       pseudoRandom: pseudoRandom,
       overlapping: overlapping,
       deterministic: deterministic,
@@ -490,8 +492,10 @@ extension FractionalMaxPool2D {
   ///     used when iterating over a FractionalMaxPool2D node in the computation graph.
   ///   - seed: A seed for random number generator.
   ///   - seed2: A second seed to avoid seed collision.
-  public init(poolingRatio: (Double, Double), pseudoRandom: Bool = false,
-    overlapping: Bool = false, deterministic: Bool = false, seed: Int64 = 0, seed2: Int64 = 0) {
+  public init(
+    poolingRatio: (Double, Double), pseudoRandom: Bool = false,
+    overlapping: Bool = false, deterministic: Bool = false, seed: Int64 = 0, seed2: Int64 = 0
+  ) {
     self.init(
       poolingRatio: (1.0, poolingRatio.0, poolingRatio.1, 1.0),
       pseudoRandom: pseudoRandom,
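The reflowed convenience initializer above takes a two-component `poolingRatio` and pads it to the four-component `(1.0, h, w, 1.0)` form internally. A hedged usage sketch (values illustrative):

  import TensorFlow

  // Shrink height and width by a factor of ~1.4; batch and channel ratios stay 1.0.
  let pool = FractionalMaxPool2D<Float>(poolingRatio: (1.4, 1.4), pseudoRandom: true)
  // let output = pool(input)  // input: Tensor<Float> of shape [batch, height, width, channels]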

Sources/TensorFlow/Layers/Recurrent.swift (+1 -1)

@@ -369,7 +369,7 @@ public struct RecurrentLayer<Cell: RecurrentLayerCell>: Layer {
     return timeStepOutputs
   }

-  @differentiable(wrt: (self,inputs,initialState))
+  @differentiable(wrt: (self, inputs, initialState))
   public func call(
     _ inputs: [Cell.TimeStepInput],
     initialState: Cell.State
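`@differentiable(wrt:)` names the parameters to differentiate with respect to; the fix only adds spaces after the commas in that tuple. The same pattern in a minimal, hypothetical layer (not from this repository):

  import TensorFlow

  struct Scale: Differentiable {
    var factor: Tensor<Float>

    // Differentiate with respect to `self` and `x` (note the spaces after commas).
    @differentiable(wrt: (self, x))
    func callAsFunction(_ x: Tensor<Float>) -> Tensor<Float> {
      factor * x
    }
  }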

Sources/TensorFlow/Operators/Basic.swift (+5 -5)

@@ -16,7 +16,7 @@ infix operator .!=: ComparisonPrecedence

 /// Returns a tensor with the same shape and scalars as the specified tensor.
 @inlinable
-@differentiable( where Scalar: TensorFlowFloatingPoint)
+@differentiable(where Scalar: TensorFlowFloatingPoint)
 public func identity<Scalar>(_ x: Tensor<Scalar>) -> Tensor<Scalar> {
   x
 }
@@ -57,7 +57,7 @@ extension Tensor {
   ///
   /// - Returns: Array containing the unstacked tensors.
   @inlinable
-  @differentiable( where Scalar: TensorFlowFloatingPoint)
+  @differentiable(where Scalar: TensorFlowFloatingPoint)
   public func unstacked(alongAxis axis: Int = 0) -> [Tensor] {
     ensureValid(axis: axis)
     let posAxis = axis < 0 ? axis + rank : axis
@@ -87,7 +87,7 @@ extension Tensor {
   ///
   /// - Returns: An array containing the tensors part.
   @inlinable
-  @differentiable( where Scalar: TensorFlowFloatingPoint)
+  @differentiable(where Scalar: TensorFlowFloatingPoint)
   public func split(count: Int, alongAxis axis: Int = 0) -> [Tensor] {
     ensureValid(axis: axis)
     precondition(
@@ -429,7 +429,7 @@ extension Tensor {
   ///   specified axis.
   /// - Precondition: The axis must be in the range `-rank..<rank`.
   @inlinable
-  @differentiable( where Scalar: TensorFlowFloatingPoint)
+  @differentiable(where Scalar: TensorFlowFloatingPoint)
   public func concatenated(with other: Tensor, alongAxis axis: Int = 0) -> Tensor {
     return Tensor(concatenating: [self, other], alongAxis: axis)
   }
@@ -440,7 +440,7 @@ extension Tensor {
   ///   and may be controversial. The existence/naming of `++` will be discussed
   ///   during a later API design phase.
   @inlinable
-  @differentiable( where Scalar: TensorFlowFloatingPoint)
+  @differentiable(where Scalar: TensorFlowFloatingPoint)
   public static func ++ (lhs: Tensor, rhs: Tensor) -> Tensor {
     return lhs.concatenated(with: rhs)
   }
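As the doc comment warns, `++` is a provisional operator; it simply forwards to `concatenated(with:)`. A short illustration (values hypothetical):

  import TensorFlow

  let a = Tensor<Float>([1, 2])
  let b = Tensor<Float>([3, 4])
  let c = a ++ b  // concatenation along axis 0: [1, 2, 3, 4]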

Sources/TensorFlow/Operators/Dataset.swift (+6 -2)

@@ -41,9 +41,13 @@ func _tensorSeeds(_ seed: Tensor<Int64>) -> (Tensor<Int64>, Tensor<Int64>) {
 /// Represents a potentially large set of elements.
 ///
 /// A `Dataset` can be used to represent an input pipeline as a collection of element tensors.
-@available(*, deprecated, message: """
+@available(
+  *, deprecated,
+  message:
+    """
     Datasets will be removed in S4TF v0.10. Please use the new Batches API instead.
-  """)
+    """
+)
 @frozen
 public struct Dataset<Element: TensorGroup> {
   public let _handle: VariantHandle

Sources/TensorFlow/Operators/LinearAlgebra.swift (+3 -3)

@@ -30,7 +30,7 @@ extension Tensor where Scalar: TensorFlowNumeric {
   /// // [1, 2, 3, 4]
   /// ```
   @inlinable
-  @differentiable( where Scalar: TensorFlowFloatingPoint)
+  @differentiable(where Scalar: TensorFlowFloatingPoint)
   public func diagonalPart() -> Tensor {
     precondition(rank >= 2, "The tensor must have at least rank 2.")
     return _Raw.matrixDiagPart(self)
@@ -51,7 +51,7 @@ extension Tensor where Scalar: TensorFlowNumeric {
   /// //  [0, 0, 0, 4]]
   /// ```
   @inlinable
-  @differentiable( where Scalar: TensorFlowFloatingPoint)
+  @differentiable(where Scalar: TensorFlowFloatingPoint)
   public func diagonal() -> Tensor {
     _Raw.matrixDiag(diagonal: self)
   }
@@ -103,7 +103,7 @@ extension Tensor where Scalar: TensorFlowNumeric {
   ///   - superdiagonalCount: The number of superdiagonals to keep. If negative, keep entire upper
   ///     triangle.
   @inlinable
-  @differentiable( where Scalar: TensorFlowFloatingPoint)
+  @differentiable(where Scalar: TensorFlowFloatingPoint)
   public func bandPart(subdiagonalCount: Int, superdiagonalCount: Int) -> Tensor {
     precondition(rank >= 2, "The tensor must have at least rank 2.")
     let lower = Tensor<Int32>(Int32(subdiagonalCount), on: self.device)
