From cde8a000da892b6a7e1a1f4d0a443491e4a45e33 Mon Sep 17 00:00:00 2001
From: Saleem Abdulrasool
Date: Tue, 5 Jan 2021 14:25:28 -0800
Subject: [PATCH] TensorFlow: remove dead code

Both the CMake and SPM builds now unconditionally enable the X10
backend. As there is no longer a configuration knob to disable X10
backend support, remove the now-dead code paths.
---
 Package.swift                                 |     1 -
 .../TensorFlow/Bindings/RawOpsAugmented.swift |   634 +-
 .../Bindings/RawOpsDispatching.swift          | 80202 ++++++++--------
 .../TensorFlow/Bindings/generate_wrappers.py  |     4 -
 Sources/TensorFlow/CMakeLists.txt             |     1 -
 Sources/TensorFlow/Core/DataTypes.swift       |     6 +-
 Sources/TensorFlow/Core/MixedPrecision.swift  |   275 +-
 Sources/TensorFlow/Core/Runtime.swift         |    19 -
 Sources/TensorFlow/Core/Tensor.swift          |   291 +-
 Sources/TensorFlow/Core/TensorHandle.swift    |     8 +-
 Sources/TensorFlow/Layer.swift                |    90 +-
 11 files changed, 40692 insertions(+), 40839 deletions(-)

diff --git a/Package.swift b/Package.swift
index 29c413163..eac3bc53c 100644
--- a/Package.swift
+++ b/Package.swift
@@ -68,7 +68,6 @@ let package = Package(
         .product(name: "Numerics", package: "swift-numerics"),
       ],
       swiftSettings: [
-        .define("USING_X10_BACKEND"),
         .define("DEFAULT_BACKEND_EAGER"),
       ]),
     .target(
diff --git a/Sources/TensorFlow/Bindings/RawOpsAugmented.swift b/Sources/TensorFlow/Bindings/RawOpsAugmented.swift
index 2977a9b45..bb8d695b3 100644
--- a/Sources/TensorFlow/Bindings/RawOpsAugmented.swift
+++ b/Sources/TensorFlow/Bindings/RawOpsAugmented.swift
@@ -12,9 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-#if USING_X10_BACKEND
-  @_implementationOnly import x10_xla_tensor_wrapper
-#endif
+@_implementationOnly import x10_xla_tensor_wrapper
 
 // Augment the `_Raw` interface with ops that take Swift integers for the
 // shape attributes rather than requiring that they be passed as `Int32` tensors.
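To make the comment above concrete: the augmented entry points take ordinary Swift integer arrays for shape-like attributes, and only the eager path packs them into `Int32` tensors. A minimal sketch using the `reshape` wrapper from the hunks that follow; the tensor values are illustrative only:

  import TensorFlow

  let t = Tensor<Float>([0, 1, 2, 3, 4, 5])
  // Augmented interface: the shape attribute is a plain Swift array.
  let a = _Raw.reshape(t, shape: [2, 3] as [Int64])
  // Generated eager interface underneath: the same shape must be
  // materialized as an Int32 tensor on the eager device.
  let b = _RawTFEager.reshape(t, shape: Tensor<Int32>([2, 3], on: .defaultTFEager))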
@@ -222,360 +220,358 @@ extension _RawTFEager { } } -#if USING_X10_BACKEND - extension _Raw { - public static func commonBackend( - _ a: Device.Backend, _ b: Device.Backend, file: StaticString = #file, line: UInt = #line - ) -> Device.Backend { - if a != b { - fatalError("Op must have the same backend type: \(a) vs \(b)", file: file, line: line) - } - return a +extension _Raw { + public static func commonBackend( + _ a: Device.Backend, _ b: Device.Backend, file: StaticString = #file, line: UInt = #line + ) -> Device.Backend { + if a != b { + fatalError("Op must have the same backend type: \(a) vs \(b)", file: file, line: line) } + return a + } - public static func commonBackend(_ tensors: [Tensor]) -> Device.Backend { - var result = tensors.first!.handle.backend - for tensor in tensors { result = commonBackend(result, tensor.handle.backend) } - return result - } + public static func commonBackend(_ tensors: [Tensor]) -> Device.Backend { + var result = tensors.first!.handle.backend + for tensor in tensors { result = commonBackend(result, tensor.handle.backend) } + return result + } - public static func argMax< - T: TensorFlowNumeric, - OutputType: TensorFlowIndex - >( - _ input: Tensor, - dimension: Int64 - ) -> Tensor { - switch input.handle.backend { - case .XLA: - return _RawXLA.argMax(input, dimension: dimension) - case .TF_EAGER: - return _RawTFEager.argMax( - input, dimension: Tensor(Int32(dimension), on: .defaultTFEager)) - } + public static func argMax< + T: TensorFlowNumeric, + OutputType: TensorFlowIndex + >( + _ input: Tensor, + dimension: Int64 + ) -> Tensor { + switch input.handle.backend { + case .XLA: + return _RawXLA.argMax(input, dimension: dimension) + case .TF_EAGER: + return _RawTFEager.argMax( + input, dimension: Tensor(Int32(dimension), on: .defaultTFEager)) } + } - public static func mean< - T: TensorFlowNumeric - >( - _ input: Tensor, - reductionIndices: [Int64], - keepDims: Bool = false - ) -> Tensor { - switch input.handle.backend { - case .XLA: - return _RawXLA.mean( - input, reductionIndices: reductionIndices, - keepDims: keepDims) - case .TF_EAGER: - return _RawTFEager.mean( - input, - reductionIndices: Tensor(reductionIndices.map { Int32($0) }, on: .defaultTFEager), - keepDims: keepDims) - } + public static func mean< + T: TensorFlowNumeric + >( + _ input: Tensor, + reductionIndices: [Int64], + keepDims: Bool = false + ) -> Tensor { + switch input.handle.backend { + case .XLA: + return _RawXLA.mean( + input, reductionIndices: reductionIndices, + keepDims: keepDims) + case .TF_EAGER: + return _RawTFEager.mean( + input, + reductionIndices: Tensor(reductionIndices.map { Int32($0) }, on: .defaultTFEager), + keepDims: keepDims) } + } - public static func pad< - T: TensorFlowScalar - >( - _ input: Tensor, - paddings: [Int] - ) -> Tensor { - switch input.handle.backend { - case .XLA: - return _RawXLA.pad(input, paddings: paddings) - case .TF_EAGER: - return _RawTFEager.pad(input, paddings: paddings) - } + public static func pad< + T: TensorFlowScalar + >( + _ input: Tensor, + paddings: [Int] + ) -> Tensor { + switch input.handle.backend { + case .XLA: + return _RawXLA.pad(input, paddings: paddings) + case .TF_EAGER: + return _RawTFEager.pad(input, paddings: paddings) } + } - public static func reshape< - T: TensorFlowScalar - >( - _ tensor: Tensor, - shape: [Int64] - ) -> Tensor { - switch tensor.handle.backend { - case .XLA: - return _RawXLA.reshape(tensor, shape: shape) - case .TF_EAGER: - return _RawTFEager.reshape( - tensor, shape: Tensor(shape.map { Int32($0) }, 
on: .defaultTFEager)) - } + public static func reshape< + T: TensorFlowScalar + >( + _ tensor: Tensor, + shape: [Int64] + ) -> Tensor { + switch tensor.handle.backend { + case .XLA: + return _RawXLA.reshape(tensor, shape: shape) + case .TF_EAGER: + return _RawTFEager.reshape( + tensor, shape: Tensor(shape.map { Int32($0) }, on: .defaultTFEager)) } + } - public static func slice< - T: TensorFlowScalar - >( - _ input: Tensor, - begin: [Int], - size: [Int] - ) -> Tensor { - switch input.handle.backend { - case .XLA: - return _RawXLA.slice(input, begin: begin, size: size) - case .TF_EAGER: - return _RawTFEager.slice( - input, begin: Tensor(begin.map { Int32($0) }, on: .defaultTFEager), - size: Tensor(size.map { Int32($0) }, on: .defaultTFEager)) - } + public static func slice< + T: TensorFlowScalar + >( + _ input: Tensor, + begin: [Int], + size: [Int] + ) -> Tensor { + switch input.handle.backend { + case .XLA: + return _RawXLA.slice(input, begin: begin, size: size) + case .TF_EAGER: + return _RawTFEager.slice( + input, begin: Tensor(begin.map { Int32($0) }, on: .defaultTFEager), + size: Tensor(size.map { Int32($0) }, on: .defaultTFEager)) } + } - public static func split( - splitDim: Int, - value: Tensor, - numSplit: Int64 - ) -> [Tensor] { - switch value.handle.backend { - case .XLA: - return _RawXLA.split(splitDim: splitDim, value: value, numSplit: numSplit) - case .TF_EAGER: - return _RawTFEager.split( - splitDim: Tensor(Int32(splitDim), on: .defaultTFEager), value: value, - numSplit: numSplit) - } + public static func split( + splitDim: Int, + value: Tensor, + numSplit: Int64 + ) -> [Tensor] { + switch value.handle.backend { + case .XLA: + return _RawXLA.split(splitDim: splitDim, value: value, numSplit: numSplit) + case .TF_EAGER: + return _RawTFEager.split( + splitDim: Tensor(Int32(splitDim), on: .defaultTFEager), value: value, + numSplit: numSplit) } + } - public static func splitV< - T: TensorFlowScalar - >( - value: Tensor, - sizeSplits: [Int], - splitDim: Int - ) -> [Tensor] { - switch value.handle.backend { - case .XLA: - return _RawXLA.splitV(value: value, sizeSplits: sizeSplits, splitDim: splitDim) - case .TF_EAGER: - return _RawTFEager.splitV( - value: value, - sizeSplits: Tensor(sizeSplits.map { Int32($0) }, on: .defaultTFEager), - splitDim: Tensor(Int32(splitDim), on: .defaultTFEager), - numSplit: Int64(sizeSplits.count)) - } + public static func splitV< + T: TensorFlowScalar + >( + value: Tensor, + sizeSplits: [Int], + splitDim: Int + ) -> [Tensor] { + switch value.handle.backend { + case .XLA: + return _RawXLA.splitV(value: value, sizeSplits: sizeSplits, splitDim: splitDim) + case .TF_EAGER: + return _RawTFEager.splitV( + value: value, + sizeSplits: Tensor(sizeSplits.map { Int32($0) }, on: .defaultTFEager), + splitDim: Tensor(Int32(splitDim), on: .defaultTFEager), + numSplit: Int64(sizeSplits.count)) } + } - public static func tile< - T: TensorFlowScalar - >( - _ input: Tensor, - multiples: [Int] - ) -> Tensor { - switch input.handle.backend { - case .XLA: - return _RawXLA.tile(input, multiples: multiples) - case .TF_EAGER: - return _RawTFEager.tile(input, multiples: multiples) - } + public static func tile< + T: TensorFlowScalar + >( + _ input: Tensor, + multiples: [Int] + ) -> Tensor { + switch input.handle.backend { + case .XLA: + return _RawXLA.tile(input, multiples: multiples) + case .TF_EAGER: + return _RawTFEager.tile(input, multiples: multiples) } + } - public static func sum< - T: TensorFlowNumeric - >( - _ input: Tensor, - reductionIndices: [Int64], - keepDims: Bool 
= false - ) -> Tensor { - switch input.handle.backend { - case .XLA: - return _RawXLA.sum( - input, reductionIndices: reductionIndices, - keepDims: keepDims) - case .TF_EAGER: - return _RawTFEager.sum( - input, - reductionIndices: Tensor(reductionIndices.map { Int32($0) }, on: .defaultTFEager), - keepDims: keepDims) - } + public static func sum< + T: TensorFlowNumeric + >( + _ input: Tensor, + reductionIndices: [Int64], + keepDims: Bool = false + ) -> Tensor { + switch input.handle.backend { + case .XLA: + return _RawXLA.sum( + input, reductionIndices: reductionIndices, + keepDims: keepDims) + case .TF_EAGER: + return _RawTFEager.sum( + input, + reductionIndices: Tensor(reductionIndices.map { Int32($0) }, on: .defaultTFEager), + keepDims: keepDims) } + } - public static func transpose< - T: TensorFlowScalar - >( - _ x: Tensor, - perm: [Int] - ) -> Tensor { - switch x.handle.backend { - case .XLA: - return _RawXLA.transpose(x, perm: perm) - case .TF_EAGER: - return _RawTFEager.transpose( - x, perm: Tensor(perm.map { Int32($0) }, on: .defaultTFEager)) - } + public static func transpose< + T: TensorFlowScalar + >( + _ x: Tensor, + perm: [Int] + ) -> Tensor { + switch x.handle.backend { + case .XLA: + return _RawXLA.transpose(x, perm: perm) + case .TF_EAGER: + return _RawTFEager.transpose( + x, perm: Tensor(perm.map { Int32($0) }, on: .defaultTFEager)) } + } - public static func unsortedSegmentSum< - T: TensorFlowNumeric, - Tindices: TensorFlowIndex - >( - data: Tensor, - segmentIds: Tensor, - numSegments: Int - ) -> Tensor { - switch data.handle.backend { - case .XLA: - return _RawXLA.unsortedSegmentSum( - data: data, segmentIds: segmentIds, numSegments: numSegments) - case .TF_EAGER: - return _RawTFEager.unsortedSegmentSum( - data: data, segmentIds: segmentIds, - numSegments: Tensor(Int32(numSegments), on: .defaultTFEager)) - } + public static func unsortedSegmentSum< + T: TensorFlowNumeric, + Tindices: TensorFlowIndex + >( + data: Tensor, + segmentIds: Tensor, + numSegments: Int + ) -> Tensor { + switch data.handle.backend { + case .XLA: + return _RawXLA.unsortedSegmentSum( + data: data, segmentIds: segmentIds, numSegments: numSegments) + case .TF_EAGER: + return _RawTFEager.unsortedSegmentSum( + data: data, segmentIds: segmentIds, + numSegments: Tensor(Int32(numSegments), on: .defaultTFEager)) } + } - public static func broadcastTo< - T: TensorFlowScalar - >( - _ input: Tensor, - shape: [Int64] - ) -> Tensor { - switch input.handle.backend { - case .XLA: - return _RawXLA.broadcastTo(input, shape: shape) - case .TF_EAGER: - return _RawTFEager.broadcastTo( - input, shape: Tensor(shape.map { Int32($0) }, on: .defaultTFEager)) - } + public static func broadcastTo< + T: TensorFlowScalar + >( + _ input: Tensor, + shape: [Int64] + ) -> Tensor { + switch input.handle.backend { + case .XLA: + return _RawXLA.broadcastTo(input, shape: shape) + case .TF_EAGER: + return _RawTFEager.broadcastTo( + input, shape: Tensor(shape.map { Int32($0) }, on: .defaultTFEager)) } + } - public static func conv2DBackpropFilter( - _ input: Tensor, - filterSizes: [Int64], - outBackprop: Tensor, - strides: [Int32], - useCudnnOnGpu: Bool = true, - padding: Padding1, - explicitPaddings: [Int32], - dataFormat: DataFormat = .nhwc, - dilations: [Int32] = [1, 1, 1, 1] - ) -> Tensor { - switch commonBackend(input.handle.backend, outBackprop.handle.backend) { - case .XLA: - return _RawXLA.conv2DBackpropFilter( - input, filterSizes: filterSizes, - outBackprop: outBackprop, strides: strides, useCudnnOnGpu: useCudnnOnGpu, - padding: 
padding, explicitPaddings: explicitPaddings, dataFormat: dataFormat, - dilations: dilations) - case .TF_EAGER: - return _RawTFEager.conv2DBackpropFilter( - input, filterSizes: Tensor(filterSizes.map { Int32($0) }, on: .defaultTFEager), - outBackprop: outBackprop, strides: strides, useCudnnOnGpu: useCudnnOnGpu, - padding: padding, explicitPaddings: explicitPaddings, dataFormat: dataFormat, - dilations: dilations) - } + public static func conv2DBackpropFilter( + _ input: Tensor, + filterSizes: [Int64], + outBackprop: Tensor, + strides: [Int32], + useCudnnOnGpu: Bool = true, + padding: Padding1, + explicitPaddings: [Int32], + dataFormat: DataFormat = .nhwc, + dilations: [Int32] = [1, 1, 1, 1] + ) -> Tensor { + switch commonBackend(input.handle.backend, outBackprop.handle.backend) { + case .XLA: + return _RawXLA.conv2DBackpropFilter( + input, filterSizes: filterSizes, + outBackprop: outBackprop, strides: strides, useCudnnOnGpu: useCudnnOnGpu, + padding: padding, explicitPaddings: explicitPaddings, dataFormat: dataFormat, + dilations: dilations) + case .TF_EAGER: + return _RawTFEager.conv2DBackpropFilter( + input, filterSizes: Tensor(filterSizes.map { Int32($0) }, on: .defaultTFEager), + outBackprop: outBackprop, strides: strides, useCudnnOnGpu: useCudnnOnGpu, + padding: padding, explicitPaddings: explicitPaddings, dataFormat: dataFormat, + dilations: dilations) } + } - public static func conv2DBackpropInput( - inputSizes: [Int64], - filter: Tensor, - outBackprop: Tensor, - strides: [Int32], - useCudnnOnGpu: Bool = true, - padding: Padding1, - explicitPaddings: [Int32], - dataFormat: DataFormat = .nhwc, - dilations: [Int32] = [1, 1, 1, 1] - ) -> Tensor { - switch commonBackend(filter.handle.backend, outBackprop.handle.backend) { - case .XLA: - return _RawXLA.conv2DBackpropInput( - inputSizes: inputSizes, filter: filter, - outBackprop: outBackprop, - strides: strides, useCudnnOnGpu: useCudnnOnGpu, padding: padding, - explicitPaddings: explicitPaddings, dataFormat: dataFormat, dilations: dilations) - case .TF_EAGER: - return _RawTFEager.conv2DBackpropInput( - inputSizes: Tensor(inputSizes.map { Int32($0) }, on: .defaultTFEager), - filter: filter, - outBackprop: outBackprop, - strides: strides, useCudnnOnGpu: useCudnnOnGpu, padding: padding, - explicitPaddings: explicitPaddings, dataFormat: dataFormat, dilations: dilations) - } + public static func conv2DBackpropInput( + inputSizes: [Int64], + filter: Tensor, + outBackprop: Tensor, + strides: [Int32], + useCudnnOnGpu: Bool = true, + padding: Padding1, + explicitPaddings: [Int32], + dataFormat: DataFormat = .nhwc, + dilations: [Int32] = [1, 1, 1, 1] + ) -> Tensor { + switch commonBackend(filter.handle.backend, outBackprop.handle.backend) { + case .XLA: + return _RawXLA.conv2DBackpropInput( + inputSizes: inputSizes, filter: filter, + outBackprop: outBackprop, + strides: strides, useCudnnOnGpu: useCudnnOnGpu, padding: padding, + explicitPaddings: explicitPaddings, dataFormat: dataFormat, dilations: dilations) + case .TF_EAGER: + return _RawTFEager.conv2DBackpropInput( + inputSizes: Tensor(inputSizes.map { Int32($0) }, on: .defaultTFEager), + filter: filter, + outBackprop: outBackprop, + strides: strides, useCudnnOnGpu: useCudnnOnGpu, padding: padding, + explicitPaddings: explicitPaddings, dataFormat: dataFormat, dilations: dilations) } + } - public static func maxPoolV2( - _ input: Tensor, - ksize: [Int64], - strides: [Int64], - padding: Padding, - dataFormat: DataFormat2 = .nhwc - ) -> Tensor { - switch input.handle.backend { - case .XLA: - return 
_RawXLA.maxPoolV2( - input, ksize: ksize, - strides: strides, padding: padding, dataFormat: dataFormat) - case .TF_EAGER: - return _RawTFEager.maxPoolV2( - input, ksize: Tensor(ksize.map { Int32($0) }, on: .defaultTFEager), - strides: Tensor(strides.map { Int32($0) }, on: .defaultTFEager), padding: padding, - dataFormat: dataFormat) - } + public static func maxPoolV2( + _ input: Tensor, + ksize: [Int64], + strides: [Int64], + padding: Padding, + dataFormat: DataFormat2 = .nhwc + ) -> Tensor { + switch input.handle.backend { + case .XLA: + return _RawXLA.maxPoolV2( + input, ksize: ksize, + strides: strides, padding: padding, dataFormat: dataFormat) + case .TF_EAGER: + return _RawTFEager.maxPoolV2( + input, ksize: Tensor(ksize.map { Int32($0) }, on: .defaultTFEager), + strides: Tensor(strides.map { Int32($0) }, on: .defaultTFEager), padding: padding, + dataFormat: dataFormat) } + } - public static func maxPoolGradV2( - origInput: Tensor, - origOutput: Tensor, - grad: Tensor, - ksize: [Int64], - strides: [Int64], - padding: Padding, - dataFormat: DataFormat = .nhwc - ) -> Tensor { - switch commonBackend(origInput.handle.backend, origOutput.handle.backend) { - case .XLA: - return _RawXLA.maxPoolGradV2( - origInput: origInput, origOutput: origOutput, grad: grad, - ksize: ksize, - strides: strides, - padding: padding, dataFormat: dataFormat) - case .TF_EAGER: - return _RawTFEager.maxPoolGradV2( - origInput: origInput, origOutput: origOutput, grad: grad, - ksize: Tensor(ksize.map { Int32($0) }, on: .defaultTFEager), - strides: Tensor(strides.map { Int32($0) }, on: .defaultTFEager), - padding: padding, dataFormat: dataFormat) - } + public static func maxPoolGradV2( + origInput: Tensor, + origOutput: Tensor, + grad: Tensor, + ksize: [Int64], + strides: [Int64], + padding: Padding, + dataFormat: DataFormat = .nhwc + ) -> Tensor { + switch commonBackend(origInput.handle.backend, origOutput.handle.backend) { + case .XLA: + return _RawXLA.maxPoolGradV2( + origInput: origInput, origOutput: origOutput, grad: grad, + ksize: ksize, + strides: strides, + padding: padding, dataFormat: dataFormat) + case .TF_EAGER: + return _RawTFEager.maxPoolGradV2( + origInput: origInput, origOutput: origOutput, grad: grad, + ksize: Tensor(ksize.map { Int32($0) }, on: .defaultTFEager), + strides: Tensor(strides.map { Int32($0) }, on: .defaultTFEager), + padding: padding, dataFormat: dataFormat) } + } - /// A simplified version of cross replica sum, with scaling. - public static func crossReplicaSum( - _ inputs: [Tensor], - _ scale: Double - ) -> [Tensor] { - _RawXLA.crossReplicaSum(inputs, scale) - } + /// A simplified version of cross replica sum, with scaling. + public static func crossReplicaSum( + _ inputs: [Tensor], + _ scale: Double + ) -> [Tensor] { + _RawXLA.crossReplicaSum(inputs, scale) + } - /// Transfer a tensor to a different device. - public static func toDevice(_ x: Tensor, _ device: Device) -> Tensor - { - if x.handle.backend == device.backend && device.backend == .XLA { - return _RawXLA.toDevice(x, device) - } - return Tensor(shape: x.shape, scalars: x.scalars, on: device) + /// Transfer a tensor to a different device. 
+ public static func toDevice(_ x: Tensor, _ device: Device) -> Tensor + { + if x.handle.backend == device.backend && device.backend == .XLA { + return _RawXLA.toDevice(x, device) } + return Tensor(shape: x.shape, scalars: x.scalars, on: device) + } - public static func physicalCast( - _ input: Tensor, destType: R.Type - ) -> Tensor { - _RawXLA.physicalCast(input, destType: destType) - } + public static func physicalCast( + _ input: Tensor, destType: R.Type + ) -> Tensor { + _RawXLA.physicalCast(input, destType: destType) + } - // Currently only used for deterministic testing. - public static func rand(_ dims: [Int], _ seed: Int) -> Tensor { - _RawXLA.rand(dims, seed) - } + // Currently only used for deterministic testing. + public static func rand(_ dims: [Int], _ seed: Int) -> Tensor { + _RawXLA.rand(dims, seed) + } - public static func linSpace< - T: FloatingPoint & TensorFlowScalar, - Tidx: TensorFlowIndex - >( - start: Tensor, - stop: Tensor, - num: Tensor, - device: Device - ) -> Tensor { - _RawXLA.linSpace(start: start, stop: stop, num: num, device: device) - } + public static func linSpace< + T: FloatingPoint & TensorFlowScalar, + Tidx: TensorFlowIndex + >( + start: Tensor, + stop: Tensor, + num: Tensor, + device: Device + ) -> Tensor { + _RawXLA.linSpace(start: start, stop: stop, num: num, device: device) } -#endif +} diff --git a/Sources/TensorFlow/Bindings/RawOpsDispatching.swift b/Sources/TensorFlow/Bindings/RawOpsDispatching.swift index f86fcb0d2..9c4b4dcbe 100644 --- a/Sources/TensorFlow/Bindings/RawOpsDispatching.swift +++ b/Sources/TensorFlow/Bindings/RawOpsDispatching.swift @@ -23,40284 +23,40280 @@ ) public typealias Raw = _Raw -#if USING_X10_BACKEND - public enum _Raw { +public enum _Raw { - static let generatedTensorFlowVersion = "2.1.0" - static let generatedTensorFlowGitVersion = "v2.1.0-rc2-17-ge5bf8de" + static let generatedTensorFlowVersion = "2.1.0" + static let generatedTensorFlowGitVersion = "v2.1.0-rc2-17-ge5bf8de" - public typealias A = _RawTFEager.A + public typealias A = _RawTFEager.A - public typealias DataFormat = _RawTFEager.DataFormat + public typealias DataFormat = _RawTFEager.DataFormat - public typealias DataFormat1 = _RawTFEager.DataFormat1 + public typealias DataFormat1 = _RawTFEager.DataFormat1 - public typealias DataFormat2 = _RawTFEager.DataFormat2 + public typealias DataFormat2 = _RawTFEager.DataFormat2 - public typealias DensityUnit = _RawTFEager.DensityUnit + public typealias DensityUnit = _RawTFEager.DensityUnit - public typealias Direction = _RawTFEager.Direction + public typealias Direction = _RawTFEager.Direction - public typealias Errors = _RawTFEager.Errors + public typealias Errors = _RawTFEager.Errors - public typealias FinalOp = _RawTFEager.FinalOp + public typealias FinalOp = _RawTFEager.FinalOp - public typealias Format = _RawTFEager.Format + public typealias Format = _RawTFEager.Format - public typealias InputMode = _RawTFEager.InputMode + public typealias InputMode = _RawTFEager.InputMode - public typealias InputQuantMode = _RawTFEager.InputQuantMode + public typealias InputQuantMode = _RawTFEager.InputQuantMode - public typealias LossType = _RawTFEager.LossType + public typealias LossType = _RawTFEager.LossType - public typealias MergeOp = _RawTFEager.MergeOp + public typealias MergeOp = _RawTFEager.MergeOp - public typealias Method = _RawTFEager.Method + public typealias Method = _RawTFEager.Method - public typealias Method1 = _RawTFEager.Method1 + public typealias Method1 = _RawTFEager.Method1 - public typealias Mode = 
_RawTFEager.Mode + public typealias Mode = _RawTFEager.Mode - public typealias Mode1 = _RawTFEager.Mode1 + public typealias Mode1 = _RawTFEager.Mode1 - public typealias OutputEncoding = _RawTFEager.OutputEncoding + public typealias OutputEncoding = _RawTFEager.OutputEncoding - public typealias Padding = _RawTFEager.Padding + public typealias Padding = _RawTFEager.Padding - public typealias Padding1 = _RawTFEager.Padding1 + public typealias Padding1 = _RawTFEager.Padding1 - public typealias PrecisionMode = _RawTFEager.PrecisionMode + public typealias PrecisionMode = _RawTFEager.PrecisionMode - public typealias Reduction = _RawTFEager.Reduction + public typealias Reduction = _RawTFEager.Reduction - public typealias ReductionType = _RawTFEager.ReductionType + public typealias ReductionType = _RawTFEager.ReductionType - public typealias RnnMode = _RawTFEager.RnnMode + public typealias RnnMode = _RawTFEager.RnnMode - public typealias RoundMode = _RawTFEager.RoundMode + public typealias RoundMode = _RawTFEager.RoundMode - public typealias RoundMode1 = _RawTFEager.RoundMode1 + public typealias RoundMode1 = _RawTFEager.RoundMode1 - public typealias SplitType = _RawTFEager.SplitType + public typealias SplitType = _RawTFEager.SplitType - public typealias SplitType1 = _RawTFEager.SplitType1 + public typealias SplitType1 = _RawTFEager.SplitType1 - public typealias Unit = _RawTFEager.Unit + public typealias Unit = _RawTFEager.Unit - @inlinable @inline(__always) - public static func a() -> Tensor { - _RawTFEager.a() - } + @inlinable @inline(__always) + public static func a() -> Tensor { + _RawTFEager.a() + } - /// Raise a exception to abort the process when called. - /// - /// If exit_without_error is true, the process will exit normally, - /// otherwise it will exit with a SIGABORT signal. - /// - /// Returns nothing but an exception. - /// - /// - Attr error_msg: A string which is the message associated with the exception. - @inlinable @inline(__always) - public static func abort( - errorMsg: String, - exitWithoutError: Bool = false - ) { - _RawTFEager.abort(errorMsg: errorMsg, exitWithoutError: exitWithoutError) - } - - /// Computes the absolute value of a tensor. - /// - /// Given a tensor `x`, this operation returns a tensor containing the absolute - /// value of each element in `x`. For example, if x is an input element and y is - /// an output element, this operation computes \\(y = |x|\\). - @inlinable @inline(__always) - public static func abs( - _ x: Tensor - ) -> Tensor { - switch x.handle.backend { - case .XLA: - return _RawXLA.abs(x) - case .TF_EAGER: - return _RawTFEager.abs(x) - } - - } - - /// Returns the element-wise sum of a list of tensors. - /// - /// `tf.accumulate_n_v2` performs the same operation as `tf.add_n`, but does not - /// wait for all of its inputs to be ready before beginning to sum. This can - /// save memory if inputs are ready at different times, since minimum temporary - /// storage is proportional to the output size rather than the inputs size. - /// - /// Unlike the original `accumulate_n`, `accumulate_n_v2` is differentiable. - /// - /// Returns a `Tensor` of same shape and type as the elements of `inputs`. - /// - /// - Parameter inputs: A list of `Tensor` objects, each with same shape and type. - /// - /// - Attr shape: Shape of elements of `inputs`. - @inlinable @inline(__always) - public static func accumulateNV2( - inputs: [Tensor], - shape: TensorShape? 
- ) -> Tensor { - _RawTFEager.accumulateNV2(inputs: inputs, shape: shape) - } - - /// Computes acos of x element-wise. - @inlinable @inline(__always) - public static func acos( - _ x: Tensor - ) -> Tensor { - switch x.handle.backend { - case .XLA: - return _RawXLA.acos(x) - case .TF_EAGER: - return _RawTFEager.acos(x) - } - - } - - /// Computes inverse hyperbolic cosine of x element-wise. - /// - /// Given an input tensor, the function computes inverse hyperbolic cosine of every element. - /// Input range is `[1, inf]`. It returns `nan` if the input lies outside the range. - /// - /// ```python - /// x = tf.constant([-2, -0.5, 1, 1.2, 200, 10000, float("inf")]) - /// tf.math.acosh(x) ==> [nan nan 0. 0.62236255 5.9914584 9.903487 inf] - /// ``` - @inlinable @inline(__always) - public static func acosh( - _ x: Tensor - ) -> Tensor { - switch x.handle.backend { - case .XLA: - return _RawXLA.acosh(x) - case .TF_EAGER: - return _RawTFEager.acosh(x) - } - - } - - /// Returns x + y element-wise. - /// - /// *NOTE*: `Add` supports broadcasting. `AddN` does not. More about broadcasting - /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) - @inlinable @inline(__always) - public static func add( - _ x: Tensor, - _ y: Tensor - ) -> Tensor { - switch commonBackend(x.handle.backend, y.handle.backend) { - case .XLA: - let output_device = y.device - let x = Tensor(copying: x, to: .defaultTFEager) - let y = Tensor(copying: y, to: .defaultTFEager) - return Tensor(copying: _RawTFEager.add(x, y), to: output_device) - case .TF_EAGER: - return _RawTFEager.add(x, y) - } - - } - - /// Returns x + y element-wise. - /// - /// *NOTE*: `Add` supports broadcasting. `AddN` does not. More about broadcasting - /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) - @inlinable @inline(__always) - public static func add( - _ x: StringTensor, - _ y: StringTensor - ) -> StringTensor { - _RawTFEager.add(x, y) - } - - /// Add an `N`-minibatch `SparseTensor` to a `SparseTensorsMap`, return `N` handles. - /// - /// A `SparseTensor` of rank `R` is represented by three tensors: `sparse_indices`, - /// `sparse_values`, and `sparse_shape`, where - /// - /// ```sparse_indices.shape[1] == sparse_shape.shape[0] == R``` - /// - /// An `N`-minibatch of `SparseTensor` objects is represented as a `SparseTensor` - /// having a first `sparse_indices` column taking values between `[0, N)`, where - /// the minibatch size `N == sparse_shape[0]`. - /// - /// The input `SparseTensor` must have rank `R` greater than 1, and the first - /// dimension is treated as the minibatch dimension. Elements of the `SparseTensor` - /// must be sorted in increasing order of this first dimension. The stored - /// `SparseTensor` objects pointed to by each row of the output `sparse_handles` - /// will have rank `R-1`. - /// - /// The `SparseTensor` values can then be read out as part of a minibatch by passing - /// the given keys as vector elements to `TakeManySparseFromTensorsMap`. To ensure - /// the correct `SparseTensorsMap` is accessed, ensure that the same - /// `container` and `shared_name` are passed to that Op. If no `shared_name` - /// is provided here, instead use the *name* of the Operation created by calling - /// `AddManySparseToTensorsMap` as the `shared_name` passed to - /// `TakeManySparseFromTensorsMap`. Ensure the Operations are colocated. - /// - /// - Parameters: - /// - sparse_indices: 2-D. The `indices` of the minibatch `SparseTensor`. - /// `sparse_indices[:, 0]` must be ordered values in `[0, N)`. 
- /// - sparse_values: 1-D. The `values` of the minibatch `SparseTensor`. - /// - sparse_shape: 1-D. The `shape` of the minibatch `SparseTensor`. - /// The minibatch size `N == sparse_shape[0]`. - /// - /// - Attrs: - /// - container: The container name for the `SparseTensorsMap` created by this op. - /// - shared_name: The shared name for the `SparseTensorsMap` created by this op. - /// If blank, the new Operation's unique name is used. - /// - /// - Output sparse_handles: 1-D. The handles of the `SparseTensor` now stored in the - /// `SparseTensorsMap`. Shape: `[N]`. - @inlinable @inline(__always) - public static func addManySparseToTensorsMap( - sparseIndices: Tensor, - sparseValues: Tensor, - sparseShape: Tensor, - container: String, - sharedName: String - ) -> Tensor { - switch commonBackend( - commonBackend(sparseIndices.handle.backend, sparseValues.handle.backend), - sparseShape.handle.backend) - { - case .XLA: - let output_device = sparseShape.device - let sparseIndices = Tensor(copying: sparseIndices, to: .defaultTFEager) - let sparseValues = Tensor(copying: sparseValues, to: .defaultTFEager) - let sparseShape = Tensor(copying: sparseShape, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.addManySparseToTensorsMap( - sparseIndices: sparseIndices, sparseValues: sparseValues, sparseShape: sparseShape, - container: container, sharedName: sharedName), to: output_device) - case .TF_EAGER: - return _RawTFEager.addManySparseToTensorsMap( - sparseIndices: sparseIndices, sparseValues: sparseValues, sparseShape: sparseShape, - container: container, sharedName: sharedName) - } - - } - - /// Add all input tensors element wise. - /// - /// Inputs must be of same size and shape. - /// - /// ```python - /// x = [9, 7, 10] - /// tf.math.add_n(x) ==> 26 - /// ``` - @inlinable @inline(__always) - public static func addN( - inputs: [Tensor] - ) -> Tensor { - _RawTFEager.addN(inputs: inputs) - } - - /// Add a `SparseTensor` to a `SparseTensorsMap` return its handle. - /// - /// A `SparseTensor` is represented by three tensors: `sparse_indices`, - /// `sparse_values`, and `sparse_shape`. - /// - /// This operator takes the given `SparseTensor` and adds it to a container - /// object (a `SparseTensorsMap`). A unique key within this container is generated - /// in the form of an `int64`, and this is the value that is returned. - /// - /// The `SparseTensor` can then be read out as part of a minibatch by passing - /// the key as a vector element to `TakeManySparseFromTensorsMap`. To ensure - /// the correct `SparseTensorsMap` is accessed, ensure that the same - /// `container` and `shared_name` are passed to that Op. If no `shared_name` - /// is provided here, instead use the *name* of the Operation created by calling - /// `AddSparseToTensorsMap` as the `shared_name` passed to - /// `TakeManySparseFromTensorsMap`. Ensure the Operations are colocated. - /// - /// - Parameters: - /// - sparse_indices: 2-D. The `indices` of the `SparseTensor`. - /// - sparse_values: 1-D. The `values` of the `SparseTensor`. - /// - sparse_shape: 1-D. The `shape` of the `SparseTensor`. - /// - /// - Attrs: - /// - container: The container name for the `SparseTensorsMap` created by this op. - /// - shared_name: The shared name for the `SparseTensorsMap` created by this op. - /// If blank, the new Operation's unique name is used. - /// - /// - Output sparse_handle: 0-D. The handle of the `SparseTensor` now stored in the - /// `SparseTensorsMap`. 
- @inlinable @inline(__always) - public static func addSparseToTensorsMap( - sparseIndices: Tensor, - sparseValues: Tensor, - sparseShape: Tensor, - container: String, - sharedName: String - ) -> Tensor { - switch commonBackend( - commonBackend(sparseIndices.handle.backend, sparseValues.handle.backend), - sparseShape.handle.backend) - { - case .XLA: - let output_device = sparseShape.device - let sparseIndices = Tensor(copying: sparseIndices, to: .defaultTFEager) - let sparseValues = Tensor(copying: sparseValues, to: .defaultTFEager) - let sparseShape = Tensor(copying: sparseShape, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.addSparseToTensorsMap( - sparseIndices: sparseIndices, sparseValues: sparseValues, sparseShape: sparseShape, - container: container, sharedName: sharedName), to: output_device) - case .TF_EAGER: - return _RawTFEager.addSparseToTensorsMap( - sparseIndices: sparseIndices, sparseValues: sparseValues, sparseShape: sparseShape, - container: container, sharedName: sharedName) - } - - } - - /// Returns x + y element-wise. - /// - /// *NOTE*: `Add` supports broadcasting. `AddN` does not. More about broadcasting - /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) - @inlinable @inline(__always) - public static func addV2( - _ x: Tensor, - _ y: Tensor - ) -> Tensor { - switch commonBackend(x.handle.backend, y.handle.backend) { - case .XLA: - return _RawXLA.addV2(x, y) - case .TF_EAGER: - return _RawTFEager.addV2(x, y) - } - - } - - /// Deprecated. Disallowed in GraphDef version >= 2. - @inlinable @inline(__always) - public static func adjustContrast( - images: Tensor, - contrastFactor: Tensor, - minValue: Tensor, - maxValue: Tensor - ) -> Tensor { - switch commonBackend( - commonBackend( - commonBackend(images.handle.backend, contrastFactor.handle.backend), - minValue.handle.backend), maxValue.handle.backend) - { - case .XLA: - let output_device = maxValue.device - let images = Tensor(copying: images, to: .defaultTFEager) - let contrastFactor = Tensor(copying: contrastFactor, to: .defaultTFEager) - let minValue = Tensor(copying: minValue, to: .defaultTFEager) - let maxValue = Tensor(copying: maxValue, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.adjustContrast( - images: images, contrastFactor: contrastFactor, minValue: minValue, maxValue: maxValue), - to: output_device) - case .TF_EAGER: - return _RawTFEager.adjustContrast( - images: images, contrastFactor: contrastFactor, minValue: minValue, maxValue: maxValue) - } - - } - - /// Adjust the contrast of one or more images. - /// - /// `images` is a tensor of at least 3 dimensions. The last 3 dimensions are - /// interpreted as `[height, width, channels]`. The other dimensions only - /// represent a collection of images, such as `[batch, height, width, channels].` - /// - /// Contrast is adjusted independently for each channel of each image. - /// - /// For each channel, the Op first computes the mean of the image pixels in the - /// channel and then adjusts each component of each pixel to - /// `(x - mean) * contrast_factor + mean`. - /// - /// - Parameters: - /// - images: Images to adjust. At least 3-D. - /// - contrast_factor: A float multiplier for adjusting contrast. - /// - /// - Output output: The contrast-adjusted image or images. 
- @inlinable @inline(__always) - public static func adjustContrastv2( - images: Tensor, - contrastFactor: Tensor - ) -> Tensor { - switch commonBackend(images.handle.backend, contrastFactor.handle.backend) { - case .XLA: - let output_device = contrastFactor.device - let images = Tensor(copying: images, to: .defaultTFEager) - let contrastFactor = Tensor(copying: contrastFactor, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.adjustContrastv2(images: images, contrastFactor: contrastFactor), - to: output_device) - case .TF_EAGER: - return _RawTFEager.adjustContrastv2(images: images, contrastFactor: contrastFactor) - } - - } - - /// Adjust the hue of one or more images. - /// - /// `images` is a tensor of at least 3 dimensions. The last dimension is - /// interpretted as channels, and must be three. - /// - /// The input image is considered in the RGB colorspace. Conceptually, the RGB - /// colors are first mapped into HSV. A delta is then applied all the hue values, - /// and then remapped back to RGB colorspace. - /// - /// - Parameters: - /// - images: Images to adjust. At least 3-D. - /// - delta: A float delta to add to the hue. - /// - /// - Output output: The hue-adjusted image or images. - @inlinable @inline(__always) - public static func adjustHue( - images: Tensor, - delta: Tensor - ) -> Tensor { - switch commonBackend(images.handle.backend, delta.handle.backend) { - case .XLA: - let output_device = delta.device - let images = Tensor(copying: images, to: .defaultTFEager) - let delta = Tensor(copying: delta, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.adjustHue(images: images, delta: delta), to: output_device) - case .TF_EAGER: - return _RawTFEager.adjustHue(images: images, delta: delta) - } - - } - - /// Adjust the saturation of one or more images. - /// - /// `images` is a tensor of at least 3 dimensions. The last dimension is - /// interpretted as channels, and must be three. - /// - /// The input image is considered in the RGB colorspace. Conceptually, the RGB - /// colors are first mapped into HSV. A scale is then applied all the saturation - /// values, and then remapped back to RGB colorspace. - /// - /// - Parameters: - /// - images: Images to adjust. At least 3-D. - /// - scale: A float scale to add to the saturation. - /// - /// - Output output: The hue-adjusted image or images. - @inlinable @inline(__always) - public static func adjustSaturation( - images: Tensor, - scale: Tensor - ) -> Tensor { - switch commonBackend(images.handle.backend, scale.handle.backend) { - case .XLA: - let output_device = scale.device - let images = Tensor(copying: images, to: .defaultTFEager) - let scale = Tensor(copying: scale, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.adjustSaturation(images: images, scale: scale), to: output_device) - case .TF_EAGER: - return _RawTFEager.adjustSaturation(images: images, scale: scale) - } - - } - - /// Computes the "logical and" of elements across dimensions of a tensor. - /// - /// Reduces `input` along the dimensions given in `axis`. Unless - /// `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in - /// `axis`. If `keep_dims` is true, the reduced dimensions are - /// retained with length 1. - /// - /// - Parameters: - /// - input: The tensor to reduce. - /// - reduction_indices: The dimensions to reduce. Must be in the range - /// `[-rank(input), rank(input))`. - /// - /// - Attr keep_dims: If true, retain reduced dimensions with length 1. 
- /// - /// - Output output: The reduced tensor. - @inlinable @inline(__always) - public static func all( - _ input: Tensor, - reductionIndices: Tensor, - keepDims: Bool = false - ) -> Tensor { - switch commonBackend(input.handle.backend, reductionIndices.handle.backend) { - case .XLA: - return _RawXLA.all(input, reductionIndices: reductionIndices, keepDims: keepDims) - case .TF_EAGER: - return _RawTFEager.all(input, reductionIndices: reductionIndices, keepDims: keepDims) - } - - } - - /// Generates labels for candidate sampling with a learned unigram distribution. - /// - /// See explanations of candidate sampling and the data formats at - /// go/candidate-sampling. - /// - /// For each batch, this op picks a single set of sampled candidate labels. - /// - /// The advantages of sampling candidates per-batch are simplicity and the - /// possibility of efficient dense matrix multiplication. The disadvantage is that - /// the sampled candidates must be chosen independently of the context and of the - /// true labels. - /// - /// - Parameter true_classes: A batch_size * num_true matrix, in which each row contains the - /// IDs of the num_true target_classes in the corresponding original label. - /// - /// - Attrs: - /// - num_true: Number of true labels per context. - /// - num_sampled: Number of candidates to produce. - /// - unique: If unique is true, we sample with rejection, so that all sampled - /// candidates in a batch are unique. This requires some approximation to - /// estimate the post-rejection sampling probabilities. - /// - seed: If either seed or seed2 are set to be non-zero, the random number - /// generator is seeded by the given seed. Otherwise, it is seeded by a - /// random seed. - /// - seed2: An second seed to avoid seed collision. - /// - /// - Outputs: - /// - sampled_candidates: A vector of length num_sampled, in which each element is - /// the ID of a sampled candidate. - /// - true_expected_count: A batch_size * num_true matrix, representing - /// the number of times each candidate is expected to occur in a batch - /// of sampled candidates. If unique=true, then this is a probability. - /// - sampled_expected_count: A vector of length num_sampled, for each sampled - /// candidate representing the number of times the candidate is expected - /// to occur in a batch of sampled candidates. If unique=true, then this is a - /// probability. - @inlinable @inline(__always) - public static func allCandidateSampler( - trueClasses: Tensor, - numTrue: Int64, - numSampled: Int64, - unique: Bool, - seed: Int64 = 0, - seed2: Int64 = 0 - ) -> ( - sampledCandidates: Tensor, trueExpectedCount: Tensor, - sampledExpectedCount: Tensor - ) { - _RawTFEager.allCandidateSampler( - trueClasses: trueClasses, numTrue: numTrue, numSampled: numSampled, unique: unique, - seed: seed, seed2: seed2) - } + /// Raise a exception to abort the process when called. + /// + /// If exit_without_error is true, the process will exit normally, + /// otherwise it will exit with a SIGABORT signal. + /// + /// Returns nothing but an exception. + /// + /// - Attr error_msg: A string which is the message associated with the exception. + @inlinable @inline(__always) + public static func abort( + errorMsg: String, + exitWithoutError: Bool = false + ) { + _RawTFEager.abort(errorMsg: errorMsg, exitWithoutError: exitWithoutError) + } - /// An Op to exchange data across TPU replicas. 
- /// - /// On each replica, the input is split into `split_count` blocks along - /// `split_dimension` and send to the other replicas given group_assignment. After - /// receiving `split_count` - 1 blocks from other replicas, we concatenate the - /// blocks along `concat_dimension` as the output. - /// - /// For example, suppose there are 2 TPU replicas: - /// replica 0 receives input: `[[A, B]]` - /// replica 1 receives input: `[[C, D]]` - /// - /// group_assignment=`[[0, 1]]` - /// concat_dimension=0 - /// split_dimension=1 - /// split_count=2 - /// - /// replica 0's output: `[[A], [C]]` - /// replica 1's output: `[[B], [D]]` - /// - /// - Parameters: - /// - input: The local input to the sum. - /// - group_assignment: An int32 tensor with shape - /// [num_groups, num_replicas_per_group]. `group_assignment[i]` represents the - /// replica ids in the ith subgroup. - /// - /// - Attrs: - /// - T: The type of elements to be exchanged. - /// - concat_dimension: The dimension number to concatenate. - /// - split_dimension: The dimension number to split. - /// - split_count: The number of splits, this number must equal to the sub-group - /// size(group_assignment.get_shape()[1]) - /// - /// - Output output: The exchanged result. - @inlinable @inline(__always) - public static func allToAll( - _ input: Tensor, - groupAssignment: Tensor, - concatDimension: Int64, - splitDimension: Int64, - splitCount: Int64 - ) -> Tensor { - switch commonBackend(input.handle.backend, groupAssignment.handle.backend) { - case .XLA: - let output_device = groupAssignment.device - let input = Tensor(copying: input, to: .defaultTFEager) - let groupAssignment = Tensor(copying: groupAssignment, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.allToAll( - input, groupAssignment: groupAssignment, concatDimension: concatDimension, - splitDimension: splitDimension, splitCount: splitCount), to: output_device) - case .TF_EAGER: - return _RawTFEager.allToAll( - input, groupAssignment: groupAssignment, concatDimension: concatDimension, - splitDimension: splitDimension, splitCount: splitCount) - } - - } - - /// Returns the argument of a complex number. - /// - /// Given a tensor `input` of complex numbers, this operation returns a tensor of - /// type `float` that is the argument of each element in `input`. All elements in - /// `input` must be complex numbers of the form \\(a + bj\\), where *a* - /// is the real part and *b* is the imaginary part. - /// - /// The argument returned by this operation is of the form \\(atan2(b, a)\\). - /// - /// For example: - /// - /// ``` - /// # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] - /// tf.angle(input) ==> [2.0132, 1.056] - /// ``` - /// - /// @compatibility(numpy) - /// Equivalent to np.angle. - /// @end_compatibility - @inlinable @inline(__always) - public static func angle< - T: TensorFlowScalar, - Tout: FloatingPoint & TensorFlowScalar - >( - _ input: Tensor - ) -> Tensor { - switch input.handle.backend { - case .XLA: - let output_device = input.device - let input = Tensor(copying: input, to: .defaultTFEager) - return Tensor(copying: _RawTFEager.angle(input), to: output_device) - case .TF_EAGER: - return _RawTFEager.angle(input) - } - - } - - /// A container for an iterator resource. - /// - /// - Output handle: A handle to the iterator that can be passed to a "MakeIterator" or - /// "IteratorGetNext" op. In contrast to Iterator, AnonymousIterator prevents - /// resource sharing by name, and does not keep a reference to the resource - /// container. 
- @inlinable @inline(__always) - public static func anonymousIterator( - outputTypes: [TensorDataType], - outputShapes: [TensorShape?] - ) -> ResourceHandle { - _RawTFEager.anonymousIterator(outputTypes: outputTypes, outputShapes: outputShapes) - } - - /// A container for an iterator resource. - /// - /// - Outputs: - /// - handle: A handle to the iterator that can be passed to a "MakeIterator" or - /// "IteratorGetNext" op. In contrast to Iterator, AnonymousIterator prevents - /// resource sharing by name, and does not keep a reference to the resource - /// container. - /// - deleter: A variant deleter that should be passed into the op that deletes the iterator. - @inlinable @inline(__always) - public static func anonymousIteratorV2( - outputTypes: [TensorDataType], - outputShapes: [TensorShape?] - ) -> (handle: ResourceHandle, deleter: VariantHandle) { - _RawTFEager.anonymousIteratorV2(outputTypes: outputTypes, outputShapes: outputShapes) - } - - @inlinable @inline(__always) - public static func anonymousMemoryCache() -> (handle: ResourceHandle, deleter: VariantHandle) { - _RawTFEager.anonymousMemoryCache() - } - - /// A container for a multi device iterator resource. - /// - /// - Outputs: - /// - handle: A handle to a multi device iterator that can be passed to a - /// "MultiDeviceIteratorGetNextFromShard" op. In contrast to MultiDeviceIterator, - /// AnonymousIterator prevents resource sharing by name, and does not keep a - /// reference to the resource container. - /// - deleter: A variant deleter that should be passed into the op that deletes the iterator. - @inlinable @inline(__always) - public static func anonymousMultiDeviceIterator( - devices: [String], - outputTypes: [TensorDataType], - outputShapes: [TensorShape?] - ) -> (handle: ResourceHandle, deleter: VariantHandle) { - _RawTFEager.anonymousMultiDeviceIterator( - devices: devices, outputTypes: outputTypes, outputShapes: outputShapes) - } - - @inlinable @inline(__always) - public static func anonymousRandomSeedGenerator( - seed: Tensor, - seed2: Tensor - ) -> (handle: ResourceHandle, deleter: VariantHandle) { - _RawTFEager.anonymousRandomSeedGenerator(seed: seed, seed2: seed2) - } - - /// Computes the "logical or" of elements across dimensions of a tensor. - /// - /// Reduces `input` along the dimensions given in `axis`. Unless - /// `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in - /// `axis`. If `keep_dims` is true, the reduced dimensions are - /// retained with length 1. - /// - /// - Parameters: - /// - input: The tensor to reduce. - /// - reduction_indices: The dimensions to reduce. Must be in the range - /// `[-rank(input), rank(input))`. - /// - /// - Attr keep_dims: If true, retain reduced dimensions with length 1. - /// - /// - Output output: The reduced tensor. - @inlinable @inline(__always) - public static func any( - _ input: Tensor, - reductionIndices: Tensor, - keepDims: Bool = false - ) -> Tensor { - switch commonBackend(input.handle.backend, reductionIndices.handle.backend) { - case .XLA: - return _RawXLA.any(input, reductionIndices: reductionIndices, keepDims: keepDims) - case .TF_EAGER: - return _RawTFEager.any(input, reductionIndices: reductionIndices, keepDims: keepDims) - } - - } - - /// Returns the truth value of abs(x-y) < tolerance element-wise. 
- @inlinable @inline(__always) - public static func approximateEqual( - _ x: Tensor, - _ y: Tensor, - tolerance: Double = 1e-05 - ) -> Tensor { - switch commonBackend(x.handle.backend, y.handle.backend) { - case .XLA: - return _RawXLA.approximateEqual(x, y, tolerance: tolerance) - case .TF_EAGER: - return _RawTFEager.approximateEqual(x, y, tolerance: tolerance) - } - - } - - /// Returns the index with the largest value across dimensions of a tensor. - /// - /// Note that in case of ties the identity of the return value is not guaranteed. - /// - /// Usage: - /// ```python - /// import tensorflow as tf - /// a = [1, 10, 26.9, 2.8, 166.32, 62.3] - /// b = tf.math.argmax(input = a) - /// c = tf.keras.backend.eval(b) - /// # c = 4 - /// # here a[4] = 166.32 which is the largest element of a across axis 0 - /// ``` - /// - /// - Parameter dimension: int32 or int64, must be in the range `[-rank(input), rank(input))`. - /// Describes which dimension of the input Tensor to reduce across. For vectors, - /// use dimension = 0. - @inlinable @inline(__always) - public static func argMax< - T: TensorFlowNumeric, - Tidx: TensorFlowIndex, - OutputType: TensorFlowIndex - >( - _ input: Tensor, - dimension: Tensor - ) -> Tensor { - switch commonBackend(input.handle.backend, dimension.handle.backend) { - case .XLA: - return _RawXLA.argMax(input, dimension: dimension) - case .TF_EAGER: - return _RawTFEager.argMax(input, dimension: dimension) - } - - } - - /// Returns the index with the smallest value across dimensions of a tensor. - /// - /// Note that in case of ties the identity of the return value is not guaranteed. - /// - /// Usage: - /// ```python - /// import tensorflow as tf - /// a = [1, 10, 26.9, 2.8, 166.32, 62.3] - /// b = tf.math.argmin(input = a) - /// c = tf.keras.backend.eval(b) - /// # c = 0 - /// # here a[0] = 1 which is the smallest element of a across axis 0 - /// ``` - /// - /// - Parameter dimension: int32 or int64, must be in the range `[-rank(input), rank(input))`. - /// Describes which dimension of the input Tensor to reduce across. For vectors, - /// use dimension = 0. - @inlinable @inline(__always) - public static func argMin< - T: TensorFlowNumeric, - Tidx: TensorFlowIndex, - OutputType: TensorFlowIndex - >( - _ input: Tensor, - dimension: Tensor - ) -> Tensor { - switch commonBackend(input.handle.backend, dimension.handle.backend) { - case .XLA: - return _RawXLA.argMin(input, dimension: dimension) - case .TF_EAGER: - return _RawTFEager.argMin(input, dimension: dimension) - } - - } - - /// Converts each entry in the given tensor to strings. - /// - /// Supports many numeric types and boolean. - /// - /// For Unicode, see the - /// [https://www.tensorflow.org/tutorials/representation/unicode](Working with Unicode text) - /// tutorial. - /// - /// Examples: - /// - /// >>> tf.strings.as_string([3, 2]) - /// - /// >>> tf.strings.as_string([3.1415926, 2.71828], precision=2).numpy() - /// array([b'3.14', b'2.72'], dtype=object) - /// - /// - Attrs: - /// - precision: The post-decimal precision to use for floating point numbers. - /// Only used if precision > -1. - /// - scientific: Use scientific notation for floating point numbers. - /// - shortest: Use shortest representation (either scientific or standard) for - /// floating point numbers. - /// - width: Pad pre-decimal numbers to this width. - /// Applies to both floating point and integer numbers. - /// Only used if width > -1. - /// - fill: The value to pad if width > -1. If empty, pads with spaces. 
- /// Another typical value is '0'. String cannot be longer than 1 character. - @inlinable @inline(__always) - public static func asString( - _ input: Tensor, - precision: Int64 = -1, - scientific: Bool = false, - shortest: Bool = false, - width: Int64 = -1, - fill: String - ) -> StringTensor { - _RawTFEager.asString( - input, precision: precision, scientific: scientific, shortest: shortest, width: width, - fill: fill) - } - - /// Computes the trignometric inverse sine of x element-wise. - /// - /// The `tf.math.asin` operation returns the inverse of `tf.math.sin`, such that - /// if `y = tf.math.sin(x)` then, `x = tf.math.asin(y)`. - /// - /// **Note**: The output of `tf.math.asin` will lie within the invertible range - /// of sine, i.e [-pi/2, pi/2]. - /// - /// For example: - /// - /// ```python - /// # Note: [1.047, 0.785] ~= [(pi/3), (pi/4)] - /// x = tf.constant([1.047, 0.785]) - /// y = tf.math.sin(x) # [0.8659266, 0.7068252] - /// - /// tf.math.asin(y) # [1.047, 0.785] = x - /// ``` - /// - @inlinable @inline(__always) - public static func asin( - _ x: Tensor - ) -> Tensor { - switch x.handle.backend { - case .XLA: - return _RawXLA.asin(x) - case .TF_EAGER: - return _RawTFEager.asin(x) - } - - } - - /// Computes inverse hyperbolic sine of x element-wise. - /// - /// Given an input tensor, this function computes inverse hyperbolic sine - /// for every element in the tensor. Both input and output has a range of - /// `[-inf, inf]`. - /// - /// ```python - /// x = tf.constant([-float("inf"), -2, -0.5, 1, 1.2, 200, 10000, float("inf")]) - /// tf.math.asinh(x) ==> [-inf -1.4436355 -0.4812118 0.8813736 1.0159732 5.991471 9.903487 inf] - /// ``` - @inlinable @inline(__always) - public static func asinh( - _ x: Tensor - ) -> Tensor { - switch x.handle.backend { - case .XLA: - return _RawXLA.asinh(x) - case .TF_EAGER: - return _RawTFEager.asinh(x) - } - - } - - /// Asserts that the given condition is true. - /// - /// If `condition` evaluates to false, print the list of tensors in `data`. - /// `summarize` determines how many entries of the tensors to print. - /// - /// - Parameters: - /// - condition: The condition to evaluate. - /// - data: The tensors to print out when condition is false. - /// - /// - Attr summarize: Print this many entries of each tensor. - @inlinable @inline(__always) - public static func assert( - condition: Tensor, - data: T, - summarize: Int64 = 3 - ) { - _RawTFEager.assert(condition: condition, data: data, summarize: summarize) - } - - /// A transformation that asserts which transformations happen next. - /// - /// This transformation checks whether the camel-case names (i.e. "FlatMap", not - /// "flat_map") of the transformations following this transformation match the list - /// of names in the `transformations` argument. If there is a mismatch, the - /// transformation raises an exception. - /// - /// The check occurs when iterating over the contents of the dataset, which - /// means that the check happens *after* any static optimizations are applied - /// to the dataset graph. - /// - /// - Parameters: - /// - input_dataset: A variant tensor representing the input dataset. - /// `AssertNextDataset` passes through the outputs of its input dataset. - /// - transformations: A `tf.string` vector `tf.Tensor` identifying the transformations that are - /// expected to happen next. 
- @inlinable @inline(__always)
- public static func assertNextDataset(
- inputDataset: VariantHandle,
- transformations: StringTensor,
- outputTypes: [TensorDataType],
- outputShapes: [TensorShape?]
- ) -> VariantHandle {
- _RawTFEager.assertNextDataset(
- inputDataset: inputDataset, transformations: transformations, outputTypes: outputTypes,
- outputShapes: outputShapes)
- }
-
- /// Adds a value to the current value of a variable.
- ///
- /// Any ReadVariableOp with a control dependency on this op is guaranteed to
- /// see the incremented value or a subsequent newer one.
- ///
- /// - Parameters:
- /// - resource: handle to the resource in which to store the variable.
- /// - value: the value by which the variable will be incremented.
- ///
- /// - Attr dtype: the dtype of the value.
- @inlinable @inline(__always)
- public static func assignAddVariableOp(
- resource: ResourceHandle,
- value: Tensor
- ) {
- _RawTFEager.assignAddVariableOp(resource: resource, value: value)
- }
-
- /// Subtracts a value from the current value of a variable.
- ///
- /// Any ReadVariableOp with a control dependency on this op is guaranteed to
- /// see the decremented value or a subsequent newer one.
- ///
- /// - Parameters:
- /// - resource: handle to the resource in which to store the variable.
- /// - value: the value by which the variable will be decremented.
- ///
- /// - Attr dtype: the dtype of the value.
- @inlinable @inline(__always)
- public static func assignSubVariableOp(
- resource: ResourceHandle,
- value: Tensor
- ) {
- _RawTFEager.assignSubVariableOp(resource: resource, value: value)
- }
-
- /// Assigns a new value to a variable.
- ///
- /// Any ReadVariableOp with a control dependency on this op is guaranteed to return
- /// this value or a subsequent newer value of the variable.
- ///
- /// - Parameters:
- /// - resource: handle to the resource in which to store the variable.
- /// - value: the value to assign to the variable.
- ///
- /// - Attr dtype: the dtype of the value.
- @inlinable @inline(__always)
- public static func assignVariableOp(
- resource: ResourceHandle,
- value: Tensor
- ) {
- _RawTFEager.assignVariableOp(resource: resource, value: value)
- }
-
- /// Computes the trigonometric inverse tangent of x element-wise.
- ///
- /// The `tf.math.atan` operation returns the inverse of `tf.math.tan`, such that
- /// if `y = tf.math.tan(x)` then `x = tf.math.atan(y)`.
- ///
- /// **Note**: The output of `tf.math.atan` will lie within the invertible range
- /// of tan, i.e. `(-pi/2, pi/2)`.
- ///
- /// For example:
- ///
- /// ```python
- /// # Note: [1.047, 0.785] ~= [(pi/3), (pi/4)]
- /// x = tf.constant([1.047, 0.785])
- /// y = tf.math.tan(x) # [1.731261, 0.99920404]
- ///
- /// tf.math.atan(y) # [1.047, 0.785] = x
- /// ```
- ///
- @inlinable @inline(__always)
- public static func atan(
- _ x: Tensor
- ) -> Tensor {
- switch x.handle.backend {
- case .XLA:
- return _RawXLA.atan(x)
- case .TF_EAGER:
- return _RawTFEager.atan(x)
- }
-
- }
-
- /// Computes arctangent of `y/x` element-wise, respecting signs of the arguments.
- ///
- /// This is the angle \( \theta \in [-\pi, \pi] \) such that
- /// \[ x = r \cos(\theta) \]
- /// and
- /// \[ y = r \sin(\theta) \]
- /// where \( r = \sqrt{x^2 + y^2} \).
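// atan2 has no _RawXLA kernel, so the wrapper that follows falls back to the
// eager runtime: on the XLA path it copies both operands to .defaultTFEager,
// computes there, and copies the result back to the input device. A sketch of
// what that fallback costs (assuming an XLA device is available; the values
// are illustrative):
import TensorFlow

let y = Tensor<Float>([1.0], on: .defaultXLA)
let x = Tensor<Float>([1.0], on: .defaultXLA)

// Internally: two copies onto the eager device, one copy back out.
let theta = _Raw.atan2(y, x)  // ~pi/4, returned on the original XLA device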
- @inlinable @inline(__always) - public static func atan2( - _ y: Tensor, - _ x: Tensor - ) -> Tensor { - switch commonBackend(y.handle.backend, x.handle.backend) { - case .XLA: - let output_device = x.device - let y = Tensor(copying: y, to: .defaultTFEager) - let x = Tensor(copying: x, to: .defaultTFEager) - return Tensor(copying: _RawTFEager.atan2(y, x), to: output_device) - case .TF_EAGER: - return _RawTFEager.atan2(y, x) - } - - } - - /// Computes inverse hyperbolic tangent of x element-wise. - /// - /// Given an input tensor, this function computes inverse hyperbolic tangent - /// for every element in the tensor. Input range is `[-1,1]` and output range is - /// `[-inf, inf]`. If input is `-1`, output will be `-inf` and if the - /// input is `1`, output will be `inf`. Values outside the range will have - /// `nan` as output. - /// - /// ```python - /// x = tf.constant([-float("inf"), -1, -0.5, 1, 0, 0.5, 10, float("inf")]) - /// tf.math.atanh(x) ==> [nan -inf -0.54930615 inf 0. 0.54930615 nan nan] - /// ``` - @inlinable @inline(__always) - public static func atanh( - _ x: Tensor - ) -> Tensor { - switch x.handle.backend { - case .XLA: - return _RawXLA.atanh(x) - case .TF_EAGER: - return _RawTFEager.atanh(x) - } - - } - - @inlinable @inline(__always) - public static func attr( - _ a: Int64 - ) { - _RawTFEager.attr(a) + /// Computes the absolute value of a tensor. + /// + /// Given a tensor `x`, this operation returns a tensor containing the absolute + /// value of each element in `x`. For example, if x is an input element and y is + /// an output element, this operation computes \\(y = |x|\\). + @inlinable @inline(__always) + public static func abs( + _ x: Tensor + ) -> Tensor { + switch x.handle.backend { + case .XLA: + return _RawXLA.abs(x) + case .TF_EAGER: + return _RawTFEager.abs(x) } - @inlinable @inline(__always) - public static func attrBool( - _ a: Bool - ) { - _RawTFEager.attrBool(a) - } + } - @inlinable @inline(__always) - public static func attrBoolList( - _ a: [Bool] - ) { - _RawTFEager.attrBoolList(a) - } + /// Returns the element-wise sum of a list of tensors. + /// + /// `tf.accumulate_n_v2` performs the same operation as `tf.add_n`, but does not + /// wait for all of its inputs to be ready before beginning to sum. This can + /// save memory if inputs are ready at different times, since minimum temporary + /// storage is proportional to the output size rather than the inputs size. + /// + /// Unlike the original `accumulate_n`, `accumulate_n_v2` is differentiable. + /// + /// Returns a `Tensor` of same shape and type as the elements of `inputs`. + /// + /// - Parameter inputs: A list of `Tensor` objects, each with same shape and type. + /// + /// - Attr shape: Shape of elements of `inputs`. + @inlinable @inline(__always) + public static func accumulateNV2( + inputs: [Tensor], + shape: TensorShape? + ) -> Tensor { + _RawTFEager.accumulateNV2(inputs: inputs, shape: shape) + } - @inlinable @inline(__always) - public static func attrDefault( - _ a: String = "banana" - ) { - _RawTFEager.attrDefault(a) + /// Computes acos of x element-wise. 
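// accumulateNV2 above has no XLA case at all and always dispatches to
// _RawTFEager, so its inputs are expected to live on an eager device. A
// minimal usage sketch (shape is the static shape of each summand):
import TensorFlow

let parts = [
  Tensor<Float>([1, 2], on: .defaultTFEager),
  Tensor<Float>([3, 4], on: .defaultTFEager),
  Tensor<Float>([5, 6], on: .defaultTFEager),
]
let total = _Raw.accumulateNV2(inputs: parts, shape: TensorShape([2]))
// total == Tensor<Float>([9, 12])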
+ @inlinable @inline(__always) + public static func acos( + _ x: Tensor + ) -> Tensor { + switch x.handle.backend { + case .XLA: + return _RawXLA.acos(x) + case .TF_EAGER: + return _RawTFEager.acos(x) } - @inlinable @inline(__always) - public static func attrEmptyListDefault( - _ a: [Double] - ) { - _RawTFEager.attrEmptyListDefault(a) - } + } - @inlinable @inline(__always) - public static func attrEnum( - _ a: A - ) { - _RawTFEager.attrEnum(a) + /// Computes inverse hyperbolic cosine of x element-wise. + /// + /// Given an input tensor, the function computes inverse hyperbolic cosine of every element. + /// Input range is `[1, inf]`. It returns `nan` if the input lies outside the range. + /// + /// ```python + /// x = tf.constant([-2, -0.5, 1, 1.2, 200, 10000, float("inf")]) + /// tf.math.acosh(x) ==> [nan nan 0. 0.62236255 5.9914584 9.903487 inf] + /// ``` + @inlinable @inline(__always) + public static func acosh( + _ x: Tensor + ) -> Tensor { + switch x.handle.backend { + case .XLA: + return _RawXLA.acosh(x) + case .TF_EAGER: + return _RawTFEager.acosh(x) } - @inlinable @inline(__always) - public static func attrEnumList( - _ a: [String] - ) { - _RawTFEager.attrEnumList(a) - } + } - @inlinable @inline(__always) - public static func attrFloat( - _ a: Double - ) { - _RawTFEager.attrFloat(a) + /// Returns x + y element-wise. + /// + /// *NOTE*: `Add` supports broadcasting. `AddN` does not. More about broadcasting + /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + @inlinable @inline(__always) + public static func add( + _ x: Tensor, + _ y: Tensor + ) -> Tensor { + switch commonBackend(x.handle.backend, y.handle.backend) { + case .XLA: + let output_device = y.device + let x = Tensor(copying: x, to: .defaultTFEager) + let y = Tensor(copying: y, to: .defaultTFEager) + return Tensor(copying: _RawTFEager.add(x, y), to: output_device) + case .TF_EAGER: + return _RawTFEager.add(x, y) } - @inlinable @inline(__always) - public static func attrListDefault( - _ a: [Int32] = [5, 15] - ) { - _RawTFEager.attrListDefault(a) - } + } - @inlinable @inline(__always) - public static func attrListMin( - _ a: [Int32] - ) { - _RawTFEager.attrListMin(a) - } + /// Returns x + y element-wise. + /// + /// *NOTE*: `Add` supports broadcasting. `AddN` does not. More about broadcasting + /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + @inlinable @inline(__always) + public static func add( + _ x: StringTensor, + _ y: StringTensor + ) -> StringTensor { + _RawTFEager.add(x, y) + } - @inlinable @inline(__always) - public static func attrListTypeDefault( - _ a: [Tensor], - _ b: [Tensor] - ) { - _RawTFEager.attrListTypeDefault(a, b) + /// Add an `N`-minibatch `SparseTensor` to a `SparseTensorsMap`, return `N` handles. + /// + /// A `SparseTensor` of rank `R` is represented by three tensors: `sparse_indices`, + /// `sparse_values`, and `sparse_shape`, where + /// + /// ```sparse_indices.shape[1] == sparse_shape.shape[0] == R``` + /// + /// An `N`-minibatch of `SparseTensor` objects is represented as a `SparseTensor` + /// having a first `sparse_indices` column taking values between `[0, N)`, where + /// the minibatch size `N == sparse_shape[0]`. + /// + /// The input `SparseTensor` must have rank `R` greater than 1, and the first + /// dimension is treated as the minibatch dimension. Elements of the `SparseTensor` + /// must be sorted in increasing order of this first dimension. 
The stored + /// `SparseTensor` objects pointed to by each row of the output `sparse_handles` + /// will have rank `R-1`. + /// + /// The `SparseTensor` values can then be read out as part of a minibatch by passing + /// the given keys as vector elements to `TakeManySparseFromTensorsMap`. To ensure + /// the correct `SparseTensorsMap` is accessed, ensure that the same + /// `container` and `shared_name` are passed to that Op. If no `shared_name` + /// is provided here, instead use the *name* of the Operation created by calling + /// `AddManySparseToTensorsMap` as the `shared_name` passed to + /// `TakeManySparseFromTensorsMap`. Ensure the Operations are colocated. + /// + /// - Parameters: + /// - sparse_indices: 2-D. The `indices` of the minibatch `SparseTensor`. + /// `sparse_indices[:, 0]` must be ordered values in `[0, N)`. + /// - sparse_values: 1-D. The `values` of the minibatch `SparseTensor`. + /// - sparse_shape: 1-D. The `shape` of the minibatch `SparseTensor`. + /// The minibatch size `N == sparse_shape[0]`. + /// + /// - Attrs: + /// - container: The container name for the `SparseTensorsMap` created by this op. + /// - shared_name: The shared name for the `SparseTensorsMap` created by this op. + /// If blank, the new Operation's unique name is used. + /// + /// - Output sparse_handles: 1-D. The handles of the `SparseTensor` now stored in the + /// `SparseTensorsMap`. Shape: `[N]`. + @inlinable @inline(__always) + public static func addManySparseToTensorsMap( + sparseIndices: Tensor, + sparseValues: Tensor, + sparseShape: Tensor, + container: String, + sharedName: String + ) -> Tensor { + switch commonBackend( + commonBackend(sparseIndices.handle.backend, sparseValues.handle.backend), + sparseShape.handle.backend) + { + case .XLA: + let output_device = sparseShape.device + let sparseIndices = Tensor(copying: sparseIndices, to: .defaultTFEager) + let sparseValues = Tensor(copying: sparseValues, to: .defaultTFEager) + let sparseShape = Tensor(copying: sparseShape, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.addManySparseToTensorsMap( + sparseIndices: sparseIndices, sparseValues: sparseValues, sparseShape: sparseShape, + container: container, sharedName: sharedName), to: output_device) + case .TF_EAGER: + return _RawTFEager.addManySparseToTensorsMap( + sparseIndices: sparseIndices, sparseValues: sparseValues, sparseShape: sparseShape, + container: container, sharedName: sharedName) } - @inlinable @inline(__always) - public static func attrMin( - _ a: Int64 - ) { - _RawTFEager.attrMin(a) - } + } - @inlinable @inline(__always) - public static func attrPartialShape( - _ a: TensorShape? - ) { - _RawTFEager.attrPartialShape(a) - } + /// Add all input tensors element wise. + /// + /// Inputs must be of same size and shape. + /// + /// ```python + /// x = [9, 7, 10] + /// tf.math.add_n(x) ==> 26 + /// ``` + @inlinable @inline(__always) + public static func addN( + inputs: [Tensor] + ) -> Tensor { + _RawTFEager.addN(inputs: inputs) + } - @inlinable @inline(__always) - public static func attrPartialShapeList( - _ a: [TensorShape?] - ) { - _RawTFEager.attrPartialShapeList(a) + /// Add a `SparseTensor` to a `SparseTensorsMap` return its handle. + /// + /// A `SparseTensor` is represented by three tensors: `sparse_indices`, + /// `sparse_values`, and `sparse_shape`. + /// + /// This operator takes the given `SparseTensor` and adds it to a container + /// object (a `SparseTensorsMap`). 
A unique key within this container is generated + /// in the form of an `int64`, and this is the value that is returned. + /// + /// The `SparseTensor` can then be read out as part of a minibatch by passing + /// the key as a vector element to `TakeManySparseFromTensorsMap`. To ensure + /// the correct `SparseTensorsMap` is accessed, ensure that the same + /// `container` and `shared_name` are passed to that Op. If no `shared_name` + /// is provided here, instead use the *name* of the Operation created by calling + /// `AddSparseToTensorsMap` as the `shared_name` passed to + /// `TakeManySparseFromTensorsMap`. Ensure the Operations are colocated. + /// + /// - Parameters: + /// - sparse_indices: 2-D. The `indices` of the `SparseTensor`. + /// - sparse_values: 1-D. The `values` of the `SparseTensor`. + /// - sparse_shape: 1-D. The `shape` of the `SparseTensor`. + /// + /// - Attrs: + /// - container: The container name for the `SparseTensorsMap` created by this op. + /// - shared_name: The shared name for the `SparseTensorsMap` created by this op. + /// If blank, the new Operation's unique name is used. + /// + /// - Output sparse_handle: 0-D. The handle of the `SparseTensor` now stored in the + /// `SparseTensorsMap`. + @inlinable @inline(__always) + public static func addSparseToTensorsMap( + sparseIndices: Tensor, + sparseValues: Tensor, + sparseShape: Tensor, + container: String, + sharedName: String + ) -> Tensor { + switch commonBackend( + commonBackend(sparseIndices.handle.backend, sparseValues.handle.backend), + sparseShape.handle.backend) + { + case .XLA: + let output_device = sparseShape.device + let sparseIndices = Tensor(copying: sparseIndices, to: .defaultTFEager) + let sparseValues = Tensor(copying: sparseValues, to: .defaultTFEager) + let sparseShape = Tensor(copying: sparseShape, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.addSparseToTensorsMap( + sparseIndices: sparseIndices, sparseValues: sparseValues, sparseShape: sparseShape, + container: container, sharedName: sharedName), to: output_device) + case .TF_EAGER: + return _RawTFEager.addSparseToTensorsMap( + sparseIndices: sparseIndices, sparseValues: sparseValues, sparseShape: sparseShape, + container: container, sharedName: sharedName) } - @inlinable @inline(__always) - public static func attrShape( - _ a: TensorShape? - ) { - _RawTFEager.attrShape(a) - } + } - @inlinable @inline(__always) - public static func attrShapeList( - _ a: [TensorShape?] - ) { - _RawTFEager.attrShapeList(a) + /// Returns x + y element-wise. + /// + /// *NOTE*: `Add` supports broadcasting. `AddN` does not. More about broadcasting + /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + @inlinable @inline(__always) + public static func addV2( + _ x: Tensor, + _ y: Tensor + ) -> Tensor { + switch commonBackend(x.handle.backend, y.handle.backend) { + case .XLA: + return _RawXLA.addV2(x, y) + case .TF_EAGER: + return _RawTFEager.addV2(x, y) } - @inlinable @inline(__always) - public static func attrTypeDefault( - _ a: Tensor - ) { - _RawTFEager.attrTypeDefault(a) - } - - /// Audio Microfrontend Op. - /// - /// This Op converts a sequence of audio data into one or more - /// feature vectors containing filterbanks of the input. The - /// conversion process uses a lightweight library to perform: - /// - /// 1. A slicing window function - /// 2. Short-time FFTs - /// 3. Filterbank calculations - /// 4. Noise reduction - /// 5. PCAN Auto Gain Control - /// 6. 
Logarithmic scaling
- ///
- /// Arguments
- /// audio: 1D Tensor, int16 audio data in temporal ordering.
- /// sample_rate: Integer, the sample rate of the audio in Hz.
- /// window_size: Integer, length of desired time frames in ms.
- /// window_step: Integer, length of step size for the next frame in ms.
- /// num_channels: Integer, the number of filterbank channels to use.
- /// upper_band_limit: Float, the highest frequency included in the filterbanks.
- /// lower_band_limit: Float, the lowest frequency included in the filterbanks.
- /// smoothing_bits: Int, scale up signal by 2^(smoothing_bits) before reduction.
- /// even_smoothing: Float, smoothing coefficient for even-numbered channels.
- /// odd_smoothing: Float, smoothing coefficient for odd-numbered channels.
- /// min_signal_remaining: Float, fraction of signal to preserve in smoothing.
- /// enable_pcan: Bool, enable PCAN auto gain control.
- /// pcan_strength: Float, gain normalization exponent.
- /// pcan_offset: Float, positive value added in the normalization denominator.
- /// gain_bits: Int, number of fractional bits in the gain.
- /// enable_log: Bool, enable logarithmic scaling of filterbanks.
- /// scale_shift: Integer, scale filterbanks by 2^(scale_shift).
- /// left_context: Integer, number of preceding frames to attach to each frame.
- /// right_context: Integer, number of following frames to attach to each frame.
- /// frame_stride: Integer, M frames to skip over, where output[n] = frame[n*M].
- /// zero_padding: Bool, if left/right context is out-of-bounds, attach frame of
- /// zeroes. Otherwise, frame[0] or frame[size-1] will be copied.
- /// out_scale: Integer, divide all filterbanks by this number.
- /// out_type: DType, type of the output Tensor, defaults to UINT16.
- ///
- /// Returns
- /// filterbanks: 2D Tensor, each row is a time frame, each column is a channel.
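// A usage sketch for the microfrontend wrapper that follows, assuming one
// second of silent 16 kHz audio and the generated defaults; the UInt16
// output annotation is an assumption driven by the out_type default noted
// above:
import TensorFlow

let samples = Tensor<Int16>(zeros: [16000], on: .defaultTFEager)
let filterbanks: Tensor<UInt16> = _Raw.audioMicrofrontend(audio: samples)
// With the defaults: one 32-channel feature row per 10 ms window step.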
- @inlinable @inline(__always) - public static func audioMicrofrontend( - audio: Tensor, - sampleRate: Int64 = 16000, - windowSize: Int64 = 25, - windowStep: Int64 = 10, - numChannels: Int64 = 32, - upperBandLimit: Double = 7500, - lowerBandLimit: Double = 125, - smoothingBits: Int64 = 10, - evenSmoothing: Double = 0.025, - oddSmoothing: Double = 0.06, - minSignalRemaining: Double = 0.05, - enablePcan: Bool = false, - pcanStrength: Double = 0.95, - pcanOffset: Double = 80, - gainBits: Int64 = 21, - enableLog: Bool = true, - scaleShift: Int64 = 6, - leftContext: Int64 = 0, - rightContext: Int64 = 0, - frameStride: Int64 = 1, - zeroPadding: Bool = false, - outScale: Int64 = 1 - ) -> Tensor { - switch audio.handle.backend { - case .XLA: - let output_device = audio.device - let audio = Tensor(copying: audio, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.audioMicrofrontend( - audio: audio, sampleRate: sampleRate, windowSize: windowSize, windowStep: windowStep, - numChannels: numChannels, upperBandLimit: upperBandLimit, - lowerBandLimit: lowerBandLimit, smoothingBits: smoothingBits, - evenSmoothing: evenSmoothing, oddSmoothing: oddSmoothing, - minSignalRemaining: minSignalRemaining, enablePcan: enablePcan, - pcanStrength: pcanStrength, pcanOffset: pcanOffset, gainBits: gainBits, - enableLog: enableLog, scaleShift: scaleShift, leftContext: leftContext, - rightContext: rightContext, frameStride: frameStride, zeroPadding: zeroPadding, - outScale: outScale), to: output_device) - case .TF_EAGER: - return _RawTFEager.audioMicrofrontend( - audio: audio, sampleRate: sampleRate, windowSize: windowSize, windowStep: windowStep, - numChannels: numChannels, upperBandLimit: upperBandLimit, lowerBandLimit: lowerBandLimit, - smoothingBits: smoothingBits, evenSmoothing: evenSmoothing, oddSmoothing: oddSmoothing, - minSignalRemaining: minSignalRemaining, enablePcan: enablePcan, - pcanStrength: pcanStrength, pcanOffset: pcanOffset, gainBits: gainBits, - enableLog: enableLog, scaleShift: scaleShift, leftContext: leftContext, - rightContext: rightContext, frameStride: frameStride, zeroPadding: zeroPadding, - outScale: outScale) - } - - } - - /// Produces a visualization of audio data over time. - /// - /// Spectrograms are a standard way of representing audio information as a series of - /// slices of frequency information, one slice for each window of time. By joining - /// these together into a sequence, they form a distinctive fingerprint of the sound - /// over time. - /// - /// This op expects to receive audio data as an input, stored as floats in the range - /// -1 to 1, together with a window width in samples, and a stride specifying how - /// far to move the window between slices. From this it generates a three - /// dimensional output. The first dimension is for the channels in the input, so a - /// stereo audio input would have two here for example. The second dimension is time, - /// with successive frequency slices. The third dimension has an amplitude value for - /// each frequency during that time slice. - /// - /// This means the layout when converted and saved as an image is rotated 90 degrees - /// clockwise from a typical spectrogram. Time is descending down the Y axis, and - /// the frequency decreases from left to right. - /// - /// Each value in the result represents the square root of the sum of the real and - /// imaginary parts of an FFT on the current window of samples. 
In this way, the - /// lowest dimension represents the power of each frequency in the current window, - /// and adjacent windows are concatenated in the next dimension. - /// - /// To get a more intuitive and visual look at what this operation does, you can run - /// tensorflow/examples/wav_to_spectrogram to read in an audio file and save out the - /// resulting spectrogram as a PNG image. - /// - /// - Parameter input: Float representation of audio data. - /// - /// - Attrs: - /// - window_size: How wide the input window is in samples. For the highest efficiency - /// this should be a power of two, but other values are accepted. - /// - stride: How widely apart the center of adjacent sample windows should be. - /// - magnitude_squared: Whether to return the squared magnitude or just the - /// magnitude. Using squared magnitude can avoid extra calculations. - /// - /// - Output spectrogram: 3D representation of the audio frequencies as an image. - @inlinable @inline(__always) - public static func audioSpectrogram( - _ input: Tensor, - windowSize: Int64, - stride: Int64, - magnitudeSquared: Bool = false - ) -> Tensor { - switch input.handle.backend { - case .XLA: - let output_device = input.device - let input = Tensor(copying: input, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.audioSpectrogram( - input, windowSize: windowSize, stride: stride, magnitudeSquared: magnitudeSquared), - to: output_device) - case .TF_EAGER: - return _RawTFEager.audioSpectrogram( - input, windowSize: windowSize, stride: stride, magnitudeSquared: magnitudeSquared) - } - - } - - /// Outputs a `Summary` protocol buffer with audio. - /// - /// The summary has up to `max_outputs` summary values containing audio. The - /// audio is built from `tensor` which must be 3-D with shape `[batch_size, - /// frames, channels]` or 2-D with shape `[batch_size, frames]`. The values are - /// assumed to be in the range of `[-1.0, 1.0]` with a sample rate of `sample_rate`. - /// - /// The `tag` argument is a scalar `Tensor` of type `string`. It is used to - /// build the `tag` of the summary values: - /// - /// * If `max_outputs` is 1, the summary value tag is '*tag*/audio'. - /// * If `max_outputs` is greater than 1, the summary value tags are - /// generated sequentially as '*tag*/audio/0', '*tag*/audio/1', etc. - /// - /// - Parameters: - /// - tag: Scalar. Used to build the `tag` attribute of the summary values. - /// - tensor: 2-D of shape `[batch_size, frames]`. - /// - /// - Attrs: - /// - sample_rate: The sample rate of the signal in hertz. - /// - max_outputs: Max number of batch elements to generate audio for. - /// - /// - Output summary: Scalar. Serialized `Summary` protocol buffer. - @inlinable @inline(__always) - public static func audioSummary( - tag: StringTensor, - _ tensor: Tensor, - sampleRate: Double, - maxOutputs: Int64 = 3 - ) -> StringTensor { - _RawTFEager.audioSummary(tag: tag, tensor, sampleRate: sampleRate, maxOutputs: maxOutputs) - } - - /// Outputs a `Summary` protocol buffer with audio. - /// - /// The summary has up to `max_outputs` summary values containing audio. The - /// audio is built from `tensor` which must be 3-D with shape `[batch_size, - /// frames, channels]` or 2-D with shape `[batch_size, frames]`. The values are - /// assumed to be in the range of `[-1.0, 1.0]` with a sample rate of `sample_rate`. - /// - /// The `tag` argument is a scalar `Tensor` of type `string`. 
It is used to - /// build the `tag` of the summary values: - /// - /// * If `max_outputs` is 1, the summary value tag is '*tag*/audio'. - /// * If `max_outputs` is greater than 1, the summary value tags are - /// generated sequentially as '*tag*/audio/0', '*tag*/audio/1', etc. - /// - /// - Parameters: - /// - tag: Scalar. Used to build the `tag` attribute of the summary values. - /// - tensor: 2-D of shape `[batch_size, frames]`. - /// - sample_rate: The sample rate of the signal in hertz. - /// - /// - Attr max_outputs: Max number of batch elements to generate audio for. - /// - /// - Output summary: Scalar. Serialized `Summary` protocol buffer. - @inlinable @inline(__always) - public static func audioSummaryV2( - tag: StringTensor, - _ tensor: Tensor, - sampleRate: Tensor, - maxOutputs: Int64 = 3 - ) -> StringTensor { - _RawTFEager.audioSummaryV2(tag: tag, tensor, sampleRate: sampleRate, maxOutputs: maxOutputs) - } - - /// Creates a dataset that shards the input dataset. - /// - /// Creates a dataset that shards the input dataset by num_workers, returning a - /// sharded dataset for the index-th worker. This attempts to automatically shard - /// a dataset by examining the Dataset graph and inserting a shard op before the - /// inputs to a reader Dataset (e.g. CSVDataset, TFRecordDataset). - /// - /// This dataset will throw a NotFound error if we cannot shard the dataset - /// automatically. - /// - /// - Parameters: - /// - input_dataset: A variant tensor representing the input dataset. - /// - num_workers: A scalar representing the number of workers to distribute this dataset across. - /// - index: A scalar representing the index of the current worker out of num_workers. - @inlinable @inline(__always) - public static func autoShardDataset( - inputDataset: VariantHandle, - numWorkers: Tensor, - index: Tensor, - autoShardPolicy: Int64 = 0, - outputTypes: [TensorDataType], - outputShapes: [TensorShape?] - ) -> VariantHandle { - _RawTFEager.autoShardDataset( - inputDataset: inputDataset, numWorkers: numWorkers, index: index, - autoShardPolicy: autoShardPolicy, outputTypes: outputTypes, outputShapes: outputShapes) - } - - /// Performs average pooling on the input. - /// - /// Each entry in `output` is the mean of the corresponding size `ksize` - /// window in `value`. - /// - /// - Parameter value: 4-D with shape `[batch, height, width, channels]`. - /// - /// - Attrs: - /// - ksize: The size of the sliding window for each dimension of `value`. - /// - strides: The stride of the sliding window for each dimension of `value`. - /// - padding: The type of padding algorithm to use. - /// - data_format: Specify the data format of the input and output data. With the - /// default format "NHWC", the data is stored in the order of: - /// [batch, in_height, in_width, in_channels]. - /// Alternatively, the format could be "NCHW", the data storage order of: - /// [batch, in_channels, in_height, in_width]. - /// - /// - Output output: The average pooled output tensor. - @inlinable @inline(__always) - public static func avgPool( - value: Tensor, - ksize: [Int32], - strides: [Int32], - padding: Padding, - dataFormat: DataFormat = .nhwc - ) -> Tensor { - switch value.handle.backend { - case .XLA: - return _RawXLA.avgPool( - value: value, ksize: ksize, strides: strides, padding: padding, dataFormat: dataFormat) - case .TF_EAGER: - return _RawTFEager.avgPool( - value: value, ksize: ksize, strides: strides, padding: padding, dataFormat: dataFormat) - } - - } - - /// Performs 3D average pooling on the input. 
- /// - /// - Parameter input: Shape `[batch, depth, rows, cols, channels]` tensor to pool over. - /// - /// - Attrs: - /// - ksize: 1-D tensor of length 5. The size of the window for each dimension of - /// the input tensor. Must have `ksize[0] = ksize[4] = 1`. - /// - strides: 1-D tensor of length 5. The stride of the sliding window for each - /// dimension of `input`. Must have `strides[0] = strides[4] = 1`. - /// - padding: The type of padding algorithm to use. - /// - data_format: The data format of the input and output data. With the - /// default format "NDHWC", the data is stored in the order of: - /// [batch, in_depth, in_height, in_width, in_channels]. - /// Alternatively, the format could be "NCDHW", the data storage order is: - /// [batch, in_channels, in_depth, in_height, in_width]. - /// - /// - Output output: The average pooled output tensor. - @inlinable @inline(__always) - public static func avgPool3D( - _ input: Tensor, - ksize: [Int32], - strides: [Int32], - padding: Padding, - dataFormat: DataFormat1 = .ndhwc - ) -> Tensor { - switch input.handle.backend { - case .XLA: - return _RawXLA.avgPool3D( - input, ksize: ksize, strides: strides, padding: padding, dataFormat: dataFormat) - case .TF_EAGER: - return _RawTFEager.avgPool3D( - input, ksize: ksize, strides: strides, padding: padding, dataFormat: dataFormat) - } - - } - - /// Computes gradients of average pooling function. - /// - /// - Parameters: - /// - orig_input_shape: The original input dimensions. - /// - grad: Output backprop of shape `[batch, depth, rows, cols, channels]`. - /// - /// - Attrs: - /// - ksize: 1-D tensor of length 5. The size of the window for each dimension of - /// the input tensor. Must have `ksize[0] = ksize[4] = 1`. - /// - strides: 1-D tensor of length 5. The stride of the sliding window for each - /// dimension of `input`. Must have `strides[0] = strides[4] = 1`. - /// - padding: The type of padding algorithm to use. - /// - data_format: The data format of the input and output data. With the - /// default format "NDHWC", the data is stored in the order of: - /// [batch, in_depth, in_height, in_width, in_channels]. - /// Alternatively, the format could be "NCDHW", the data storage order is: - /// [batch, in_channels, in_depth, in_height, in_width]. - /// - /// - Output output: The backprop for input. - @inlinable @inline(__always) - public static func avgPool3DGrad( - origInputShape: Tensor, - grad: Tensor, - ksize: [Int32], - strides: [Int32], - padding: Padding, - dataFormat: DataFormat1 = .ndhwc - ) -> Tensor { - switch commonBackend(origInputShape.handle.backend, grad.handle.backend) { - case .XLA: - return _RawXLA.avgPool3DGrad( - origInputShape: origInputShape, grad: grad, ksize: ksize, strides: strides, - padding: padding, dataFormat: dataFormat) - case .TF_EAGER: - return _RawTFEager.avgPool3DGrad( - origInputShape: origInputShape, grad: grad, ksize: ksize, strides: strides, - padding: padding, dataFormat: dataFormat) - } - - } - - /// Computes gradients of the average pooling function. - /// - /// - Parameters: - /// - orig_input_shape: 1-D. Shape of the original input to `avg_pool`. - /// - grad: 4-D with shape `[batch, height, width, channels]`. Gradients w.r.t. - /// the output of `avg_pool`. - /// - /// - Attrs: - /// - ksize: The size of the sliding window for each dimension of the input. - /// - strides: The stride of the sliding window for each dimension of the input. - /// - padding: The type of padding algorithm to use. 
- /// - data_format: Specify the data format of the input and output data. With the - /// default format "NHWC", the data is stored in the order of: - /// [batch, in_height, in_width, in_channels]. - /// Alternatively, the format could be "NCHW", the data storage order of: - /// [batch, in_channels, in_height, in_width]. - /// - /// - Output output: 4-D. Gradients w.r.t. the input of `avg_pool`. - @inlinable @inline(__always) - public static func avgPoolGrad( - origInputShape: Tensor, - grad: Tensor, - ksize: [Int32], - strides: [Int32], - padding: Padding, - dataFormat: DataFormat = .nhwc - ) -> Tensor { - switch commonBackend(origInputShape.handle.backend, grad.handle.backend) { - case .XLA: - return _RawXLA.avgPoolGrad( - origInputShape: origInputShape, grad: grad, ksize: ksize, strides: strides, - padding: padding, dataFormat: dataFormat) - case .TF_EAGER: - return _RawTFEager.avgPoolGrad( - origInputShape: origInputShape, grad: grad, ksize: ksize, strides: strides, - padding: padding, dataFormat: dataFormat) - } - - } - - @inlinable @inline(__always) - public static func b() -> Tensor { - _RawTFEager.b() - } - - /// Batches all input tensors nondeterministically. - /// - /// When many instances of this Op are being run concurrently with the same - /// container/shared_name in the same device, some will output zero-shaped Tensors - /// and others will output Tensors of size up to max_batch_size. - /// - /// All Tensors in in_tensors are batched together (so, for example, labels and - /// features should be batched with a single instance of this operation. - /// - /// Each invocation of batch emits an `id` scalar which will be used to identify - /// this particular invocation when doing unbatch or its gradient. - /// - /// Each op which emits a non-empty batch will also emit a non-empty batch_index - /// Tensor, which, is a [K, 3] matrix where each row contains the invocation's id, - /// start, and length of elements of each set of Tensors present in batched_tensors. - /// - /// Batched tensors are concatenated along the first dimension, and all tensors in - /// in_tensors must have the first dimension of the same size. - /// - /// in_tensors: The tensors to be batched. - /// num_batch_threads: Number of scheduling threads for processing batches of work. - /// Determines the number of batches processed in parallel. - /// max_batch_size: Batch sizes will never be bigger than this. - /// batch_timeout_micros: Maximum number of microseconds to wait before outputting - /// an incomplete batch. - /// allowed_batch_sizes: Optional list of allowed batch sizes. If left empty, does - /// nothing. Otherwise, supplies a list of batch sizes, causing the op to pad - /// batches up to one of those sizes. The entries must increase monotonically, and - /// the final entry must equal max_batch_size. - /// grad_timeout_micros: The timeout to use for the gradient. See Unbatch. - /// batched_tensors: Either empty tensors or a batch of concatenated Tensors. - /// batch_index: If out_tensors is non-empty, has information to invert it. - /// container: Controls the scope of sharing of this batch. - /// id: always contains a scalar with a unique ID for this invocation of Batch. - /// shared_name: Concurrently running instances of batch in the same device with the - /// same container and shared_name will batch their elements together. If left - /// empty, the op name will be used as the shared name. - /// T: the types of tensors to be batched. 
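// The batching contract documented above concatenates per-invocation inputs
// along their first dimension. A sketch of that shape arithmetic with plain
// ops; the (id, start, length) triples are the documented batch_index rows:
import TensorFlow

let callA = Tensor<Float>(zeros: [1, 3])  // one element from invocation A
let callB = Tensor<Float>(ones: [2, 3])   // two elements from invocation B
let batched = Tensor(concatenating: [callA, callB], alongAxis: 0)
// batched.shape == [3, 3]; batch_index would record (idA, 0, 1), (idB, 1, 2).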
- @inlinable @inline(__always) - public static func batch( - inTensors: T, - numBatchThreads: Int64, - maxBatchSize: Int64, - maxEnqueuedBatches: Int64 = 10, - batchTimeoutMicros: Int64, - allowedBatchSizes: [Int32], - gradTimeoutMicros: Int64, - container: String, - sharedName: String, - batchingQueue: String - ) -> (batchedTensors: T, batchIndex: Tensor, id: Tensor) { - _RawTFEager.batch( - inTensors: inTensors, numBatchThreads: numBatchThreads, maxBatchSize: maxBatchSize, - maxEnqueuedBatches: maxEnqueuedBatches, batchTimeoutMicros: batchTimeoutMicros, - allowedBatchSizes: allowedBatchSizes, gradTimeoutMicros: gradTimeoutMicros, - container: container, sharedName: sharedName, batchingQueue: batchingQueue) - } - - @inlinable @inline(__always) - public static func batchCholesky( - _ input: Tensor - ) -> Tensor { - switch input.handle.backend { - case .XLA: - let output_device = input.device - let input = Tensor(copying: input, to: .defaultTFEager) - return Tensor(copying: _RawTFEager.batchCholesky(input), to: output_device) - case .TF_EAGER: - return _RawTFEager.batchCholesky(input) - } - - } - - @inlinable @inline(__always) - public static func batchCholeskyGrad( - l: Tensor, - grad: Tensor - ) -> Tensor { - switch commonBackend(l.handle.backend, grad.handle.backend) { - case .XLA: - let output_device = grad.device - let l = Tensor(copying: l, to: .defaultTFEager) - let grad = Tensor(copying: grad, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.batchCholeskyGrad(l: l, grad: grad), to: output_device) - case .TF_EAGER: - return _RawTFEager.batchCholeskyGrad(l: l, grad: grad) - } - - } - - /// Creates a dataset that batches `batch_size` elements from `input_dataset`. - /// - /// - Parameter batch_size: A scalar representing the number of elements to accumulate in a - /// batch. - @inlinable @inline(__always) - public static func batchDataset( - inputDataset: VariantHandle, - batchSize: Tensor, - outputTypes: [TensorDataType], - outputShapes: [TensorShape?] - ) -> VariantHandle { - _RawTFEager.batchDataset( - inputDataset: inputDataset, batchSize: batchSize, outputTypes: outputTypes, - outputShapes: outputShapes) - } - - /// Creates a dataset that batches `batch_size` elements from `input_dataset`. - /// - /// - Parameters: - /// - batch_size: A scalar representing the number of elements to accumulate in a batch. - /// - drop_remainder: A scalar representing whether the last batch should be dropped in case its size - /// is smaller than desired. - @inlinable @inline(__always) - public static func batchDatasetV2( - inputDataset: VariantHandle, - batchSize: Tensor, - dropRemainder: Tensor, - parallelCopy: Bool = false, - outputTypes: [TensorDataType], - outputShapes: [TensorShape?] - ) -> VariantHandle { - _RawTFEager.batchDatasetV2( - inputDataset: inputDataset, batchSize: batchSize, dropRemainder: dropRemainder, - parallelCopy: parallelCopy, outputTypes: outputTypes, outputShapes: outputShapes) - } - - /// Batches all the inputs tensors to the computation done by the function. - /// - /// So, for example, in the following code - /// - /// ```python - /// - /// # This input will be captured. 
- /// y = tf.placeholder_with_default(1.0, shape=[])
- ///
- /// @tf.Defun(tf.float32)
- /// def computation(a):
- ///   return tf.matmul(a, a) + y
- ///
- /// b = gen_batch_ops.batch_function(
- ///     f=computation,
- ///     in_tensors=[a],
- ///     captured_tensors=computation.captured_inputs,
- ///     Tout=[o.type for o in computation.definition.signature.output_arg],
- ///     num_batch_threads=1,
- ///     max_batch_size=10,
- ///     batch_timeout_micros=100000,  # 100ms
- ///     allowed_batch_sizes=[3, 10],
- ///     batching_queue="")
- /// ```
- ///
- /// If more than one session.run call is simultaneously trying to compute `b`,
- /// the values of `a` will be gathered, non-deterministically concatenated
- /// along the first axis, and only one thread will run the computation.
- ///
- /// Assumes that all arguments of the function are Tensors which will be batched
- /// along their first dimension.
- ///
- /// Arguments that are captured are not batched. The session.run call which does
- /// the concatenation will use the values of the captured tensors available to it.
- /// Therefore, typical uses of captured tensors should involve values which remain
- /// unchanged across session.run calls. Inference is a good example of this.
- ///
- /// SparseTensor is not supported. The return value of the decorated function
- /// must be a Tensor or a list/tuple of Tensors.
- ///
- /// - Parameters:
- /// - in_tensors: The tensors to be batched.
- /// - captured_tensors: The tensors which are captured in the function, and don't need
- /// to be batched.
- ///
- /// - Attrs:
- /// - num_batch_threads: Number of scheduling threads for processing batches of work.
- /// Determines the number of batches processed in parallel.
- /// - max_batch_size: Batch sizes will never be bigger than this.
- /// - batch_timeout_micros: Maximum number of microseconds to wait before outputting
- /// an incomplete batch.
- /// - max_enqueued_batches: Maximum number of batches enqueued. Default: 10.
- /// - allowed_batch_sizes: Optional list of allowed batch sizes. If left empty, does
- /// nothing. Otherwise, supplies a list of batch sizes, causing the op to pad
- /// batches up to one of those sizes. The entries must increase monotonically, and
- /// the final entry must equal max_batch_size.
- /// - container: Controls the scope of sharing of this batch.
- /// - shared_name: Concurrently running instances of batch in the same device with the
- /// same container and shared_name will batch their elements together. If left
- /// empty, the op name will be used as the shared name.
- /// - Tin: the types of tensors to be batched.
- /// - Tcaptured: the types of the captured tensors.
- /// - Tout: the types of the output tensors.
- ///
- /// - Output out_tensors: The output tensors.
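// The allowed_batch_sizes attr above pads an emitted batch up to the next
// allowed size. A sketch of that rule with a hypothetical helper (not part
// of this API); nil models the invalid case of exceeding max_batch_size:
func paddedBatchSize(_ actual: Int, allowedBatchSizes: [Int]) -> Int? {
  // Entries increase monotonically and end at max_batch_size.
  allowedBatchSizes.first(where: { $0 >= actual })
}

let padded = paddedBatchSize(4, allowedBatchSizes: [3, 10])  // Optional(10)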
- @inlinable @inline(__always) - public static func batchFunction< - FIn: TensorGroup, - FOut: TensorGroup, - Tin: TensorArrayProtocol, - Tcaptured: TensorArrayProtocol, - Tout: TensorGroup - >( - inTensors: Tin, - capturedTensors: Tcaptured, - f: (FIn) -> FOut, - numBatchThreads: Int64, - maxBatchSize: Int64, - batchTimeoutMicros: Int64, - maxEnqueuedBatches: Int64 = 10, - allowedBatchSizes: [Int32], - container: String, - sharedName: String, - batchingQueue: String - ) -> Tout { - _RawTFEager.batchFunction( - inTensors: inTensors, capturedTensors: capturedTensors, f: f, - numBatchThreads: numBatchThreads, maxBatchSize: maxBatchSize, - batchTimeoutMicros: batchTimeoutMicros, maxEnqueuedBatches: maxEnqueuedBatches, - allowedBatchSizes: allowedBatchSizes, container: container, sharedName: sharedName, - batchingQueue: batchingQueue) - } - - /// Multiplies slices of two tensors in batches. - /// - /// Multiplies all slices of `Tensor` `x` and `y` (each slice can be - /// viewed as an element of a batch), and arranges the individual results - /// in a single output tensor of the same batch size. Each of the - /// individual slices can optionally be adjointed (to adjoint a matrix - /// means to transpose and conjugate it) before multiplication by setting - /// the `adj_x` or `adj_y` flag to `True`, which are by default `False`. - /// - /// The input tensors `x` and `y` are 2-D or higher with shape `[..., r_x, c_x]` - /// and `[..., r_y, c_y]`. - /// - /// The output tensor is 2-D or higher with shape `[..., r_o, c_o]`, where: - /// - /// r_o = c_x if adj_x else r_x - /// c_o = r_y if adj_y else c_y - /// - /// It is computed as: - /// - /// output[..., :, :] = matrix(x[..., :, :]) * matrix(y[..., :, :]) - /// - /// - Parameters: - /// - x: 2-D or higher with shape `[..., r_x, c_x]`. - /// - y: 2-D or higher with shape `[..., r_y, c_y]`. - /// - /// - Attrs: - /// - adj_x: If `True`, adjoint the slices of `x`. Defaults to `False`. - /// - adj_y: If `True`, adjoint the slices of `y`. Defaults to `False`. - /// - /// - Output output: 3-D or higher with shape `[..., r_o, c_o]` - @inlinable @inline(__always) - public static func batchMatMul( - _ x: Tensor, - _ y: Tensor, - adjX: Bool = false, - adjY: Bool = false - ) -> Tensor { - switch commonBackend(x.handle.backend, y.handle.backend) { - case .XLA: - let output_device = y.device - let x = Tensor(copying: x, to: .defaultTFEager) - let y = Tensor(copying: y, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.batchMatMul(x, y, adjX: adjX, adjY: adjY), to: output_device) - case .TF_EAGER: - return _RawTFEager.batchMatMul(x, y, adjX: adjX, adjY: adjY) - } - - } - - /// Multiplies slices of two tensors in batches. - /// - /// Multiplies all slices of `Tensor` `x` and `y` (each slice can be - /// viewed as an element of a batch), and arranges the individual results - /// in a single output tensor of the same batch size. Each of the - /// individual slices can optionally be adjointed (to adjoint a matrix - /// means to transpose and conjugate it) before multiplication by setting - /// the `adj_x` or `adj_y` flag to `True`, which are by default `False`. - /// - /// The input tensors `x` and `y` are 2-D or higher with shape `[..., r_x, c_x]` - /// and `[..., r_y, c_y]`. 
- /// - /// The output tensor is 2-D or higher with shape `[..., r_o, c_o]`, where: - /// - /// r_o = c_x if adj_x else r_x - /// c_o = r_y if adj_y else c_y - /// - /// It is computed as: - /// - /// output[..., :, :] = matrix(x[..., :, :]) * matrix(y[..., :, :]) - /// - /// *NOTE*: `BatchMatMulV2` supports broadcasting in the batch dimensions. More - /// about broadcasting - /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html). - /// - /// - /// - Parameters: - /// - x: 2-D or higher with shape `[..., r_x, c_x]`. - /// - y: 2-D or higher with shape `[..., r_y, c_y]`. - /// - /// - Attrs: - /// - adj_x: If `True`, adjoint the slices of `x`. Defaults to `False`. - /// - adj_y: If `True`, adjoint the slices of `y`. Defaults to `False`. - /// - /// - Output output: 3-D or higher with shape `[..., r_o, c_o]` - @inlinable @inline(__always) - public static func batchMatMulV2( - _ x: Tensor, - _ y: Tensor, - adjX: Bool = false, - adjY: Bool = false - ) -> Tensor { - switch commonBackend(x.handle.backend, y.handle.backend) { - case .XLA: - return _RawXLA.batchMatMulV2(x, y, adjX: adjX, adjY: adjY) - case .TF_EAGER: - return _RawTFEager.batchMatMulV2(x, y, adjX: adjX, adjY: adjY) - } - - } - - @inlinable @inline(__always) - public static func batchMatrixBandPart( - _ input: Tensor, - numLower: Tensor, - numUpper: Tensor - ) -> Tensor { - switch commonBackend( - commonBackend(input.handle.backend, numLower.handle.backend), numUpper.handle.backend) - { - case .XLA: - let output_device = numUpper.device - let input = Tensor(copying: input, to: .defaultTFEager) - let numLower = Tensor(copying: numLower, to: .defaultTFEager) - let numUpper = Tensor(copying: numUpper, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.batchMatrixBandPart(input, numLower: numLower, numUpper: numUpper), - to: output_device) - case .TF_EAGER: - return _RawTFEager.batchMatrixBandPart(input, numLower: numLower, numUpper: numUpper) - } - - } - - @inlinable @inline(__always) - public static func batchMatrixDeterminant( - _ input: Tensor - ) -> Tensor { - switch input.handle.backend { - case .XLA: - let output_device = input.device - let input = Tensor(copying: input, to: .defaultTFEager) - return Tensor(copying: _RawTFEager.batchMatrixDeterminant(input), to: output_device) - case .TF_EAGER: - return _RawTFEager.batchMatrixDeterminant(input) - } - - } - - @inlinable @inline(__always) - public static func batchMatrixDiag( - diagonal: Tensor - ) -> Tensor { - switch diagonal.handle.backend { - case .XLA: - let output_device = diagonal.device - let diagonal = Tensor(copying: diagonal, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.batchMatrixDiag(diagonal: diagonal), to: output_device) - case .TF_EAGER: - return _RawTFEager.batchMatrixDiag(diagonal: diagonal) - } - - } - - @inlinable @inline(__always) - public static func batchMatrixDiagPart( - _ input: Tensor - ) -> Tensor { - switch input.handle.backend { - case .XLA: - let output_device = input.device - let input = Tensor(copying: input, to: .defaultTFEager) - return Tensor(copying: _RawTFEager.batchMatrixDiagPart(input), to: output_device) - case .TF_EAGER: - return _RawTFEager.batchMatrixDiagPart(input) - } - - } - - @inlinable @inline(__always) - public static func batchMatrixInverse( - _ input: Tensor, - adjoint: Bool = false - ) -> Tensor { - switch input.handle.backend { - case .XLA: - let output_device = input.device - let input = Tensor(copying: input, to: .defaultTFEager) - return Tensor( - copying: 
_RawTFEager.batchMatrixInverse(input, adjoint: adjoint), to: output_device) - case .TF_EAGER: - return _RawTFEager.batchMatrixInverse(input, adjoint: adjoint) - } - - } - - @inlinable @inline(__always) - public static func batchMatrixSetDiag( - _ input: Tensor, - diagonal: Tensor - ) -> Tensor { - switch commonBackend(input.handle.backend, diagonal.handle.backend) { - case .XLA: - let output_device = diagonal.device - let input = Tensor(copying: input, to: .defaultTFEager) - let diagonal = Tensor(copying: diagonal, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.batchMatrixSetDiag(input, diagonal: diagonal), to: output_device) - case .TF_EAGER: - return _RawTFEager.batchMatrixSetDiag(input, diagonal: diagonal) - } - - } - - @inlinable @inline(__always) - public static func batchMatrixSolve( - matrix: Tensor, - rhs: Tensor, - adjoint: Bool = false - ) -> Tensor { - switch commonBackend(matrix.handle.backend, rhs.handle.backend) { - case .XLA: - let output_device = rhs.device - let matrix = Tensor(copying: matrix, to: .defaultTFEager) - let rhs = Tensor(copying: rhs, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.batchMatrixSolve(matrix: matrix, rhs: rhs, adjoint: adjoint), - to: output_device) - case .TF_EAGER: - return _RawTFEager.batchMatrixSolve(matrix: matrix, rhs: rhs, adjoint: adjoint) - } - - } - - @inlinable @inline(__always) - public static func batchMatrixSolveLs( - matrix: Tensor, - rhs: Tensor, - l2Regularizer: Tensor, - fast: Bool = true - ) -> Tensor { - switch commonBackend( - commonBackend(matrix.handle.backend, rhs.handle.backend), l2Regularizer.handle.backend) - { - case .XLA: - let output_device = l2Regularizer.device - let matrix = Tensor(copying: matrix, to: .defaultTFEager) - let rhs = Tensor(copying: rhs, to: .defaultTFEager) - let l2Regularizer = Tensor(copying: l2Regularizer, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.batchMatrixSolveLs( - matrix: matrix, rhs: rhs, l2Regularizer: l2Regularizer, fast: fast), to: output_device) - case .TF_EAGER: - return _RawTFEager.batchMatrixSolveLs( - matrix: matrix, rhs: rhs, l2Regularizer: l2Regularizer, fast: fast) - } - - } - - @inlinable @inline(__always) - public static func batchMatrixTriangularSolve( - matrix: Tensor, - rhs: Tensor, - lower: Bool = true, - adjoint: Bool = false - ) -> Tensor { - switch commonBackend(matrix.handle.backend, rhs.handle.backend) { - case .XLA: - let output_device = rhs.device - let matrix = Tensor(copying: matrix, to: .defaultTFEager) - let rhs = Tensor(copying: rhs, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.batchMatrixTriangularSolve( - matrix: matrix, rhs: rhs, lower: lower, adjoint: adjoint), to: output_device) - case .TF_EAGER: - return _RawTFEager.batchMatrixTriangularSolve( - matrix: matrix, rhs: rhs, lower: lower, adjoint: adjoint) - } - - } - - /// Batch normalization. - /// - /// This op is deprecated. Prefer `tf.nn.batch_normalization`. - /// - /// - Parameters: - /// - t: A 4D input Tensor. - /// - m: A 1D mean Tensor with size matching the last dimension of t. - /// This is the first output from tf.nn.moments, - /// or a saved moving average thereof. - /// - v: A 1D variance Tensor with size matching the last dimension of t. - /// This is the second output from tf.nn.moments, - /// or a saved moving average thereof. - /// - beta: A 1D beta Tensor with size matching the last dimension of t. - /// An offset to be added to the normalized tensor. - /// - gamma: A 1D gamma Tensor with size matching the last dimension of t. 
- /// If "scale_after_normalization" is true, this tensor will be multiplied - /// with the normalized tensor. - /// - /// - Attrs: - /// - variance_epsilon: A small float number to avoid dividing by 0. - /// - scale_after_normalization: A bool indicating whether the resulted tensor - /// needs to be multiplied with gamma. - @inlinable @inline(__always) - public static func batchNormWithGlobalNormalization( - t: Tensor, - m: Tensor, - v: Tensor, - beta: Tensor, - gamma: Tensor, - varianceEpsilon: Double, - scaleAfterNormalization: Bool - ) -> Tensor { - switch commonBackend( - commonBackend( - commonBackend(commonBackend(t.handle.backend, m.handle.backend), v.handle.backend), - beta.handle.backend), gamma.handle.backend) - { - case .XLA: - let output_device = gamma.device - let t = Tensor(copying: t, to: .defaultTFEager) - let m = Tensor(copying: m, to: .defaultTFEager) - let v = Tensor(copying: v, to: .defaultTFEager) - let beta = Tensor(copying: beta, to: .defaultTFEager) - let gamma = Tensor(copying: gamma, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.batchNormWithGlobalNormalization( - t: t, m: m, v: v, beta: beta, gamma: gamma, varianceEpsilon: varianceEpsilon, - scaleAfterNormalization: scaleAfterNormalization), to: output_device) - case .TF_EAGER: - return _RawTFEager.batchNormWithGlobalNormalization( - t: t, m: m, v: v, beta: beta, gamma: gamma, varianceEpsilon: varianceEpsilon, - scaleAfterNormalization: scaleAfterNormalization) - } - - } - - /// Gradients for batch normalization. - /// - /// This op is deprecated. See `tf.nn.batch_normalization`. - /// - /// - Parameters: - /// - t: A 4D input Tensor. - /// - m: A 1D mean Tensor with size matching the last dimension of t. - /// This is the first output from tf.nn.moments, - /// or a saved moving average thereof. - /// - v: A 1D variance Tensor with size matching the last dimension of t. - /// This is the second output from tf.nn.moments, - /// or a saved moving average thereof. - /// - gamma: A 1D gamma Tensor with size matching the last dimension of t. - /// If "scale_after_normalization" is true, this Tensor will be multiplied - /// with the normalized Tensor. - /// - backprop: 4D backprop Tensor. - /// - /// - Attrs: - /// - variance_epsilon: A small float number to avoid dividing by 0. - /// - scale_after_normalization: A bool indicating whether the resulted tensor - /// needs to be multiplied with gamma. - /// - /// - Outputs: - /// - dx: 4D backprop tensor for input. - /// - dm: 1D backprop tensor for mean. - /// - dv: 1D backprop tensor for variance. - /// - db: 1D backprop tensor for beta. - /// - dg: 1D backprop tensor for gamma. 
- @inlinable @inline(__always) - public static func batchNormWithGlobalNormalizationGrad( - t: Tensor, - m: Tensor, - v: Tensor, - gamma: Tensor, - backprop: Tensor, - varianceEpsilon: Double, - scaleAfterNormalization: Bool - ) -> (dx: Tensor, dm: Tensor, dv: Tensor, db: Tensor, dg: Tensor) { - _RawTFEager.batchNormWithGlobalNormalizationGrad( - t: t, m: m, v: v, gamma: gamma, backprop: backprop, varianceEpsilon: varianceEpsilon, - scaleAfterNormalization: scaleAfterNormalization) - } + } - @inlinable @inline(__always) - public static func batchSelfAdjointEig( - _ input: Tensor - ) -> Tensor { - switch input.handle.backend { - case .XLA: - let output_device = input.device - let input = Tensor(copying: input, to: .defaultTFEager) - return Tensor(copying: _RawTFEager.batchSelfAdjointEig(input), to: output_device) - case .TF_EAGER: - return _RawTFEager.batchSelfAdjointEig(input) - } - - } - - @inlinable @inline(__always) - public static func batchSelfAdjointEigV2( - _ input: Tensor, - computeV: Bool = true - ) -> (e: Tensor, v: Tensor) { - _RawTFEager.batchSelfAdjointEigV2(input, computeV: computeV) - } - - @inlinable @inline(__always) - public static func batchSvd( - _ input: Tensor, - computeUv: Bool = true, - fullMatrices: Bool = false - ) -> (s: Tensor, u: Tensor, v: Tensor) { - _RawTFEager.batchSvd(input, computeUv: computeUv, fullMatrices: fullMatrices) - } - - /// BatchToSpace for 4-D tensors of type T. - /// - /// This is a legacy version of the more general BatchToSpaceND. - /// - /// Rearranges (permutes) data from batch into blocks of spatial data, followed by - /// cropping. This is the reverse transformation of SpaceToBatch. More specifically, - /// this op outputs a copy of the input tensor where values from the `batch` - /// dimension are moved in spatial blocks to the `height` and `width` dimensions, - /// followed by cropping along the `height` and `width` dimensions. - /// - /// - Parameters: - /// - input: 4-D tensor with shape - /// `[batch*block_size*block_size, height_pad/block_size, width_pad/block_size, - /// depth]`. Note that the batch size of the input tensor must be divisible by - /// `block_size * block_size`. - /// - crops: 2-D tensor of non-negative integers with shape `[2, 2]`. It specifies - /// how many elements to crop from the intermediate result across the spatial - /// dimensions as follows: - /// - /// crops = [[crop_top, crop_bottom], [crop_left, crop_right]] - /// - /// - Output output: 4-D with shape `[batch, height, width, depth]`, where: - /// - /// height = height_pad - crop_top - crop_bottom - /// width = width_pad - crop_left - crop_right - /// - /// The attr `block_size` must be greater than one. It indicates the block size. 
- /// - /// Some examples: - /// - /// (1) For the following input of shape `[4, 1, 1, 1]` and block_size of 2: - /// - /// ``` - /// [[[[1]]], [[[2]]], [[[3]]], [[[4]]]] - /// ``` - /// - /// The output tensor has shape `[1, 2, 2, 1]` and value: - /// - /// ``` - /// x = [[[[1], [2]], [[3], [4]]]] - /// ``` - /// - /// (2) For the following input of shape `[4, 1, 1, 3]` and block_size of 2: - /// - /// ``` - /// [[[[1, 2, 3]]], [[[4, 5, 6]]], [[[7, 8, 9]]], [[[10, 11, 12]]]] - /// ``` - /// - /// The output tensor has shape `[1, 2, 2, 3]` and value: - /// - /// ``` - /// x = [[[[1, 2, 3], [4, 5, 6]], - /// [[7, 8, 9], [10, 11, 12]]]] - /// ``` - /// - /// (3) For the following input of shape `[4, 2, 2, 1]` and block_size of 2: - /// - /// ``` - /// x = [[[[1], [3]], [[9], [11]]], - /// [[[2], [4]], [[10], [12]]], - /// [[[5], [7]], [[13], [15]]], - /// [[[6], [8]], [[14], [16]]]] - /// ``` - /// - /// The output tensor has shape `[1, 4, 4, 1]` and value: - /// - /// ``` - /// x = [[[[1], [2], [3], [4]], - /// [[5], [6], [7], [8]], - /// [[9], [10], [11], [12]], - /// [[13], [14], [15], [16]]]] - /// ``` - /// - /// (4) For the following input of shape `[8, 1, 2, 1]` and block_size of 2: - /// - /// ``` - /// x = [[[[1], [3]]], [[[9], [11]]], [[[2], [4]]], [[[10], [12]]], - /// [[[5], [7]]], [[[13], [15]]], [[[6], [8]]], [[[14], [16]]]] - /// ``` - /// - /// The output tensor has shape `[2, 2, 4, 1]` and value: - /// - /// ``` - /// x = [[[[1], [3]], [[5], [7]]], - /// [[[2], [4]], [[10], [12]]], - /// [[[5], [7]], [[13], [15]]], - /// [[[6], [8]], [[14], [16]]]] - /// ``` - @inlinable @inline(__always) - public static func batchToSpace< - T: TensorFlowScalar, - Tidx: TensorFlowIndex - >( - _ input: Tensor, - crops: Tensor, - blockSize: Int64 - ) -> Tensor { - switch commonBackend(input.handle.backend, crops.handle.backend) { - case .XLA: - let output_device = crops.device - let input = Tensor(copying: input, to: .defaultTFEager) - let crops = Tensor(copying: crops, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.batchToSpace(input, crops: crops, blockSize: blockSize), - to: output_device) - case .TF_EAGER: - return _RawTFEager.batchToSpace(input, crops: crops, blockSize: blockSize) - } - - } - - /// BatchToSpace for N-D tensors of type T. - /// - /// This operation reshapes the "batch" dimension 0 into `M + 1` dimensions of shape - /// `block_shape + [batch]`, interleaves these blocks back into the grid defined by - /// the spatial dimensions `[1, ..., M]`, to obtain a result with the same rank as - /// the input. The spatial dimensions of this intermediate result are then - /// optionally cropped according to `crops` to produce the output. This is the - /// reverse of SpaceToBatch. See below for a precise description. - /// - /// - Parameters: - /// - input: N-D with shape `input_shape = [batch] + spatial_shape + remaining_shape`, - /// where spatial_shape has M dimensions. - /// - block_shape: 1-D with shape `[M]`, all values must be >= 1. - /// - crops: 2-D with shape `[M, 2]`, all values must be >= 0. - /// `crops[i] = [crop_start, crop_end]` specifies the amount to crop from input - /// dimension `i + 1`, which corresponds to spatial dimension `i`. It is - /// required that - /// `crop_start[i] + crop_end[i] <= block_shape[i] * input_shape[i + 1]`. - /// - /// This operation is equivalent to the following steps: - /// - /// 1. 
Reshape `input` to `reshaped` of shape: - /// [block_shape[0], ..., block_shape[M-1], - /// batch / prod(block_shape), - /// input_shape[1], ..., input_shape[N-1]] - /// - /// 2. Permute dimensions of `reshaped` to produce `permuted` of shape - /// [batch / prod(block_shape), - /// - /// input_shape[1], block_shape[0], - /// ..., - /// input_shape[M], block_shape[M-1], - /// - /// input_shape[M+1], ..., input_shape[N-1]] - /// - /// 3. Reshape `permuted` to produce `reshaped_permuted` of shape - /// [batch / prod(block_shape), - /// - /// input_shape[1] * block_shape[0], - /// ..., - /// input_shape[M] * block_shape[M-1], - /// - /// input_shape[M+1], - /// ..., - /// input_shape[N-1]] - /// - /// 4. Crop the start and end of dimensions `[1, ..., M]` of - /// `reshaped_permuted` according to `crops` to produce the output of shape: - /// [batch / prod(block_shape), - /// - /// input_shape[1] * block_shape[0] - crops[0,0] - crops[0,1], - /// ..., - /// input_shape[M] * block_shape[M-1] - crops[M-1,0] - crops[M-1,1], - /// - /// input_shape[M+1], ..., input_shape[N-1]] - /// - /// Some examples: - /// - /// (1) For the following input of shape `[4, 1, 1, 1]`, `block_shape = [2, 2]`, and - /// `crops = [[0, 0], [0, 0]]`: - /// - /// ``` - /// [[[[1]]], [[[2]]], [[[3]]], [[[4]]]] - /// ``` - /// - /// The output tensor has shape `[1, 2, 2, 1]` and value: - /// - /// ``` - /// x = [[[[1], [2]], [[3], [4]]]] - /// ``` - /// - /// (2) For the following input of shape `[4, 1, 1, 3]`, `block_shape = [2, 2]`, and - /// `crops = [[0, 0], [0, 0]]`: - /// - /// ``` - /// [[[[1, 2, 3]]], [[[4, 5, 6]]], [[[7, 8, 9]]], [[[10, 11, 12]]]] - /// ``` - /// - /// The output tensor has shape `[1, 2, 2, 3]` and value: - /// - /// ``` - /// x = [[[[1, 2, 3], [4, 5, 6]], - /// [[7, 8, 9], [10, 11, 12]]]] - /// ``` - /// - /// (3) For the following input of shape `[4, 2, 2, 1]`, `block_shape = [2, 2]`, and - /// `crops = [[0, 0], [0, 0]]`: - /// - /// ``` - /// x = [[[[1], [3]], [[9], [11]]], - /// [[[2], [4]], [[10], [12]]], - /// [[[5], [7]], [[13], [15]]], - /// [[[6], [8]], [[14], [16]]]] - /// ``` - /// - /// The output tensor has shape `[1, 4, 4, 1]` and value: - /// - /// ``` - /// x = [[[[1], [2], [3], [4]], - /// [[5], [6], [7], [8]], - /// [[9], [10], [11], [12]], - /// [[13], [14], [15], [16]]]] - /// ``` - /// - /// (4) For the following input of shape `[8, 1, 3, 1]`, `block_shape = [2, 2]`, and - /// `crops = [[0, 0], [2, 0]]`: - /// - /// ``` - /// x = [[[[0], [1], [3]]], [[[0], [9], [11]]], - /// [[[0], [2], [4]]], [[[0], [10], [12]]], - /// [[[0], [5], [7]]], [[[0], [13], [15]]], - /// [[[0], [6], [8]]], [[[0], [14], [16]]]] - /// ``` - /// - /// The output tensor has shape `[2, 2, 4, 1]` and value: - /// - /// ``` - /// x = [[[[1], [2], [3], [4]], - /// [[5], [6], [7], [8]]], - /// [[[9], [10], [11], [12]], - /// [[13], [14], [15], [16]]]] - /// ``` - @inlinable @inline(__always) - public static func batchToSpaceND< - T: TensorFlowScalar, - TblockShape: TensorFlowIndex, - Tcrops: TensorFlowIndex - >( - _ input: Tensor, - blockShape: Tensor, - crops: Tensor - ) -> Tensor { - switch commonBackend( - commonBackend(input.handle.backend, blockShape.handle.backend), crops.handle.backend) - { - case .XLA: - let output_device = crops.device - let input = Tensor(copying: input, to: .defaultTFEager) - let blockShape = Tensor(copying: blockShape, to: .defaultTFEager) - let crops = Tensor(copying: crops, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.batchToSpaceND(input, blockShape: blockShape, 
crops: crops),
-          to: output_device)
-      case .TF_EAGER:
-        return _RawTFEager.batchToSpaceND(input, blockShape: blockShape, crops: crops)
-      }
-
-    }
-
-    /// Computes the Bessel i0e function of `x` element-wise.
-    ///
-    /// Exponentially scaled modified Bessel function of order 0 defined as
-    /// `bessel_i0e(x) = exp(-abs(x)) bessel_i0(x)`.
-    ///
-    /// This function is faster and numerically more stable than `bessel_i0(x)`.
-    @inlinable @inline(__always)
-    public static func besselI0e(
-      _ x: Tensor
-    ) -> Tensor {
-      switch x.handle.backend {
-      case .XLA:
-        let output_device = x.device
-        let x = Tensor(copying: x, to: .defaultTFEager)
-        return Tensor(copying: _RawTFEager.besselI0e(x), to: output_device)
-      case .TF_EAGER:
-        return _RawTFEager.besselI0e(x)
-      }
-
-    }
-
-    /// Computes the Bessel i1e function of `x` element-wise.
-    ///
-    /// Exponentially scaled modified Bessel function of order 1 defined as
-    /// `bessel_i1e(x) = exp(-abs(x)) bessel_i1(x)`.
-    ///
-    /// This function is faster and numerically more stable than `bessel_i1(x)`.
-    @inlinable @inline(__always)
-    public static func besselI1e(
-      _ x: Tensor
-    ) -> Tensor {
-      switch x.handle.backend {
-      case .XLA:
-        let output_device = x.device
-        let x = Tensor(copying: x, to: .defaultTFEager)
-        return Tensor(copying: _RawTFEager.besselI1e(x), to: output_device)
-      case .TF_EAGER:
-        return _RawTFEager.besselI1e(x)
-      }
-
-    }
-
-    /// Compute the regularized incomplete beta integral \\(I_x(a, b)\\).
-    ///
-    /// The regularized incomplete beta integral is defined as:
-    ///
-    ///
-    /// \\(I_x(a, b) = \frac{B(x; a, b)}{B(a, b)}\\)
-    ///
-    /// where
-    ///
-    ///
-    /// \\(B(x; a, b) = \int_0^x t^{a-1} (1 - t)^{b-1} dt\\)
-    ///
-    ///
-    /// is the incomplete beta function and \\(B(a, b)\\) is the *complete*
-    /// beta function.
-    @inlinable @inline(__always)
-    public static func betainc(
-      _ a: Tensor,
-      _ b: Tensor,
-      _ x: Tensor
-    ) -> Tensor {
-      switch commonBackend(commonBackend(a.handle.backend, b.handle.backend), x.handle.backend) {
-      case .XLA:
-        let output_device = x.device
-        let a = Tensor(copying: a, to: .defaultTFEager)
-        let b = Tensor(copying: b, to: .defaultTFEager)
-        let x = Tensor(copying: x, to: .defaultTFEager)
-        return Tensor(copying: _RawTFEager.betainc(a, b, x), to: output_device)
-      case .TF_EAGER:
-        return _RawTFEager.betainc(a, b, x)
-      }
-
-    }
-
-    /// Adds `bias` to `value`.
-    ///
-    /// This is a special case of `tf.add` where `bias` is restricted to be 1-D.
-    /// Broadcasting is supported, so `value` may have any number of dimensions.
-    ///
-    /// - Parameters:
-    ///   - value: Any number of dimensions.
-    ///   - bias: 1-D with size the last dimension of `value`.
-    ///
-    /// - Attr data_format: Specify the data format of the input and output data. With the
-    ///     default format "NHWC", the bias tensor will be added to the last dimension
-    ///     of the value tensor.
-    ///     Alternatively, the format could be "NCHW", the data storage order of:
-    ///         [batch, in_channels, in_height, in_width].
-    ///     The tensor will be added to "in_channels", the third-to-the-last
-    ///         dimension.
-    ///
-    /// - Output output: Broadcasted sum of `value` and `bias`.
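A small usage sketch for `biasAdd`, documented above and dispatched just below; the values are illustrative:

```swift
import TensorFlow

// A [2, 3] input and a length-3 bias, broadcast along the last dimension
// (the default NHWC data format).
let value = Tensor<Float>(shape: [2, 3], scalars: [1, 2, 3, 4, 5, 6])
let bias = Tensor<Float>([10, 20, 30])
let shifted = _Raw.biasAdd(value: value, bias: bias)
// shifted == [[11, 22, 33], [14, 25, 36]]
```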
- @inlinable @inline(__always) - public static func biasAdd( - value: Tensor, - bias: Tensor, - dataFormat: DataFormat = .nhwc - ) -> Tensor { - switch commonBackend(value.handle.backend, bias.handle.backend) { - case .XLA: - let output_device = bias.device - let value = Tensor(copying: value, to: .defaultTFEager) - let bias = Tensor(copying: bias, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.biasAdd(value: value, bias: bias, dataFormat: dataFormat), - to: output_device) - case .TF_EAGER: - return _RawTFEager.biasAdd(value: value, bias: bias, dataFormat: dataFormat) - } - - } - - /// The backward operation for "BiasAdd" on the "bias" tensor. - /// - /// It accumulates all the values from out_backprop into the feature dimension. - /// For NHWC data format, the feature dimension is the last. For NCHW data format, - /// the feature dimension is the third-to-last. - /// - /// - Parameter out_backprop: Any number of dimensions. - /// - /// - Attr data_format: Specify the data format of the input and output data. With the - /// default format "NHWC", the bias tensor will be added to the last dimension - /// of the value tensor. - /// Alternatively, the format could be "NCHW", the data storage order of: - /// [batch, in_channels, in_height, in_width]. - /// The tensor will be added to "in_channels", the third-to-the-last - /// dimension. - /// - /// - Output output: 1-D with size the feature dimension of `out_backprop`. - @inlinable @inline(__always) - public static func biasAddGrad( - outBackprop: Tensor, - dataFormat: DataFormat = .nhwc - ) -> Tensor { - switch outBackprop.handle.backend { - case .XLA: - let output_device = outBackprop.device - let outBackprop = Tensor(copying: outBackprop, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.biasAddGrad(outBackprop: outBackprop, dataFormat: dataFormat), - to: output_device) - case .TF_EAGER: - return _RawTFEager.biasAddGrad(outBackprop: outBackprop, dataFormat: dataFormat) - } - - } - - /// Adds `bias` to `value`. - /// - /// This is a deprecated version of BiasAdd and will be soon removed. - /// - /// This is a special case of `tf.add` where `bias` is restricted to be 1-D. - /// Broadcasting is supported, so `value` may have any number of dimensions. - /// - /// - Parameters: - /// - value: Any number of dimensions. - /// - bias: 1-D with size the last dimension of `value`. - /// - /// - Output output: Broadcasted sum of `value` and `bias`. - @inlinable @inline(__always) - public static func biasAddV1( - value: Tensor, - bias: Tensor - ) -> Tensor { - switch commonBackend(value.handle.backend, bias.handle.backend) { - case .XLA: - let output_device = bias.device - let value = Tensor(copying: value, to: .defaultTFEager) - let bias = Tensor(copying: bias, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.biasAddV1(value: value, bias: bias), to: output_device) - case .TF_EAGER: - return _RawTFEager.biasAddV1(value: value, bias: bias) - } - - } - - @inlinable @inline(__always) - public static func binary( - _ a: Tensor, - _ b: Tensor - ) -> Tensor { - switch commonBackend(a.handle.backend, b.handle.backend) { - case .XLA: - let output_device = b.device - let a = Tensor(copying: a, to: .defaultTFEager) - let b = Tensor(copying: b, to: .defaultTFEager) - return Tensor(copying: _RawTFEager.binary(a, b), to: output_device) - case .TF_EAGER: - return _RawTFEager.binary(a, b) - } - - } - - /// Counts the number of occurrences of each value in an integer array. 
-    ///
-    /// Outputs a vector with length `size` and the same dtype as `weights`. If
-    /// `weights` are empty, then index `i` stores the number of times the value `i` is
-    /// counted in `arr`. If `weights` are non-empty, then index `i` stores the sum of
-    /// the value in `weights` at each index where the corresponding value in `arr` is
-    /// `i`.
-    ///
-    /// Values in `arr` outside of the range [0, size) are ignored.
-    ///
-    /// - Parameters:
-    ///   - arr: int32 `Tensor`.
-    ///   - size: non-negative int32 scalar `Tensor`.
-    ///   - weights: is an int32, int64, float32, or float64 `Tensor` with the same
-    ///     shape as `arr`, or a length-0 `Tensor`, in which case it acts as all weights
-    ///     equal to 1.
-    ///
-    /// - Output bins: 1D `Tensor` with length equal to `size`. The counts or summed weights for
-    ///     each value in the range [0, size).
-    @inlinable @inline(__always)
-    public static func bincount(
-      arr: Tensor,
-      size: Tensor,
-      weights: Tensor
-    ) -> Tensor {
-      switch commonBackend(
-        commonBackend(arr.handle.backend, size.handle.backend), weights.handle.backend)
-      {
-      case .XLA:
-        let output_device = weights.device
-        let arr = Tensor(copying: arr, to: .defaultTFEager)
-        let size = Tensor(copying: size, to: .defaultTFEager)
-        let weights = Tensor(copying: weights, to: .defaultTFEager)
-        return Tensor(
-          copying: _RawTFEager.bincount(arr: arr, size: size, weights: weights), to: output_device)
-      case .TF_EAGER:
-        return _RawTFEager.bincount(arr: arr, size: size, weights: weights)
-      }
-
-    }
-
-    /// Bitcasts a tensor from one type to another without copying data.
-    ///
-    /// Given a tensor `input`, this operation returns a tensor that has the same buffer
-    /// data as `input` with datatype `type`.
-    ///
-    /// If the input datatype `T` is larger than the output datatype `type` then the
-    /// shape changes from [...] to [..., sizeof(`T`)/sizeof(`type`)].
-    ///
-    /// If `T` is smaller than `type`, the operator requires that the rightmost
-    /// dimension be equal to sizeof(`type`)/sizeof(`T`). The shape then goes from
-    /// [..., sizeof(`type`)/sizeof(`T`)] to [...].
-    ///
-    /// tf.bitcast() and tf.cast() work differently when a real dtype is cast to a complex dtype
-    /// (e.g. tf.complex64 or tf.complex128): tf.cast() makes the imaginary part 0, while
-    /// tf.bitcast() raises an error.
-    /// For example,
-    ///
-    /// Example 1:
-    ///
-    /// >>> a = [1., 2., 3.]
-    /// >>> equality_bitcast = tf.bitcast(a, tf.complex128)
-    /// Traceback (most recent call last):
-    /// ...
-    /// InvalidArgumentError: Cannot bitcast from 1 to 18 [Op:Bitcast]
-    /// >>> equality_cast = tf.cast(a, tf.complex128)
-    /// >>> print(equality_cast)
-    /// tf.Tensor([1.+0.j 2.+0.j 3.+0.j], shape=(3,), dtype=complex128)
-    ///
-    /// Example 2:
-    ///
-    /// >>> tf.bitcast(tf.constant(0xffffffff, dtype=tf.uint32), tf.uint8)
-    /// <tf.Tensor: shape=(4,), dtype=uint8, numpy=array([255, 255, 255, 255], dtype=uint8)>
-    ///
-    /// Example 3:
-    ///
-    /// >>> x = [1., 2., 3.]
-    /// >>> y = [0., 2., 3.]
-    /// >>> equality = tf.equal(x, y)
-    /// >>> equality_cast = tf.cast(equality, tf.float32)
-    /// >>> equality_bitcast = tf.bitcast(equality_cast, tf.uint8)
-    /// >>> print(equality)
-    /// tf.Tensor([False True True], shape=(3,), dtype=bool)
-    /// >>> print(equality_cast)
-    /// tf.Tensor([0. 1. 1.], shape=(3,), dtype=float32)
-    /// >>> print(equality_bitcast)
-    /// tf.Tensor(
-    /// [[  0   0   0   0]
-    ///  [  0   0 128  63]
-    ///  [  0   0 128  63]], shape=(3, 4), dtype=uint8)
-    ///
-    /// *NOTE*: Bitcast is implemented as a low-level cast, so machines with different
-    /// endian orderings will give different results.
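The same shape rule, sketched with the Swift binding declared just below; the values are illustrative and, as the note above says, the byte order of the result is machine-dependent:

```swift
import TensorFlow

// Bitcasting a wider dtype to a narrower one appends a trailing dimension of
// sizeof(T) / sizeof(Type): a [1] uint32 tensor becomes a [1, 4] uint8 tensor.
let packed = Tensor<UInt32>([0xffff_ffff])
let bytes: Tensor<UInt8> = _Raw.bitcast(packed)
// bytes == [[255, 255, 255, 255]]; every byte is 0xff, so endianness is moot here.
```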
- @inlinable @inline(__always) - public static func bitcast< - T: TensorFlowNumeric, - Type: TensorFlowNumeric - >( - _ input: Tensor - ) -> Tensor { - switch input.handle.backend { - case .XLA: - let output_device = input.device - let input = Tensor(copying: input, to: .defaultTFEager) - return Tensor(copying: _RawTFEager.bitcast(input), to: output_device) - case .TF_EAGER: - return _RawTFEager.bitcast(input) - } - - } - - /// Elementwise computes the bitwise AND of `x` and `y`. - /// - /// The result will have those bits set, that are set in both `x` and `y`. The - /// computation is performed on the underlying representations of `x` and `y`. - /// - /// For example: - /// - /// ```python - /// import tensorflow as tf - /// from tensorflow.python.ops import bitwise_ops - /// dtype_list = [tf.int8, tf.int16, tf.int32, tf.int64, - /// tf.uint8, tf.uint16, tf.uint32, tf.uint64] - /// - /// for dtype in dtype_list: - /// lhs = tf.constant([0, 5, 3, 14], dtype=dtype) - /// rhs = tf.constant([5, 0, 7, 11], dtype=dtype) - /// exp = tf.constant([0, 0, 3, 10], dtype=tf.float32) - /// - /// res = bitwise_ops.bitwise_and(lhs, rhs) - /// tf.assert_equal(tf.cast(res, tf.float32), exp) # TRUE - /// ``` - /// - @inlinable @inline(__always) - public static func bitwiseAnd( - _ x: Tensor, - _ y: Tensor - ) -> Tensor { - switch commonBackend(x.handle.backend, y.handle.backend) { - case .XLA: - let output_device = y.device - let x = Tensor(copying: x, to: .defaultTFEager) - let y = Tensor(copying: y, to: .defaultTFEager) - return Tensor(copying: _RawTFEager.bitwiseAnd(x, y), to: output_device) - case .TF_EAGER: - return _RawTFEager.bitwiseAnd(x, y) - } - - } - - /// Elementwise computes the bitwise OR of `x` and `y`. - /// - /// The result will have those bits set, that are set in `x`, `y` or both. The - /// computation is performed on the underlying representations of `x` and `y`. - /// - /// For example: - /// - /// ```python - /// import tensorflow as tf - /// from tensorflow.python.ops import bitwise_ops - /// dtype_list = [tf.int8, tf.int16, tf.int32, tf.int64, - /// tf.uint8, tf.uint16, tf.uint32, tf.uint64] - /// - /// for dtype in dtype_list: - /// lhs = tf.constant([0, 5, 3, 14], dtype=dtype) - /// rhs = tf.constant([5, 0, 7, 11], dtype=dtype) - /// exp = tf.constant([5, 5, 7, 15], dtype=tf.float32) - /// - /// res = bitwise_ops.bitwise_or(lhs, rhs) - /// tf.assert_equal(tf.cast(res, tf.float32), exp) # TRUE - /// ``` - /// - @inlinable @inline(__always) - public static func bitwiseOr( - _ x: Tensor, - _ y: Tensor - ) -> Tensor { - switch commonBackend(x.handle.backend, y.handle.backend) { - case .XLA: - let output_device = y.device - let x = Tensor(copying: x, to: .defaultTFEager) - let y = Tensor(copying: y, to: .defaultTFEager) - return Tensor(copying: _RawTFEager.bitwiseOr(x, y), to: output_device) - case .TF_EAGER: - return _RawTFEager.bitwiseOr(x, y) - } - - } - - /// Elementwise computes the bitwise XOR of `x` and `y`. - /// - /// The result will have those bits set, that are different in `x` and `y`. The - /// computation is performed on the underlying representations of `x` and `y`. 
-    ///
-    /// For example:
-    ///
-    /// ```python
-    /// import tensorflow as tf
-    /// from tensorflow.python.ops import bitwise_ops
-    /// dtype_list = [tf.int8, tf.int16, tf.int32, tf.int64,
-    ///               tf.uint8, tf.uint16, tf.uint32, tf.uint64]
-    ///
-    /// for dtype in dtype_list:
-    ///   lhs = tf.constant([0, 5, 3, 14], dtype=dtype)
-    ///   rhs = tf.constant([5, 0, 7, 11], dtype=dtype)
-    ///   exp = tf.constant([5, 5, 4, 5], dtype=tf.float32)
-    ///
-    ///   res = bitwise_ops.bitwise_xor(lhs, rhs)
-    ///   tf.assert_equal(tf.cast(res, tf.float32), exp) # TRUE
-    /// ```
-    ///
-    @inlinable @inline(__always)
-    public static func bitwiseXor(
-      _ x: Tensor,
-      _ y: Tensor
-    ) -> Tensor {
-      switch commonBackend(x.handle.backend, y.handle.backend) {
-      case .XLA:
-        let output_device = y.device
-        let x = Tensor(copying: x, to: .defaultTFEager)
-        let y = Tensor(copying: y, to: .defaultTFEager)
-        return Tensor(copying: _RawTFEager.bitwiseXor(x, y), to: output_device)
-      case .TF_EAGER:
-        return _RawTFEager.bitwiseXor(x, y)
-      }
-
-    }
-
-    /// Computes the LSTM cell forward propagation for all the time steps.
-    ///
-    /// This is equivalent to applying LSTMBlockCell in a loop, like so:
-    ///
-    /// ```python
-    /// for x1 in unpack(x):
-    ///   i1, cs1, f1, o1, ci1, co1, h1 = LSTMBlock(
-    ///     x1, cs_prev, h_prev, w, wci, wcf, wco, b)
-    ///   cs_prev = cs1
-    ///   h_prev = h1
-    ///   i.append(i1)
-    ///   cs.append(cs1)
-    ///   f.append(f1)
-    ///   o.append(o1)
-    ///   ci.append(ci1)
-    ///   co.append(co1)
-    ///   h.append(h1)
-    /// return pack(i), pack(cs), pack(f), pack(o), pack(ci), pack(co), pack(h)
-    /// ```
-    ///
-    /// - Parameters:
-    ///   - seq_len_max: Maximum time length actually used by this input. Outputs are padded
-    ///     with zeros beyond this length.
-    ///   - x: The sequence input to the LSTM, shape (timelen, batch_size, num_inputs).
-    ///   - cs_prev: Value of the initial cell state.
-    ///   - h_prev: Initial output of cell (to be used for peephole).
-    ///   - w: The weight matrix.
-    ///   - wci: The weight matrix for input gate peephole connection.
-    ///   - wcf: The weight matrix for forget gate peephole connection.
-    ///   - wco: The weight matrix for output gate peephole connection.
-    ///   - b: The bias vector.
-    ///
-    /// - Attrs:
-    ///   - forget_bias: The forget gate bias.
-    ///   - cell_clip: Value to clip the 'cs' value to.
-    ///   - use_peephole: Whether to use peephole weights.
-    ///
-    /// - Outputs:
-    ///   - i: The input gate over the whole time sequence.
-    ///   - cs: The cell state before the tanh over the whole time sequence.
-    ///   - f: The forget gate over the whole time sequence.
-    ///   - o: The output gate over the whole time sequence.
-    ///   - ci: The cell input over the whole time sequence.
-    ///   - co: The cell after the tanh over the whole time sequence.
-    ///   - h: The output h vector over the whole time sequence.
-    @inlinable @inline(__always)
-    public static func blockLSTM(
-      seqLenMax: Tensor,
-      _ x: Tensor,
-      csPrev: Tensor,
-      hPrev: Tensor,
-      w: Tensor,
-      wci: Tensor,
-      wcf: Tensor,
-      wco: Tensor,
-      _ b: Tensor,
-      forgetBias: Double = 1,
-      cellClip: Double = 3,
-      usePeephole: Bool = false
-    ) -> (
-      i: Tensor, cs: Tensor, f: Tensor, o: Tensor, ci: Tensor, co: Tensor,
-      h: Tensor
-    ) {
-      _RawTFEager.blockLSTM(
-        seqLenMax: seqLenMax, x, csPrev: csPrev, hPrev: hPrev, w: w, wci: wci, wcf: wcf, wco: wco,
-        b, forgetBias: forgetBias, cellClip: cellClip, usePeephole: usePeephole)
-    }
-
-    /// Computes the LSTM cell backward propagation for the entire time sequence.
-    ///
-    /// This implementation is to be used in conjunction with LSTMBlock.
-    ///
-    /// - Parameters:
-    ///   - seq_len_max: Maximum time length actually used by this input. Outputs are padded
-    ///     with zeros beyond this length.
-    ///   - x: The sequence input to the LSTM, shape (timelen, batch_size, num_inputs).
-    ///   - cs_prev: Value of the initial cell state.
-    ///   - h_prev: Initial output of cell (to be used for peephole).
-    ///   - w: The weight matrix.
-    ///   - wci: The weight matrix for input gate peephole connection.
-    ///   - wcf: The weight matrix for forget gate peephole connection.
-    ///   - wco: The weight matrix for output gate peephole connection.
-    ///   - b: The bias vector.
-    ///   - i: The input gate over the whole time sequence.
-    ///   - cs: The cell state before the tanh over the whole time sequence.
-    ///   - f: The forget gate over the whole time sequence.
-    ///   - o: The output gate over the whole time sequence.
-    ///   - ci: The cell input over the whole time sequence.
-    ///   - co: The cell after the tanh over the whole time sequence.
-    ///   - h: The output h vector over the whole time sequence.
-    ///   - cs_grad: The current gradient of cs.
-    ///   - h_grad: The gradient of h vector.
-    ///
-    /// - Attr use_peephole: Whether to use peephole weights.
-    ///
-    /// - Outputs:
-    ///   - x_grad: The gradient of x to be back-propped.
-    ///   - cs_prev_grad: The gradient of cs_prev to be back-propped.
-    ///   - h_prev_grad: The gradient of h_prev to be back-propped.
-    ///   - w_grad: The gradient for w to be back-propped.
-    ///   - wci_grad: The gradient for wci to be back-propped.
-    ///   - wcf_grad: The gradient for wcf to be back-propped.
-    ///   - wco_grad: The gradient for wco to be back-propped.
-    ///   - b_grad: The gradient for b to be back-propped.
-    @inlinable @inline(__always)
-    public static func blockLSTMGrad(
-      seqLenMax: Tensor,
-      _ x: Tensor,
-      csPrev: Tensor,
-      hPrev: Tensor,
-      w: Tensor,
-      wci: Tensor,
-      wcf: Tensor,
-      wco: Tensor,
-      _ b: Tensor,
-      i: Tensor,
-      cs: Tensor,
-      f: Tensor,
-      o: Tensor,
-      ci: Tensor,
-      co: Tensor,
-      h: Tensor,
-      csGrad: Tensor,
-      hGrad: Tensor,
-      usePeephole: Bool
-    ) -> (
-      xGrad: Tensor, csPrevGrad: Tensor, hPrevGrad: Tensor, wGrad: Tensor,
-      wciGrad: Tensor, wcfGrad: Tensor, wcoGrad: Tensor, bGrad: Tensor
-    ) {
-      _RawTFEager.blockLSTMGrad(
-        seqLenMax: seqLenMax, x, csPrev: csPrev, hPrev: hPrev, w: w, wci: wci, wcf: wcf, wco: wco,
-        b, i: i, cs: cs, f: f, o: o, ci: ci, co: co, h: h, csGrad: csGrad, hGrad: hGrad,
-        usePeephole: usePeephole)
-    }
-
-    /// Computes the LSTM cell backward propagation for the entire time sequence.
-    ///
-    /// This implementation is to be used in conjunction with BlockLSTMV2.
-    ///
-    /// - Parameters:
-    ///   - seq_len_max: Maximum time length actually used by this input. Outputs are padded
-    ///     with zeros beyond this length.
-    ///   - x: The sequence input to the LSTM, shape (timelen, batch_size, num_inputs).
-    ///   - cs_prev: Value of the initial cell state.
-    ///   - h_prev: Initial output of cell (to be used for peephole).
-    ///   - w: The weight matrix.
-    ///   - wci: The weight matrix for input gate peephole connection.
-    ///   - wcf: The weight matrix for forget gate peephole connection.
-    ///   - wco: The weight matrix for output gate peephole connection.
-    ///   - b: The bias vector.
-    ///   - i: The input gate over the whole time sequence.
-    ///   - cs: The cell state before the tanh over the whole time sequence.
-    ///   - f: The forget gate over the whole time sequence.
-    ///   - o: The output gate over the whole time sequence.
-    ///   - ci: The cell input over the whole time sequence.
-    ///   - co: The cell after the tanh over the whole time sequence.
-    ///   - h: The output h vector over the whole time sequence.
-    ///   - cs_grad: The current gradient of cs.
-    ///   - h_grad: The gradient of h vector.
-    ///
-    /// - Attr use_peephole: Whether to use peephole weights.
-    ///
-    /// - Outputs:
-    ///   - x_grad: The gradient of x to be back-propped.
-    ///   - cs_prev_grad: The gradient of cs_prev to be back-propped.
-    ///   - h_prev_grad: The gradient of h_prev to be back-propped.
-    ///   - w_grad: The gradient for w to be back-propped.
-    ///   - wci_grad: The gradient for wci to be back-propped.
-    ///   - wcf_grad: The gradient for wcf to be back-propped.
-    ///   - wco_grad: The gradient for wco to be back-propped.
-    ///   - b_grad: The gradient for b to be back-propped.
-    @inlinable @inline(__always)
-    public static func blockLSTMGradV2(
-      seqLenMax: Tensor,
-      _ x: Tensor,
-      csPrev: Tensor,
-      hPrev: Tensor,
-      w: Tensor,
-      wci: Tensor,
-      wcf: Tensor,
-      wco: Tensor,
-      _ b: Tensor,
-      i: Tensor,
-      cs: Tensor,
-      f: Tensor,
-      o: Tensor,
-      ci: Tensor,
-      co: Tensor,
-      h: Tensor,
-      csGrad: Tensor,
-      hGrad: Tensor,
-      usePeephole: Bool
-    ) -> (
-      xGrad: Tensor, csPrevGrad: Tensor, hPrevGrad: Tensor, wGrad: Tensor,
-      wciGrad: Tensor, wcfGrad: Tensor, wcoGrad: Tensor, bGrad: Tensor
-    ) {
-      _RawTFEager.blockLSTMGradV2(
-        seqLenMax: seqLenMax, x, csPrev: csPrev, hPrev: hPrev, w: w, wci: wci, wcf: wcf, wco: wco,
-        b, i: i, cs: cs, f: f, o: o, ci: ci, co: co, h: h, csGrad: csGrad, hGrad: hGrad,
-        usePeephole: usePeephole)
-    }
-
-    /// Computes the LSTM cell forward propagation for all the time steps.
-    ///
-    /// This is equivalent to applying LSTMBlockCell in a loop, like so:
-    ///
-    /// ```python
-    /// for x1 in unpack(x):
-    ///   i1, cs1, f1, o1, ci1, co1, h1 = LSTMBlock(
-    ///     x1, cs_prev, h_prev, w, wci, wcf, wco, b)
-    ///   cs_prev = cs1
-    ///   h_prev = h1
-    ///   i.append(i1)
-    ///   cs.append(cs1)
-    ///   f.append(f1)
-    ///   o.append(o1)
-    ///   ci.append(ci1)
-    ///   co.append(co1)
-    ///   h.append(h1)
-    /// return pack(i), pack(cs), pack(f), pack(o), pack(ci), pack(co), pack(h)
-    /// ```
-    ///
-    /// Note that unlike LSTMBlockCell (and BlockLSTM) which uses ICFO gate layout,
-    /// this op uses IFCO. So in order for the snippet above to be equivalent,
-    /// all gate-related outputs should be reordered.
-    ///
-    /// - Parameters:
-    ///   - seq_len_max: Maximum time length actually used by this input. Outputs are padded
-    ///     with zeros beyond this length.
-    ///   - x: The sequence input to the LSTM, shape (timelen, batch_size, num_inputs).
-    ///   - cs_prev: Value of the initial cell state.
-    ///   - h_prev: Initial output of cell (to be used for peephole).
-    ///   - w: The weight matrix.
-    ///   - wci: The weight matrix for input gate peephole connection.
-    ///   - wcf: The weight matrix for forget gate peephole connection.
-    ///   - wco: The weight matrix for output gate peephole connection.
-    ///   - b: The bias vector.
-    ///
-    /// - Attrs:
-    ///   - cell_clip: Value to clip the 'cs' value to.
-    ///   - use_peephole: Whether to use peephole weights.
-    ///
-    /// - Outputs:
-    ///   - i: The input gate over the whole time sequence.
-    ///   - cs: The cell state before the tanh over the whole time sequence.
-    ///   - f: The forget gate over the whole time sequence.
-    ///   - o: The output gate over the whole time sequence.
-    ///   - ci: The cell input over the whole time sequence.
-    ///   - co: The cell after the tanh over the whole time sequence.
-    ///   - h: The output h vector over the whole time sequence.
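Note: the blockLSTM family above delegates straight to `_RawTFEager`, since these ops have no XLA lowering at all. Most other ops in this file instead use a copy-to-eager fallback, which can be summarized in one hypothetical sketch; `withEagerFallback` is a stand-in helper, not part of this patch:

```swift
import TensorFlow

// Hypothetical sketch of the dispatch idiom used throughout this file: inputs on
// an XLA device are copied to the default eager device, the eager kernel runs,
// and the result is copied back so callers keep their original device placement.
func withEagerFallback<T: TensorFlowScalar>(
  _ x: Tensor<T>, _ eagerOp: (Tensor<T>) -> Tensor<T>
) -> Tensor<T> {
  switch x.handle.backend {
  case .XLA:
    let outputDevice = x.device
    let eagerInput = Tensor(copying: x, to: .defaultTFEager)
    return Tensor(copying: eagerOp(eagerInput), to: outputDevice)
  case .TF_EAGER:
    return eagerOp(x)
  }
}

// e.g. withEagerFallback(x) { _RawTFEager.besselI0e($0) }
```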
- @inlinable @inline(__always) - public static func blockLSTMV2( - seqLenMax: Tensor, - _ x: Tensor, - csPrev: Tensor, - hPrev: Tensor, - w: Tensor, - wci: Tensor, - wcf: Tensor, - wco: Tensor, - _ b: Tensor, - cellClip: Double = 0, - usePeephole: Bool = false - ) -> ( - i: Tensor, cs: Tensor, f: Tensor, o: Tensor, ci: Tensor, co: Tensor, - h: Tensor - ) { - _RawTFEager.blockLSTMV2( - seqLenMax: seqLenMax, x, csPrev: csPrev, hPrev: hPrev, w: w, wci: wci, wcf: wcf, wco: wco, - b, cellClip: cellClip, usePeephole: usePeephole) - } - - /// Aggregates the summary of accumulated stats for the batch. - /// - /// The summary stats contains gradients and hessians accumulated for each node, feature dimension id and bucket. - /// - /// - Parameters: - /// - node_ids: int32; Rank 1 Tensor containing node ids for each example, shape [batch_size]. - /// - gradients: float32; Rank 2 Tensor (shape=[batch_size, logits_dimension]) with gradients for each example. - /// - hessians: float32; Rank 2 Tensor (shape=[batch_size, hessian_dimension]) with hessians for each example. - /// - feature: int32; Rank 2 feature Tensors (shape=[batch_size, feature_dimension]). - /// - /// - Attrs: - /// - max_splits: int; the maximum number of splits possible in the whole tree. - /// - num_buckets: int; equals to the maximum possible value of bucketized feature. - /// - /// - Output stats_summary: output Rank 4 Tensor (shape=[splits, feature_dimension, buckets, logits_dimension + hessian_dimension]) - /// containing accumulated stats for each node, feature dimension and bucket. - @inlinable @inline(__always) - public static func boostedTreesAggregateStats( - nodeIds: Tensor, - gradients: Tensor, - hessians: Tensor, - feature: Tensor, - maxSplits: Int64, - numBuckets: Int64 - ) -> Tensor { - switch commonBackend( - commonBackend( - commonBackend(nodeIds.handle.backend, gradients.handle.backend), hessians.handle.backend), - feature.handle.backend) - { - case .XLA: - let output_device = feature.device - let nodeIds = Tensor(copying: nodeIds, to: .defaultTFEager) - let gradients = Tensor(copying: gradients, to: .defaultTFEager) - let hessians = Tensor(copying: hessians, to: .defaultTFEager) - let feature = Tensor(copying: feature, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.boostedTreesAggregateStats( - nodeIds: nodeIds, gradients: gradients, hessians: hessians, feature: feature, - maxSplits: maxSplits, numBuckets: numBuckets), to: output_device) - case .TF_EAGER: - return _RawTFEager.boostedTreesAggregateStats( - nodeIds: nodeIds, gradients: gradients, hessians: hessians, feature: feature, - maxSplits: maxSplits, numBuckets: numBuckets) - } - - } - - /// Bucketize each feature based on bucket boundaries. - /// - /// An op that returns a list of float tensors, where each tensor represents the - /// bucketized values for a single feature. - /// - /// - Parameters: - /// - float_values: float; List of Rank 1 Tensor each containing float values for a single feature. - /// - bucket_boundaries: float; List of Rank 1 Tensors each containing the bucket boundaries for a single - /// feature. - /// - /// - Attr num_features: inferred int; number of features. - /// - /// - Output buckets: int; List of Rank 1 Tensors each containing the bucketized values for a single feature. 
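A usage sketch for the bucketize op documented above (dispatched just below); the values are illustrative, and the bucket indices in the comment are my reading of the boundary rule:

```swift
import TensorFlow

// One feature with four values and three bucket boundaries. Each value maps to
// the index of the bucket it falls into, so the expected result is
// buckets[0] == [0, 1, 2, 3].
let featureValues = [Tensor<Float>([-1.0, 0.5, 3.0, 10.0])]
let boundaries = [Tensor<Float>([0.0, 2.0, 5.0])]
let buckets = _Raw.boostedTreesBucketize(
  floatValues: featureValues, bucketBoundaries: boundaries)
```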
- @inlinable @inline(__always) - public static func boostedTreesBucketize( - floatValues: [Tensor], - bucketBoundaries: [Tensor] - ) -> [Tensor] { - _RawTFEager.boostedTreesBucketize( - floatValues: floatValues, bucketBoundaries: bucketBoundaries) - } - - /// Calculates gains for each feature and returns the best possible split information for the feature. - /// - /// The split information is the best threshold (bucket id), gains and left/right node contributions per node for each feature. - /// - /// It is possible that not all nodes can be split on each feature. Hence, the list of possible nodes can differ between the features. Therefore, we return `node_ids_list` for each feature, containing the list of nodes that this feature can be used to split. - /// - /// In this manner, the output is the best split per features and per node, so that it needs to be combined later to produce the best split for each node (among all possible features). - /// - /// The output shapes are compatible in a way that the first dimension of all tensors are the same and equal to the number of possible split nodes for each feature. - /// - /// - Parameters: - /// - node_id_range: A Rank 1 tensor (shape=[2]) to specify the range [first, last) of node ids to process within `stats_summary_list`. The nodes are iterated between the two nodes specified by the tensor, as like `for node_id in range(node_id_range[0], node_id_range[1])` (Note that the last index node_id_range[1] is exclusive). - /// - stats_summary: A Rank 4 tensor (#shape=[max_splits, feature_dims, bucket, stats_dims]) for accumulated stats summary (gradient/hessian) per node, per dimension, per buckets for each feature. - /// The first dimension of the tensor is the maximum number of splits, and thus not all elements of it will be used, but only the indexes specified by node_ids will be used. - /// - l1: l1 regularization factor on leaf weights, per instance based. - /// - l2: l2 regularization factor on leaf weights, per instance based. - /// - tree_complexity: adjustment to the gain, per leaf based. - /// - min_node_weight: minimum avg of hessians in a node before required for the node to be considered for splitting. - /// - /// - Attrs: - /// - logits_dimension: The dimension of logit, i.e., number of classes. - /// - split_type: A string indicating if this Op should perform inequality split or equality split. - /// - /// - Outputs: - /// - node_ids: A Rank 1 tensors indicating possible split node ids for each feature. The length of the list is num_features, but each tensor has different size as each feature provides different possible nodes. See above for details like shapes and sizes. - /// - gains: A Rank 1 tensors indicating the best gains for each feature to split for certain nodes. See above for details like shapes and sizes. - /// - feature_dimensions: A Rank 1 tensors indicating the best feature dimension for each feature to split for certain nodes if the feature is multi-dimension. See above for details like shapes and sizes. - /// - thresholds: A Rank 1 tensors indicating the bucket id to compare with (as a threshold) for split in each node. See above for details like shapes and sizes. - /// - left_node_contribs: A Rank 2 tensors indicating the contribution of the left nodes when branching from parent nodes (given by the tensor element in the output node_ids_list) to the left direction by the given threshold for each feature. This value will be used to make the left node value by adding to the parent node value. 
Second dimension size is 1 for 1-dimensional logits, but would be larger for multi-class problems. See above for details like shapes and sizes.
-    ///   - right_node_contribs: A Rank 2 tensors, with the same shape/conditions as left_node_contribs_list, but just that the value is for the right node.
-    ///   - split_with_default_directions: A Rank 1 tensors indicating which direction to go if data is missing. See above for details like shapes and sizes.
-    ///     Inequality with default left returns 0, inequality with default right returns 1, equality with default right returns 2.
-    @inlinable @inline(__always)
-    public static func boostedTreesCalculateBestFeatureSplit(
-      nodeIdRange: Tensor,
-      statsSummary: Tensor,
-      l1: Tensor,
-      l2: Tensor,
-      treeComplexity: Tensor,
-      minNodeWeight: Tensor,
-      logitsDimension: Int64,
-      splitType: SplitType = .inequality
-    ) -> (
-      nodeIds: Tensor, gains: Tensor, featureDimensions: Tensor,
-      thresholds: Tensor, leftNodeContribs: Tensor, rightNodeContribs: Tensor,
-      splitWithDefaultDirections: StringTensor
-    ) {
-      _RawTFEager.boostedTreesCalculateBestFeatureSplit(
-        nodeIdRange: nodeIdRange, statsSummary: statsSummary, l1: l1, l2: l2,
-        treeComplexity: treeComplexity, minNodeWeight: minNodeWeight,
-        logitsDimension: logitsDimension, splitType: splitType)
-    }
-
-    /// Calculates gains for each feature and returns the best possible split information for the feature.
-    ///
-    /// The split information is the best threshold (bucket id), gains and left/right node contributions per node for each feature.
-    ///
-    /// It is possible that not all nodes can be split on each feature. Hence, the list of possible nodes can differ between the features. Therefore, we return `node_ids_list` for each feature, containing the list of nodes that this feature can be used to split.
-    ///
-    /// In this manner, the output is the best split per features and per node, so that it needs to be combined later to produce the best split for each node (among all possible features).
-    ///
-    /// The output lists are all of the same length, `num_features`.
-    /// The output shapes are compatible in a way that the first dimension of all tensors of all lists are the same and equal to the number of possible split nodes for each feature.
-    ///
-    /// - Parameters:
-    ///   - node_id_range: A Rank 1 tensor (shape=[2]) to specify the range [first, last) of node ids to process within `stats_summary_list`. The nodes are iterated between the two nodes specified by the tensor, as like `for node_id in range(node_id_range[0], node_id_range[1])` (Note that the last index node_id_range[1] is exclusive).
-    ///   - stats_summary_list: A list of Rank 3 tensor (#shape=[max_splits, bucket, 2]) for accumulated stats summary (gradient/hessian) per node per buckets for each feature. The first dimension of the tensor is the maximum number of splits, and thus not all elements of it will be used, but only the indexes specified by node_ids will be used.
-    ///   - l1: l1 regularization factor on leaf weights, per instance based.
-    ///   - l2: l2 regularization factor on leaf weights, per instance based.
-    ///   - tree_complexity: adjustment to the gain, per leaf based.
-    ///   - min_node_weight: minimum avg of hessians in a node before required for the node to be considered for splitting.
-    ///
-    /// - Attrs:
-    ///   - max_splits: the number of nodes that can be split in the whole tree. Used as a dimension of output tensors.
-    ///   - num_features: inferred from the size of `stats_summary_list`; the number of total features.
- /// - /// - Outputs: - /// - node_ids_list: An output list of Rank 1 tensors indicating possible split node ids for each feature. The length of the list is num_features, but each tensor has different size as each feature provides different possible nodes. See above for details like shapes and sizes. - /// - gains_list: An output list of Rank 1 tensors indicating the best gains for each feature to split for certain nodes. See above for details like shapes and sizes. - /// - thresholds_list: An output list of Rank 1 tensors indicating the bucket id to compare with (as a threshold) for split in each node. See above for details like shapes and sizes. - /// - left_node_contribs_list: A list of Rank 2 tensors indicating the contribution of the left nodes when branching from parent nodes (given by the tensor element in the output node_ids_list) to the left direction by the given threshold for each feature. This value will be used to make the left node value by adding to the parent node value. Second dimension size is 1 for 1-dimensional logits, but would be larger for multi-class problems. See above for details like shapes and sizes. - /// - right_node_contribs_list: A list of Rank 2 tensors, with the same shape/conditions as left_node_contribs_list, but just that the value is for the right node. - @inlinable @inline(__always) - public static func boostedTreesCalculateBestGainsPerFeature( - nodeIdRange: Tensor, - statsSummaryList: [Tensor], - l1: Tensor, - l2: Tensor, - treeComplexity: Tensor, - minNodeWeight: Tensor, - maxSplits: Int64 - ) -> ( - nodeIdsList: [Tensor], gainsList: [Tensor], thresholdsList: [Tensor], - leftNodeContribsList: [Tensor], rightNodeContribsList: [Tensor] - ) { - _RawTFEager.boostedTreesCalculateBestGainsPerFeature( - nodeIdRange: nodeIdRange, statsSummaryList: statsSummaryList, l1: l1, l2: l2, - treeComplexity: treeComplexity, minNodeWeight: minNodeWeight, maxSplits: maxSplits) - } - - /// Calculates the prior from the training data (the bias) and fills in the first node with the logits' prior. Returns a boolean indicating whether to continue centering. - /// - /// - Parameters: - /// - tree_ensemble_handle: Handle to the tree ensemble. - /// - mean_gradients: A tensor with shape=[logits_dimension] with mean of gradients for a first node. - /// - mean_hessians: A tensor with shape=[logits_dimension] mean of hessians for a first node. - /// - l1: l1 regularization factor on leaf weights, per instance based. - /// - l2: l2 regularization factor on leaf weights, per instance based. - /// - /// - Output continue_centering: Bool, whether to continue bias centering. 
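An illustrative sketch for the bias-centering op documented above (dispatched just below); shapes follow the doc comment, and the container/shared names are placeholders:

```swift
import TensorFlow

// Center the bias of a freshly created ensemble from the batch's mean
// gradient and hessian (logits dimension 1 here; all values illustrative).
let ensemble = _Raw.boostedTreesEnsembleResourceHandleOp(
  container: "", sharedName: "center_bias_example")
_Raw.boostedTreesCreateEnsemble(
  treeEnsembleHandle: ensemble,
  stampToken: Tensor<Int64>(0),
  treeEnsembleSerialized: StringTensor(""))
let keepCentering = _Raw.boostedTreesCenterBias(
  treeEnsembleHandle: ensemble,
  meanGradients: Tensor<Float>([0.5]),
  meanHessians: Tensor<Float>([1.0]),
  l1: Tensor<Float>(0),
  l2: Tensor<Float>(1))
// keepCentering reports whether another centering step is needed.
```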
- @inlinable @inline(__always) - public static func boostedTreesCenterBias( - treeEnsembleHandle: ResourceHandle, - meanGradients: Tensor, - meanHessians: Tensor, - l1: Tensor, - l2: Tensor - ) -> Tensor { - switch commonBackend( - commonBackend( - commonBackend(meanGradients.handle.backend, meanHessians.handle.backend), - l1.handle.backend), l2.handle.backend) - { - case .XLA: - let output_device = l2.device - let meanGradients = Tensor(copying: meanGradients, to: .defaultTFEager) - let meanHessians = Tensor(copying: meanHessians, to: .defaultTFEager) - let l1 = Tensor(copying: l1, to: .defaultTFEager) - let l2 = Tensor(copying: l2, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.boostedTreesCenterBias( - treeEnsembleHandle: treeEnsembleHandle, meanGradients: meanGradients, - meanHessians: meanHessians, l1: l1, l2: l2), to: output_device) - case .TF_EAGER: - return _RawTFEager.boostedTreesCenterBias( - treeEnsembleHandle: treeEnsembleHandle, meanGradients: meanGradients, - meanHessians: meanHessians, l1: l1, l2: l2) - } - - } - - /// Creates a tree ensemble model and returns a handle to it. - /// - /// - Parameters: - /// - tree_ensemble_handle: Handle to the tree ensemble resource to be created. - /// - stamp_token: Token to use as the initial value of the resource stamp. - /// - tree_ensemble_serialized: Serialized proto of the tree ensemble. - @inlinable @inline(__always) - public static func boostedTreesCreateEnsemble( - treeEnsembleHandle: ResourceHandle, - stampToken: Tensor, - treeEnsembleSerialized: StringTensor - ) { - _RawTFEager.boostedTreesCreateEnsemble( - treeEnsembleHandle: treeEnsembleHandle, stampToken: stampToken, - treeEnsembleSerialized: treeEnsembleSerialized) - } - - /// Create the Resource for Quantile Streams. - /// - /// - Parameters: - /// - quantile_stream_resource_handle: resource; Handle to quantile stream resource. - /// - epsilon: float; The required approximation error of the stream resource. - /// - num_streams: int; The number of streams managed by the resource that shares the same epsilon. - /// - /// - Attr max_elements: int; The maximum number of data points that can be fed to the stream. - @inlinable @inline(__always) - public static func boostedTreesCreateQuantileStreamResource( - quantileStreamResourceHandle: ResourceHandle, - epsilon: Tensor, - numStreams: Tensor, - maxElements: Int64 = 1_099_511_627_776 - ) { - _RawTFEager.boostedTreesCreateQuantileStreamResource( - quantileStreamResourceHandle: quantileStreamResourceHandle, epsilon: epsilon, - numStreams: numStreams, maxElements: maxElements) - } - - /// Deserializes a serialized tree ensemble config and replaces current tree - /// - /// ensemble. - /// - /// - Parameters: - /// - tree_ensemble_handle: Handle to the tree ensemble. - /// - stamp_token: Token to use as the new value of the resource stamp. - /// - tree_ensemble_serialized: Serialized proto of the ensemble. 
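A round-trip sketch combining the create, serialize, and deserialize ops in this file (serialization is documented further below); names and stamp tokens are placeholders:

```swift
import TensorFlow

// Snapshot one ensemble resource and restore it into another.
let source = _Raw.boostedTreesEnsembleResourceHandleOp(
  container: "", sharedName: "roundtrip_src")
_Raw.boostedTreesCreateEnsemble(
  treeEnsembleHandle: source,
  stampToken: Tensor<Int64>(0),
  treeEnsembleSerialized: StringTensor(""))
let snapshot = _Raw.boostedTreesSerializeEnsemble(treeEnsembleHandle: source)

let restored = _Raw.boostedTreesEnsembleResourceHandleOp(
  container: "", sharedName: "roundtrip_dst")
_Raw.boostedTreesCreateEnsemble(
  treeEnsembleHandle: restored,
  stampToken: Tensor<Int64>(0),
  treeEnsembleSerialized: StringTensor(""))
_Raw.boostedTreesDeserializeEnsemble(
  treeEnsembleHandle: restored,
  stampToken: snapshot.stampToken + 1,  // fresh stamp for the restored resource
  treeEnsembleSerialized: snapshot.treeEnsembleSerialized)
```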
- @inlinable @inline(__always) - public static func boostedTreesDeserializeEnsemble( - treeEnsembleHandle: ResourceHandle, - stampToken: Tensor, - treeEnsembleSerialized: StringTensor - ) { - _RawTFEager.boostedTreesDeserializeEnsemble( - treeEnsembleHandle: treeEnsembleHandle, stampToken: stampToken, - treeEnsembleSerialized: treeEnsembleSerialized) - } - - /// Creates a handle to a BoostedTreesEnsembleResource - @inlinable @inline(__always) - public static func boostedTreesEnsembleResourceHandleOp( - container: String, - sharedName: String - ) -> ResourceHandle { - _RawTFEager.boostedTreesEnsembleResourceHandleOp(container: container, sharedName: sharedName) - } - - /// Debugging/model interpretability outputs for each example. - /// - /// It traverses all the trees and computes debug metrics for individual examples, - /// such as getting split feature ids and logits after each split along the decision - /// path used to compute directional feature contributions. - /// - /// - Parameter bucketized_features: A list of rank 1 Tensors containing bucket id for each - /// feature. - /// - /// - Attrs: - /// - num_bucketized_features: Inferred. - /// - logits_dimension: scalar, dimension of the logits, to be used for constructing the protos in - /// examples_debug_outputs_serialized. - /// - /// - Output examples_debug_outputs_serialized: Output rank 1 Tensor containing a proto serialized as a string for each example. - @inlinable @inline(__always) - public static func boostedTreesExampleDebugOutputs( - treeEnsembleHandle: ResourceHandle, - bucketizedFeatures: [Tensor], - logitsDimension: Int64 - ) -> StringTensor { - _RawTFEager.boostedTreesExampleDebugOutputs( - treeEnsembleHandle: treeEnsembleHandle, bucketizedFeatures: bucketizedFeatures, - logitsDimension: logitsDimension) - } - - /// Flush the quantile summaries from each quantile stream resource. - /// - /// An op that outputs a list of quantile summaries of a quantile stream resource. - /// Each summary Tensor is rank 2, containing summaries (value, weight, min_rank, - /// max_rank) for a single feature. - /// - /// - Parameter quantile_stream_resource_handle: resource handle referring to a QuantileStreamResource. - @inlinable @inline(__always) - public static func boostedTreesFlushQuantileSummaries( - quantileStreamResourceHandle: ResourceHandle, - numFeatures: Int64 - ) -> [Tensor] { - _RawTFEager.boostedTreesFlushQuantileSummaries( - quantileStreamResourceHandle: quantileStreamResourceHandle, numFeatures: numFeatures) - } - - /// Retrieves the tree ensemble resource stamp token, number of trees and growing statistics. - /// - /// - Parameter tree_ensemble_handle: Handle to the tree ensemble. - /// - /// - Outputs: - /// - stamp_token: Stamp token of the tree ensemble resource. - /// - num_trees: The number of trees in the tree ensemble resource. - /// - num_finalized_trees: The number of trees that were finished successfully. - /// - num_attempted_layers: The number of layers we attempted to build (but not necessarily succeeded). - /// - last_layer_nodes_range: Rank size 2 tensor that contains start and end ids of the nodes in the latest - /// layer. 
- @inlinable @inline(__always) - public static func boostedTreesGetEnsembleStates( - treeEnsembleHandle: ResourceHandle - ) -> ( - stampToken: Tensor, numTrees: Tensor, numFinalizedTrees: Tensor, - numAttemptedLayers: Tensor, lastLayerNodesRange: Tensor - ) { - _RawTFEager.boostedTreesGetEnsembleStates(treeEnsembleHandle: treeEnsembleHandle) - } - - /// Makes the summary of quantiles for the batch. - /// - /// An op that takes a list of tensors (one tensor per feature) and outputs the - /// quantile summaries for each tensor. - /// - /// - Parameters: - /// - float_values: float; List of Rank 1 Tensors each containing values for a single feature. - /// - example_weights: float; Rank 1 Tensor with weights per instance. - /// - epsilon: float; The required maximum approximation error. - /// - /// - Attr num_features: int; Inferred from the size of float_values. - /// The number of float features. - /// - /// - Output summaries: float; List of Rank 2 Tensors each containing the quantile summary - /// (value, weight, min_rank, max_rank) of a single feature. - @inlinable @inline(__always) - public static func boostedTreesMakeQuantileSummaries( - floatValues: [Tensor], - exampleWeights: Tensor, - epsilon: Tensor - ) -> [Tensor] { - switch commonBackend( - commonBackend(commonBackend(floatValues), exampleWeights.handle.backend), - epsilon.handle.backend) - { - case .XLA: - let output_device = epsilon.device - let floatValues = [Tensor](copying: floatValues, to: .defaultTFEager) - let exampleWeights = Tensor(copying: exampleWeights, to: .defaultTFEager) - let epsilon = Tensor(copying: epsilon, to: .defaultTFEager) - return [Tensor]( - copying: _RawTFEager.boostedTreesMakeQuantileSummaries( - floatValues: floatValues, exampleWeights: exampleWeights, epsilon: epsilon), - to: output_device) - case .TF_EAGER: - return _RawTFEager.boostedTreesMakeQuantileSummaries( - floatValues: floatValues, exampleWeights: exampleWeights, epsilon: epsilon) - } - - } - - /// Makes the summary of accumulated stats for the batch. - /// - /// The summary stats contains gradients and hessians accumulated into the corresponding node and bucket for each example. - /// - /// - Parameters: - /// - node_ids: int32 Rank 1 Tensor containing node ids, which each example falls into for the requested layer. - /// - gradients: float32; Rank 2 Tensor (shape=[#examples, 1]) for gradients. - /// - hessians: float32; Rank 2 Tensor (shape=[#examples, 1]) for hessians. - /// - bucketized_features_list: int32 list of Rank 1 Tensors, each containing the bucketized feature (for each feature column). - /// - /// - Attrs: - /// - max_splits: int; the maximum number of splits possible in the whole tree. - /// - num_buckets: int; equals to the maximum possible value of bucketized feature. - /// - num_features: int; inferred from the size of bucketized_features_list; the number of features. - /// - /// - Output stats_summary: output Rank 4 Tensor (shape=[#features, #splits, #buckets, 2]) containing accumulated stats put into the corresponding node and bucket. The first index of 4th dimension refers to gradients, and the second to hessians. 
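A shape sketch for the stats summary documented above (dispatched just below); the values are illustrative:

```swift
import TensorFlow

// Four examples in one feature column with maxSplits == 2 and numBuckets == 3
// yield a summary of shape [#features, #splits, #buckets, 2] == [1, 2, 3, 2].
let nodeIds = Tensor<Int32>([0, 0, 1, 1])
let gradients = Tensor<Float>(shape: [4, 1], scalars: [0.1, 0.2, 0.3, 0.4])
let hessians = Tensor<Float>(shape: [4, 1], scalars: [1, 1, 1, 1])
let bucketized = [Tensor<Int32>([0, 1, 2, 0])]
let summary = _Raw.boostedTreesMakeStatsSummary(
  nodeIds: nodeIds, gradients: gradients, hessians: hessians,
  bucketizedFeaturesList: bucketized, maxSplits: 2, numBuckets: 3)
```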
- @inlinable @inline(__always) - public static func boostedTreesMakeStatsSummary( - nodeIds: Tensor, - gradients: Tensor, - hessians: Tensor, - bucketizedFeaturesList: [Tensor], - maxSplits: Int64, - numBuckets: Int64 - ) -> Tensor { - switch commonBackend( - commonBackend( - commonBackend(nodeIds.handle.backend, gradients.handle.backend), hessians.handle.backend), - commonBackend(bucketizedFeaturesList)) - { - case .XLA: - let output_device = hessians.device - let nodeIds = Tensor(copying: nodeIds, to: .defaultTFEager) - let gradients = Tensor(copying: gradients, to: .defaultTFEager) - let hessians = Tensor(copying: hessians, to: .defaultTFEager) - let bucketizedFeaturesList = [Tensor]( - copying: bucketizedFeaturesList, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.boostedTreesMakeStatsSummary( - nodeIds: nodeIds, gradients: gradients, hessians: hessians, - bucketizedFeaturesList: bucketizedFeaturesList, maxSplits: maxSplits, - numBuckets: numBuckets), to: output_device) - case .TF_EAGER: - return _RawTFEager.boostedTreesMakeStatsSummary( - nodeIds: nodeIds, gradients: gradients, hessians: hessians, - bucketizedFeaturesList: bucketizedFeaturesList, maxSplits: maxSplits, - numBuckets: numBuckets) - } - - } - - /// Runs multiple additive regression ensemble predictors on input instances and - /// - /// computes the logits. It is designed to be used during prediction. - /// It traverses all the trees and calculates the final score for each instance. - /// - /// - Parameter bucketized_features: A list of rank 1 Tensors containing bucket id for each - /// feature. - /// - /// - Attrs: - /// - num_bucketized_features: Inferred. - /// - logits_dimension: scalar, dimension of the logits, to be used for partial logits - /// shape. - /// - /// - Output logits: Output rank 2 Tensor containing logits for each example. - @inlinable @inline(__always) - public static func boostedTreesPredict( - treeEnsembleHandle: ResourceHandle, - bucketizedFeatures: [Tensor], - logitsDimension: Int64 - ) -> Tensor { - _RawTFEager.boostedTreesPredict( - treeEnsembleHandle: treeEnsembleHandle, bucketizedFeatures: bucketizedFeatures, - logitsDimension: logitsDimension) - } - - /// Add the quantile summaries to each quantile stream resource. - /// - /// An op that adds a list of quantile summaries to a quantile stream resource. Each - /// summary Tensor is rank 2, containing summaries (value, weight, min_rank, max_rank) - /// for a single feature. - /// - /// - Parameters: - /// - quantile_stream_resource_handle: resource handle referring to a QuantileStreamResource. - /// - summaries: string; List of Rank 2 Tensor each containing the summaries for a single feature. - @inlinable @inline(__always) - public static func boostedTreesQuantileStreamResourceAddSummaries( - quantileStreamResourceHandle: ResourceHandle, - summaries: [Tensor] - ) { - _RawTFEager.boostedTreesQuantileStreamResourceAddSummaries( - quantileStreamResourceHandle: quantileStreamResourceHandle, summaries: summaries) - } - - /// Deserialize bucket boundaries and ready flag into current QuantileAccumulator. - /// - /// An op that deserializes bucket boundaries and are boundaries ready flag into current QuantileAccumulator. - /// - /// - Parameters: - /// - quantile_stream_resource_handle: resource handle referring to a QuantileStreamResource. - /// - bucket_boundaries: float; List of Rank 1 Tensors each containing the bucket boundaries for a feature. 
- /// - /// - Attr num_streams: inferred int; number of features to get bucket boundaries for. - @inlinable @inline(__always) - public static func boostedTreesQuantileStreamResourceDeserialize( - quantileStreamResourceHandle: ResourceHandle, - bucketBoundaries: [Tensor] - ) { - _RawTFEager.boostedTreesQuantileStreamResourceDeserialize( - quantileStreamResourceHandle: quantileStreamResourceHandle, - bucketBoundaries: bucketBoundaries) - } - - /// Flush the summaries for a quantile stream resource. - /// - /// An op that flushes the summaries for a quantile stream resource. - /// - /// - Parameters: - /// - quantile_stream_resource_handle: resource handle referring to a QuantileStreamResource. - /// - num_buckets: int; approximate number of buckets unless using generate_quantiles. - /// - /// - Attr generate_quantiles: bool; If True, the output will be the num_quantiles for each stream where the ith - /// entry is the ith quantile of the input with an approximation error of epsilon. - /// Duplicate values may be present. - /// If False, the output will be the points in the histogram that we got which roughly - /// translates to 1/epsilon boundaries and without any duplicates. - /// Default to False. - @inlinable @inline(__always) - public static func boostedTreesQuantileStreamResourceFlush( - quantileStreamResourceHandle: ResourceHandle, - numBuckets: Tensor, - generateQuantiles: Bool = false - ) { - _RawTFEager.boostedTreesQuantileStreamResourceFlush( - quantileStreamResourceHandle: quantileStreamResourceHandle, numBuckets: numBuckets, - generateQuantiles: generateQuantiles) - } - - /// Generate the bucket boundaries for each feature based on accumulated summaries. - /// - /// An op that returns a list of float tensors for a quantile stream resource. Each - /// tensor is Rank 1 containing bucket boundaries for a single feature. - /// - /// - Parameter quantile_stream_resource_handle: resource handle referring to a QuantileStreamResource. - /// - /// - Attr num_features: inferred int; number of features to get bucket boundaries for. - /// - /// - Output bucket_boundaries: float; List of Rank 1 Tensors each containing the bucket boundaries for a feature. - @inlinable @inline(__always) - public static func boostedTreesQuantileStreamResourceGetBucketBoundaries( - quantileStreamResourceHandle: ResourceHandle, - numFeatures: Int64 - ) -> [Tensor] { - _RawTFEager.boostedTreesQuantileStreamResourceGetBucketBoundaries( - quantileStreamResourceHandle: quantileStreamResourceHandle, numFeatures: numFeatures) - } - - /// Creates a handle to a BoostedTreesQuantileStreamResource. - @inlinable @inline(__always) - public static func boostedTreesQuantileStreamResourceHandleOp( - container: String, - sharedName: String - ) -> ResourceHandle { - _RawTFEager.boostedTreesQuantileStreamResourceHandleOp( - container: container, sharedName: sharedName) + /// Deprecated. Disallowed in GraphDef version >= 2. 
+ @inlinable @inline(__always) + public static func adjustContrast( + images: Tensor, + contrastFactor: Tensor, + minValue: Tensor, + maxValue: Tensor + ) -> Tensor { + switch commonBackend( + commonBackend( + commonBackend(images.handle.backend, contrastFactor.handle.backend), + minValue.handle.backend), maxValue.handle.backend) + { + case .XLA: + let output_device = maxValue.device + let images = Tensor(copying: images, to: .defaultTFEager) + let contrastFactor = Tensor(copying: contrastFactor, to: .defaultTFEager) + let minValue = Tensor(copying: minValue, to: .defaultTFEager) + let maxValue = Tensor(copying: maxValue, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.adjustContrast( + images: images, contrastFactor: contrastFactor, minValue: minValue, maxValue: maxValue), + to: output_device) + case .TF_EAGER: + return _RawTFEager.adjustContrast( + images: images, contrastFactor: contrastFactor, minValue: minValue, maxValue: maxValue) } - /// Serializes the tree ensemble to a proto. - /// - /// - Parameter tree_ensemble_handle: Handle to the tree ensemble. - /// - /// - Outputs: - /// - stamp_token: Stamp token of the tree ensemble resource. - /// - tree_ensemble_serialized: Serialized proto of the ensemble. - @inlinable @inline(__always) - public static func boostedTreesSerializeEnsemble( - treeEnsembleHandle: ResourceHandle - ) -> (stampToken: Tensor, treeEnsembleSerialized: StringTensor) { - _RawTFEager.boostedTreesSerializeEnsemble(treeEnsembleHandle: treeEnsembleHandle) - } - - /// Aggregates the summary of accumulated stats for the batch. - /// - /// The summary stats contains gradients and hessians accumulated for each node, bucket and dimension id. - /// - /// - Parameters: - /// - node_ids: int32; Rank 1 Tensor containing node ids for each example, shape [batch_size]. - /// - gradients: float32; Rank 2 Tensor (shape=[batch_size, logits_dimension]) with gradients for each example. - /// - hessians: float32; Rank 2 Tensor (shape=[batch_size, hessian_dimension]) with hessians for each example. - /// - feature_indices: int32; Rank 2 indices of feature sparse Tensors (shape=[number of sparse entries, 2]). - /// Number of sparse entries across all instances from the batch. The first value is - /// the index of the instance, the second is dimension of the feature. The second axis - /// can only have 2 values, i.e., the input dense version of Tensor can only be matrix. - /// - feature_values: int32; Rank 1 values of feature sparse Tensors (shape=[number of sparse entries]). - /// Number of sparse entries across all instances from the batch. The first value is - /// the index of the instance, the second is dimension of the feature. - /// - feature_shape: int32; Rank 1 dense shape of feature sparse Tensors (shape=[2]). - /// The first axis can only have 2 values, [batch_size, feature_dimension]. - /// - /// - Attrs: - /// - max_splits: int; the maximum number of splits possible in the whole tree. - /// - num_buckets: int; equals to the maximum possible value of bucketized feature + 1. - /// - /// - Outputs: - /// - stats_summary_indices: int32; Rank 2 indices of summary sparse Tensors (shape=[number of non zero statistics, 4]) - /// The second axis can only be 4 including node id, feature dimension, bucket id, and statistics_dimension. - /// statistics_dimension = logits_dimension + hessian_dimension. 
- /// - stats_summary_values: output Rank 1 Tensor (shape=[number of non zero statistics]) - /// - stats_summary_shape: output Rank 1 Tensor (shape=[4]) - /// The tensor has following 4 values: [max_splits, feature_dimension, num_buckets, statistics_dimension], - /// where statistics_dimension = gradient_dimension + hessian_dimension. gradient_dimension - /// is the same as label_dimension, i.e., the output space. hessian_dimension can be the same - /// as logits dimension when diagonal hessian is used, or label_dimension^2 when full - /// hessian is used. - @inlinable @inline(__always) - public static func boostedTreesSparseAggregateStats( - nodeIds: Tensor, - gradients: Tensor, - hessians: Tensor, - featureIndices: Tensor, - featureValues: Tensor, - featureShape: Tensor, - maxSplits: Int64, - numBuckets: Int64 - ) -> ( - statsSummaryIndices: Tensor, statsSummaryValues: Tensor, - statsSummaryShape: Tensor - ) { - _RawTFEager.boostedTreesSparseAggregateStats( - nodeIds: nodeIds, gradients: gradients, hessians: hessians, featureIndices: featureIndices, - featureValues: featureValues, featureShape: featureShape, maxSplits: maxSplits, - numBuckets: numBuckets) - } + } - /// Calculates gains for each feature and returns the best possible split information for the feature. - /// - /// The split information is the best threshold (bucket id), gains and left/right node contributions per node for each feature. - /// - /// It is possible that not all nodes can be split on each feature. Hence, the list of possible nodes can differ between the features. Therefore, we return `node_ids_list` for each feature, containing the list of nodes that this feature can be used to split. - /// - /// In this manner, the output is the best split per features and per node, so that it needs to be combined later to produce the best split for each node (among all possible features). - /// - /// The output shapes are compatible in a way that the first dimension of all tensors are the same and equal to the number of possible split nodes for each feature. - /// - /// - Parameters: - /// - node_id_range: A Rank 1 tensor (shape=[2]) to specify the range [first, last) of node ids to process within `stats_summary_list`. The nodes are iterated between the two nodes specified by the tensor, as like `for node_id in range(node_id_range[0], node_id_range[1])` (Note that the last index node_id_range[1] is exclusive). - /// - stats_summary_indices: A Rank 2 int64 tensor of dense shape [N, 4] (N specifies the number of non-zero values) for accumulated stats summary (gradient/hessian) per node per bucket for each feature. The second dimension contains node id, feature dimension, bucket id, and stats dim. - /// stats dim is the sum of logits dimension and hessian dimension, hessian dimension can either be logits dimension if diagonal hessian is used, or logits dimension^2 if full hessian is used. - /// - stats_summary_values: A Rank 1 float tensor of dense shape [N] (N specifies the number of non-zero values), which supplies the values for each element in summary_indices. - /// - stats_summary_shape: A Rank 1 float tensor of dense shape [4], which specifies the dense shape of the sparse tensor, which is [num tree nodes, feature dimensions, num buckets, stats dim]. - /// - l1: l1 regularization factor on leaf weights, per instance based. - /// - l2: l2 regularization factor on leaf weights, per instance based. - /// - tree_complexity: adjustment to the gain, per leaf based. 
- /// - min_node_weight: minimum avg of hessians in a node before required for the node to be considered for splitting. - /// - /// - Attrs: - /// - logits_dimension: The dimension of logit, i.e., number of classes. - /// - split_type: A string indicating if this Op should perform inequality split or equality split. - /// - /// - Outputs: - /// - node_ids: A Rank 1 tensor indicating possible node ids that can be split. - /// - gains: A Rank 1 tensor indicating the best gains to split each node. - /// - feature_dimensions: A Rank 1 tensor indicating the best feature dimension for each feature to split for each node. - /// - thresholds: A Rank 1 tensor indicating the bucket id to compare with (as a threshold) for split in each node. - /// - left_node_contribs: A Rank 2 tensor indicating the contribution of the left nodes when branching from parent nodes to the left direction by the given threshold for each feature. - /// This value will be used to make the left node value by adding to the parent node value. Second dimension size is logits dimension. - /// - right_node_contribs: A Rank 2 tensor, with the same shape/conditions as left_node_contribs_list, but just that the value is for the right node. - /// - split_with_default_directions: A Rank 1 tensor indicating which direction to go if data is missing. - /// Inequality with default left returns 0, inequality with default right returns 1, equality with default right returns 2. - @inlinable @inline(__always) - public static func boostedTreesSparseCalculateBestFeatureSplit( - nodeIdRange: Tensor, - statsSummaryIndices: Tensor, - statsSummaryValues: Tensor, - statsSummaryShape: Tensor, - l1: Tensor, - l2: Tensor, - treeComplexity: Tensor, - minNodeWeight: Tensor, - logitsDimension: Int64, - splitType: SplitType1 = .inequality - ) -> ( - nodeIds: Tensor, gains: Tensor, featureDimensions: Tensor, - thresholds: Tensor, leftNodeContribs: Tensor, rightNodeContribs: Tensor, - splitWithDefaultDirections: StringTensor - ) { - _RawTFEager.boostedTreesSparseCalculateBestFeatureSplit( - nodeIdRange: nodeIdRange, statsSummaryIndices: statsSummaryIndices, - statsSummaryValues: statsSummaryValues, statsSummaryShape: statsSummaryShape, l1: l1, - l2: l2, treeComplexity: treeComplexity, minNodeWeight: minNodeWeight, - logitsDimension: logitsDimension, splitType: splitType) - } - - /// Runs multiple additive regression ensemble predictors on input instances and - /// - /// computes the update to cached logits. It is designed to be used during training. - /// It traverses the trees starting from cached tree id and cached node id and - /// calculates the updates to be pushed to the cache. - /// - /// - Parameters: - /// - cached_tree_ids: Rank 1 Tensor containing cached tree ids which is the starting - /// tree of prediction. - /// - cached_node_ids: Rank 1 Tensor containing cached node id which is the starting - /// node of prediction. - /// - bucketized_features: A list of rank 1 Tensors containing bucket id for each - /// feature. - /// - /// - Attrs: - /// - num_bucketized_features: Inferred. - /// - logits_dimension: scalar, dimension of the logits, to be used for partial logits - /// shape. - /// - /// - Outputs: - /// - partial_logits: Rank 2 Tensor containing logits update (with respect to cached - /// values stored) for each example. - /// - tree_ids: Rank 1 Tensor containing new tree ids for each example. - /// - node_ids: Rank 1 Tensor containing new node ids in the new tree_ids. 
- @inlinable @inline(__always)
- public static func boostedTreesTrainingPredict(
- treeEnsembleHandle: ResourceHandle,
- cachedTreeIds: Tensor,
- cachedNodeIds: Tensor,
- bucketizedFeatures: [Tensor],
- logitsDimension: Int64
- ) -> (partialLogits: Tensor, treeIds: Tensor, nodeIds: Tensor) {
- _RawTFEager.boostedTreesTrainingPredict(
- treeEnsembleHandle: treeEnsembleHandle, cachedTreeIds: cachedTreeIds,
- cachedNodeIds: cachedNodeIds, bucketizedFeatures: bucketizedFeatures,
- logitsDimension: logitsDimension)
- }
-
- /// Updates the tree ensemble by either adding a layer to the last tree being grown
- ///
- /// or by starting a new tree.
- ///
- /// - Parameters:
- /// - tree_ensemble_handle: Handle to the ensemble variable.
- /// - feature_ids: Rank 1 tensor with ids for each feature. This is the real id of
- /// the feature that will be used in the split.
- /// - node_ids: List of rank 1 tensors representing the nodes for which this feature
- /// has a split.
- /// - gains: List of rank 1 tensors representing the gains for each of the feature's
- /// split.
- /// - thresholds: List of rank 1 tensors representing the thresholds for each of the
- /// feature's split.
- /// - left_node_contribs: List of rank 2 tensors with left leaf contribs for each of
- /// the feature's splits. Will be added to the previous node values to constitute
- /// the values of the left nodes.
- /// - right_node_contribs: List of rank 2 tensors with right leaf contribs for each
- /// of the feature's splits. Will be added to the previous node values to constitute
- /// the values of the right nodes.
- /// - max_depth: Max depth of the tree to build.
- /// - learning_rate: shrinkage const for each new tree.
- ///
- /// - Attrs:
- /// - pruning_mode: 0-No pruning, 1-Pre-pruning, 2-Post-pruning.
- /// - num_features: Number of features that have best splits returned. INFERRED.
- @inlinable @inline(__always)
- public static func boostedTreesUpdateEnsemble(
- treeEnsembleHandle: ResourceHandle,
- featureIds: Tensor,
- nodeIds: [Tensor],
- gains: [Tensor],
- thresholds: [Tensor],
- leftNodeContribs: [Tensor],
- rightNodeContribs: [Tensor],
- maxDepth: Tensor,
- learningRate: Tensor,
- pruningMode: Int64
- ) {
- _RawTFEager.boostedTreesUpdateEnsemble(
- treeEnsembleHandle: treeEnsembleHandle, featureIds: featureIds, nodeIds: nodeIds,
- gains: gains, thresholds: thresholds, leftNodeContribs: leftNodeContribs,
- rightNodeContribs: rightNodeContribs, maxDepth: maxDepth, learningRate: learningRate,
- pruningMode: pruningMode)
- }
-
- ///
- /// - Parameters:
- /// - tree_ensemble_handle: Handle to the ensemble variable.
- /// - feature_ids: Rank 1 tensor with ids for each feature. This is the real id of
- /// the feature that will be used in the split.
- /// - dimension_ids: List of rank 1 tensors representing the dimension in each feature.
- /// - node_ids: List of rank 1 tensors representing the nodes for which this feature
- /// has a split.
- /// - gains: List of rank 1 tensors representing the gains for each of the feature's
- /// split.
- /// - thresholds: List of rank 1 tensors representing the thresholds for each of the
- /// feature's split.
- /// - left_node_contribs: List of rank 2 tensors with left leaf contribs for each of
- /// the feature's splits. Will be added to the previous node values to constitute
- /// the values of the left nodes.
- /// - right_node_contribs: List of rank 2 tensors with right leaf contribs for each
- /// of the feature's splits. Will be added to the previous node values to constitute
- /// the values of the right nodes.
- /// - split_types: List of rank 1 tensors representing the split type for each feature.
- /// - max_depth: Max depth of the tree to build.
- /// - learning_rate: shrinkage const for each new tree.
- /// - pruning_mode: 0-No pruning, 1-Pre-pruning, 2-Post-pruning.
- ///
- /// - Attrs:
- /// - num_features: Number of features that have best splits returned. INFERRED.
- /// - logits_dimension: scalar, dimension of the logits
- @inlinable @inline(__always)
- public static func boostedTreesUpdateEnsembleV2(
- treeEnsembleHandle: ResourceHandle,
- featureIds: Tensor,
- dimensionIds: [Tensor],
- nodeIds: [Tensor],
- gains: [Tensor],
- thresholds: [Tensor],
- leftNodeContribs: [Tensor],
- rightNodeContribs: [Tensor],
- splitTypes: [StringTensor],
- maxDepth: Tensor,
- learningRate: Tensor,
- pruningMode: Tensor,
- logitsDimension: Int64 = 1
- ) {
- _RawTFEager.boostedTreesUpdateEnsembleV2(
- treeEnsembleHandle: treeEnsembleHandle, featureIds: featureIds, dimensionIds: dimensionIds,
- nodeIds: nodeIds, gains: gains, thresholds: thresholds, leftNodeContribs: leftNodeContribs,
- rightNodeContribs: rightNodeContribs, splitTypes: splitTypes, maxDepth: maxDepth,
- learningRate: learningRate, pruningMode: pruningMode, logitsDimension: logitsDimension)
- }
-
- /// Return the shape of s0 op s1 with broadcast.
- ///
- /// Given `s0` and `s1`, tensors that represent shapes, compute `r0`, the
- /// broadcasted shape. `s0`, `s1` and `r0` are all integer vectors.
- @inlinable @inline(__always)
- public static func broadcastArgs(
- s0: Tensor,
- s1: Tensor
- ) -> Tensor {
- switch commonBackend(s0.handle.backend, s1.handle.backend) {
- case .XLA:
- let output_device = s1.device
- let s0 = Tensor(copying: s0, to: .defaultTFEager)
- let s1 = Tensor(copying: s1, to: .defaultTFEager)
- return Tensor(copying: _RawTFEager.broadcastArgs(s0: s0, s1: s1), to: output_device)
- case .TF_EAGER:
- return _RawTFEager.broadcastArgs(s0: s0, s1: s1)
- }
-
- }
-
- /// Return the reduction indices for computing gradients of s0 op s1 with broadcast.
- ///
- /// This is typically used by gradient computations for a broadcasting operation.
- @inlinable @inline(__always)
- public static func broadcastGradientArgs(
- s0: Tensor,
- s1: Tensor
- ) -> (r0: Tensor, r1: Tensor) {
- switch commonBackend(s0.handle.backend, s1.handle.backend) {
- case .XLA:
- return _RawXLA.broadcastGradientArgs(s0: s0, s1: s1)
- case .TF_EAGER:
- return _RawTFEager.broadcastGradientArgs(s0: s0, s1: s1)
- }
-
- }
-
- /// Broadcast an array for a compatible shape.
- ///
- /// Broadcasting is the process of making arrays have compatible shapes
- /// for arithmetic operations. Two shapes are compatible if for each
- /// dimension pair they are either equal or one of them is one. When trying
- /// to broadcast a Tensor to a shape, it starts with the trailing dimensions,
- /// and works its way forward.
- ///
- /// For example,
- ///
- /// >>> x = tf.constant([1, 2, 3])
- /// >>> y = tf.broadcast_to(x, [3, 3])
- /// >>> print(y)
- /// tf.Tensor(
- /// [[1 2 3]
- /// [1 2 3]
- /// [1 2 3]], shape=(3, 3), dtype=int32)
- ///
- /// In the above example, the input Tensor with the shape of `[1, 3]`
- /// is broadcasted to output Tensor with shape of `[3, 3]`.
- ///
- /// - Parameters:
- /// - input: A Tensor to broadcast.
- /// - shape: A 1-D `int` Tensor. The shape of the desired output.
- ///
- /// - Output output: A Tensor.
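// A minimal usage sketch of the dispatched `broadcastTo` wrapper defined just
// below; the tensor values and the `.defaultTFEager` placement are
// illustrative assumptions, not part of the patch.
let x = Tensor<Int32>([1, 2, 3], on: .defaultTFEager)
let targetShape = Tensor<Int32>([3, 3], on: .defaultTFEager)
let y = _Raw.broadcastTo(x, shape: targetShape)
// y == [[1, 2, 3], [1, 2, 3], [1, 2, 3]]; the switch on the common backend of
// `x` and `targetShape` picks _RawXLA or _RawTFEager accordingly.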
- @inlinable @inline(__always)
- public static func broadcastTo<
- T: TensorFlowScalar,
- Tidx: TensorFlowIndex
- >(
- _ input: Tensor,
- shape: Tensor
- ) -> Tensor {
- switch commonBackend(input.handle.backend, shape.handle.backend) {
- case .XLA:
- return _RawXLA.broadcastTo(input, shape: shape)
- case .TF_EAGER:
- return _RawTFEager.broadcastTo(input, shape: shape)
- }
-
- }
-
- /// Bucketizes 'input' based on 'boundaries'.
- ///
- /// For example, if the inputs are
- /// boundaries = [0, 10, 100]
- /// input = [[-5, 10000]
- /// [150, 10]
- /// [5, 100]]
- ///
- /// then the output will be
- /// output = [[0, 3]
- /// [3, 2]
- /// [1, 3]]
- ///
- /// - Parameter input: Any shape of Tensor with int or float type.
- ///
- /// - Attr boundaries: A sorted list of floats gives the boundary of the buckets.
- ///
- /// - Output output: Same shape as 'input', each value of input replaced with bucket index.
- ///
- /// @compatibility(numpy)
- /// Equivalent to np.digitize.
- /// @end_compatibility
- @inlinable @inline(__always)
- public static func bucketize(
- _ input: Tensor,
- boundaries: [Double]
- ) -> Tensor {
- switch input.handle.backend {
- case .XLA:
- let output_device = input.device
- let input = Tensor(copying: input, to: .defaultTFEager)
- return Tensor(
- copying: _RawTFEager.bucketize(input, boundaries: boundaries), to: output_device)
- case .TF_EAGER:
- return _RawTFEager.bucketize(input, boundaries: boundaries)
- }
-
- }
-
- /// Records the bytes size of each element of `input_dataset` in a StatsAggregator.
- @inlinable @inline(__always)
- public static func bytesProducedStatsDataset(
- inputDataset: VariantHandle,
- tag: StringTensor,
- outputTypes: [TensorDataType],
- outputShapes: [TensorShape?]
- ) -> VariantHandle {
- _RawTFEager.bytesProducedStatsDataset(
- inputDataset: inputDataset, tag: tag, outputTypes: outputTypes, outputShapes: outputShapes)
- }
-
- /// Reads out the CSR components at batch `index`.
- ///
- /// This op is meant only for debugging / testing, and its interface is not expected
- /// to be stable.
- ///
- /// - Parameters:
- /// - csr_sparse_matrix: A batched CSRSparseMatrix.
- /// - index: The index in `csr_sparse_matrix`'s batch.
- ///
- /// - Outputs:
- /// - row_ptrs: An array containing CSR matrix row pointers.
- /// - col_inds: An array containing CSR matrix column indices.
- /// - values: An array containing CSR matrix nonzero values.
- @inlinable @inline(__always)
- public static func cSRSparseMatrixComponents(
- csrSparseMatrix: VariantHandle,
- index: Tensor
- ) -> (rowPtrs: Tensor, colInds: Tensor, values: Tensor) {
- _RawTFEager.cSRSparseMatrixComponents(csrSparseMatrix: csrSparseMatrix, index: index)
- }
-
- /// Convert a (possibly batched) CSRSparseMatrix to dense.
- ///
- /// - Parameter sparse_input: A batched CSRSparseMatrix.
- ///
- /// - Output dense_output: A dense tensor.
- @inlinable @inline(__always)
- public static func cSRSparseMatrixToDense(
- sparseInput: VariantHandle
- ) -> Tensor {
- _RawTFEager.cSRSparseMatrixToDense(sparseInput: sparseInput)
- }
-
- /// Converts a (possibly batched) CSRSparseMatrix to a SparseTensor.
- ///
- /// - Parameter sparse_matrix: A (possibly batched) CSRSparseMatrix.
- ///
- /// - Outputs:
- /// - indices: SparseTensor indices.
- /// - values: SparseTensor values.
- /// - dense_shape: SparseTensor dense shape.
- @inlinable @inline(__always) - public static func cSRSparseMatrixToSparseTensor( - sparseMatrix: VariantHandle - ) -> (indices: Tensor, values: Tensor, denseShape: Tensor) { - _RawTFEager.cSRSparseMatrixToSparseTensor(sparseMatrix: sparseMatrix) - } - - @inlinable @inline(__always) - public static func cSVDataset( - filenames: StringTensor, - compressionType: StringTensor, - bufferSize: Tensor, - header: Tensor, - fieldDelim: StringTensor, - useQuoteDelim: Tensor, - naValue: StringTensor, - selectCols: Tensor, - recordDefaults: OutputTypes, - outputShapes: [TensorShape?] - ) -> VariantHandle { - _RawTFEager.cSVDataset( - filenames: filenames, compressionType: compressionType, bufferSize: bufferSize, - header: header, fieldDelim: fieldDelim, useQuoteDelim: useQuoteDelim, naValue: naValue, - selectCols: selectCols, recordDefaults: recordDefaults, outputShapes: outputShapes) - } - - /// Performs beam search decoding on the logits given in input. - /// - /// A note about the attribute merge_repeated: For the beam search decoder, - /// this means that if consecutive entries in a beam are the same, only - /// the first of these is emitted. That is, when the top path is "A B B B B", - /// "A B" is returned if merge_repeated = True but "A B B B B" is - /// returned if merge_repeated = False. - /// - /// - Parameters: - /// - inputs: 3-D, shape: `(max_time x batch_size x num_classes)`, the logits. - /// - sequence_length: A vector containing sequence lengths, size `(batch)`. - /// - /// - Attrs: - /// - beam_width: A scalar >= 0 (beam search beam width). - /// - top_paths: A scalar >= 0, <= beam_width (controls output size). - /// - merge_repeated: If true, merge repeated classes in output. - /// - /// - Outputs: - /// - decoded_indices: A list (length: top_paths) of indices matrices. Matrix j, - /// size `(total_decoded_outputs[j] x 2)`, has indices of a - /// `SparseTensor`. The rows store: [batch, time]. - /// - decoded_values: A list (length: top_paths) of values vectors. Vector j, - /// size `(length total_decoded_outputs[j])`, has the values of a - /// `SparseTensor`. The vector stores the decoded classes for beam j. - /// - decoded_shape: A list (length: top_paths) of shape vector. Vector j, - /// size `(2)`, stores the shape of the decoded `SparseTensor[j]`. - /// Its values are: `[batch_size, max_decoded_length[j]]`. - /// - log_probability: A matrix, shaped: `(batch_size x top_paths)`. The - /// sequence log-probabilities. - @inlinable @inline(__always) - public static func cTCBeamSearchDecoder( - inputs: Tensor, - sequenceLength: Tensor, - beamWidth: Int64, - topPaths: Int64, - mergeRepeated: Bool = true - ) -> ( - decodedIndices: [Tensor], decodedValues: [Tensor], - decodedShape: [Tensor], logProbability: Tensor - ) { - _RawTFEager.cTCBeamSearchDecoder( - inputs: inputs, sequenceLength: sequenceLength, beamWidth: beamWidth, topPaths: topPaths, - mergeRepeated: mergeRepeated) - } - - /// Performs greedy decoding on the logits given in inputs. - /// - /// A note about the attribute merge_repeated: if enabled, when - /// consecutive logits' maximum indices are the same, only the first of - /// these is emitted. Labeling the blank '*', the sequence "A B B * B B" - /// becomes "A B B" if merge_repeated = True and "A B B B B" if - /// merge_repeated = False. - /// - /// Regardless of the value of merge_repeated, if the maximum index of a given - /// time and batch corresponds to the blank, index `(num_classes - 1)`, no new - /// element is emitted. 
- /// - /// - Parameters: - /// - inputs: 3-D, shape: `(max_time x batch_size x num_classes)`, the logits. - /// - sequence_length: A vector containing sequence lengths, size `(batch_size)`. - /// - /// - Attr merge_repeated: If True, merge repeated classes in output. - /// - /// - Outputs: - /// - decoded_indices: Indices matrix, size `(total_decoded_outputs x 2)`, - /// of a `SparseTensor`. The rows store: [batch, time]. - /// - decoded_values: Values vector, size: `(total_decoded_outputs)`, - /// of a `SparseTensor`. The vector stores the decoded classes. - /// - decoded_shape: Shape vector, size `(2)`, of the decoded SparseTensor. - /// Values are: `[batch_size, max_decoded_length]`. - /// - log_probability: Matrix, size `(batch_size x 1)`, containing sequence - /// log-probabilities. - @inlinable @inline(__always) - public static func cTCGreedyDecoder( - inputs: Tensor, - sequenceLength: Tensor, - mergeRepeated: Bool = false - ) -> ( - decodedIndices: Tensor, decodedValues: Tensor, decodedShape: Tensor, - logProbability: Tensor - ) { - _RawTFEager.cTCGreedyDecoder( - inputs: inputs, sequenceLength: sequenceLength, mergeRepeated: mergeRepeated) - } - - /// Calculates the CTC Loss (log probability) for each batch entry. Also calculates - /// - /// the gradient. This class performs the softmax operation for you, so inputs - /// should be e.g. linear projections of outputs by an LSTM. - /// - /// - Parameters: - /// - inputs: 3-D, shape: `(max_time x batch_size x num_classes)`, the logits. - /// - labels_indices: The indices of a `SparseTensor`. - /// `labels_indices(i, :) == [b, t]` means `labels_values(i)` stores the id for - /// `(batch b, time t)`. - /// - labels_values: The values (labels) associated with the given batch and time. - /// - sequence_length: A vector containing sequence lengths (batch). - /// - /// - Attrs: - /// - preprocess_collapse_repeated: Scalar, if true then repeated labels are - /// collapsed prior to the CTC calculation. - /// - ctc_merge_repeated: Scalar. If set to false, *during* CTC calculation - /// repeated non-blank labels will not be merged and are interpreted as - /// individual labels. This is a simplified version of CTC. - /// - ignore_longer_outputs_than_inputs: Scalar. If set to true, during CTC - /// calculation, items that have longer output sequences than input sequences - /// are skipped: they don't contribute to the loss term and have zero-gradient. - /// - /// - Outputs: - /// - loss: A vector (batch) containing log-probabilities. - /// - gradient: The gradient of `loss`. 3-D, shape: - /// `(max_time x batch_size x num_classes)`. - @inlinable @inline(__always) - public static func cTCLoss( - inputs: Tensor, - labelsIndices: Tensor, - labelsValues: Tensor, - sequenceLength: Tensor, - preprocessCollapseRepeated: Bool = false, - ctcMergeRepeated: Bool = true, - ignoreLongerOutputsThanInputs: Bool = false - ) -> (loss: Tensor, gradient: Tensor) { - _RawTFEager.cTCLoss( - inputs: inputs, labelsIndices: labelsIndices, labelsValues: labelsValues, - sequenceLength: sequenceLength, preprocessCollapseRepeated: preprocessCollapseRepeated, - ctcMergeRepeated: ctcMergeRepeated, - ignoreLongerOutputsThanInputs: ignoreLongerOutputsThanInputs) - } - - /// Creates a dataset that caches elements from `input_dataset`. - /// - /// A CacheDataset will iterate over the input_dataset, and store tensors. If the - /// cache already exists, the cache will be used. If the cache is inappropriate - /// (e.g. 
cannot be opened, contains tensors of the wrong shape / size), an error
- /// will be returned when used.
- ///
- /// - Parameter filename: A path on the filesystem where we should cache the dataset. Note: this
- /// will be a directory.
- @inlinable @inline(__always)
- public static func cacheDataset(
- inputDataset: VariantHandle,
- filename: StringTensor,
- outputTypes: [TensorDataType],
- outputShapes: [TensorShape?]
- ) -> VariantHandle {
- _RawTFEager.cacheDataset(
- inputDataset: inputDataset, filename: filename, outputTypes: outputTypes,
- outputShapes: outputShapes)
- }
-
- @inlinable @inline(__always)
- public static func cacheDatasetV2(
- inputDataset: VariantHandle,
- filename: StringTensor,
- cache: ResourceHandle,
- outputTypes: [TensorDataType],
- outputShapes: [TensorShape?]
- ) -> VariantHandle {
- _RawTFEager.cacheDatasetV2(
- inputDataset: inputDataset, filename: filename, cache: cache, outputTypes: outputTypes,
- outputShapes: outputShapes)
- }
-
- /// Cast x of type SrcT to y of DstT.
- @inlinable @inline(__always)
- public static func cast<
- Srct: TensorFlowScalar,
- Dstt: TensorFlowScalar
- >(
- _ x: Tensor,
- truncate: Bool = false
- ) -> Tensor {
- switch x.handle.backend {
- case .XLA:
- return _RawXLA.cast(x, truncate: truncate)
- case .TF_EAGER:
- return _RawTFEager.cast(x, truncate: truncate)
- }
-
- }
-
- /// Returns element-wise smallest integer not less than x.
- @inlinable @inline(__always)
- public static func ceil(
- _ x: Tensor
- ) -> Tensor {
- switch x.handle.backend {
- case .XLA:
- return _RawXLA.ceil(x)
- case .TF_EAGER:
- return _RawTFEager.ceil(x)
- }
-
- }
-
- /// Checks a tensor for NaN and Inf values.
- ///
- /// When run, reports an `InvalidArgument` error if `tensor` has any values
- /// that are not a number (NaN) or infinity (Inf). Otherwise, passes `tensor` as-is.
- ///
- /// - Attr message: Prefix of the error message.
- @inlinable @inline(__always)
- public static func checkNumerics(
- _ tensor: Tensor,
- message: String
- ) -> Tensor {
- switch tensor.handle.backend {
- case .XLA:
- let output_device = tensor.device
- let tensor = Tensor(copying: tensor, to: .defaultTFEager)
- return Tensor(
- copying: _RawTFEager.checkNumerics(tensor, message: message), to: output_device)
- case .TF_EAGER:
- return _RawTFEager.checkNumerics(tensor, message: message)
- }
-
- }
-
- /// Computes the Cholesky decomposition of one or more square matrices.
- ///
- /// The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions
- /// form square matrices.
- ///
- /// The input has to be symmetric and positive definite. Only the lower-triangular
- /// part of the input will be used for this operation. The upper-triangular part
- /// will not be read.
- ///
- /// The output is a tensor of the same shape as the input
- /// containing the Cholesky decompositions for all input submatrices `[..., :, :]`.
- ///
- /// **Note**: The gradient computation on GPU is faster for large matrices but
- /// not for large batch dimensions when the submatrices are small. In this
- /// case it might be faster to use the CPU.
- ///
- /// - Parameter input: Shape is `[..., M, M]`.
- ///
- /// - Output output: Shape is `[..., M, M]`.
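// A condensed sketch of the eager-fallback pattern that wrappers such as
// `checkNumerics` above and `cholesky` below share; the helper name
// `eagerFallback` is hypothetical and only illustrates the shape of the
// pattern: copy XLA operands to `.defaultTFEager`, run the eager kernel, and
// copy the result back to the original device.
func eagerFallback<T: TensorFlowFloatingPoint>(
  _ input: Tensor<T>, op: (Tensor<T>) -> Tensor<T>
) -> Tensor<T> {
  let outputDevice = input.device  // remember the requesting device
  let eagerInput = Tensor(copying: input, to: .defaultTFEager)
  return Tensor(copying: op(eagerInput), to: outputDevice)
}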
- @inlinable @inline(__always) - public static func cholesky( - _ input: Tensor - ) -> Tensor { - switch input.handle.backend { - case .XLA: - let output_device = input.device - let input = Tensor(copying: input, to: .defaultTFEager) - return Tensor(copying: _RawTFEager.cholesky(input), to: output_device) - case .TF_EAGER: - return _RawTFEager.cholesky(input) - } - - } - - /// Computes the reverse mode backpropagated gradient of the Cholesky algorithm. - /// - /// For an explanation see "Differentiation of the Cholesky algorithm" by - /// Iain Murray http://arxiv.org/abs/1602.07527. - /// - /// - Parameters: - /// - l: Output of batch Cholesky algorithm l = cholesky(A). Shape is `[..., M, M]`. - /// Algorithm depends only on lower triangular part of the innermost matrices of - /// this tensor. - /// - grad: df/dl where f is some scalar function. Shape is `[..., M, M]`. - /// Algorithm depends only on lower triangular part of the innermost matrices of - /// this tensor. - /// - /// - Output output: Symmetrized version of df/dA . Shape is `[..., M, M]` - @inlinable @inline(__always) - public static func choleskyGrad( - l: Tensor, - grad: Tensor - ) -> Tensor { - switch commonBackend(l.handle.backend, grad.handle.backend) { - case .XLA: - let output_device = grad.device - let l = Tensor(copying: l, to: .defaultTFEager) - let grad = Tensor(copying: grad, to: .defaultTFEager) - return Tensor(copying: _RawTFEager.choleskyGrad(l: l, grad: grad), to: output_device) - case .TF_EAGER: - return _RawTFEager.choleskyGrad(l: l, grad: grad) - } - - } - - @inlinable @inline(__always) - public static func chooseFastestDataset( - inputDatasets: [VariantHandle], - numExperiments: Int64, - outputTypes: [TensorDataType], - outputShapes: [TensorShape?] - ) -> VariantHandle { - _RawTFEager.chooseFastestDataset( - inputDatasets: inputDatasets, numExperiments: numExperiments, outputTypes: outputTypes, - outputShapes: outputShapes) - } - - /// Clips tensor values to a specified min and max. - /// - /// Given a tensor `t`, this operation returns a tensor of the same type and - /// shape as `t` with its values clipped to `clip_value_min` and `clip_value_max`. - /// Any values less than `clip_value_min` are set to `clip_value_min`. Any values - /// greater than `clip_value_max` are set to `clip_value_max`. - /// - /// - Parameters: - /// - t: A `Tensor`. - /// - clip_value_min: A 0-D (scalar) `Tensor`, or a `Tensor` with the same shape - /// as `t`. The minimum value to clip by. - /// - clip_value_max: A 0-D (scalar) `Tensor`, or a `Tensor` with the same shape - /// as `t`. The maximum value to clip by. - /// - /// - Output output: A clipped `Tensor` with the same shape as input 't'. - @inlinable @inline(__always) - public static func clipByValue( - t: Tensor, - clipValueMin: Tensor, - clipValueMax: Tensor - ) -> Tensor { - switch commonBackend( - commonBackend(t.handle.backend, clipValueMin.handle.backend), clipValueMax.handle.backend) - { - case .XLA: - return _RawXLA.clipByValue(t: t, clipValueMin: clipValueMin, clipValueMax: clipValueMax) - case .TF_EAGER: - return _RawTFEager.clipByValue(t: t, clipValueMin: clipValueMin, clipValueMax: clipValueMax) - } - - } - - @inlinable @inline(__always) - public static func closeSummaryWriter( - writer: ResourceHandle - ) { - _RawTFEager.closeSummaryWriter(writer: writer) - } - - /// Receives a tensor value broadcast from another device. 
- @inlinable @inline(__always) - public static func collectiveBcastRecv( - groupSize: Int64, - groupKey: Int64, - instanceKey: Int64, - shape: TensorShape?, - communicationHint: String = "auto" - ) -> Tensor { - _RawTFEager.collectiveBcastRecv( - groupSize: groupSize, groupKey: groupKey, instanceKey: instanceKey, shape: shape, - communicationHint: communicationHint) + /// Adjust the contrast of one or more images. + /// + /// `images` is a tensor of at least 3 dimensions. The last 3 dimensions are + /// interpreted as `[height, width, channels]`. The other dimensions only + /// represent a collection of images, such as `[batch, height, width, channels].` + /// + /// Contrast is adjusted independently for each channel of each image. + /// + /// For each channel, the Op first computes the mean of the image pixels in the + /// channel and then adjusts each component of each pixel to + /// `(x - mean) * contrast_factor + mean`. + /// + /// - Parameters: + /// - images: Images to adjust. At least 3-D. + /// - contrast_factor: A float multiplier for adjusting contrast. + /// + /// - Output output: The contrast-adjusted image or images. + @inlinable @inline(__always) + public static func adjustContrastv2( + images: Tensor, + contrastFactor: Tensor + ) -> Tensor { + switch commonBackend(images.handle.backend, contrastFactor.handle.backend) { + case .XLA: + let output_device = contrastFactor.device + let images = Tensor(copying: images, to: .defaultTFEager) + let contrastFactor = Tensor(copying: contrastFactor, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.adjustContrastv2(images: images, contrastFactor: contrastFactor), + to: output_device) + case .TF_EAGER: + return _RawTFEager.adjustContrastv2(images: images, contrastFactor: contrastFactor) } - /// Broadcasts a tensor value to one or more other devices. - @inlinable @inline(__always) - public static func collectiveBcastSend( - _ input: Tensor, - groupSize: Int64, - groupKey: Int64, - instanceKey: Int64, - shape: TensorShape?, - communicationHint: String = "auto" - ) -> Tensor { - switch input.handle.backend { - case .XLA: - let output_device = input.device - let input = Tensor(copying: input, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.collectiveBcastSend( - input, groupSize: groupSize, groupKey: groupKey, instanceKey: instanceKey, shape: shape, - communicationHint: communicationHint), to: output_device) - case .TF_EAGER: - return _RawTFEager.collectiveBcastSend( - input, groupSize: groupSize, groupKey: groupKey, instanceKey: instanceKey, shape: shape, - communicationHint: communicationHint) - } - - } - - /// Mutually accumulates multiple tensors of identical type and shape. - @inlinable @inline(__always) - public static func collectiveGather( - _ input: Tensor, - groupSize: Int64, - groupKey: Int64, - instanceKey: Int64, - shape: TensorShape?, - communicationHint: String = "auto" - ) -> Tensor { - switch input.handle.backend { - case .XLA: - let output_device = input.device - let input = Tensor(copying: input, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.collectiveGather( - input, groupSize: groupSize, groupKey: groupKey, instanceKey: instanceKey, shape: shape, - communicationHint: communicationHint), to: output_device) - case .TF_EAGER: - return _RawTFEager.collectiveGather( - input, groupSize: groupSize, groupKey: groupKey, instanceKey: instanceKey, shape: shape, - communicationHint: communicationHint) - } - - } - - /// An Op to permute tensors across replicated TPU instances. 
- /// - /// Each instance supplies its own input. - /// - /// For example, suppose there are 4 TPU instances: `[A, B, C, D]`. Passing - /// source_target_pairs=`[[0,1],[1,2],[2,3],[3,0]]` gets the outputs: - /// `[D, A, B, C]`. - /// - /// - Parameters: - /// - input: The local input to be permuted. Currently only supports float and - /// bfloat16. - /// - source_target_pairs: A tensor with shape [num_pairs, 2]. - /// - /// - Attr T: The type of elements to be exchanged. - /// - /// - Output output: The permuted input. - @inlinable @inline(__always) - public static func collectivePermute( - _ input: Tensor, - sourceTargetPairs: Tensor - ) -> Tensor { - switch commonBackend(input.handle.backend, sourceTargetPairs.handle.backend) { - case .XLA: - let output_device = sourceTargetPairs.device - let input = Tensor(copying: input, to: .defaultTFEager) - let sourceTargetPairs = Tensor(copying: sourceTargetPairs, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.collectivePermute(input, sourceTargetPairs: sourceTargetPairs), - to: output_device) - case .TF_EAGER: - return _RawTFEager.collectivePermute(input, sourceTargetPairs: sourceTargetPairs) - } - - } - - /// Mutually reduces multiple tensors of identical type and shape. - @inlinable @inline(__always) - public static func collectiveReduce( - _ input: Tensor, - groupSize: Int64, - groupKey: Int64, - instanceKey: Int64, - mergeOp: MergeOp, - finalOp: FinalOp, - subdivOffsets: [Int32], - waitFor: [Int32], - communicationHint: String = "auto" - ) -> Tensor { - switch input.handle.backend { - case .XLA: - let output_device = input.device - let input = Tensor(copying: input, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.collectiveReduce( - input, groupSize: groupSize, groupKey: groupKey, instanceKey: instanceKey, - mergeOp: mergeOp, finalOp: finalOp, subdivOffsets: subdivOffsets, waitFor: waitFor, - communicationHint: communicationHint), to: output_device) - case .TF_EAGER: - return _RawTFEager.collectiveReduce( - input, groupSize: groupSize, groupKey: groupKey, instanceKey: instanceKey, - mergeOp: mergeOp, finalOp: finalOp, subdivOffsets: subdivOffsets, waitFor: waitFor, - communicationHint: communicationHint) - } - - } - - /// Greedily selects a subset of bounding boxes in descending order of score, - /// - /// This operation performs non_max_suppression on the inputs per batch, across - /// all classes. - /// Prunes away boxes that have high intersection-over-union (IOU) overlap - /// with previously selected boxes. Bounding boxes are supplied as - /// [y1, x1, y2, x2], where (y1, x1) and (y2, x2) are the coordinates of any - /// diagonal pair of box corners and the coordinates can be provided as normalized - /// (i.e., lying in the interval [0, 1]) or absolute. Note that this algorithm - /// is agnostic to where the origin is in the coordinate system. Also note that - /// this algorithm is invariant to orthogonal transformations and translations - /// of the coordinate system; thus translating or reflections of the coordinate - /// system result in the same boxes being selected by the algorithm. - /// The output of this operation is the final boxes, scores and classes tensor - /// returned after performing non_max_suppression. - /// - /// - Parameters: - /// - boxes: A 4-D float tensor of shape `[batch_size, num_boxes, q, 4]`. If `q` is 1 then - /// same boxes are used for all classes otherwise, if `q` is equal to number of - /// classes, class-specific boxes are used. 
- /// - scores: A 3-D float tensor of shape `[batch_size, num_boxes, num_classes]`
- /// representing a single score corresponding to each box (each row of boxes).
- /// - max_output_size_per_class: A scalar integer tensor representing the maximum number of
- /// boxes to be selected by non max suppression per class
- /// - max_total_size: A scalar representing maximum number of boxes retained over all classes.
- /// - iou_threshold: A 0-D float tensor representing the threshold for deciding whether
- /// boxes overlap too much with respect to IOU.
- /// - score_threshold: A 0-D float tensor representing the threshold for deciding when to remove
- /// boxes based on score.
- ///
- /// - Attrs:
- /// - pad_per_class: If false, the output nmsed boxes, scores and classes
- /// are padded/clipped to `max_total_size`. If true, the
- /// output nmsed boxes, scores and classes are padded to be of length
- /// `max_size_per_class`*`num_classes`, unless it exceeds `max_total_size` in
- /// which case it is clipped to `max_total_size`. Defaults to false.
- /// - clip_boxes: If true, assume the box coordinates are between [0, 1] and clip the output boxes
- /// if they fall beyond [0, 1]. If false, do not do clipping and output the box
- /// coordinates as they are.
- ///
- /// - Outputs:
- /// - nmsed_boxes: A [batch_size, max_detections, 4] float32 tensor
- /// containing the non-max suppressed boxes.
- /// - nmsed_scores: A [batch_size, max_detections] float32 tensor
- /// containing the scores for the boxes.
- /// - nmsed_classes: A [batch_size, max_detections] float32 tensor
- /// containing the classes for the boxes.
- /// - valid_detections: A [batch_size] int32 tensor indicating the number of
- /// valid detections per batch item. Only the top num_detections[i] entries in
- /// nms_boxes[i], nms_scores[i] and nms_class[i] are valid. The rest of the
- /// entries are zero paddings.
- @inlinable @inline(__always)
- public static func combinedNonMaxSuppression(
- boxes: Tensor,
- scores: Tensor,
- maxOutputSizePerClass: Tensor,
- maxTotalSize: Tensor,
- iouThreshold: Tensor,
- scoreThreshold: Tensor,
- padPerClass: Bool = false,
- clipBoxes: Bool = true
- ) -> (
- nmsedBoxes: Tensor, nmsedScores: Tensor, nmsedClasses: Tensor,
- validDetections: Tensor
- ) {
- _RawTFEager.combinedNonMaxSuppression(
- boxes: boxes, scores: scores, maxOutputSizePerClass: maxOutputSizePerClass,
- maxTotalSize: maxTotalSize, iouThreshold: iouThreshold, scoreThreshold: scoreThreshold,
- padPerClass: padPerClass, clipBoxes: clipBoxes)
- }
-
- /// Compare values of `input` to `threshold` and pack resulting bits into a `uint8`.
- ///
- /// Each comparison returns a boolean `true` (if `input_value > threshold`)
- /// or `false` otherwise.
- ///
- /// This operation is useful for Locality-Sensitive-Hashing (LSH) and other
- /// algorithms that use hashing approximations of cosine and `L2` distances;
- /// codes can be generated from an input via:
- ///
- /// ```python
- /// codebook_size = 50
- /// codebook_bits = codebook_size * 32
- /// codebook = tf.get_variable('codebook', [x.shape[-1].value, codebook_bits],
- /// dtype=x.dtype,
- /// initializer=tf.orthogonal_initializer())
- /// codes = compare_and_threshold(tf.matmul(x, codebook), threshold=0.)
- /// codes = tf.bitcast(codes, tf.int32) # go from uint8 to int32
- /// # now codes has shape x.shape[:-1] + [codebook_size]
- /// ```
- ///
- /// **NOTE**: Currently, the innermost dimension of the tensor must be divisible
- /// by 8.
- /// - /// Given an `input` shaped `[s0, s1, ..., s_n]`, the output is - /// a `uint8` tensor shaped `[s0, s1, ..., s_n / 8]`. - /// - /// - Parameters: - /// - input: Values to compare against `threshold` and bitpack. - /// - threshold: Threshold to compare against. - /// - /// - Attr T: The type of the input and threshold. - /// - /// - Output output: The bitpacked comparisons. - @inlinable @inline(__always) - public static func compareAndBitpack( - _ input: Tensor, - threshold: Tensor - ) -> Tensor { - switch commonBackend(input.handle.backend, threshold.handle.backend) { - case .XLA: - let output_device = threshold.device - let input = Tensor(copying: input, to: .defaultTFEager) - let threshold = Tensor(copying: threshold, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.compareAndBitpack(input, threshold: threshold), to: output_device) - case .TF_EAGER: - return _RawTFEager.compareAndBitpack(input, threshold: threshold) - } - - } - - /// Converts two real numbers to a complex number. - /// - /// Given a tensor `real` representing the real part of a complex number, and a - /// tensor `imag` representing the imaginary part of a complex number, this - /// operation returns complex numbers elementwise of the form \\(a + bj\\), where - /// *a* represents the `real` part and *b* represents the `imag` part. - /// - /// The input tensors `real` and `imag` must have the same shape. - /// - /// For example: - /// - /// ``` - /// # tensor 'real' is [2.25, 3.25] - /// # tensor `imag` is [4.75, 5.75] - /// tf.complex(real, imag) ==> [[2.25 + 4.75j], [3.25 + 5.75j]] - /// ``` - @inlinable @inline(__always) - public static func complex< - T: FloatingPoint & TensorFlowScalar, - Tout: TensorFlowScalar - >( - real: Tensor, - imag: Tensor - ) -> Tensor { - switch commonBackend(real.handle.backend, imag.handle.backend) { - case .XLA: - let output_device = imag.device - let real = Tensor(copying: real, to: .defaultTFEager) - let imag = Tensor(copying: imag, to: .defaultTFEager) - return Tensor(copying: _RawTFEager.complex(real: real, imag: imag), to: output_device) - case .TF_EAGER: - return _RawTFEager.complex(real: real, imag: imag) - } - - } - - /// Computes the complex absolute value of a tensor. - /// - /// Given a tensor `x` of complex numbers, this operation returns a tensor of type - /// `float` or `double` that is the absolute value of each element in `x`. All - /// elements in `x` must be complex numbers of the form \\(a + bj\\). The absolute - /// value is computed as \\( \sqrt{a^2 + b^2}\\). - @inlinable @inline(__always) - public static func complexAbs< - T: TensorFlowScalar, - Tout: FloatingPoint & TensorFlowScalar - >( - _ x: Tensor - ) -> Tensor { - switch x.handle.backend { - case .XLA: - let output_device = x.device - let x = Tensor(copying: x, to: .defaultTFEager) - return Tensor(copying: _RawTFEager.complexAbs(x), to: output_device) - case .TF_EAGER: - return _RawTFEager.complexAbs(x) - } - - } - - @inlinable @inline(__always) - public static func complexStruct( - nA: Int64, - nB: Int64 - ) -> (a: [Tensor], b: [Tensor], c: TC) { - _RawTFEager.complexStruct(nA: nA, nB: nB) - } - - /// Computes the ids of the positions in sampled_candidates that match true_labels. - /// - /// When doing log-odds NCE, the result of this op should be passed through a - /// SparseToDense op, then added to the logits of the sampled candidates. This has - /// the effect of 'removing' the sampled labels that match the true labels by - /// making the classifier sure that they are sampled labels. 
- ///
- /// - Parameters:
- /// - true_classes: The true_classes output of UnpackSparseLabels.
- /// - sampled_candidates: The sampled_candidates output of CandidateSampler.
- ///
- /// - Attrs:
- /// - num_true: Number of true labels per context.
- /// - seed: If either seed or seed2 are set to be non-zero, the random number
- /// generator is seeded by the given seed. Otherwise, it is seeded by a
- /// random seed.
- /// - seed2: A second seed to avoid seed collision.
- ///
- /// - Outputs:
- /// - indices: A vector of indices corresponding to rows of true_candidates.
- /// - ids: A vector of IDs of positions in sampled_candidates that match a true_label
- /// for the row with the corresponding index in indices.
- /// - weights: A vector of the same length as indices and ids, in which each element
- /// is -FLOAT_MAX.
- @inlinable @inline(__always)
- public static func computeAccidentalHits(
- trueClasses: Tensor,
- sampledCandidates: Tensor,
- numTrue: Int64,
- seed: Int64 = 0,
- seed2: Int64 = 0
- ) -> (indices: Tensor, ids: Tensor, weights: Tensor) {
- _RawTFEager.computeAccidentalHits(
- trueClasses: trueClasses, sampledCandidates: sampledCandidates, numTrue: numTrue,
- seed: seed, seed2: seed2)
+ }
+
+ /// Adjust the hue of one or more images.
+ ///
+ /// `images` is a tensor of at least 3 dimensions. The last dimension is
+ /// interpreted as channels, and must be three.
+ ///
+ /// The input image is considered in the RGB colorspace. Conceptually, the RGB
+ /// colors are first mapped into HSV. A delta is then applied to all the hue values,
+ /// and then remapped back to RGB colorspace.
+ ///
+ /// - Parameters:
+ /// - images: Images to adjust. At least 3-D.
+ /// - delta: A float delta to add to the hue.
+ ///
+ /// - Output output: The hue-adjusted image or images.
+ @inlinable @inline(__always)
+ public static func adjustHue(
+ images: Tensor,
+ delta: Tensor
+ ) -> Tensor {
+ switch commonBackend(images.handle.backend, delta.handle.backend) {
+ case .XLA:
+ let output_device = delta.device
+ let images = Tensor(copying: images, to: .defaultTFEager)
+ let delta = Tensor(copying: delta, to: .defaultTFEager)
+ return Tensor(
+ copying: _RawTFEager.adjustHue(images: images, delta: delta), to: output_device)
+ case .TF_EAGER:
+ return _RawTFEager.adjustHue(images: images, delta: delta)
}

- /// Concatenates tensors along one dimension.
- ///
- /// - Parameters:
- /// - concat_dim: 0-D. The dimension along which to concatenate. Must be in the
- /// range [0, rank(values)).
- /// - values: The `N` Tensors to concatenate. Their ranks and types must match,
- /// and their sizes must match in all dimensions except `concat_dim`.
- ///
- /// - Output output: A `Tensor` with the concatenation of values stacked along the
- /// `concat_dim` dimension. This tensor's shape matches that of `values` except
- /// in `concat_dim` where it has the sum of the sizes.
- @inlinable @inline(__always)
- public static func concat(
- concatDim: Tensor,
- _ values: [Tensor]
- ) -> Tensor {
- switch commonBackend(concatDim.handle.backend, commonBackend(values)) {
- case .XLA:
- let output_device = concatDim.device
- let concatDim = Tensor(copying: concatDim, to: .defaultTFEager)
- let values = [Tensor](copying: values, to: .defaultTFEager)
- return Tensor(
- copying: _RawTFEager.concat(concatDim: concatDim, values), to: output_device)
- case .TF_EAGER:
- return _RawTFEager.concat(concatDim: concatDim, values)
- }
-
- }
-
- /// Computes offsets of concat inputs within its output.
- /// - /// For example: - /// - /// ``` - /// # 'x' is [2, 2, 7] - /// # 'y' is [2, 3, 7] - /// # 'z' is [2, 5, 7] - /// concat_offset(2, [x, y, z]) => [0, 0, 0], [0, 2, 0], [0, 5, 0] - /// ``` - /// - /// This is typically used by gradient computations for a concat operation. - /// - /// - Parameters: - /// - concat_dim: The dimension along which to concatenate. - /// - shape: The `N` int32 vectors representing shape of tensors being concatenated. - /// - /// - Output offset: The `N` int32 vectors representing the starting offset - /// of input tensors within the concatenated output. - @inlinable @inline(__always) - public static func concatOffset( - concatDim: Tensor, - shape: [Tensor] - ) -> [Tensor] { - switch commonBackend(concatDim.handle.backend, commonBackend(shape)) { - case .XLA: - let output_device = concatDim.device - let concatDim = Tensor(copying: concatDim, to: .defaultTFEager) - let shape = [Tensor](copying: shape, to: .defaultTFEager) - return [Tensor]( - copying: _RawTFEager.concatOffset(concatDim: concatDim, shape: shape), to: output_device) - case .TF_EAGER: - return _RawTFEager.concatOffset(concatDim: concatDim, shape: shape) - } - - } - - /// Concatenates tensors along one dimension. - /// - /// - Parameters: - /// - values: List of `N` Tensors to concatenate. Their ranks and types must match, - /// and their sizes must match in all dimensions except `concat_dim`. - /// - axis: 0-D. The dimension along which to concatenate. Must be in the - /// range [-rank(values), rank(values)). - /// - /// - Output output: A `Tensor` with the concatenation of values stacked along the - /// `concat_dim` dimension. This tensor's shape matches that of `values` except - /// in `concat_dim` where it has the sum of the sizes. - @inlinable @inline(__always) - public static func concatV2< - T: TensorFlowScalar, - Tidx: TensorFlowIndex - >( - _ values: [Tensor], - axis: Tensor - ) -> Tensor { - switch commonBackend(commonBackend(values), axis.handle.backend) { - case .XLA: - return _RawXLA.concatV2(values, axis: axis) - case .TF_EAGER: - return _RawTFEager.concatV2(values, axis: axis) - } - - } - - /// Creates a dataset that concatenates `input_dataset` with `another_dataset`. - @inlinable @inline(__always) - public static func concatenateDataset( - inputDataset: VariantHandle, - anotherDataset: VariantHandle, - outputTypes: [TensorDataType], - outputShapes: [TensorShape?] - ) -> VariantHandle { - _RawTFEager.concatenateDataset( - inputDataset: inputDataset, anotherDataset: anotherDataset, outputTypes: outputTypes, - outputShapes: outputShapes) - } - - /// Sets up the centralized structures for a distributed TPU system. - /// - /// - Attrs: - /// - embedding_config: Reserved. Do not use. - /// - tpu_embedding_config: Serialized tensorflow.tpu.TPUEmbeddingConfiguration that - /// describes the embedding lookups of the program. - /// - is_global_init: Reserved. Do not use. - /// - /// - Output topology: A serialized tensorflow.tpu.TopologyProto that describes the TPU - /// topology. 
- @inlinable @inline(__always)
- public static func configureDistributedTPU(
- embeddingConfig: String,
- tpuEmbeddingConfig: String,
- isGlobalInit: Bool = false,
- enableWholeMeshCompilations: Bool = false,
- compilationFailureClosesChips: Bool = true
- ) -> StringTensor {
- _RawTFEager.configureDistributedTPU(
- embeddingConfig: embeddingConfig, tpuEmbeddingConfig: tpuEmbeddingConfig,
- isGlobalInit: isGlobalInit, enableWholeMeshCompilations: enableWholeMeshCompilations,
- compilationFailureClosesChips: compilationFailureClosesChips)
- }
-
- /// Sets up TPUEmbedding in a distributed TPU system.
- ///
- /// - Attr config: Serialized tensorflow.tpu.TPUEmbeddingConfiguration that
- /// describes the embedding lookups of the program.
- @inlinable @inline(__always)
- public static func configureTPUEmbedding(
- config: String
- ) {
- _RawTFEager.configureTPUEmbedding(config: config)
- }
-
- /// Returns the complex conjugate of a complex number.
- ///
- /// Given a tensor `input` of complex numbers, this operation returns a tensor of
- /// complex numbers that are the complex conjugate of each element in `input`. The
- /// complex numbers in `input` must be of the form \\(a + bj\\), where *a* is the
- /// real part and *b* is the imaginary part.
- ///
- /// The complex conjugate returned by this operation is of the form \\(a - bj\\).
- ///
- /// For example:
- ///
- /// ```
- /// # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
- /// tf.conj(input) ==> [-2.25 - 4.75j, 3.25 - 5.75j]
- /// ```
- @inlinable @inline(__always)
- public static func conj(
- _ input: Tensor
- ) -> Tensor {
- switch input.handle.backend {
- case .XLA:
- let output_device = input.device
- let input = Tensor(copying: input, to: .defaultTFEager)
- return Tensor(copying: _RawTFEager.conj(input), to: output_device)
- case .TF_EAGER:
- return _RawTFEager.conj(input)
- }
-
- }
-
- /// Shuffle dimensions of x according to a permutation and conjugate the result.
- ///
- /// The output `y` has the same rank as `x`. The shapes of `x` and `y` satisfy:
- /// `y.shape[i] == x.shape[perm[i]] for i in [0, 1, ..., rank(x) - 1]`
- /// `y[i,j,k,...,s,t,u] == conj(x[perm[i], perm[j], perm[k],...,perm[s], perm[t], perm[u]])`
- @inlinable @inline(__always)
- public static func conjugateTranspose<
- T: TensorFlowScalar,
- Tperm: TensorFlowIndex
- >(
- _ x: Tensor,
- perm: Tensor
- ) -> Tensor {
- switch commonBackend(x.handle.backend, perm.handle.backend) {
- case .XLA:
- let output_device = perm.device
- let x = Tensor(copying: x, to: .defaultTFEager)
- let perm = Tensor(copying: perm, to: .defaultTFEager)
- return Tensor(copying: _RawTFEager.conjugateTranspose(x, perm: perm), to: output_device)
- case .TF_EAGER:
- return _RawTFEager.conjugateTranspose(x, perm: perm)
- }
-
- }
-
- @inlinable @inline(__always)
- public static func constructionFails() {
- _RawTFEager.constructionFails()
- }
-
- /// This op consumes a lock created by `MutexLock`.
- ///
- /// This op exists to consume a tensor created by `MutexLock` (other than
- /// direct control dependencies). It should be the only op that consumes the tensor,
- /// and will raise an error if it is not. Its only purpose is to keep the
- /// mutex lock tensor alive until it is consumed by this op.
- ///
- /// **NOTE**: This operation must run on the same device as its input. This may
- /// be enforced via the `colocate_with` mechanism.
- ///
- /// - Parameter mutex_lock: A tensor returned by `MutexLock`.
- @inlinable @inline(__always) - public static func consumeMutexLock( - mutexLock: VariantHandle - ) { - _RawTFEager.consumeMutexLock(mutexLock: mutexLock) - } - - /// Does nothing. Serves as a control trigger for scheduling. - /// - /// Only useful as a placeholder for control edges. - @inlinable @inline(__always) - public static func controlTrigger() { - _RawTFEager.controlTrigger() - } - - /// Computes a 2-D convolution given 4-D `input` and `filter` tensors. - /// - /// Given an input tensor of shape `[batch, in_height, in_width, in_channels]` - /// and a filter / kernel tensor of shape - /// `[filter_height, filter_width, in_channels, out_channels]`, this op - /// performs the following: - /// - /// 1. Flattens the filter to a 2-D matrix with shape - /// `[filter_height * filter_width * in_channels, output_channels]`. - /// 2. Extracts image patches from the input tensor to form a *virtual* - /// tensor of shape `[batch, out_height, out_width, - /// filter_height * filter_width * in_channels]`. - /// 3. For each patch, right-multiplies the filter matrix and the image patch - /// vector. - /// - /// In detail, with the default NHWC format, - /// - /// output[b, i, j, k] = - /// sum_{di, dj, q} input[b, strides[1] * i + di, strides[2] * j + dj, q] * - /// filter[di, dj, q, k] - /// - /// Must have `strides[0] = strides[3] = 1`. For the most common case of the same - /// horizontal and vertices strides, `strides = [1, stride, stride, 1]`. - /// - /// - Parameters: - /// - input: A 4-D tensor. The dimension order is interpreted according to the value - /// of `data_format`, see below for details. - /// - filter: A 4-D tensor of shape - /// `[filter_height, filter_width, in_channels, out_channels]` - /// - /// - Attrs: - /// - strides: 1-D tensor of length 4. The stride of the sliding window for each - /// dimension of `input`. The dimension order is determined by the value of - /// `data_format`, see below for details. - /// - padding: The type of padding algorithm to use. - /// - explicit_paddings: If `padding` is `"EXPLICIT"`, the list of explicit padding amounts. For the ith - /// dimension, the amount of padding inserted before and after the dimension is - /// `explicit_paddings[2 * i]` and `explicit_paddings[2 * i + 1]`, respectively. If - /// `padding` is not `"EXPLICIT"`, `explicit_paddings` must be empty. - /// - data_format: Specify the data format of the input and output data. With the - /// default format "NHWC", the data is stored in the order of: - /// [batch, height, width, channels]. - /// Alternatively, the format could be "NCHW", the data storage order of: - /// [batch, channels, height, width]. - /// - dilations: 1-D tensor of length 4. The dilation factor for each dimension of - /// `input`. If set to k > 1, there will be k-1 skipped cells between each - /// filter element on that dimension. The dimension order is determined by the - /// value of `data_format`, see above for details. Dilations in the batch and - /// depth dimensions must be 1. - /// - /// - Output output: A 4-D tensor. The dimension order is determined by the value of - /// `data_format`, see below for details. 
- @inlinable @inline(__always) - public static func conv2D( - _ input: Tensor, - filter: Tensor, - strides: [Int32], - useCudnnOnGpu: Bool = true, - padding: Padding1, - explicitPaddings: [Int32], - dataFormat: DataFormat = .nhwc, - dilations: [Int32] = [1, 1, 1, 1] - ) -> Tensor { - switch commonBackend(input.handle.backend, filter.handle.backend) { - case .XLA: - return _RawXLA.conv2D( - input, filter: filter, strides: strides, useCudnnOnGpu: useCudnnOnGpu, padding: padding, - explicitPaddings: explicitPaddings, dataFormat: dataFormat, dilations: dilations) - case .TF_EAGER: - return _RawTFEager.conv2D( - input, filter: filter, strides: strides, useCudnnOnGpu: useCudnnOnGpu, padding: padding, - explicitPaddings: explicitPaddings, dataFormat: dataFormat, dilations: dilations) - } - - } - - /// Computes the gradients of convolution with respect to the filter. - /// - /// - Parameters: - /// - input: 4-D with shape `[batch, in_height, in_width, in_channels]`. - /// - filter_sizes: An integer vector representing the tensor shape of `filter`, - /// where `filter` is a 4-D - /// `[filter_height, filter_width, in_channels, out_channels]` tensor. - /// - out_backprop: 4-D with shape `[batch, out_height, out_width, out_channels]`. - /// Gradients w.r.t. the output of the convolution. - /// - /// - Attrs: - /// - strides: The stride of the sliding window for each dimension of the input - /// of the convolution. Must be in the same order as the dimension specified with - /// format. - /// - padding: The type of padding algorithm to use. - /// - explicit_paddings: If `padding` is `"EXPLICIT"`, the list of explicit padding amounts. For the ith - /// dimension, the amount of padding inserted before and after the dimension is - /// `explicit_paddings[2 * i]` and `explicit_paddings[2 * i + 1]`, respectively. If - /// `padding` is not `"EXPLICIT"`, `explicit_paddings` must be empty. - /// - data_format: Specify the data format of the input and output data. With the - /// default format "NHWC", the data is stored in the order of: - /// [batch, in_height, in_width, in_channels]. - /// Alternatively, the format could be "NCHW", the data storage order of: - /// [batch, in_channels, in_height, in_width]. - /// - dilations: 1-D tensor of length 4. The dilation factor for each dimension of - /// `input`. If set to k > 1, there will be k-1 skipped cells between each filter - /// element on that dimension. The dimension order is determined by the value of - /// `data_format`, see above for details. Dilations in the batch and depth - /// dimensions must be 1. - /// - /// - Output output: 4-D with shape - /// `[filter_height, filter_width, in_channels, out_channels]`. Gradient w.r.t. - /// the `filter` input of the convolution. 
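As a cross-check of the NHWC summation quoted in the `conv2D` doc comment, here is a naive standalone reference, restricted to VALID padding and unit dilation; `conv2DValid` and its nested-array shapes are illustrative assumptions, not the kernel either backend dispatches to:

```swift
// Naive reference for the Conv2D sum: input [batch][h][w][cIn],
// filter [fh][fw][cIn][cOut]; strides step through h and w.
func conv2DValid(
  _ input: [[[[Float]]]], filter: [[[[Float]]]], strideH: Int, strideW: Int
) -> [[[[Float]]]] {
  let (fh, fw) = (filter.count, filter[0].count)
  let cOut = filter[0][0][0].count
  return input.map { image -> [[[Float]]] in
    let outH = (image.count - fh) / strideH + 1
    let outW = (image[0].count - fw) / strideW + 1
    return (0..<outH).map { i -> [[Float]] in
      (0..<outW).map { j -> [Float] in
        (0..<cOut).map { k -> Float in
          var acc: Float = 0
          for di in 0..<fh {
            for dj in 0..<fw {
              for q in 0..<filter[di][dj].count {
                acc += image[strideH * i + di][strideW * j + dj][q] * filter[di][dj][q][k]
              }
            }
          }
          return acc
        }
      }
    }
  }
}

// 3x3 single-channel image, 2x2 filter picking the main diagonal, stride 1.
let out = conv2DValid(
  [[[[1], [2], [3]], [[4], [5], [6]], [[7], [8], [9]]]],
  filter: [[[[1]], [[0]]], [[[0]], [[1]]]],
  strideH: 1, strideW: 1)
print(out)  // [[[[6.0], [8.0]], [[12.0], [14.0]]]]
```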
- @inlinable @inline(__always) - public static func conv2DBackpropFilter( - _ input: Tensor, - filterSizes: Tensor, - outBackprop: Tensor, - strides: [Int32], - useCudnnOnGpu: Bool = true, - padding: Padding1, - explicitPaddings: [Int32], - dataFormat: DataFormat = .nhwc, - dilations: [Int32] = [1, 1, 1, 1] - ) -> Tensor { - switch commonBackend( - commonBackend(input.handle.backend, filterSizes.handle.backend), outBackprop.handle.backend) - { - case .XLA: - return _RawXLA.conv2DBackpropFilter( - input, filterSizes: filterSizes, outBackprop: outBackprop, strides: strides, - useCudnnOnGpu: useCudnnOnGpu, padding: padding, explicitPaddings: explicitPaddings, - dataFormat: dataFormat, dilations: dilations) - case .TF_EAGER: - return _RawTFEager.conv2DBackpropFilter( - input, filterSizes: filterSizes, outBackprop: outBackprop, strides: strides, - useCudnnOnGpu: useCudnnOnGpu, padding: padding, explicitPaddings: explicitPaddings, - dataFormat: dataFormat, dilations: dilations) - } - - } - - /// Computes the gradients of convolution with respect to the input. - /// - /// - Parameters: - /// - input_sizes: An integer vector representing the shape of `input`, - /// where `input` is a 4-D `[batch, height, width, channels]` tensor. - /// - filter: 4-D with shape - /// `[filter_height, filter_width, in_channels, out_channels]`. - /// - out_backprop: 4-D with shape `[batch, out_height, out_width, out_channels]`. - /// Gradients w.r.t. the output of the convolution. - /// - /// - Attrs: - /// - strides: The stride of the sliding window for each dimension of the input - /// of the convolution. Must be in the same order as the dimension specified with - /// format. - /// - padding: The type of padding algorithm to use. - /// - explicit_paddings: If `padding` is `"EXPLICIT"`, the list of explicit padding amounts. For the ith - /// dimension, the amount of padding inserted before and after the dimension is - /// `explicit_paddings[2 * i]` and `explicit_paddings[2 * i + 1]`, respectively. If - /// `padding` is not `"EXPLICIT"`, `explicit_paddings` must be empty. - /// - data_format: Specify the data format of the input and output data. With the - /// default format "NHWC", the data is stored in the order of: - /// [batch, in_height, in_width, in_channels]. - /// Alternatively, the format could be "NCHW", the data storage order of: - /// [batch, in_channels, in_height, in_width]. - /// - dilations: 1-D tensor of length 4. The dilation factor for each dimension of - /// `input`. If set to k > 1, there will be k-1 skipped cells between each filter - /// element on that dimension. The dimension order is determined by the value of - /// `data_format`, see above for details. Dilations in the batch and depth - /// dimensions must be 1. - /// - /// - Output output: 4-D with shape `[batch, in_height, in_width, in_channels]`. Gradient - /// w.r.t. the input of the convolution. 
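The `strides` and `padding` attributes of these convolution ops pin down the output's spatial extent. A small helper capturing the two standard padding rules (unit dilation assumed; `convOutputSize` is a hypothetical name, not library API):

```swift
// Output spatial size per padding mode (unit dilation):
//   VALID: ceil((in - f + 1) / s), i.e. only fully covered windows.
//   SAME:  ceil(in / s), padding as needed.
func convOutputSize(inputSize: Int, filterSize: Int, stride: Int, same: Bool) -> Int {
  same
    ? (inputSize + stride - 1) / stride
    : (inputSize - filterSize + stride) / stride
}

print(convOutputSize(inputSize: 28, filterSize: 5, stride: 2, same: false))  // 12
print(convOutputSize(inputSize: 28, filterSize: 5, stride: 2, same: true))   // 14
```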
- @inlinable @inline(__always) - public static func conv2DBackpropInput( - inputSizes: Tensor, - filter: Tensor, - outBackprop: Tensor, - strides: [Int32], - useCudnnOnGpu: Bool = true, - padding: Padding1, - explicitPaddings: [Int32], - dataFormat: DataFormat = .nhwc, - dilations: [Int32] = [1, 1, 1, 1] - ) -> Tensor { - switch commonBackend( - commonBackend(inputSizes.handle.backend, filter.handle.backend), outBackprop.handle.backend) - { - case .XLA: - return _RawXLA.conv2DBackpropInput( - inputSizes: inputSizes, filter: filter, outBackprop: outBackprop, strides: strides, - useCudnnOnGpu: useCudnnOnGpu, padding: padding, explicitPaddings: explicitPaddings, - dataFormat: dataFormat, dilations: dilations) - case .TF_EAGER: - return _RawTFEager.conv2DBackpropInput( - inputSizes: inputSizes, filter: filter, outBackprop: outBackprop, strides: strides, - useCudnnOnGpu: useCudnnOnGpu, padding: padding, explicitPaddings: explicitPaddings, - dataFormat: dataFormat, dilations: dilations) - } - - } - - /// Computes a 3-D convolution given 5-D `input` and `filter` tensors. - /// - /// In signal processing, cross-correlation is a measure of similarity of - /// two waveforms as a function of a time-lag applied to one of them. This - /// is also known as a sliding dot product or sliding inner-product. - /// - /// Our Conv3D implements a form of cross-correlation. - /// - /// - Parameters: - /// - input: Shape `[batch, in_depth, in_height, in_width, in_channels]`. - /// - filter: Shape `[filter_depth, filter_height, filter_width, in_channels, - /// out_channels]`. `in_channels` must match between `input` and `filter`. - /// - /// - Attrs: - /// - strides: 1-D tensor of length 5. The stride of the sliding window for each - /// dimension of `input`. Must have `strides[0] = strides[4] = 1`. - /// - padding: The type of padding algorithm to use. - /// - data_format: The data format of the input and output data. With the - /// default format "NDHWC", the data is stored in the order of: - /// [batch, in_depth, in_height, in_width, in_channels]. - /// Alternatively, the format could be "NCDHW", the data storage order is: - /// [batch, in_channels, in_depth, in_height, in_width]. - /// - dilations: 1-D tensor of length 5. The dilation factor for each dimension of - /// `input`. If set to k > 1, there will be k-1 skipped cells between each - /// filter element on that dimension. The dimension order is determined by the - /// value of `data_format`, see above for details. Dilations in the batch and - /// depth dimensions must be 1. - @inlinable @inline(__always) - public static func conv3D( - _ input: Tensor, - filter: Tensor, - strides: [Int32], - padding: Padding, - dataFormat: DataFormat1 = .ndhwc, - dilations: [Int32] = [1, 1, 1, 1, 1] - ) -> Tensor { - switch commonBackend(input.handle.backend, filter.handle.backend) { - case .XLA: - return _RawXLA.conv3D( - input, filter: filter, strides: strides, padding: padding, dataFormat: dataFormat, - dilations: dilations) - case .TF_EAGER: - return _RawTFEager.conv3D( - input, filter: filter, strides: strides, padding: padding, dataFormat: dataFormat, - dilations: dilations) - } - - } - - /// Computes the gradients of 3-D convolution with respect to the filter. - /// - /// - Parameters: - /// - input: Shape `[batch, depth, rows, cols, in_channels]`. - /// - filter: Shape `[depth, rows, cols, in_channels, out_channels]`. - /// `in_channels` must match between `input` and `filter`. 
- /// - out_backprop: Backprop signal of shape `[batch, out_depth, out_rows, out_cols, - /// out_channels]`. - /// - /// - Attrs: - /// - strides: 1-D tensor of length 5. The stride of the sliding window for each - /// dimension of `input`. Must have `strides[0] = strides[4] = 1`. - /// - padding: The type of padding algorithm to use. - @inlinable @inline(__always) - public static func conv3DBackpropFilter( - _ input: Tensor, - filter: Tensor, - outBackprop: Tensor, - strides: [Int32], - padding: Padding, - dilations: [Int32] = [1, 1, 1, 1, 1] - ) -> Tensor { - switch commonBackend( - commonBackend(input.handle.backend, filter.handle.backend), outBackprop.handle.backend) - { - case .XLA: - let output_device = outBackprop.device - let input = Tensor(copying: input, to: .defaultTFEager) - let filter = Tensor(copying: filter, to: .defaultTFEager) - let outBackprop = Tensor(copying: outBackprop, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.conv3DBackpropFilter( - input, filter: filter, outBackprop: outBackprop, strides: strides, padding: padding, - dilations: dilations), to: output_device) - case .TF_EAGER: - return _RawTFEager.conv3DBackpropFilter( - input, filter: filter, outBackprop: outBackprop, strides: strides, padding: padding, - dilations: dilations) - } - - } - - /// Computes the gradients of 3-D convolution with respect to the filter. - /// - /// - Parameters: - /// - input: Shape `[batch, depth, rows, cols, in_channels]`. - /// - filter_sizes: An integer vector representing the tensor shape of `filter`, - /// where `filter` is a 5-D - /// `[filter_depth, filter_height, filter_width, in_channels, out_channels]` - /// tensor. - /// - out_backprop: Backprop signal of shape `[batch, out_depth, out_rows, out_cols, - /// out_channels]`. - /// - /// - Attrs: - /// - strides: 1-D tensor of length 5. The stride of the sliding window for each - /// dimension of `input`. Must have `strides[0] = strides[4] = 1`. - /// - padding: The type of padding algorithm to use. - /// - data_format: The data format of the input and output data. With the - /// default format "NDHWC", the data is stored in the order of: - /// [batch, in_depth, in_height, in_width, in_channels]. - /// Alternatively, the format could be "NCDHW", the data storage order is: - /// [batch, in_channels, in_depth, in_height, in_width]. - /// - dilations: 1-D tensor of length 5. The dilation factor for each dimension of - /// `input`. If set to k > 1, there will be k-1 skipped cells between each - /// filter element on that dimension. The dimension order is determined by the - /// value of `data_format`, see above for details. Dilations in the batch and - /// depth dimensions must be 1. 
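The `dilations` attribute described above inserts k - 1 skipped input cells between filter taps, enlarging the receptive field without adding parameters. A one-line check of that arithmetic (hypothetical helper):

```swift
// A filter of size f with dilation k covers f + (f - 1) * (k - 1) input cells.
func effectiveFilterSize(filterSize f: Int, dilation k: Int) -> Int {
  f + (f - 1) * (k - 1)
}

print(effectiveFilterSize(filterSize: 3, dilation: 2))  // 5
```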
- @inlinable @inline(__always) - public static func conv3DBackpropFilterV2( - _ input: Tensor, - filterSizes: Tensor, - outBackprop: Tensor, - strides: [Int32], - padding: Padding, - dataFormat: DataFormat1 = .ndhwc, - dilations: [Int32] = [1, 1, 1, 1, 1] - ) -> Tensor { - switch commonBackend( - commonBackend(input.handle.backend, filterSizes.handle.backend), outBackprop.handle.backend) - { - case .XLA: - return _RawXLA.conv3DBackpropFilterV2( - input, filterSizes: filterSizes, outBackprop: outBackprop, strides: strides, - padding: padding, dataFormat: dataFormat, dilations: dilations) - case .TF_EAGER: - return _RawTFEager.conv3DBackpropFilterV2( - input, filterSizes: filterSizes, outBackprop: outBackprop, strides: strides, - padding: padding, dataFormat: dataFormat, dilations: dilations) - } - - } - - /// Computes the gradients of 3-D convolution with respect to the input. - /// - /// - Parameters: - /// - input: Shape `[batch, depth, rows, cols, in_channels]`. - /// - filter: Shape `[depth, rows, cols, in_channels, out_channels]`. - /// `in_channels` must match between `input` and `filter`. - /// - out_backprop: Backprop signal of shape `[batch, out_depth, out_rows, out_cols, - /// out_channels]`. - /// - /// - Attrs: - /// - strides: 1-D tensor of length 5. The stride of the sliding window for each - /// dimension of `input`. Must have `strides[0] = strides[4] = 1`. - /// - padding: The type of padding algorithm to use. - @inlinable @inline(__always) - public static func conv3DBackpropInput( - _ input: Tensor, - filter: Tensor, - outBackprop: Tensor, - strides: [Int32], - padding: Padding, - dilations: [Int32] = [1, 1, 1, 1, 1] - ) -> Tensor { - switch commonBackend( - commonBackend(input.handle.backend, filter.handle.backend), outBackprop.handle.backend) - { - case .XLA: - let output_device = outBackprop.device - let input = Tensor(copying: input, to: .defaultTFEager) - let filter = Tensor(copying: filter, to: .defaultTFEager) - let outBackprop = Tensor(copying: outBackprop, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.conv3DBackpropInput( - input, filter: filter, outBackprop: outBackprop, strides: strides, padding: padding, - dilations: dilations), to: output_device) - case .TF_EAGER: - return _RawTFEager.conv3DBackpropInput( - input, filter: filter, outBackprop: outBackprop, strides: strides, padding: padding, - dilations: dilations) - } - - } - - /// Computes the gradients of 3-D convolution with respect to the input. - /// - /// - Parameters: - /// - input_sizes: An integer vector representing the tensor shape of `input`, - /// where `input` is a 5-D - /// `[batch, depth, rows, cols, in_channels]` tensor. - /// - filter: Shape `[depth, rows, cols, in_channels, out_channels]`. - /// `in_channels` must match between `input` and `filter`. - /// - out_backprop: Backprop signal of shape `[batch, out_depth, out_rows, out_cols, - /// out_channels]`. - /// - /// - Attrs: - /// - strides: 1-D tensor of length 5. The stride of the sliding window for each - /// dimension of `input`. Must have `strides[0] = strides[4] = 1`. - /// - padding: The type of padding algorithm to use. - /// - data_format: The data format of the input and output data. With the - /// default format "NDHWC", the data is stored in the order of: - /// [batch, in_depth, in_height, in_width, in_channels]. - /// Alternatively, the format could be "NCDHW", the data storage order is: - /// [batch, in_channels, in_depth, in_height, in_width]. - /// - dilations: 1-D tensor of length 5. 
The dilation factor for each dimension of - /// `input`. If set to k > 1, there will be k-1 skipped cells between each - /// filter element on that dimension. The dimension order is determined by the - /// value of `data_format`, see above for details. Dilations in the batch and - /// depth dimensions must be 1. - @inlinable @inline(__always) - public static func conv3DBackpropInputV2< - T: FloatingPoint & TensorFlowScalar, - Tshape: TensorFlowIndex - >( - inputSizes: Tensor, - filter: Tensor, - outBackprop: Tensor, - strides: [Int32], - padding: Padding, - dataFormat: DataFormat1 = .ndhwc, - dilations: [Int32] = [1, 1, 1, 1, 1] - ) -> Tensor { - switch commonBackend( - commonBackend(inputSizes.handle.backend, filter.handle.backend), outBackprop.handle.backend) - { - case .XLA: - return _RawXLA.conv3DBackpropInputV2( - inputSizes: inputSizes, filter: filter, outBackprop: outBackprop, strides: strides, - padding: padding, dataFormat: dataFormat, dilations: dilations) - case .TF_EAGER: - return _RawTFEager.conv3DBackpropInputV2( - inputSizes: inputSizes, filter: filter, outBackprop: outBackprop, strides: strides, - padding: padding, dataFormat: dataFormat, dilations: dilations) - } - - } - - /// Copy a tensor from CPU-to-CPU or GPU-to-GPU. - /// - /// Performs CPU-to-CPU or GPU-to-GPU deep-copying of tensor, depending on the - /// device on which the tensor is allocated. - /// N.B.: If the all downstream attached debug ops are disabled given the current - /// gRPC gating status, the output will simply forward the input tensor without - /// deep-copying. See the documentation of Debug* ops for more details. - /// - /// Unlike the CopyHost Op, this op does not have HostMemory constraint on its - /// input or output. - /// - /// - Parameter input: Input tensor. - /// - /// - Attrs: - /// - tensor_name: The name of the input tensor. - /// - debug_ops_spec: A list of debug op spec (op, url, gated_grpc) for attached debug - /// ops. Each element of the list has the format - /// ;;, wherein gated_grpc is boolean represented - /// as 0/1. E.g., "DebugIdentity;grpc://foo:3333;1", - /// "DebugIdentity;file:///tmp/tfdbg_1;0". - @inlinable @inline(__always) - public static func copy( - _ input: Tensor, - tensorName: String, - debugOpsSpec: [String] - ) -> Tensor { - switch input.handle.backend { - case .XLA: - let output_device = input.device - let input = Tensor(copying: input, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.copy(input, tensorName: tensorName, debugOpsSpec: debugOpsSpec), - to: output_device) - case .TF_EAGER: - return _RawTFEager.copy(input, tensorName: tensorName, debugOpsSpec: debugOpsSpec) - } - - } - - /// Copy a tensor to host. - /// - /// Performs CPU-to-CPU deep-copying of tensor. - /// N.B.: If the all downstream attached debug ops are disabled given the current - /// gRPC gating status, the output will simply forward the input tensor without - /// deep-copying. See the documentation of Debug* ops for more details. - /// - /// Unlike the Copy Op, this op has HostMemory constraint on its input or output. - /// - /// - Parameter input: Input tensor. - /// - /// - Attrs: - /// - tensor_name: The name of the input tensor. - /// - debug_ops_spec: A list of debug op spec (op, url, gated_grpc) for attached debug - /// ops. Each element of the list has the format - /// ;;, wherein gated_grpc is boolean represented - /// as 0/1. E.g., "DebugIdentity;grpc://foo:3333;1", - /// "DebugIdentity;file:///tmp/tfdbg_1;0". 
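The `debug_ops_spec` format in the copy-op comments above lost its bracketed field names in doc generation; judging from the examples, each entry is `<debug_op>;<url>;<gated_grpc>`. A parsing sketch under that assumption (`DebugOpSpec` and `parseDebugOpSpec` are illustrative, not library API):

```swift
// One debug-op spec, e.g. "DebugIdentity;grpc://foo:3333;1".
struct DebugOpSpec {
  let op: String
  let url: String
  let gatedGRPC: Bool
}

func parseDebugOpSpec(_ spec: String) -> DebugOpSpec? {
  // Split into at most three components: op, url, gated_grpc flag.
  let parts = spec.split(separator: ";", maxSplits: 2).map(String.init)
  guard parts.count == 3 else { return nil }
  return DebugOpSpec(op: parts[0], url: parts[1], gatedGRPC: parts[2] == "1")
}

print(parseDebugOpSpec("DebugIdentity;grpc://foo:3333;1")!)
// DebugOpSpec(op: "DebugIdentity", url: "grpc://foo:3333", gatedGRPC: true)
```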
- @inlinable @inline(__always) - public static func copyHost( - _ input: Tensor, - tensorName: String, - debugOpsSpec: [String] - ) -> Tensor { - switch input.handle.backend { - case .XLA: - let output_device = input.device - let input = Tensor(copying: input, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.copyHost(input, tensorName: tensorName, debugOpsSpec: debugOpsSpec), - to: output_device) - case .TF_EAGER: - return _RawTFEager.copyHost(input, tensorName: tensorName, debugOpsSpec: debugOpsSpec) - } - - } - - @inlinable @inline(__always) - public static func copyOp( - _ a: Tensor - ) -> Tensor { - switch a.handle.backend { - case .XLA: - let output_device = a.device - let a = Tensor(copying: a, to: .defaultTFEager) - return Tensor(copying: _RawTFEager.copyOp(a), to: output_device) - case .TF_EAGER: - return _RawTFEager.copyOp(a) - } - - } - - /// Computes cos of x element-wise. - /// - /// Given an input tensor, this function computes cosine of every - /// element in the tensor. Input range is `(-inf, inf)` and - /// output range is `[-1,1]`. If input lies outside the boundary, `nan` - /// is returned. - /// - /// ```python - /// x = tf.constant([-float("inf"), -9, -0.5, 1, 1.2, 200, 10000, float("inf")]) - /// tf.math.cos(x) ==> [nan -0.91113025 0.87758255 0.5403023 0.36235774 0.48718765 -0.95215535 nan] - /// ``` - @inlinable @inline(__always) - public static func cos( - _ x: Tensor - ) -> Tensor { - switch x.handle.backend { - case .XLA: - return _RawXLA.cos(x) - case .TF_EAGER: - return _RawTFEager.cos(x) - } - - } - - /// Computes hyperbolic cosine of x element-wise. - /// - /// Given an input tensor, this function computes hyperbolic cosine of every - /// element in the tensor. Input range is `[-inf, inf]` and output range - /// is `[1, inf]`. - /// - /// ```python - /// x = tf.constant([-float("inf"), -9, -0.5, 1, 1.2, 2, 10, float("inf")]) - /// tf.math.cosh(x) ==> [inf 4.0515420e+03 1.1276259e+00 1.5430807e+00 1.8106556e+00 3.7621956e+00 1.1013233e+04 inf] - /// ``` - @inlinable @inline(__always) - public static func cosh( - _ x: Tensor - ) -> Tensor { - switch x.handle.backend { - case .XLA: - return _RawXLA.cosh(x) - case .TF_EAGER: - return _RawTFEager.cosh(x) - } - - } - - @inlinable @inline(__always) - public static func createSummaryDbWriter( - writer: ResourceHandle, - dbUri: StringTensor, - experimentName: StringTensor, - runName: StringTensor, - userName: StringTensor - ) { - _RawTFEager.createSummaryDbWriter( - writer: writer, dbUri: dbUri, experimentName: experimentName, runName: runName, - userName: userName) - } - - @inlinable @inline(__always) - public static func createSummaryFileWriter( - writer: ResourceHandle, - logdir: StringTensor, - maxQueue: Tensor, - flushMillis: Tensor, - filenameSuffix: StringTensor - ) { - _RawTFEager.createSummaryFileWriter( - writer: writer, logdir: logdir, maxQueue: maxQueue, flushMillis: flushMillis, - filenameSuffix: filenameSuffix) - } - - @inlinable @inline(__always) - public static func createTRTResourceHandle( - resourceName: String - ) -> ResourceHandle { - _RawTFEager.createTRTResourceHandle(resourceName: resourceName) - } - - /// Extracts crops from the input image tensor and resizes them. - /// - /// Extracts crops from the input image tensor and resizes them using bilinear - /// sampling or nearest neighbor sampling (possibly with aspect ratio change) to a - /// common output size specified by `crop_size`. 
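Nearly every `.XLA` case in this file falls back the same way: remember the output device, copy the operands to `.defaultTFEager`, run the eager kernel, and copy the result back. A toy standalone model of that round trip (`ToyDevice` and `ToyTensor` are stand-ins, not the library's `Device`/`Tensor`):

```swift
enum ToyDevice { case xla, tfEager }

struct ToyTensor {
  var value: Float
  var device: ToyDevice
  func copied(to device: ToyDevice) -> ToyTensor { ToyTensor(value: value, device: device) }
}

// Ops with no XLA lowering run eagerly; the result is copied back to the
// device the caller expects, mirroring the `output_device` dance above.
func fallbackToEager(_ x: ToyTensor, op: (ToyTensor) -> ToyTensor) -> ToyTensor {
  switch x.device {
  case .xla:
    let outputDevice = x.device
    return op(x.copied(to: .tfEager)).copied(to: outputDevice)
  case .tfEager:
    return op(x)
  }
}

let squared = fallbackToEager(ToyTensor(value: 2, device: .xla)) { t in
  ToyTensor(value: t.value * t.value, device: t.device)
}
print(squared.device, squared.value)  // xla 4.0
```

Ops routed through this fallback pay two device transfers per call, which is why the XLA-native cases (`_RawXLA.…`) are preferred whenever a lowering exists.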
This is more general than the - /// `crop_to_bounding_box` op which extracts a fixed size slice from the input image - /// and does not allow resizing or aspect ratio change. - /// - /// Returns a tensor with `crops` from the input `image` at positions defined at the - /// bounding box locations in `boxes`. The cropped boxes are all resized (with - /// bilinear or nearest neighbor interpolation) to a fixed - /// `size = [crop_height, crop_width]`. The result is a 4-D tensor - /// `[num_boxes, crop_height, crop_width, depth]`. The resizing is corner aligned. - /// In particular, if `boxes = [[0, 0, 1, 1]]`, the method will give identical - /// results to using `tf.image.resize_bilinear()` or - /// `tf.image.resize_nearest_neighbor()`(depends on the `method` argument) with - /// `align_corners=True`. - /// - /// - Parameters: - /// - image: A 4-D tensor of shape `[batch, image_height, image_width, depth]`. - /// Both `image_height` and `image_width` need to be positive. - /// - boxes: A 2-D tensor of shape `[num_boxes, 4]`. The `i`-th row of the tensor - /// specifies the coordinates of a box in the `box_ind[i]` image and is specified - /// in normalized coordinates `[y1, x1, y2, x2]`. A normalized coordinate value of - /// `y` is mapped to the image coordinate at `y * (image_height - 1)`, so as the - /// `[0, 1]` interval of normalized image height is mapped to - /// `[0, image_height - 1]` in image height coordinates. We do allow `y1` > `y2`, in - /// which case the sampled crop is an up-down flipped version of the original - /// image. The width dimension is treated similarly. Normalized coordinates - /// outside the `[0, 1]` range are allowed, in which case we use - /// `extrapolation_value` to extrapolate the input image values. - /// - box_ind: A 1-D tensor of shape `[num_boxes]` with int32 values in `[0, batch)`. - /// The value of `box_ind[i]` specifies the image that the `i`-th box refers to. - /// - crop_size: A 1-D tensor of 2 elements, `size = [crop_height, crop_width]`. All - /// cropped image patches are resized to this size. The aspect ratio of the image - /// content is not preserved. Both `crop_height` and `crop_width` need to be - /// positive. - /// - /// - Attrs: - /// - method: A string specifying the sampling method for resizing. It can be either - /// `"bilinear"` or `"nearest"` and default to `"bilinear"`. Currently two sampling - /// methods are supported: Bilinear and Nearest Neighbor. - /// - extrapolation_value: Value used for extrapolation, when applicable. - /// - /// - Output crops: A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`. 
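The corner-aligned mapping described above sends a normalized coordinate `y` to pixel coordinate `y * (image_height - 1)`; a crop of height `crop_height` then samples evenly between the mapped `y1` and `y2`. A one-axis sketch (`sampleCoordinates` is a hypothetical helper):

```swift
// Pixel coordinates sampled along one axis by a corner-aligned crop.
func sampleCoordinates(y1: Float, y2: Float, imageHeight: Int, cropHeight: Int) -> [Float] {
  let lo = y1 * Float(imageHeight - 1)
  let hi = y2 * Float(imageHeight - 1)
  // A single-row crop samples the midpoint of the box.
  guard cropHeight > 1 else { return [(lo + hi) / 2] }
  let step = (hi - lo) / Float(cropHeight - 1)
  return (0..<cropHeight).map { lo + Float($0) * step }
}

print(sampleCoordinates(y1: 0, y2: 1, imageHeight: 5, cropHeight: 3))  // [0.0, 2.0, 4.0]
print(sampleCoordinates(y1: 1, y2: 0, imageHeight: 5, cropHeight: 3))  // flipped: [4.0, 2.0, 0.0]
```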
- @inlinable @inline(__always) - public static func cropAndResize( - image: Tensor, - boxes: Tensor, - boxInd: Tensor, - cropSize: Tensor, - method: Method = .bilinear, - extrapolationValue: Double = 0 - ) -> Tensor { - switch commonBackend( - commonBackend( - commonBackend(image.handle.backend, boxes.handle.backend), boxInd.handle.backend), - cropSize.handle.backend) - { - case .XLA: - let output_device = cropSize.device - let image = Tensor(copying: image, to: .defaultTFEager) - let boxes = Tensor(copying: boxes, to: .defaultTFEager) - let boxInd = Tensor(copying: boxInd, to: .defaultTFEager) - let cropSize = Tensor(copying: cropSize, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.cropAndResize( - image: image, boxes: boxes, boxInd: boxInd, cropSize: cropSize, method: method, - extrapolationValue: extrapolationValue), to: output_device) - case .TF_EAGER: - return _RawTFEager.cropAndResize( - image: image, boxes: boxes, boxInd: boxInd, cropSize: cropSize, method: method, - extrapolationValue: extrapolationValue) - } - - } - - /// Computes the gradient of the crop_and_resize op wrt the input boxes tensor. - /// - /// - Parameters: - /// - grads: A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`. - /// - image: A 4-D tensor of shape `[batch, image_height, image_width, depth]`. - /// Both `image_height` and `image_width` need to be positive. - /// - boxes: A 2-D tensor of shape `[num_boxes, 4]`. The `i`-th row of the tensor - /// specifies the coordinates of a box in the `box_ind[i]` image and is specified - /// in normalized coordinates `[y1, x1, y2, x2]`. A normalized coordinate value of - /// `y` is mapped to the image coordinate at `y * (image_height - 1)`, so as the - /// `[0, 1]` interval of normalized image height is mapped to - /// `[0, image_height - 1] in image height coordinates. We do allow y1 > y2, in - /// which case the sampled crop is an up-down flipped version of the original - /// image. The width dimension is treated similarly. Normalized coordinates - /// outside the `[0, 1]` range are allowed, in which case we use - /// `extrapolation_value` to extrapolate the input image values. - /// - box_ind: A 1-D tensor of shape `[num_boxes]` with int32 values in `[0, batch)`. - /// The value of `box_ind[i]` specifies the image that the `i`-th box refers to. - /// - /// - Attr method: A string specifying the interpolation method. Only 'bilinear' is - /// supported for now. - /// - /// - Output output: A 2-D tensor of shape `[num_boxes, 4]`. - @inlinable @inline(__always) - public static func cropAndResizeGradBoxes( - grads: Tensor, - image: Tensor, - boxes: Tensor, - boxInd: Tensor, - method: Method1 = .bilinear - ) -> Tensor { - switch commonBackend( - commonBackend( - commonBackend(grads.handle.backend, image.handle.backend), boxes.handle.backend), - boxInd.handle.backend) - { - case .XLA: - let output_device = boxInd.device - let grads = Tensor(copying: grads, to: .defaultTFEager) - let image = Tensor(copying: image, to: .defaultTFEager) - let boxes = Tensor(copying: boxes, to: .defaultTFEager) - let boxInd = Tensor(copying: boxInd, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.cropAndResizeGradBoxes( - grads: grads, image: image, boxes: boxes, boxInd: boxInd, method: method), - to: output_device) - case .TF_EAGER: - return _RawTFEager.cropAndResizeGradBoxes( - grads: grads, image: image, boxes: boxes, boxInd: boxInd, method: method) - } - - } - - /// Computes the gradient of the crop_and_resize op wrt the input image tensor. 
- /// - /// - Parameters: - /// - grads: A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`. - /// - boxes: A 2-D tensor of shape `[num_boxes, 4]`. The `i`-th row of the tensor - /// specifies the coordinates of a box in the `box_ind[i]` image and is specified - /// in normalized coordinates `[y1, x1, y2, x2]`. A normalized coordinate value of - /// `y` is mapped to the image coordinate at `y * (image_height - 1)`, so as the - /// `[0, 1]` interval of normalized image height is mapped to - /// `[0, image_height - 1] in image height coordinates. We do allow y1 > y2, in - /// which case the sampled crop is an up-down flipped version of the original - /// image. The width dimension is treated similarly. Normalized coordinates - /// outside the `[0, 1]` range are allowed, in which case we use - /// `extrapolation_value` to extrapolate the input image values. - /// - box_ind: A 1-D tensor of shape `[num_boxes]` with int32 values in `[0, batch)`. - /// The value of `box_ind[i]` specifies the image that the `i`-th box refers to. - /// - image_size: A 1-D tensor with value `[batch, image_height, image_width, depth]` - /// containing the original image size. Both `image_height` and `image_width` need - /// to be positive. - /// - /// - Attr method: A string specifying the interpolation method. Only 'bilinear' is - /// supported for now. - /// - /// - Output output: A 4-D tensor of shape `[batch, image_height, image_width, depth]`. - @inlinable @inline(__always) - public static func cropAndResizeGradImage( - grads: Tensor, - boxes: Tensor, - boxInd: Tensor, - imageSize: Tensor, - method: Method = .bilinear - ) -> Tensor { - switch commonBackend( - commonBackend( - commonBackend(grads.handle.backend, boxes.handle.backend), boxInd.handle.backend), - imageSize.handle.backend) - { - case .XLA: - let output_device = imageSize.device - let grads = Tensor(copying: grads, to: .defaultTFEager) - let boxes = Tensor(copying: boxes, to: .defaultTFEager) - let boxInd = Tensor(copying: boxInd, to: .defaultTFEager) - let imageSize = Tensor(copying: imageSize, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.cropAndResizeGradImage( - grads: grads, boxes: boxes, boxInd: boxInd, imageSize: imageSize, method: method), - to: output_device) - case .TF_EAGER: - return _RawTFEager.cropAndResizeGradImage( - grads: grads, boxes: boxes, boxInd: boxInd, imageSize: imageSize, method: method) - } - - } - - /// Compute the pairwise cross product. - /// - /// `a` and `b` must be the same shape; they can either be simple 3-element vectors, - /// or any shape where the innermost dimension is 3. In the latter case, each pair - /// of corresponding 3-element vectors is cross-multiplied independently. - /// - /// - Parameters: - /// - a: A tensor containing 3-element vectors. - /// - b: Another tensor, of same type and shape as `a`. - /// - /// - Output product: Pairwise cross product of the vectors in `a` and `b`. - @inlinable @inline(__always) - public static func cross( - _ a: Tensor, - _ b: Tensor - ) -> Tensor { - switch commonBackend(a.handle.backend, b.handle.backend) { - case .XLA: - let output_device = b.device - let a = Tensor(copying: a, to: .defaultTFEager) - let b = Tensor(copying: b, to: .defaultTFEager) - return Tensor(copying: _RawTFEager.cross(a, b), to: output_device) - case .TF_EAGER: - return _RawTFEager.cross(a, b) - } - - } - - /// An Op to sum inputs across replicated TPU instances. - /// - /// Each instance supplies its own input. 
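For one pair of 3-element vectors, the product `cross` computes is the textbook formula; a minimal standalone reference:

```swift
// Cross product of two 3-vectors; `cross` applies this to every innermost
// 3-element slice independently.
func cross3(_ a: [Float], _ b: [Float]) -> [Float] {
  precondition(a.count == 3 && b.count == 3, "innermost dimension must be 3")
  return [
    a[1] * b[2] - a[2] * b[1],
    a[2] * b[0] - a[0] * b[2],
    a[0] * b[1] - a[1] * b[0],
  ]
}

print(cross3([1, 0, 0], [0, 1, 0]))  // [0.0, 0.0, 1.0]
```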
- /// - /// For example, suppose there are 8 TPU instances: `[A, B, C, D, E, F, G, H]`. - /// Passing group_assignment=`[[0,2,4,6],[1,3,5,7]]` sets `A, C, E, G` as group 0, - /// and `B, D, F, H` as group 1. Thus we get the outputs: - /// `[A+C+E+G, B+D+F+H, A+C+E+G, B+D+F+H, A+C+E+G, B+D+F+H, A+C+E+G, B+D+F+H]`. - /// - /// - Parameters: - /// - input: The local input to the sum. - /// - group_assignment: An int32 tensor with shape - /// [num_groups, num_replicas_per_group]. `group_assignment[i]` represents the - /// replica ids in the ith subgroup. - /// - /// - Attr T: The type of elements to be summed. - /// - /// - Output output: The sum of all the distributed inputs. - @inlinable @inline(__always) - public static func crossReplicaSum( - _ input: Tensor, - groupAssignment: Tensor - ) -> Tensor { - switch commonBackend(input.handle.backend, groupAssignment.handle.backend) { - case .XLA: - let output_device = groupAssignment.device - let input = Tensor(copying: input, to: .defaultTFEager) - let groupAssignment = Tensor(copying: groupAssignment, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.crossReplicaSum(input, groupAssignment: groupAssignment), - to: output_device) - case .TF_EAGER: - return _RawTFEager.crossReplicaSum(input, groupAssignment: groupAssignment) - } - - } - - /// A RNN backed by cuDNN. - /// - /// Computes the RNN from the input and initial states, with respect to the params - /// buffer. - /// - /// rnn_mode: Indicates the type of the RNN model. - /// input_mode: Indicate whether there is a linear projection between the input and - /// the actual computation before the first layer. 'skip_input' is only allowed - /// when input_size == num_units; 'auto_select' implies 'skip_input' when - /// input_size == num_units; otherwise, it implies 'linear_input'. - /// direction: Indicates whether a bidirectional model will be used. Should be - /// "unidirectional" or "bidirectional". - /// dropout: Dropout probability. When set to 0., dropout is disabled. - /// seed: The 1st part of a seed to initialize dropout. - /// seed2: The 2nd part of a seed to initialize dropout. - /// input: A 3-D tensor with the shape of [seq_length, batch_size, input_size]. - /// input_h: A 3-D tensor with the shape of [num_layer * dir, batch_size, - /// num_units]. - /// input_c: For LSTM, a 3-D tensor with the shape of - /// [num_layer * dir, batch, num_units]. For other models, it is ignored. - /// params: A 1-D tensor that contains the weights and biases in an opaque layout. - /// The size must be created through CudnnRNNParamsSize, and initialized - /// separately. Note that they might not be compatible across different - /// generations. So it is a good idea to save and restore - /// output: A 3-D tensor with the shape of [seq_length, batch_size, - /// dir * num_units]. - /// output_h: The same shape has input_h. - /// output_c: The same shape as input_c for LSTM. An empty tensor for other models. - /// is_training: Indicates whether this operation is used for inferenece or - /// training. - /// reserve_space: An opaque tensor that can be used in backprop calculation. It - /// is only produced if is_training is false. 
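The `group_assignment` semantics above are easy to model with plain arrays: every replica in a group receives the sum over that group. A standalone sketch reproducing the doc's 8-replica example (hypothetical helper, not the TPU op):

```swift
// Each group's members are replaced by the sum over that group.
func crossReplicaSum(_ inputs: [Float], groupAssignment: [[Int]]) -> [Float] {
  var output = inputs
  for group in groupAssignment {
    let total = group.reduce(Float(0)) { $0 + inputs[$1] }
    for replica in group { output[replica] = total }
  }
  return output
}

// 8 replicas grouped as [[0,2,4,6],[1,3,5,7]], as in the doc comment.
print(crossReplicaSum([1, 2, 3, 4, 5, 6, 7, 8], groupAssignment: [[0, 2, 4, 6], [1, 3, 5, 7]]))
// [16.0, 20.0, 16.0, 20.0, 16.0, 20.0, 16.0, 20.0]
```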
- @inlinable @inline(__always) - public static func cudnnRNN( - _ input: Tensor, - inputH: Tensor, - inputC: Tensor, - params: Tensor, - rnnMode: RnnMode = .lstm, - inputMode: InputMode = .linearInput, - direction: Direction = .unidirectional, - dropout: Double = 0, - seed: Int64 = 0, - seed2: Int64 = 0, - isTraining: Bool = true - ) -> (output: Tensor, outputH: Tensor, outputC: Tensor, reserveSpace: Tensor) { - _RawTFEager.cudnnRNN( - input, inputH: inputH, inputC: inputC, params: params, rnnMode: rnnMode, - inputMode: inputMode, direction: direction, dropout: dropout, seed: seed, seed2: seed2, - isTraining: isTraining) - } - - /// Backprop step of CudnnRNN. - /// - /// Compute the backprop of both data and weights in a RNN. - /// - /// rnn_mode: Indicates the type of the RNN model. - /// input_mode: Indicate whether there is a linear projection between the input and - /// the actual computation before the first layer. 'skip_input' is only allowed - /// when input_size == num_units; 'auto_select' implies 'skip_input' when - /// input_size == num_units; otherwise, it implies 'linear_input'. - /// direction: Indicates whether a bidirectional model will be used. Should be - /// "unidirectional" or "bidirectional". - /// dropout: Dropout probability. When set to 0., dropout is disabled. - /// seed: The 1st part of a seed to initialize dropout. - /// seed2: The 2nd part of a seed to initialize dropout. - /// input: A 3-D tensor with the shape of [seq_length, batch_size, input_size]. - /// input_h: A 3-D tensor with the shape of [num_layer * dir, batch_size, - /// num_units]. - /// input_c: For LSTM, a 3-D tensor with the shape of - /// [num_layer * dir, batch, num_units]. For other models, it is ignored. - /// params: A 1-D tensor that contains the weights and biases in an opaque layout. - /// The size must be created through CudnnRNNParamsSize, and initialized - /// separately. Note that they might not be compatible across different - /// generations. So it is a good idea to save and restore - /// output: A 3-D tensor with the shape of [seq_length, batch_size, - /// dir * num_units]. - /// output_h: The same shape has input_h. - /// output_c: The same shape as input_c for LSTM. An empty tensor for other models. - /// output_backprop: A 3-D tensor with the same shape as output in the forward pass. - /// output_h_backprop: A 3-D tensor with the same shape as output_h in the forward - /// pass. - /// output_c_backprop: A 3-D tensor with the same shape as output_c in the forward - /// pass. - /// reserve_space: The same reserve_space produced in for forward operation. - /// input_backprop: The backprop to input in the forward pass. Has the same shape - /// as input. - /// input_h_backprop: The backprop to input_h in the forward pass. Has the same - /// shape as input_h. - /// input_c_backprop: The backprop to input_c in the forward pass. Has the same - /// shape as input_c. - /// params_backprop: The backprop to the params buffer in the forward pass. Has the - /// same shape as params. 
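Several shapes in the CudnnRNN comments hinge on the direction multiplier `dir = (direction == bidirectional) ? 2 : 1`. A small bookkeeping sketch of the resulting time-major output and hidden-state shapes (`RNNDirection` and `rnnOutputShape` are stand-ins, not the library's types):

```swift
enum RNNDirection { case unidirectional, bidirectional }

// Time-major shapes from the doc comments: output is
// [seq_length, batch_size, dir * num_units]; the hidden state is
// [num_layers * dir, batch_size, num_units].
func rnnOutputShape(
  seqLength: Int, batchSize: Int, numUnits: Int, numLayers: Int, direction: RNNDirection
) -> (output: [Int], hidden: [Int]) {
  let dir = direction == .bidirectional ? 2 : 1
  return (
    output: [seqLength, batchSize, dir * numUnits],
    hidden: [numLayers * dir, batchSize, numUnits]
  )
}

print(rnnOutputShape(seqLength: 10, batchSize: 4, numUnits: 32, numLayers: 2, direction: .bidirectional))
// (output: [10, 4, 64], hidden: [4, 4, 32])
```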
- @inlinable @inline(__always) - public static func cudnnRNNBackprop( - _ input: Tensor, - inputH: Tensor, - inputC: Tensor, - params: Tensor, - output: Tensor, - outputH: Tensor, - outputC: Tensor, - outputBackprop: Tensor, - outputHBackprop: Tensor, - outputCBackprop: Tensor, - reserveSpace: Tensor, - rnnMode: RnnMode = .lstm, - inputMode: InputMode = .linearInput, - direction: Direction = .unidirectional, - dropout: Double = 0, - seed: Int64 = 0, - seed2: Int64 = 0 - ) -> ( - inputBackprop: Tensor, inputHBackprop: Tensor, inputCBackprop: Tensor, - paramsBackprop: Tensor - ) { - _RawTFEager.cudnnRNNBackprop( - input, inputH: inputH, inputC: inputC, params: params, output: output, outputH: outputH, - outputC: outputC, outputBackprop: outputBackprop, outputHBackprop: outputHBackprop, - outputCBackprop: outputCBackprop, reserveSpace: reserveSpace, rnnMode: rnnMode, - inputMode: inputMode, direction: direction, dropout: dropout, seed: seed, seed2: seed2) - } - - /// Backprop step of CudnnRNN. - /// - /// Compute the backprop of both data and weights in a RNN. Takes an extra - /// "host_reserved" inupt than CudnnRNNBackprop, which is used to determine RNN - /// cudnnRNNAlgo_t and cudnnMathType_t. - /// - /// rnn_mode: Indicates the type of the RNN model. - /// input_mode: Indicates whether there is a linear projection between the input and - /// the actual computation before the first layer. 'skip_input' is only allowed - /// when input_size == num_units; 'auto_select' implies 'skip_input' when - /// input_size == num_units; otherwise, it implies 'linear_input'. - /// direction: Indicates whether a bidirectional model will be used. Should be - /// "unidirectional" or "bidirectional". - /// dropout: Dropout probability. When set to 0., dropout is disabled. - /// seed: The 1st part of a seed to initialize dropout. - /// seed2: The 2nd part of a seed to initialize dropout. - /// input: A 3-D tensor with the shape of [seq_length, batch_size, input_size]. - /// input_h: A 3-D tensor with the shape of [num_layer * dir, batch_size, - /// num_units]. - /// input_c: For LSTM, a 3-D tensor with the shape of - /// [num_layer * dir, batch, num_units]. For other models, it is ignored. - /// params: A 1-D tensor that contains the weights and biases in an opaque layout. - /// The size must be created through CudnnRNNParamsSize, and initialized - /// separately. Note that they might not be compatible across different - /// generations. So it is a good idea to save and restore - /// output: A 3-D tensor with the shape of [seq_length, batch_size, - /// dir * num_units]. - /// output_h: The same shape has input_h. - /// output_c: The same shape as input_c for LSTM. An empty tensor for other models. - /// output_backprop: A 3-D tensor with the same shape as output in the forward pass. - /// output_h_backprop: A 3-D tensor with the same shape as output_h in the forward - /// pass. - /// output_c_backprop: A 3-D tensor with the same shape as output_c in the forward - /// pass. - /// reserve_space: The same reserve_space produced in the forward operation. - /// host_reserved: The same host_reserved produced in the forward operation. - /// input_backprop: The backprop to input in the forward pass. Has the same shape - /// as input. - /// input_h_backprop: The backprop to input_h in the forward pass. Has the same - /// shape as input_h. - /// input_c_backprop: The backprop to input_c in the forward pass. Has the same - /// shape as input_c. - /// params_backprop: The backprop to the params buffer in the forward pass. 
Has the - /// same shape as params. - @inlinable @inline(__always) - public static func cudnnRNNBackpropV2( - _ input: Tensor, - inputH: Tensor, - inputC: Tensor, - params: Tensor, - output: Tensor, - outputH: Tensor, - outputC: Tensor, - outputBackprop: Tensor, - outputHBackprop: Tensor, - outputCBackprop: Tensor, - reserveSpace: Tensor, - hostReserved: Tensor, - rnnMode: RnnMode = .lstm, - inputMode: InputMode = .linearInput, - direction: Direction = .unidirectional, - dropout: Double = 0, - seed: Int64 = 0, - seed2: Int64 = 0 - ) -> ( - inputBackprop: Tensor, inputHBackprop: Tensor, inputCBackprop: Tensor, - paramsBackprop: Tensor - ) { - _RawTFEager.cudnnRNNBackpropV2( - input, inputH: inputH, inputC: inputC, params: params, output: output, outputH: outputH, - outputC: outputC, outputBackprop: outputBackprop, outputHBackprop: outputHBackprop, - outputCBackprop: outputCBackprop, reserveSpace: reserveSpace, hostReserved: hostReserved, - rnnMode: rnnMode, inputMode: inputMode, direction: direction, dropout: dropout, seed: seed, - seed2: seed2) - } - - /// Backprop step of CudnnRNNV3. - /// - /// Compute the backprop of both data and weights in a RNN. Takes an extra - /// "sequence_lengths" input than CudnnRNNBackprop. - /// - /// rnn_mode: Indicates the type of the RNN model. - /// input_mode: Indicates whether there is a linear projection between the input and - /// the actual computation before the first layer. 'skip_input' is only allowed - /// when input_size == num_units; 'auto_select' implies 'skip_input' when - /// input_size == num_units; otherwise, it implies 'linear_input'. - /// direction: Indicates whether a bidirectional model will be used. Should be - /// "unidirectional" or "bidirectional". - /// dropout: Dropout probability. When set to 0., dropout is disabled. - /// seed: The 1st part of a seed to initialize dropout. - /// seed2: The 2nd part of a seed to initialize dropout. - /// input: If time_major is true, this is a 3-D tensor with the shape of - /// [seq_length, batch_size, input_size]. If time_major is false, the shape is - /// [batch_size, seq_length, input_size]. - /// input_h: If time_major is true, this is a 3-D tensor with the shape of - /// [num_layer * dir, batch_size, num_units]. If time_major is false, the shape - /// is [batch_size, num_layer * dir, num_units]. - /// input_c: For LSTM, a 3-D tensor with the shape of - /// [num_layer * dir, batch, num_units]. For other models, it is ignored. - /// params: A 1-D tensor that contains the weights and biases in an opaque layout. - /// The size must be created through CudnnRNNParamsSize, and initialized - /// separately. Note that they might not be compatible across different - /// generations. So it is a good idea to save and restore - /// sequence_lengths: a vector of lengths of each input sequence. - /// output: If time_major is true, this is a 3-D tensor with the shape of - /// [seq_length, batch_size, dir * num_units]. If time_major is false, the - /// shape is [batch_size, seq_length, dir * num_units]. - /// output_h: The same shape has input_h. - /// output_c: The same shape as input_c for LSTM. An empty tensor for other models. - /// output_backprop: A 3-D tensor with the same shape as output in the forward pass. - /// output_h_backprop: A 3-D tensor with the same shape as output_h in the forward - /// pass. - /// output_c_backprop: A 3-D tensor with the same shape as output_c in the forward - /// pass. - /// time_major: Indicates whether the input/output format is time major or batch - /// major. 
- /// reserve_space: The same reserve_space produced in the forward operation. - /// input_backprop: The backprop to input in the forward pass. Has the same shape - /// as input. - /// input_h_backprop: The backprop to input_h in the forward pass. Has the same - /// shape as input_h. - /// input_c_backprop: The backprop to input_c in the forward pass. Has the same - /// shape as input_c. - /// params_backprop: The backprop to the params buffer in the forward pass. Has the - /// same shape as params. - @inlinable @inline(__always) - public static func cudnnRNNBackpropV3( - _ input: Tensor, - inputH: Tensor, - inputC: Tensor, - params: Tensor, - sequenceLengths: Tensor, - output: Tensor, - outputH: Tensor, - outputC: Tensor, - outputBackprop: Tensor, - outputHBackprop: Tensor, - outputCBackprop: Tensor, - reserveSpace: Tensor, - hostReserved: Tensor, - rnnMode: RnnMode = .lstm, - inputMode: InputMode = .linearInput, - direction: Direction = .unidirectional, - dropout: Double = 0, - seed: Int64 = 0, - seed2: Int64 = 0, - numProj: Int64 = 0, - timeMajor: Bool = true - ) -> ( - inputBackprop: Tensor, inputHBackprop: Tensor, inputCBackprop: Tensor, - paramsBackprop: Tensor - ) { - _RawTFEager.cudnnRNNBackpropV3( - input, inputH: inputH, inputC: inputC, params: params, sequenceLengths: sequenceLengths, - output: output, outputH: outputH, outputC: outputC, outputBackprop: outputBackprop, - outputHBackprop: outputHBackprop, outputCBackprop: outputCBackprop, - reserveSpace: reserveSpace, hostReserved: hostReserved, rnnMode: rnnMode, - inputMode: inputMode, direction: direction, dropout: dropout, seed: seed, seed2: seed2, - numProj: numProj, timeMajor: timeMajor) - } - - /// Converts CudnnRNN params from canonical form to usable form. - /// - /// Writes a set of weights into the opaque params buffer so they can be used in - /// upcoming training or inferences. - /// - /// Note that the params buffer may not be compatible across different GPUs. So any - /// save and restoration should be converted to and from the canonical weights and - /// biases. - /// - /// num_layers: Specifies the number of layers in the RNN model. - /// num_units: Specifies the size of the hidden state. - /// input_size: Specifies the size of the input state. - /// weights: the canonical form of weights that can be used for saving - /// and restoration. They are more likely to be compatible across different - /// generations. - /// biases: the canonical form of biases that can be used for saving - /// and restoration. They are more likely to be compatible across different - /// generations. - /// num_params: number of parameter sets for all layers. - /// Each layer may contain multiple parameter sets, with each set consisting of - /// a weight matrix and a bias vector. - /// rnn_mode: Indicates the type of the RNN model. - /// input_mode: Indicate whether there is a linear projection between the input and - /// The actual computation before the first layer. 'skip_input' is only allowed - /// when input_size == num_units; 'auto_select' implies 'skip_input' when - /// input_size == num_units; otherwise, it implies 'linear_input'. - /// direction: Indicates whether a bidirectional model will be used. - /// dir = (direction == bidirectional) ? 2 : 1 - /// dropout: dropout probability. When set to 0., dropout is disabled. - /// seed: the 1st part of a seed to initialize dropout. - /// seed2: the 2nd part of a seed to initialize dropout. 
- @inlinable @inline(__always) - public static func cudnnRNNCanonicalToParams( - numLayers: Tensor, - numUnits: Tensor, - inputSize: Tensor, - weights: [Tensor], - biases: [Tensor], - rnnMode: RnnMode = .lstm, - inputMode: InputMode = .linearInput, - direction: Direction = .unidirectional, - dropout: Double = 0, - seed: Int64 = 0, - seed2: Int64 = 0 - ) -> Tensor { - switch commonBackend( - commonBackend( - commonBackend( - commonBackend(numLayers.handle.backend, numUnits.handle.backend), - inputSize.handle.backend), commonBackend(weights)), commonBackend(biases)) - { - case .XLA: - let output_device = inputSize.device - let numLayers = Tensor(copying: numLayers, to: .defaultTFEager) - let numUnits = Tensor(copying: numUnits, to: .defaultTFEager) - let inputSize = Tensor(copying: inputSize, to: .defaultTFEager) - let weights = [Tensor](copying: weights, to: .defaultTFEager) - let biases = [Tensor](copying: biases, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.cudnnRNNCanonicalToParams( - numLayers: numLayers, numUnits: numUnits, inputSize: inputSize, weights: weights, - biases: biases, rnnMode: rnnMode, inputMode: inputMode, direction: direction, - dropout: dropout, seed: seed, seed2: seed2), to: output_device) - case .TF_EAGER: - return _RawTFEager.cudnnRNNCanonicalToParams( - numLayers: numLayers, numUnits: numUnits, inputSize: inputSize, weights: weights, - biases: biases, rnnMode: rnnMode, inputMode: inputMode, direction: direction, - dropout: dropout, seed: seed, seed2: seed2) - } - - } - - /// Converts CudnnRNN params from canonical form to usable form. It supports the projection in LSTM. - /// - /// Writes a set of weights into the opaque params buffer so they can be used in - /// upcoming training or inferences. - /// - /// Note that the params buffer may not be compatible across different GPUs. So any - /// save and restoration should be converted to and from the canonical weights and - /// biases. - /// - /// num_layers: Specifies the number of layers in the RNN model. - /// num_units: Specifies the size of the hidden state. - /// input_size: Specifies the size of the input state. - /// weights: the canonical form of weights that can be used for saving - /// and restoration. They are more likely to be compatible across different - /// generations. - /// biases: the canonical form of biases that can be used for saving - /// and restoration. They are more likely to be compatible across different - /// generations. - /// num_params_weights: number of weight parameter matrix for all layers. - /// num_params_biases: number of bias parameter vector for all layers. - /// rnn_mode: Indicates the type of the RNN model. - /// input_mode: Indicate whether there is a linear projection between the input and - /// The actual computation before the first layer. 'skip_input' is only allowed - /// when input_size == num_units; 'auto_select' implies 'skip_input' when - /// input_size == num_units; otherwise, it implies 'linear_input'. - /// direction: Indicates whether a bidirectional model will be used. - /// dir = (direction == bidirectional) ? 2 : 1 - /// dropout: dropout probability. When set to 0., dropout is disabled. - /// seed: the 1st part of a seed to initialize dropout. - /// seed2: the 2nd part of a seed to initialize dropout. - /// num_proj: The output dimensionality for the projection matrices. If None or 0, - /// no projection is performed. 
- @inlinable @inline(__always) - public static func cudnnRNNCanonicalToParamsV2( - numLayers: Tensor, - numUnits: Tensor, - inputSize: Tensor, - weights: [Tensor], - biases: [Tensor], - rnnMode: RnnMode = .lstm, - inputMode: InputMode = .linearInput, - direction: Direction = .unidirectional, - dropout: Double = 0, - seed: Int64 = 0, - seed2: Int64 = 0, - numProj: Int64 = 0 - ) -> Tensor { - switch commonBackend( - commonBackend( - commonBackend( - commonBackend(numLayers.handle.backend, numUnits.handle.backend), - inputSize.handle.backend), commonBackend(weights)), commonBackend(biases)) - { - case .XLA: - let output_device = inputSize.device - let numLayers = Tensor(copying: numLayers, to: .defaultTFEager) - let numUnits = Tensor(copying: numUnits, to: .defaultTFEager) - let inputSize = Tensor(copying: inputSize, to: .defaultTFEager) - let weights = [Tensor](copying: weights, to: .defaultTFEager) - let biases = [Tensor](copying: biases, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.cudnnRNNCanonicalToParamsV2( - numLayers: numLayers, numUnits: numUnits, inputSize: inputSize, weights: weights, - biases: biases, rnnMode: rnnMode, inputMode: inputMode, direction: direction, - dropout: dropout, seed: seed, seed2: seed2, numProj: numProj), to: output_device) - case .TF_EAGER: - return _RawTFEager.cudnnRNNCanonicalToParamsV2( - numLayers: numLayers, numUnits: numUnits, inputSize: inputSize, weights: weights, - biases: biases, rnnMode: rnnMode, inputMode: inputMode, direction: direction, - dropout: dropout, seed: seed, seed2: seed2, numProj: numProj) - } - - } - - /// Computes size of weights that can be used by a Cudnn RNN model. - /// - /// Return the params size that can be used by the Cudnn RNN model. Subsequent - /// weight allocation and initialization should use this size. - /// - /// num_layers: Specifies the number of layers in the RNN model. - /// num_units: Specifies the size of the hidden state. - /// input_size: Specifies the size of the input state. - /// rnn_mode: Indicates the type of the RNN model. - /// input_mode: Indicate whether there is a linear projection between the input and - /// The actual computation before the first layer. 'skip_input' is only allowed - /// when input_size == num_units; 'auto_select' implies 'skip_input' when - /// input_size == num_units; otherwise, it implies 'linear_input'. - /// direction: Indicates whether a bidirectional model will be used. - /// dir = (direction == bidirectional) ? 2 : 1 - /// dropout: dropout probability. When set to 0., dropout is disabled. - /// seed: the 1st part of a seed to initialize dropout. - /// seed2: the 2nd part of a seed to initialize dropout. - /// params_size: The size of the params buffer that should be allocated and - /// initialized for this RNN model. Note that this params buffer may not be - /// compatible across GPUs. Please use CudnnRNNParamsWeights and - /// CudnnRNNParamsBiases to save and restore them in a way that is compatible - /// across different runs. 
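For intuition about the size `cudnnRNNParamsSize` reports, here is a rough standalone count for a unidirectional LSTM, assuming the standard cuDNN layout of four gates, each with an input-to-hidden matrix, a hidden-to-hidden matrix, and two bias vectors. This is an illustration only; the real buffer is opaque and its exact size is backend-defined:

```swift
// Rough LSTM parameter count under the assumed four-gate cuDNN layout.
func lstmParamCount(numLayers: Int, numUnits: Int, inputSize: Int) -> Int {
  var total = 0
  for layer in 0..<numLayers {
    // Layers after the first consume the previous layer's hidden state.
    let layerInput = layer == 0 ? inputSize : numUnits
    let weights = 4 * numUnits * (layerInput + numUnits)
    let biases = 8 * numUnits  // two bias vectors per gate
    total += weights + biases
  }
  return total
}

print(lstmParamCount(numLayers: 1, numUnits: 128, inputSize: 64))  // 99328
```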
- @inlinable @inline(__always) - public static func cudnnRNNParamsSize( - numLayers: Tensor, - numUnits: Tensor, - inputSize: Tensor, - t: TensorDataType, - rnnMode: RnnMode = .lstm, - inputMode: InputMode = .linearInput, - direction: Direction = .unidirectional, - dropout: Double = 0, - seed: Int64 = 0, - seed2: Int64 = 0, - numProj: Int64 = 0 - ) -> Tensor { - switch commonBackend( - commonBackend(numLayers.handle.backend, numUnits.handle.backend), inputSize.handle.backend) - { - case .XLA: - let output_device = inputSize.device - let numLayers = Tensor(copying: numLayers, to: .defaultTFEager) - let numUnits = Tensor(copying: numUnits, to: .defaultTFEager) - let inputSize = Tensor(copying: inputSize, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.cudnnRNNParamsSize( - numLayers: numLayers, numUnits: numUnits, inputSize: inputSize, t: t, rnnMode: rnnMode, - inputMode: inputMode, direction: direction, dropout: dropout, seed: seed, seed2: seed2, - numProj: numProj), to: output_device) - case .TF_EAGER: - return _RawTFEager.cudnnRNNParamsSize( - numLayers: numLayers, numUnits: numUnits, inputSize: inputSize, t: t, rnnMode: rnnMode, - inputMode: inputMode, direction: direction, dropout: dropout, seed: seed, seed2: seed2, - numProj: numProj) - } - - } - - /// Retrieves CudnnRNN params in canonical form. - /// - /// Retrieves a set of weights from the opaque params buffer that can be saved and - /// restored in a way compatible with future runs. - /// - /// Note that the params buffer may not be compatible across different GPUs. So any - /// save and restoration should be converted to and from the canonical weights and - /// biases. - /// - /// num_layers: Specifies the number of layers in the RNN model. - /// num_units: Specifies the size of the hidden state. - /// input_size: Specifies the size of the input state. - /// num_params: number of parameter sets for all layers. - /// Each layer may contain multiple parameter sets, with each set consisting of - /// a weight matrix and a bias vector. - /// weights: the canonical form of weights that can be used for saving - /// and restoration. They are more likely to be compatible across different - /// generations. - /// biases: the canonical form of biases that can be used for saving - /// and restoration. They are more likely to be compatible across different - /// generations. - /// rnn_mode: Indicates the type of the RNN model. - /// input_mode: Indicate whether there is a linear projection between the input and - /// The actual computation before the first layer. 'skip_input' is only allowed - /// when input_size == num_units; 'auto_select' implies 'skip_input' when - /// input_size == num_units; otherwise, it implies 'linear_input'. - /// direction: Indicates whether a bidirectional model will be used. - /// dir = (direction == bidirectional) ? 2 : 1 - /// dropout: dropout probability. When set to 0., dropout is disabled. - /// seed: the 1st part of a seed to initialize dropout. - /// seed2: the 2nd part of a seed to initialize dropout. 
-  @inlinable @inline(__always)
-  public static func cudnnRNNParamsToCanonical(
-    numLayers: Tensor,
-    numUnits: Tensor,
-    inputSize: Tensor,
-    params: Tensor,
-    numParams: Int64,
-    rnnMode: RnnMode = .lstm,
-    inputMode: InputMode = .linearInput,
-    direction: Direction = .unidirectional,
-    dropout: Double = 0,
-    seed: Int64 = 0,
-    seed2: Int64 = 0
-  ) -> (weights: [Tensor], biases: [Tensor]) {
-    _RawTFEager.cudnnRNNParamsToCanonical(
-      numLayers: numLayers, numUnits: numUnits, inputSize: inputSize, params: params,
-      numParams: numParams, rnnMode: rnnMode, inputMode: inputMode, direction: direction,
-      dropout: dropout, seed: seed, seed2: seed2)
+  }
+
+  /// Adjust the saturation of one or more images.
+  ///
+  /// `images` is a tensor of at least 3 dimensions. The last dimension is
+  /// interpreted as channels, and must be three.
+  ///
+  /// The input image is considered in the RGB colorspace. Conceptually, the RGB
+  /// colors are first mapped into HSV. A scale is then applied to all the saturation
+  /// values, and the result is mapped back to the RGB colorspace.
+  ///
+  /// - Parameters:
+  ///   - images: Images to adjust. At least 3-D.
+  ///   - scale: A float scale to apply to the saturation.
+  ///
+  /// - Output output: The saturation-adjusted image or images.
+  @inlinable @inline(__always)
+  public static func adjustSaturation(
+    images: Tensor,
+    scale: Tensor
+  ) -> Tensor {
+    switch commonBackend(images.handle.backend, scale.handle.backend) {
+    case .XLA:
+      let output_device = scale.device
+      let images = Tensor(copying: images, to: .defaultTFEager)
+      let scale = Tensor(copying: scale, to: .defaultTFEager)
+      return Tensor(
+        copying: _RawTFEager.adjustSaturation(images: images, scale: scale), to: output_device)
+    case .TF_EAGER:
+      return _RawTFEager.adjustSaturation(images: images, scale: scale)
    }

-  /// Retrieves CudnnRNN params in canonical form. It supports the projection in LSTM.
-  ///
-  /// Retrieves a set of weights from the opaque params buffer that can be saved and
-  /// restored in a way compatible with future runs.
-  ///
-  /// Note that the params buffer may not be compatible across different GPUs. So any
-  /// save and restoration should be converted to and from the canonical weights and
-  /// biases.
-  ///
-  /// num_layers: Specifies the number of layers in the RNN model.
-  /// num_units: Specifies the size of the hidden state.
-  /// input_size: Specifies the size of the input state.
-  /// num_params_weights: number of weight parameter matrix for all layers.
-  /// num_params_biases: number of bias parameter vector for all layers.
-  /// weights: the canonical form of weights that can be used for saving
-  ///     and restoration. They are more likely to be compatible across different
-  ///     generations.
-  /// biases: the canonical form of biases that can be used for saving
-  ///     and restoration. They are more likely to be compatible across different
-  ///     generations.
-  /// rnn_mode: Indicates the type of the RNN model.
-  /// input_mode: Indicate whether there is a linear projection between the input and
-  ///     The actual computation before the first layer. 'skip_input' is only allowed
-  ///     when input_size == num_units; 'auto_select' implies 'skip_input' when
-  ///     input_size == num_units; otherwise, it implies 'linear_input'.
-  /// direction: Indicates whether a bidirectional model will be used.
-  ///     dir = (direction == bidirectional) ? 2 : 1
-  /// dropout: dropout probability. When set to 0., dropout is disabled.
-  /// seed: the 1st part of a seed to initialize dropout.
- /// seed2: the 2nd part of a seed to initialize dropout. - /// num_proj: The output dimensionality for the projection matrices. If None or 0, - /// no projection is performed. - @inlinable @inline(__always) - public static func cudnnRNNParamsToCanonicalV2( - numLayers: Tensor, - numUnits: Tensor, - inputSize: Tensor, - params: Tensor, - numParamsWeights: Int64, - numParamsBiases: Int64, - rnnMode: RnnMode = .lstm, - inputMode: InputMode = .linearInput, - direction: Direction = .unidirectional, - dropout: Double = 0, - seed: Int64 = 0, - seed2: Int64 = 0, - numProj: Int64 = 0 - ) -> (weights: [Tensor], biases: [Tensor]) { - _RawTFEager.cudnnRNNParamsToCanonicalV2( - numLayers: numLayers, numUnits: numUnits, inputSize: inputSize, params: params, - numParamsWeights: numParamsWeights, numParamsBiases: numParamsBiases, rnnMode: rnnMode, - inputMode: inputMode, direction: direction, dropout: dropout, seed: seed, seed2: seed2, - numProj: numProj) + } + + /// Computes the "logical and" of elements across dimensions of a tensor. + /// + /// Reduces `input` along the dimensions given in `axis`. Unless + /// `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in + /// `axis`. If `keep_dims` is true, the reduced dimensions are + /// retained with length 1. + /// + /// - Parameters: + /// - input: The tensor to reduce. + /// - reduction_indices: The dimensions to reduce. Must be in the range + /// `[-rank(input), rank(input))`. + /// + /// - Attr keep_dims: If true, retain reduced dimensions with length 1. + /// + /// - Output output: The reduced tensor. + @inlinable @inline(__always) + public static func all( + _ input: Tensor, + reductionIndices: Tensor, + keepDims: Bool = false + ) -> Tensor { + switch commonBackend(input.handle.backend, reductionIndices.handle.backend) { + case .XLA: + return _RawXLA.all(input, reductionIndices: reductionIndices, keepDims: keepDims) + case .TF_EAGER: + return _RawTFEager.all(input, reductionIndices: reductionIndices, keepDims: keepDims) } - /// A RNN backed by cuDNN. - /// - /// Computes the RNN from the input and initial states, with respect to the params - /// buffer. Produces one extra output "host_reserved" than CudnnRNN. - /// - /// rnn_mode: Indicates the type of the RNN model. - /// input_mode: Indicates whether there is a linear projection between the input and - /// the actual computation before the first layer. 'skip_input' is only allowed - /// when input_size == num_units; 'auto_select' implies 'skip_input' when - /// input_size == num_units; otherwise, it implies 'linear_input'. - /// direction: Indicates whether a bidirectional model will be used. Should be - /// "unidirectional" or "bidirectional". - /// dropout: Dropout probability. When set to 0., dropout is disabled. - /// seed: The 1st part of a seed to initialize dropout. - /// seed2: The 2nd part of a seed to initialize dropout. - /// input: A 3-D tensor with the shape of [seq_length, batch_size, input_size]. - /// input_h: A 3-D tensor with the shape of [num_layer * dir, batch_size, - /// num_units]. - /// input_c: For LSTM, a 3-D tensor with the shape of - /// [num_layer * dir, batch, num_units]. For other models, it is ignored. - /// params: A 1-D tensor that contains the weights and biases in an opaque layout. - /// The size must be created through CudnnRNNParamsSize, and initialized - /// separately. Note that they might not be compatible across different - /// generations. 
So it is a good idea to save and restore - /// output: A 3-D tensor with the shape of [seq_length, batch_size, - /// dir * num_units]. - /// output_h: The same shape has input_h. - /// output_c: The same shape as input_c for LSTM. An empty tensor for other models. - /// is_training: Indicates whether this operation is used for inferenece or - /// training. - /// reserve_space: An opaque tensor that can be used in backprop calculation. It - /// is only produced if is_training is true. - /// host_reserved: An opaque tensor that can be used in backprop calculation. It is - /// only produced if is_training is true. It is output on host memory rather than - /// device memory. - @inlinable @inline(__always) - public static func cudnnRNNV2( - _ input: Tensor, - inputH: Tensor, - inputC: Tensor, - params: Tensor, - rnnMode: RnnMode = .lstm, - inputMode: InputMode = .linearInput, - direction: Direction = .unidirectional, - dropout: Double = 0, - seed: Int64 = 0, - seed2: Int64 = 0, - isTraining: Bool = true - ) -> ( - output: Tensor, outputH: Tensor, outputC: Tensor, reserveSpace: Tensor, - hostReserved: Tensor - ) { - _RawTFEager.cudnnRNNV2( - input, inputH: inputH, inputC: inputC, params: params, rnnMode: rnnMode, - inputMode: inputMode, direction: direction, dropout: dropout, seed: seed, seed2: seed2, - isTraining: isTraining) - } - - /// A RNN backed by cuDNN. - /// - /// Computes the RNN from the input and initial states, with respect to the params - /// buffer. Accepts one extra input "sequence_lengths" than CudnnRNN. - /// - /// rnn_mode: Indicates the type of the RNN model. - /// input_mode: Indicates whether there is a linear projection between the input and - /// the actual computation before the first layer. 'skip_input' is only allowed - /// when input_size == num_units; 'auto_select' implies 'skip_input' when - /// input_size == num_units; otherwise, it implies 'linear_input'. - /// direction: Indicates whether a bidirectional model will be used. Should be - /// "unidirectional" or "bidirectional". - /// dropout: Dropout probability. When set to 0., dropout is disabled. - /// seed: The 1st part of a seed to initialize dropout. - /// seed2: The 2nd part of a seed to initialize dropout. - /// input: If time_major is true, this is a 3-D tensor with the shape of - /// [seq_length, batch_size, input_size]. If time_major is false, the shape is - /// [batch_size, seq_length, input_size]. - /// input_h: If time_major is true, this is a 3-D tensor with the shape of - /// [num_layer * dir, batch_size, num_units]. If time_major is false, the shape - /// is [batch_size, num_layer * dir, num_units]. - /// input_c: For LSTM, a 3-D tensor with the shape of - /// [num_layer * dir, batch, num_units]. For other models, it is ignored. - /// params: A 1-D tensor that contains the weights and biases in an opaque layout. - /// The size must be created through CudnnRNNParamsSize, and initialized - /// separately. Note that they might not be compatible across different - /// generations. So it is a good idea to save and restore - /// sequence_lengths: a vector of lengths of each input sequence. - /// output: If time_major is true, this is a 3-D tensor with the shape of - /// [seq_length, batch_size, dir * num_units]. If time_major is false, the - /// shape is [batch_size, seq_length, dir * num_units]. - /// output_h: The same shape has input_h. - /// output_c: The same shape as input_c for LSTM. An empty tensor for other models. 
- /// is_training: Indicates whether this operation is used for inferenece or - /// training. - /// time_major: Indicates whether the input/output format is time major or batch - /// major. - /// reserve_space: An opaque tensor that can be used in backprop calculation. It - /// is only produced if is_training is true. - @inlinable @inline(__always) - public static func cudnnRNNV3( - _ input: Tensor, - inputH: Tensor, - inputC: Tensor, - params: Tensor, - sequenceLengths: Tensor, - rnnMode: RnnMode = .lstm, - inputMode: InputMode = .linearInput, - direction: Direction = .unidirectional, - dropout: Double = 0, - seed: Int64 = 0, - seed2: Int64 = 0, - numProj: Int64 = 0, - isTraining: Bool = true, - timeMajor: Bool = true - ) -> ( - output: Tensor, outputH: Tensor, outputC: Tensor, reserveSpace: Tensor, - hostReserved: Tensor - ) { - _RawTFEager.cudnnRNNV3( - input, inputH: inputH, inputC: inputC, params: params, sequenceLengths: sequenceLengths, - rnnMode: rnnMode, inputMode: inputMode, direction: direction, dropout: dropout, seed: seed, - seed2: seed2, numProj: numProj, isTraining: isTraining, timeMajor: timeMajor) - } - - /// Compute the cumulative product of the tensor `x` along `axis`. - /// - /// By default, this op performs an inclusive cumprod, which means that the first - /// element of the input is identical to the first element of the output: - /// - /// ```python - /// tf.cumprod([a, b, c]) # => [a, a * b, a * b * c] - /// ``` - /// - /// By setting the `exclusive` kwarg to `True`, an exclusive cumprod is - /// performed instead: - /// - /// ```python - /// tf.cumprod([a, b, c], exclusive=True) # => [1, a, a * b] - /// ``` - /// - /// By setting the `reverse` kwarg to `True`, the cumprod is performed in the - /// opposite direction: - /// - /// ```python - /// tf.cumprod([a, b, c], reverse=True) # => [a * b * c, b * c, c] - /// ``` - /// - /// This is more efficient than using separate `tf.reverse` ops. - /// - /// The `reverse` and `exclusive` kwargs can also be combined: - /// - /// ```python - /// tf.cumprod([a, b, c], exclusive=True, reverse=True) # => [b * c, c, 1] - /// ``` - /// - /// - Parameters: - /// - x: A `Tensor`. Must be one of the following types: `float32`, `float64`, - /// `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`, - /// `complex128`, `qint8`, `quint8`, `qint32`, `half`. - /// - axis: A `Tensor` of type `int32` (default: 0). Must be in the range - /// `[-rank(x), rank(x))`. - /// - /// - Attrs: - /// - exclusive: If `True`, perform exclusive cumprod. - /// - reverse: A `bool` (default: False). - @inlinable @inline(__always) - public static func cumprod< - T: TensorFlowNumeric, - Tidx: TensorFlowIndex - >( - _ x: Tensor, - axis: Tensor, - exclusive: Bool = false, - reverse: Bool = false - ) -> Tensor { - switch commonBackend(x.handle.backend, axis.handle.backend) { - case .XLA: - return _RawXLA.cumprod(x, axis: axis, exclusive: exclusive, reverse: reverse) - case .TF_EAGER: - return _RawTFEager.cumprod(x, axis: axis, exclusive: exclusive, reverse: reverse) - } - - } - - /// Compute the cumulative sum of the tensor `x` along `axis`. 
- /// - /// By default, this op performs an inclusive cumsum, which means that the first - /// element of the input is identical to the first element of the output: - /// - /// ```python - /// tf.cumsum([a, b, c]) # => [a, a + b, a + b + c] - /// ``` - /// - /// By setting the `exclusive` kwarg to `True`, an exclusive cumsum is - /// performed instead: - /// - /// ```python - /// tf.cumsum([a, b, c], exclusive=True) # => [0, a, a + b] - /// ``` - /// - /// By setting the `reverse` kwarg to `True`, the cumsum is performed in the - /// opposite direction: - /// - /// ```python - /// tf.cumsum([a, b, c], reverse=True) # => [a + b + c, b + c, c] - /// ``` - /// - /// This is more efficient than using separate `tf.reverse` ops. - /// - /// The `reverse` and `exclusive` kwargs can also be combined: - /// - /// ```python - /// tf.cumsum([a, b, c], exclusive=True, reverse=True) # => [b + c, c, 0] - /// ``` - /// - /// - Parameters: - /// - x: A `Tensor`. Must be one of the following types: `float32`, `float64`, - /// `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`, - /// `complex128`, `qint8`, `quint8`, `qint32`, `half`. - /// - axis: A `Tensor` of type `int32` (default: 0). Must be in the range - /// `[-rank(x), rank(x))`. - /// - /// - Attrs: - /// - exclusive: If `True`, perform exclusive cumsum. - /// - reverse: A `bool` (default: False). - @inlinable @inline(__always) - public static func cumsum< - T: TensorFlowNumeric, - Tidx: TensorFlowIndex - >( - _ x: Tensor, - axis: Tensor, - exclusive: Bool = false, - reverse: Bool = false - ) -> Tensor { - switch commonBackend(x.handle.backend, axis.handle.backend) { - case .XLA: - return _RawXLA.cumsum(x, axis: axis, exclusive: exclusive, reverse: reverse) - case .TF_EAGER: - return _RawTFEager.cumsum(x, axis: axis, exclusive: exclusive, reverse: reverse) - } - - } - - /// Compute the cumulative product of the tensor `x` along `axis`. - /// - /// By default, this op performs an inclusive cumulative log-sum-exp, - /// which means that the first - /// element of the input is identical to the first element of the output: - /// ```python - /// tf.math.cumulative_logsumexp([a, b, c]) # => [a, log(exp(a) + exp(b)), log(exp(a) + exp(b) + exp(c))] - /// ``` - /// - /// By setting the `exclusive` kwarg to `True`, an exclusive cumulative log-sum-exp is - /// performed instead: - /// ```python - /// tf.cumulative_logsumexp([a, b, c], exclusive=True) # => [-inf, a, log(exp(a) * exp(b))] - /// ``` - /// Note that the neutral element of the log-sum-exp operation is `-inf`, - /// however, for performance reasons, the minimal value representable by the - /// floating point type is used instead. - /// - /// By setting the `reverse` kwarg to `True`, the cumulative log-sum-exp is performed in the - /// opposite direction. - /// - /// - Parameters: - /// - x: A `Tensor`. Must be one of the following types: `float16`, `float32`, `float64`. - /// - axis: A `Tensor` of type `int32` (default: 0). Must be in the range - /// `[-rank(x), rank(x))`. - /// - /// - Attrs: - /// - exclusive: If `True`, perform exclusive cumulative log-sum-exp. - /// - reverse: A `bool` (default: False). 
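The Python snippets above translate directly to the Swift surface. A small eager-mode sketch of the documented semantics (expected values follow from the comments above):

```swift
// Eager-mode illustration of the cumulative ops dispatched in this file.
let x = Tensor<Float>([1, 2, 3], on: .defaultTFEager)
let axis = Tensor<Int32>(0, on: .defaultTFEager)

let inclusive = _Raw.cumsum(x, axis: axis)                   // [1, 3, 6]
let exclusive = _Raw.cumsum(x, axis: axis, exclusive: true)  // [0, 1, 3]
let reversed = _Raw.cumsum(x, axis: axis, reverse: true)     // [6, 5, 3]
let products = _Raw.cumprod(x, axis: axis)                   // [1, 2, 6]
```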
- @inlinable @inline(__always) - public static func cumulativeLogsumexp< - T: FloatingPoint & TensorFlowScalar, - Tidx: TensorFlowIndex - >( - _ x: Tensor, - axis: Tensor, - exclusive: Bool = false, - reverse: Bool = false - ) -> Tensor { - switch commonBackend(x.handle.backend, axis.handle.backend) { - case .XLA: - let output_device = axis.device - let x = Tensor(copying: x, to: .defaultTFEager) - let axis = Tensor(copying: axis, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.cumulativeLogsumexp( - x, axis: axis, exclusive: exclusive, reverse: reverse), to: output_device) - case .TF_EAGER: - return _RawTFEager.cumulativeLogsumexp( - x, axis: axis, exclusive: exclusive, reverse: reverse) - } - - } - - /// Returns the dimension index in the destination data format given the one in - /// - /// the source data format. - /// - /// - Parameter x: A Tensor with each element as a dimension index in source data format. - /// Must be in the range [-4, 4). - /// - /// - Attrs: - /// - src_format: source data format. - /// - dst_format: destination data format. - /// - /// - Output y: A Tensor with each element as a dimension index in destination data format. - @inlinable @inline(__always) - public static func dataFormatDimMap( - _ x: Tensor, - srcFormat: String = "NHWC", - dstFormat: String = "NCHW" - ) -> Tensor { - switch x.handle.backend { - case .XLA: - let output_device = x.device - let x = Tensor(copying: x, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.dataFormatDimMap(x, srcFormat: srcFormat, dstFormat: dstFormat), - to: output_device) - case .TF_EAGER: - return _RawTFEager.dataFormatDimMap(x, srcFormat: srcFormat, dstFormat: dstFormat) - } - - } - - /// Returns the permuted vector/tensor in the destination data format given the - /// - /// one in the source data format. - /// - /// - Parameter x: Vector of size 4 or Tensor of shape (4, 2) in source data format. - /// - /// - Attrs: - /// - src_format: source data format. - /// - dst_format: destination data format. - /// - /// - Output y: Vector of size 4 or Tensor of shape (4, 2) in destination data format. - @inlinable @inline(__always) - public static func dataFormatVecPermute( - _ x: Tensor, - srcFormat: String = "NHWC", - dstFormat: String = "NCHW" - ) -> Tensor { - switch x.handle.backend { - case .XLA: - let output_device = x.device - let x = Tensor(copying: x, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.dataFormatVecPermute(x, srcFormat: srcFormat, dstFormat: dstFormat), - to: output_device) - case .TF_EAGER: - return _RawTFEager.dataFormatVecPermute(x, srcFormat: srcFormat, dstFormat: dstFormat) - } - - } - - /// Returns the cardinality of `input_dataset`. - /// - /// Returns the cardinality of `input_dataset`. - /// - /// - Parameter input_dataset: A variant tensor representing the dataset to return cardinality for. - /// - /// - Output cardinality: The cardinality of `input_dataset`. Named constants are used to represent - /// infinite and unknown cardinality. - @inlinable @inline(__always) - public static func datasetCardinality( - inputDataset: VariantHandle - ) -> Tensor { - _RawTFEager.datasetCardinality(inputDataset: inputDataset) - } - - /// Creates a dataset from the given `graph_def`. - /// - /// Creates a dataset from the provided `graph_def`. - /// - /// - Parameter graph_def: The graph representation of the dataset (as serialized GraphDef). - /// - /// - Output handle: A variant tensor representing the dataset. 
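The data-format helpers above are easy to sanity-check on the eager backend. With the default `NHWC` -> `NCHW` formats, `N` stays at index 0 while `C` moves from 3 to 1 (a hedged sketch, not part of this patch):

```swift
// dataFormatDimMap with the defaults srcFormat: "NHWC", dstFormat: "NCHW":
// N(0) -> 0, H(1) -> 2, W(2) -> 3, C(3) -> 1.
let dims = Tensor<Int32>([0, 1, 2, 3], on: .defaultTFEager)
let mapped = _Raw.dataFormatDimMap(dims)  // expected: [0, 2, 3, 1]
```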
- @inlinable @inline(__always) - public static func datasetFromGraph( - graphDef: StringTensor - ) -> VariantHandle { - _RawTFEager.datasetFromGraph(graphDef: graphDef) - } - - /// Returns a serialized GraphDef representing `input_dataset`. - /// - /// Returns a graph representation for `input_dataset`. - /// - /// - Parameter input_dataset: A variant tensor representing the dataset to return the graph representation for. - /// - /// - Output graph: The graph representation of the dataset (as serialized GraphDef). - @inlinable @inline(__always) - public static func datasetToGraph( - inputDataset: VariantHandle, - statefulWhitelist: [String], - allowStateful: Bool = false, - stripDeviceAssignment: Bool = false - ) -> StringTensor { - _RawTFEager.datasetToGraph( - inputDataset: inputDataset, statefulWhitelist: statefulWhitelist, - allowStateful: allowStateful, stripDeviceAssignment: stripDeviceAssignment) - } - - /// Returns a serialized GraphDef representing `input_dataset`. - /// - /// Returns a graph representation for `input_dataset`. - /// - /// - Parameter input_dataset: A variant tensor representing the dataset to return the graph representation for. - /// - /// - Output graph: The graph representation of the dataset (as serialized GraphDef). - @inlinable @inline(__always) - public static func datasetToGraphV2( - inputDataset: VariantHandle, - externalStatePolicy: Int64 = 0, - stripDeviceAssignment: Bool = false - ) -> StringTensor { - _RawTFEager.datasetToGraphV2( - inputDataset: inputDataset, externalStatePolicy: externalStatePolicy, - stripDeviceAssignment: stripDeviceAssignment) - } - - /// Outputs the single element from the given dataset. - /// - /// - Parameter dataset: A handle to a dataset that contains a single element. - /// - /// - Output components: The components of the single element of `input`. - @inlinable @inline(__always) - public static func datasetToSingleElement( - dataset: VariantHandle, - outputShapes: [TensorShape?] - ) -> OutputTypes { - _RawTFEager.datasetToSingleElement(dataset: dataset, outputShapes: outputShapes) - } - - /// Writes the given dataset to the given file using the TFRecord format. - /// - /// - Parameters: - /// - input_dataset: A variant tensor representing the dataset to write. - /// - filename: A scalar string tensor representing the filename to use. - /// - compression_type: A scalar string tensor containing either (i) the empty string (no - /// compression), (ii) "ZLIB", or (iii) "GZIP". - @inlinable @inline(__always) - public static func datasetToTFRecord( - inputDataset: VariantHandle, - filename: StringTensor, - compressionType: StringTensor - ) { - _RawTFEager.datasetToTFRecord( - inputDataset: inputDataset, filename: filename, compressionType: compressionType) - } - - /// Identity op for gradient debugging. - /// - /// This op is hidden from public in Python. It is used by TensorFlow Debugger to - /// register gradient tensors for gradient debugging. - /// This op operates on non-reference-type tensors. - @inlinable @inline(__always) - public static func debugGradientIdentity( - _ input: Tensor - ) -> Tensor { - switch input.handle.backend { - case .XLA: - let output_device = input.device - let input = Tensor(copying: input, to: .defaultTFEager) - return Tensor(copying: _RawTFEager.debugGradientIdentity(input), to: output_device) - case .TF_EAGER: - return _RawTFEager.debugGradientIdentity(input) - } - - } - - /// Provides an identity mapping of the non-Ref type input tensor for debugging. 
- /// - /// Provides an identity mapping of the non-Ref type input tensor for debugging. - /// - /// - Parameter input: Input tensor, non-Reference type - /// - /// - Attrs: - /// - device_name: Name of the device on which the tensor resides. - /// - tensor_name: Name of the input tensor. - /// - debug_urls: List of URLs to debug targets, e.g., - /// file:///foo/tfdbg_dump, grpc:://localhost:11011 - /// - gated_grpc: Whether this op will be gated. If any of the debug_urls of this - /// debug node is of the grpc:// scheme, when the value of this attribute is set - /// to True, the data will not actually be sent via the grpc stream unless this - /// debug op has been enabled at the debug_url. If all of the debug_urls of this - /// debug node are of the grpc:// scheme and the debug op is enabled at none of - /// them, the output will be an empty Tensor. - @inlinable @inline(__always) - public static func debugIdentity( - _ input: Tensor, - deviceName: String, - tensorName: String, - debugUrls: [String], - gatedGrpc: Bool = false - ) -> Tensor { - switch input.handle.backend { - case .XLA: - let output_device = input.device - let input = Tensor(copying: input, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.debugIdentity( - input, deviceName: deviceName, tensorName: tensorName, debugUrls: debugUrls, - gatedGrpc: gatedGrpc), to: output_device) - case .TF_EAGER: - return _RawTFEager.debugIdentity( - input, deviceName: deviceName, tensorName: tensorName, debugUrls: debugUrls, - gatedGrpc: gatedGrpc) - } - - } - - /// Debug Identity V2 Op. - /// - /// Provides an identity mapping from input to output, while writing the content of - /// the input tensor by calling DebugEventsWriter. - /// - /// The semantics of the input tensor depends on tensor_debug_mode. In typical - /// usage, the input tensor comes directly from the user computation only when - /// graph_debug_mode is FULL_TENSOR (see protobuf/debug_event.proto for a - /// list of all the possible values of graph_debug_mode). For the other debug modes, - /// the input tensor should be produced by an additional op or subgraph that - /// computes summary information about one or more tensors. - /// - /// - Parameter input: Input tensor, non-Reference type - /// - /// - Attrs: - /// - tfdbg_context_id: A tfdbg-generated ID for the context that the op belongs to, - /// e.g., a concrete compiled tf.function. - /// - op_name: Optional. Name of the op that the debug op is concerned with. - /// Used only for single-tensor trace. - /// - output_slot: Optional. Output slot index of the tensor that the debug op - /// is concerned with. Used only for single-tensor trace. - /// - tensor_debug_mode: TensorDebugMode enum value. See debug_event.proto for details. - /// - debug_urls: List of URLs to debug targets, e.g., file:///foo/tfdbg_dump. 
- @inlinable @inline(__always) - public static func debugIdentityV2( - _ input: Tensor, - tfdbgContextId: String, - opName: String, - outputSlot: Int64 = -1, - tensorDebugMode: Int64 = -1, - debugUrls: [String] - ) -> Tensor { - switch input.handle.backend { - case .XLA: - let output_device = input.device - let input = Tensor(copying: input, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.debugIdentityV2( - input, tfdbgContextId: tfdbgContextId, opName: opName, outputSlot: outputSlot, - tensorDebugMode: tensorDebugMode, debugUrls: debugUrls), to: output_device) - case .TF_EAGER: - return _RawTFEager.debugIdentityV2( - input, tfdbgContextId: tfdbgContextId, opName: opName, outputSlot: outputSlot, - tensorDebugMode: tensorDebugMode, debugUrls: debugUrls) - } - - } - - /// Debug NaN Value Counter Op. - /// - /// Counts number of NaNs in the input tensor, for debugging. - /// - /// - Parameter input: Input tensor, non-Reference type. - /// - /// - Attrs: - /// - tensor_name: Name of the input tensor. - /// - debug_urls: List of URLs to debug targets, e.g., - /// file:///foo/tfdbg_dump, grpc:://localhost:11011. - /// - gated_grpc: Whether this op will be gated. If any of the debug_urls of this - /// debug node is of the grpc:// scheme, when the value of this attribute is set - /// to True, the data will not actually be sent via the grpc stream unless this - /// debug op has been enabled at the debug_url. If all of the debug_urls of this - /// debug node are of the grpc:// scheme and the debug op is enabled at none of - /// them, the output will be an empty Tensor. - @inlinable @inline(__always) - public static func debugNanCount( - _ input: Tensor, - deviceName: String, - tensorName: String, - debugUrls: [String], - gatedGrpc: Bool = false - ) -> Tensor { - switch input.handle.backend { - case .XLA: - let output_device = input.device - let input = Tensor(copying: input, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.debugNanCount( - input, deviceName: deviceName, tensorName: tensorName, debugUrls: debugUrls, - gatedGrpc: gatedGrpc), to: output_device) - case .TF_EAGER: - return _RawTFEager.debugNanCount( - input, deviceName: deviceName, tensorName: tensorName, debugUrls: debugUrls, - gatedGrpc: gatedGrpc) - } - - } - - /// Debug Numeric Summary Op. - /// - /// Provide a basic summary of numeric value types, range and distribution. - /// - /// output: A double tensor of shape [14 + nDimensions], where nDimensions is the - /// the number of dimensions of the tensor's shape. The elements of output are: - /// [0]: is initialized (1.0) or not (0.0). - /// [1]: total number of elements - /// [2]: NaN element count - /// [3]: generalized -inf count: elements <= lower_bound. lower_bound is -inf by - /// default. - /// [4]: negative element count (excluding -inf), if lower_bound is the default - /// -inf. Otherwise, this is the count of elements > lower_bound and < 0. - /// [5]: zero element count - /// [6]: positive element count (excluding +inf), if upper_bound is the default - /// -inf. Otherwise, this is the count of elements < upper_bound and > 0. - /// [7]: generalized +inf count, elements >= upper_bound. upper_bound is +inf by - /// default. - /// Output elements [1:8] are all zero, if the tensor is uninitialized. - /// [8]: minimum of all non-inf and non-NaN elements. - /// If uninitialized or no such element exists: +inf. - /// [9]: maximum of all non-inf and non-NaN elements. - /// If uninitialized or no such element exists: -inf. 
- /// [10]: mean of all non-inf and non-NaN elements. - /// If uninitialized or no such element exists: NaN. - /// [11]: variance of all non-inf and non-NaN elements. - /// If uninitialized or no such element exists: NaN. - /// [12]: Data type of the tensor encoded as an enum integer. See the DataType - /// proto for more details. - /// [13]: Number of dimensions of the tensor (ndims). - /// [14+]: Sizes of the dimensions. - /// - /// - /// - Parameter input: Input tensor, non-Reference type. - /// - /// - Attrs: - /// - tensor_name: Name of the input tensor. - /// - debug_urls: List of URLs to debug targets, e.g., - /// file:///foo/tfdbg_dump, grpc:://localhost:11011. - /// - lower_bound: (float) The lower bound <= which values will be included in the - /// generalized -inf count. Default: -inf. - /// - upper_bound: (float) The upper bound >= which values will be included in the - /// generalized +inf count. Default: +inf. - /// - mute_if_healthy: (bool) Do not send data to the debug URLs unless at least one - /// of elements [2], [3] and [7] (i.e., the nan count and the generalized -inf and - /// inf counts) is non-zero. - /// - gated_grpc: Whether this op will be gated. If any of the debug_urls of this - /// debug node is of the grpc:// scheme, when the value of this attribute is set - /// to True, the data will not actually be sent via the grpc stream unless this - /// debug op has been enabled at the debug_url. If all of the debug_urls of this - /// debug node are of the grpc:// scheme and the debug op is enabled at none of - /// them, the output will be an empty Tensor. - @inlinable @inline(__always) - public static func debugNumericSummary( - _ input: Tensor, - deviceName: String, - tensorName: String, - debugUrls: [String], - lowerBound: Double = -Double.infinity, - upperBound: Double = Double.infinity, - muteIfHealthy: Bool = false, - gatedGrpc: Bool = false - ) -> Tensor { - switch input.handle.backend { - case .XLA: - let output_device = input.device - let input = Tensor(copying: input, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.debugNumericSummary( - input, deviceName: deviceName, tensorName: tensorName, debugUrls: debugUrls, - lowerBound: lowerBound, upperBound: upperBound, muteIfHealthy: muteIfHealthy, - gatedGrpc: gatedGrpc), to: output_device) - case .TF_EAGER: - return _RawTFEager.debugNumericSummary( - input, deviceName: deviceName, tensorName: tensorName, debugUrls: debugUrls, - lowerBound: lowerBound, upperBound: upperBound, muteIfHealthy: muteIfHealthy, - gatedGrpc: gatedGrpc) - } - - } - - /// - /// - Parameter input: Input tensor, to be summarized by the op. - /// - /// - Attrs: - /// - tensor_debug_mode: Tensor debug mode: the mode in which the input tensor is summarized - /// by the op. See the TensorDebugMode enum in - /// tensorflow/core/protobuf/debug_event.proto for details. - /// - /// Supported values: - /// 2 (CURT_HEALTH): Output a float32/64 tensor of shape [2]. The 1st - /// element is the tensor_id, if provided, and -1 otherwise. The 2nd - /// element is a bit which is set to 1 if the input tensor has an - /// infinity or nan value, or zero otherwise. - /// - /// 3 (CONCISE_HEALTH): Output a float32/64 tensor of shape [5]. The 1st - /// element is the tensor_id, if provided, and -1 otherwise. The - /// remaining four slots are the total number of elements, -infs, - /// +infs, and nans in the input tensor respectively. - /// - /// 4 (FULL_HEALTH): Output a float32/64 tensor of shape [11]. 
The 1st - /// element is the tensor_id, if provided, and -1 otherwise. The 2nd - /// element is the device_id, if provided, and -1 otherwise. The 3rd - /// element holds the datatype value of the input tensor as according - /// to the enumerated type in tensorflow/core/framework/types.proto. - /// The remaining elements hold the total number of elements, -infs, - /// +infs, nans, negative finite numbers, zeros, and positive finite - /// numbers in the input tensor respectively. - /// - /// 5 (SHAPE): Output a float32/64 tensor of shape [10]. The 1st - /// element is the tensor_id, if provided, and -1 otherwise. The 2nd - /// element holds the datatype value of the input tensor as according - /// to the enumerated type in tensorflow/core/framework/types.proto. - /// The 3rd element holds the rank of the tensor. The 4th element holds - /// the number of elements within the tensor. Finally the remaining 6 - /// elements hold the shape of the tensor. If the rank of the tensor - /// is lower than 6, the shape is right padded with zeros. If the rank - /// is greater than 6, the head of the shape is truncated. - /// - /// 6 (FULL_NUMERICS): Output a float32/64 tensor of shape [22]. The 1st - /// element is the tensor_id, if provided, and -1 otherwise. The 2nd - /// element is the device_id, if provided, and -1 otherwise. The 3rd - /// element holds the datatype value of the input tensor as according - /// to the enumerated type in tensorflow/core/framework/types.proto. - /// The 4th element holds the rank of the tensor. The 5th to 11th - /// elements hold the shape of the tensor. If the rank of the tensor - /// is lower than 6, the shape is right padded with zeros. If the rank - /// is greater than 6, the head of the shape is truncated. The 12th to - /// 18th elements hold the number of elements, -infs, +infs, nans, - /// denormal floats, negative finite numbers, zeros, and positive - /// finite numbers in the input tensor respectively. The final four - /// elements hold the min value, max value, mean, and variance of the - /// input tensor. - /// - /// 8 (REDUCE_INF_NAN_THREE_SLOTS): Output a float32/64 tensor of shape - /// [3]. The 1st element is -inf if any elements of the input tensor - /// is -inf, or zero otherwise. The 2nd element is +inf if any elements - /// of the input tensor is +inf, or zero otherwise. The 3rd element is - /// nan if any element of the input tensor is nan, or zero otherwise. - /// - tensor_id: Optional. An integer identifier for the tensor being summarized by this op. - @inlinable @inline(__always) - public static func debugNumericSummaryV2( - _ input: Tensor, - tensorDebugMode: Int64 = -1, - tensorId: Int64 = -1 - ) -> Tensor { - switch input.handle.backend { - case .XLA: - let output_device = input.device - let input = Tensor(copying: input, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.debugNumericSummaryV2( - input, tensorDebugMode: tensorDebugMode, tensorId: tensorId), to: output_device) - case .TF_EAGER: - return _RawTFEager.debugNumericSummaryV2( - input, tensorDebugMode: tensorDebugMode, tensorId: tensorId) - } - - } - - /// Decode and Crop a JPEG-encoded image to a uint8 tensor. - /// - /// The attr `channels` indicates the desired number of color channels for the - /// decoded image. - /// - /// Accepted values are: - /// - /// * 0: Use the number of channels in the JPEG-encoded image. - /// * 1: output a grayscale image. - /// * 3: output an RGB image. 
- /// - /// If needed, the JPEG-encoded image is transformed to match the requested number - /// of color channels. - /// - /// The attr `ratio` allows downscaling the image by an integer factor during - /// decoding. Allowed values are: 1, 2, 4, and 8. This is much faster than - /// downscaling the image later. - /// - /// - /// It is equivalent to a combination of decode and crop, but much faster by only - /// decoding partial jpeg image. - /// - /// - Parameters: - /// - contents: 0-D. The JPEG-encoded image. - /// - crop_window: 1-D. The crop window: [crop_y, crop_x, crop_height, crop_width]. - /// - /// - Attrs: - /// - channels: Number of color channels for the decoded image. - /// - ratio: Downscaling ratio. - /// - fancy_upscaling: If true use a slower but nicer upscaling of the - /// chroma planes (yuv420/422 only). - /// - try_recover_truncated: If true try to recover an image from truncated input. - /// - acceptable_fraction: The minimum required fraction of lines before a truncated - /// input is accepted. - /// - dct_method: string specifying a hint about the algorithm used for - /// decompression. Defaults to "" which maps to a system-specific - /// default. Currently valid values are ["INTEGER_FAST", - /// "INTEGER_ACCURATE"]. The hint may be ignored (e.g., the internal - /// jpeg library changes to a version that does not have that specific - /// option.) - /// - /// - Output image: 3-D with shape `[height, width, channels]`.. - @inlinable @inline(__always) - public static func decodeAndCropJpeg( - contents: StringTensor, - cropWindow: Tensor, - channels: Int64 = 0, - ratio: Int64 = 1, - fancyUpscaling: Bool = true, - tryRecoverTruncated: Bool = false, - acceptableFraction: Double = 1, - dctMethod: String - ) -> Tensor { - switch cropWindow.handle.backend { - case .XLA: - let output_device = cropWindow.device - let cropWindow = Tensor(copying: cropWindow, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.decodeAndCropJpeg( - contents: contents, cropWindow: cropWindow, channels: channels, ratio: ratio, - fancyUpscaling: fancyUpscaling, tryRecoverTruncated: tryRecoverTruncated, - acceptableFraction: acceptableFraction, dctMethod: dctMethod), to: output_device) - case .TF_EAGER: - return _RawTFEager.decodeAndCropJpeg( - contents: contents, cropWindow: cropWindow, channels: channels, ratio: ratio, - fancyUpscaling: fancyUpscaling, tryRecoverTruncated: tryRecoverTruncated, - acceptableFraction: acceptableFraction, dctMethod: dctMethod) - } - - } - - /// Decode web-safe base64-encoded strings. - /// - /// Input may or may not have padding at the end. See EncodeBase64 for padding. - /// Web-safe means that input must use - and _ instead of + and /. - /// - /// - Parameter input: Base64 strings to decode. - /// - /// - Output output: Decoded strings. - @inlinable @inline(__always) - public static func decodeBase64( - _ input: StringTensor - ) -> StringTensor { - _RawTFEager.decodeBase64(input) - } - - /// Decode the first frame of a BMP-encoded image to a uint8 tensor. - /// - /// The attr `channels` indicates the desired number of color channels for the - /// decoded image. - /// - /// Accepted values are: - /// - /// * 0: Use the number of channels in the BMP-encoded image. - /// * 3: output an RGB image. - /// * 4: output an RGBA image. - /// - /// - Parameter contents: 0-D. The BMP-encoded image. - /// - /// - Output image: 3-D with shape `[height, width, channels]`. 
RGB order - @inlinable @inline(__always) - public static func decodeBmp( - contents: StringTensor, - channels: Int64 = 0 - ) -> Tensor { - _RawTFEager.decodeBmp(contents: contents, channels: channels) - } - - /// Convert CSV records to tensors. Each column maps to one tensor. - /// - /// RFC 4180 format is expected for the CSV records. - /// (https://tools.ietf.org/html/rfc4180) - /// Note that we allow leading and trailing spaces with int or float field. - /// - /// - Parameters: - /// - records: Each string is a record/row in the csv and all records should have - /// the same format. - /// - record_defaults: One tensor per column of the input record, with either a - /// scalar default value for that column or an empty vector if the column is - /// required. - /// - /// - Attrs: - /// - field_delim: char delimiter to separate fields in a record. - /// - use_quote_delim: If false, treats double quotation marks as regular - /// characters inside of the string fields (ignoring RFC 4180, Section 2, - /// Bullet 5). - /// - na_value: Additional string to recognize as NA/NaN. - /// - /// - Output output: Each tensor will have the same shape as records. - @inlinable @inline(__always) - public static func decodeCSV( - records: StringTensor, - recordDefaults: OutType, - fieldDelim: String = ",", - useQuoteDelim: Bool = true, - naValue: String, - selectCols: [Int32] - ) -> OutType { - _RawTFEager.decodeCSV( - records: records, recordDefaults: recordDefaults, fieldDelim: fieldDelim, - useQuoteDelim: useQuoteDelim, naValue: naValue, selectCols: selectCols) - } - - /// Decompress strings. - /// - /// This op decompresses each element of the `bytes` input `Tensor`, which - /// is assumed to be compressed using the given `compression_type`. - /// - /// The `output` is a string `Tensor` of the same shape as `bytes`, - /// each element containing the decompressed data from the corresponding - /// element in `bytes`. - /// - /// - Parameter bytes: A Tensor of string which is compressed. - /// - /// - Attr compression_type: A scalar containing either (i) the empty string (no - /// compression), (ii) "ZLIB", or (iii) "GZIP". - /// - /// - Output output: A Tensor with the same shape as input `bytes`, uncompressed - /// from bytes. - @inlinable @inline(__always) - public static func decodeCompressed( - bytes: StringTensor, - compressionType: String - ) -> StringTensor { - _RawTFEager.decodeCompressed(bytes: bytes, compressionType: compressionType) - } - - /// Decode the frame(s) of a GIF-encoded image to a uint8 tensor. - /// - /// GIF images with frame or transparency compression are not supported. - /// On Linux and MacOS systems, convert animated GIFs from compressed to - /// uncompressed by running: - /// - /// convert $src.gif -coalesce $dst.gif - /// - /// This op also supports decoding JPEGs and PNGs, though it is cleaner to use - /// `tf.image.decode_image`. - /// - /// - Parameter contents: 0-D. The GIF-encoded image. - /// - /// - Output image: 4-D with shape `[num_frames, height, width, 3]`. RGB channel order. - @inlinable @inline(__always) - public static func decodeGif( - contents: StringTensor - ) -> Tensor { - _RawTFEager.decodeGif(contents: contents) - } - - /// Convert JSON-encoded Example records to binary protocol buffer strings. 
- /// - /// This op translates a tensor containing Example records, encoded using - /// the [standard JSON - /// mapping](https://developers.google.com/protocol-buffers/docs/proto3#json), - /// into a tensor containing the same records encoded as binary protocol - /// buffers. The resulting tensor can then be fed to any of the other - /// Example-parsing ops. - /// - /// - Parameter json_examples: Each string is a JSON object serialized according to the JSON - /// mapping of the Example proto. - /// - /// - Output binary_examples: Each string is a binary Example protocol buffer corresponding - /// to the respective element of `json_examples`. - @inlinable @inline(__always) - public static func decodeJSONExample( - jsonExamples: StringTensor - ) -> StringTensor { - _RawTFEager.decodeJSONExample(jsonExamples: jsonExamples) - } - - /// Decode a JPEG-encoded image to a uint8 tensor. - /// - /// The attr `channels` indicates the desired number of color channels for the - /// decoded image. - /// - /// Accepted values are: - /// - /// * 0: Use the number of channels in the JPEG-encoded image. - /// * 1: output a grayscale image. - /// * 3: output an RGB image. - /// - /// If needed, the JPEG-encoded image is transformed to match the requested number - /// of color channels. - /// - /// The attr `ratio` allows downscaling the image by an integer factor during - /// decoding. Allowed values are: 1, 2, 4, and 8. This is much faster than - /// downscaling the image later. - /// - /// - /// This op also supports decoding PNGs and non-animated GIFs since the interface is - /// the same, though it is cleaner to use `tf.image.decode_image`. - /// - /// - Parameter contents: 0-D. The JPEG-encoded image. - /// - /// - Attrs: - /// - channels: Number of color channels for the decoded image. - /// - ratio: Downscaling ratio. - /// - fancy_upscaling: If true use a slower but nicer upscaling of the - /// chroma planes (yuv420/422 only). - /// - try_recover_truncated: If true try to recover an image from truncated input. - /// - acceptable_fraction: The minimum required fraction of lines before a truncated - /// input is accepted. - /// - dct_method: string specifying a hint about the algorithm used for - /// decompression. Defaults to "" which maps to a system-specific - /// default. Currently valid values are ["INTEGER_FAST", - /// "INTEGER_ACCURATE"]. The hint may be ignored (e.g., the internal - /// jpeg library changes to a version that does not have that specific - /// option.) - /// - /// - Output image: 3-D with shape `[height, width, channels]`.. - @inlinable @inline(__always) - public static func decodeJpeg( - contents: StringTensor, - channels: Int64 = 0, - ratio: Int64 = 1, - fancyUpscaling: Bool = true, - tryRecoverTruncated: Bool = false, - acceptableFraction: Double = 1, - dctMethod: String - ) -> Tensor { - _RawTFEager.decodeJpeg( - contents: contents, channels: channels, ratio: ratio, fancyUpscaling: fancyUpscaling, - tryRecoverTruncated: tryRecoverTruncated, acceptableFraction: acceptableFraction, - dctMethod: dctMethod) - } - - /// Reinterpret the bytes of a string as a vector of numbers. - /// - /// - Parameters: - /// - input_bytes: Tensor of string to be decoded. - /// - fixed_length: Length in bytes for each element of the decoded output. Must be a multiple - /// of the size of the output type. - /// - /// - Attr little_endian: Whether the input `input_bytes` is in little-endian order. 
Ignored for - /// `out_type` values that are stored in a single byte, like `uint8` - /// - /// - Output output: A Tensor with one more dimension than the input `bytes`. The added dimension - /// will have size equal to the length of the elements of `bytes` divided by the - /// number of bytes to represent `out_type`. - @inlinable @inline(__always) - public static func decodePaddedRaw( - inputBytes: StringTensor, - fixedLength: Tensor, - littleEndian: Bool = true - ) -> Tensor { - switch fixedLength.handle.backend { - case .XLA: - let output_device = fixedLength.device - let fixedLength = Tensor(copying: fixedLength, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.decodePaddedRaw( - inputBytes: inputBytes, fixedLength: fixedLength, littleEndian: littleEndian), - to: output_device) - case .TF_EAGER: - return _RawTFEager.decodePaddedRaw( - inputBytes: inputBytes, fixedLength: fixedLength, littleEndian: littleEndian) - } - - } - - /// Decode a PNG-encoded image to a uint8 or uint16 tensor. - /// - /// The attr `channels` indicates the desired number of color channels for the - /// decoded image. - /// - /// Accepted values are: - /// - /// * 0: Use the number of channels in the PNG-encoded image. - /// * 1: output a grayscale image. - /// * 3: output an RGB image. - /// * 4: output an RGBA image. - /// - /// If needed, the PNG-encoded image is transformed to match the requested number - /// of color channels. - /// - /// This op also supports decoding JPEGs and non-animated GIFs since the interface - /// is the same, though it is cleaner to use `tf.image.decode_image`. - /// - /// - Parameter contents: 0-D. The PNG-encoded image. - /// - /// - Attr channels: Number of color channels for the decoded image. - /// - /// - Output image: 3-D with shape `[height, width, channels]`. - @inlinable @inline(__always) - public static func decodePng( - contents: StringTensor, - channels: Int64 = 0 - ) -> Tensor { - _RawTFEager.decodePng(contents: contents, channels: channels) - } - - /// The op extracts fields from a serialized protocol buffers message into tensors. - /// - /// The `decode_proto` op extracts fields from a serialized protocol buffers - /// message into tensors. The fields in `field_names` are decoded and converted - /// to the corresponding `output_types` if possible. - /// - /// A `message_type` name must be provided to give context for the field names. - /// The actual message descriptor can be looked up either in the linked-in - /// descriptor pool or a filename provided by the caller using the - /// `descriptor_source` attribute. - /// - /// Each output tensor is a dense tensor. This means that it is padded to hold - /// the largest number of repeated elements seen in the input minibatch. (The - /// shape is also padded by one to prevent zero-sized dimensions). The actual - /// repeat counts for each example in the minibatch can be found in the `sizes` - /// output. In many cases the output of `decode_proto` is fed immediately into - /// tf.squeeze if missing values are not a concern. When using tf.squeeze, always - /// pass the squeeze dimension explicitly to avoid surprises. - /// - /// For the most part, the mapping between Proto field types and TensorFlow dtypes - /// is straightforward. However, there are a few special cases: - /// - /// - A proto field that contains a submessage or group can only be converted - /// to `DT_STRING` (the serialized submessage). This is to reduce the complexity - /// of the API. 
The resulting string can be used as input to another instance of - /// the decode_proto op. - /// - /// - TensorFlow lacks support for unsigned integers. The ops represent uint64 - /// types as a `DT_INT64` with the same twos-complement bit pattern (the obvious - /// way). Unsigned int32 values can be represented exactly by specifying type - /// `DT_INT64`, or using twos-complement if the caller specifies `DT_INT32` in - /// the `output_types` attribute. - /// - /// Both binary and text proto serializations are supported, and can be - /// chosen using the `format` attribute. - /// - /// The `descriptor_source` attribute selects the source of protocol - /// descriptors to consult when looking up `message_type`. This may be: - /// - /// - An empty string or "local://", in which case protocol descriptors are - /// created for C++ (not Python) proto definitions linked to the binary. - /// - /// - A file, in which case protocol descriptors are created from the file, - /// which is expected to contain a `FileDescriptorSet` serialized as a string. - /// NOTE: You can build a `descriptor_source` file using the `--descriptor_set_out` - /// and `--include_imports` options to the protocol compiler `protoc`. - /// - /// - A "bytes://", in which protocol descriptors are created from ``, - /// which is expected to be a `FileDescriptorSet` serialized as a string. - /// - /// - Parameter bytes: Tensor of serialized protos with shape `batch_shape`. - /// - /// - Attrs: - /// - message_type: Name of the proto message type to decode. - /// - field_names: List of strings containing proto field names. An extension field can be decoded - /// by using its full name, e.g. EXT_PACKAGE.EXT_FIELD_NAME. - /// - output_types: List of TF types to use for the respective field in field_names. - /// - descriptor_source: Either the special value `local://` or a path to a file containing - /// a serialized `FileDescriptorSet`. - /// - message_format: Either `binary` or `text`. - /// - sanitize: Whether to sanitize the result or not. - /// - /// - Outputs: - /// - sizes: Tensor of int32 with shape `[batch_shape, len(field_names)]`. - /// Each entry is the number of values found for the corresponding field. - /// Optional fields may have 0 or 1 values. - /// - values: List of tensors containing values for the corresponding field. - /// `values[i]` has datatype `output_types[i]` - /// and shape `[batch_shape, max(sizes[...,i])]`. - @inlinable @inline(__always) - public static func decodeProtoV2( - bytes: StringTensor, - messageType: String, - fieldNames: [String], - descriptorSource: String = "local://", - messageFormat: String = "binary", - sanitize: Bool = false - ) -> (sizes: Tensor, values: OutputTypes) { - _RawTFEager.decodeProtoV2( - bytes: bytes, messageType: messageType, fieldNames: fieldNames, - descriptorSource: descriptorSource, messageFormat: messageFormat, sanitize: sanitize) - } - - /// Reinterpret the bytes of a string as a vector of numbers. - /// - /// - Parameter bytes: All the elements must have the same length. - /// - /// - Attr little_endian: Whether the input `bytes` are in little-endian order. - /// Ignored for `out_type` values that are stored in a single byte like - /// `uint8`. - /// - /// - Output output: A Tensor with one more dimension than the input `bytes`. The - /// added dimension will have size equal to the length of the elements - /// of `bytes` divided by the number of bytes to represent `out_type`. 
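To make the shape contract of `decodeRaw` (and `decodePaddedRaw` above) concrete, a hedged eager-mode sketch: a batch of one 4-byte string decoded as `uint8` gains a trailing dimension of size 4 / 1 = 4.

```swift
// "abcd" is 4 bytes and uint8 is 1 byte wide, so the output has one more
// dimension than the input, of size 4: shape [1, 4].
let bytes = StringTensor(["abcd"])
let decoded: Tensor<UInt8> = _Raw.decodeRaw(bytes: bytes)
// expected scalars: [97, 98, 99, 100] (the ASCII codes of "abcd")
```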
- @inlinable @inline(__always) - public static func decodeRaw( - bytes: StringTensor, - littleEndian: Bool = true - ) -> Tensor { - _RawTFEager.decodeRaw(bytes: bytes, littleEndian: littleEndian) - } - - /// Decode a 16-bit PCM WAV file to a float tensor. - /// - /// The -32768 to 32767 signed 16-bit values will be scaled to -1.0 to 1.0 in float. - /// - /// When desired_channels is set, if the input contains fewer channels than this - /// then the last channel will be duplicated to give the requested number, else if - /// the input has more channels than requested then the additional channels will be - /// ignored. - /// - /// If desired_samples is set, then the audio will be cropped or padded with zeroes - /// to the requested length. - /// - /// The first output contains a Tensor with the content of the audio samples. The - /// lowest dimension will be the number of channels, and the second will be the - /// number of samples. For example, a ten-sample-long stereo WAV file should give an - /// output shape of [10, 2]. - /// - /// - Parameter contents: The WAV-encoded audio, usually from a file. - /// - /// - Attrs: - /// - desired_channels: Number of sample channels wanted. - /// - desired_samples: Length of audio requested. - /// - /// - Outputs: - /// - audio: 2-D with shape `[length, channels]`. - /// - sample_rate: Scalar holding the sample rate found in the WAV header. - @inlinable @inline(__always) - public static func decodeWav( - contents: StringTensor, - desiredChannels: Int64 = -1, - desiredSamples: Int64 = -1 - ) -> (audio: Tensor, sampleRate: Tensor) { - _RawTFEager.decodeWav( - contents: contents, desiredChannels: desiredChannels, desiredSamples: desiredSamples) - } - - /// Makes a copy of `x`. - /// - /// - Parameter x: The source tensor of type `T`. - /// - /// - Output y: y: A `Tensor` of type `T`. A copy of `x`. Guaranteed that `y` - /// is not an alias of `x`. - @inlinable @inline(__always) - public static func deepCopy( - _ x: Tensor - ) -> Tensor { - switch x.handle.backend { - case .XLA: - let output_device = x.device - let x = Tensor(copying: x, to: .defaultTFEager) - return Tensor(copying: _RawTFEager.deepCopy(x), to: output_device) - case .TF_EAGER: - return _RawTFEager.deepCopy(x) - } - - } - - /// A container for an iterator resource. - /// - /// - Parameters: - /// - handle: A handle to the iterator to delete. - /// - deleter: A variant deleter. - @inlinable @inline(__always) - public static func deleteIterator( - handle: ResourceHandle, - deleter: VariantHandle - ) { - _RawTFEager.deleteIterator(handle: handle, deleter: deleter) + } + + /// Generates labels for candidate sampling with a learned unigram distribution. + /// + /// See explanations of candidate sampling and the data formats at + /// go/candidate-sampling. + /// + /// For each batch, this op picks a single set of sampled candidate labels. + /// + /// The advantages of sampling candidates per-batch are simplicity and the + /// possibility of efficient dense matrix multiplication. The disadvantage is that + /// the sampled candidates must be chosen independently of the context and of the + /// true labels. + /// + /// - Parameter true_classes: A batch_size * num_true matrix, in which each row contains the + /// IDs of the num_true target_classes in the corresponding original label. + /// + /// - Attrs: + /// - num_true: Number of true labels per context. + /// - num_sampled: Number of candidates to produce. 
+  ///    - unique: If unique is true, we sample with rejection, so that all sampled
+  ///      candidates in a batch are unique. This requires some approximation to
+  ///      estimate the post-rejection sampling probabilities.
+  ///    - seed: If either seed or seed2 is set to be non-zero, the random number
+  ///      generator is seeded by the given seed. Otherwise, it is seeded by a
+  ///      random seed.
+  ///    - seed2: A second seed to avoid seed collision.
+  ///
+  /// - Outputs:
+  ///    - sampled_candidates: A vector of length num_sampled, in which each element is
+  ///      the ID of a sampled candidate.
+  ///    - true_expected_count: A batch_size * num_true matrix, representing
+  ///      the number of times each candidate is expected to occur in a batch
+  ///      of sampled candidates. If unique=true, then this is a probability.
+  ///    - sampled_expected_count: A vector of length num_sampled, for each sampled
+  ///      candidate representing the number of times the candidate is expected
+  ///      to occur in a batch of sampled candidates. If unique=true, then this is a
+  ///      probability.
+  @inlinable @inline(__always)
+  public static func allCandidateSampler(
+    trueClasses: Tensor,
+    numTrue: Int64,
+    numSampled: Int64,
+    unique: Bool,
+    seed: Int64 = 0,
+    seed2: Int64 = 0
+  ) -> (
+    sampledCandidates: Tensor, trueExpectedCount: Tensor,
+    sampledExpectedCount: Tensor
+  ) {
+    _RawTFEager.allCandidateSampler(
+      trueClasses: trueClasses, numTrue: numTrue, numSampled: numSampled, unique: unique,
+      seed: seed, seed2: seed2)
+  }
+
+  /// An Op to exchange data across TPU replicas.
+  ///
+  /// On each replica, the input is split into `split_count` blocks along
+  /// `split_dimension` and sent to the other replicas given `group_assignment`. After
+  /// receiving `split_count` - 1 blocks from other replicas, we concatenate the
+  /// blocks along `concat_dimension` as the output.
+  ///
+  /// For example, suppose there are 2 TPU replicas:
+  /// replica 0 receives input: `[[A, B]]`
+  /// replica 1 receives input: `[[C, D]]`
+  ///
+  /// group_assignment=`[[0, 1]]`
+  /// concat_dimension=0
+  /// split_dimension=1
+  /// split_count=2
+  ///
+  /// replica 0's output: `[[A], [C]]`
+  /// replica 1's output: `[[B], [D]]`
+  ///
+  /// - Parameters:
+  ///    - input: The local input to the sum.
+  ///    - group_assignment: An int32 tensor with shape
+  ///      [num_groups, num_replicas_per_group]. `group_assignment[i]` represents the
+  ///      replica ids in the ith subgroup.
+  ///
+  /// - Attrs:
+  ///    - T: The type of elements to be exchanged.
+  ///    - concat_dimension: The dimension number to concatenate.
+  ///    - split_dimension: The dimension number to split.
+  ///    - split_count: The number of splits; this number must equal the sub-group
+  ///      size (group_assignment.get_shape()[1]).
+  ///
+  /// - Output output: The exchanged result.
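`allToAll` below is TPU-only, but `allCandidateSampler` above runs eagerly, so a short usage sketch fits here. This is a minimal sketch; the concrete labels and sample counts are assumptions, and the comments only restate the shape contract from the doc comment:

```swift
import TensorFlow

// Two examples, one true label each; ask for 3 candidates without uniqueness.
let trueClasses = Tensor<Int64>(shape: [2, 1], scalars: [3, 7])
let (sampled, trueExpected, sampledExpected) = _Raw.allCandidateSampler(
  trueClasses: trueClasses, numTrue: 1, numSampled: 3, unique: false, seed: 42)
print(sampled.shape)          // [3]    — one ID per sampled candidate
print(trueExpected.shape)     // [2, 1] — batch_size * num_true
print(sampledExpected.shape)  // [3]    — one expected count per candidate
```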
+ @inlinable @inline(__always) + public static func allToAll( + _ input: Tensor, + groupAssignment: Tensor, + concatDimension: Int64, + splitDimension: Int64, + splitCount: Int64 + ) -> Tensor { + switch commonBackend(input.handle.backend, groupAssignment.handle.backend) { + case .XLA: + let output_device = groupAssignment.device + let input = Tensor(copying: input, to: .defaultTFEager) + let groupAssignment = Tensor(copying: groupAssignment, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.allToAll( + input, groupAssignment: groupAssignment, concatDimension: concatDimension, + splitDimension: splitDimension, splitCount: splitCount), to: output_device) + case .TF_EAGER: + return _RawTFEager.allToAll( + input, groupAssignment: groupAssignment, concatDimension: concatDimension, + splitDimension: splitDimension, splitCount: splitCount) } - @inlinable @inline(__always) - public static func deleteMemoryCache( - handle: ResourceHandle, - deleter: VariantHandle - ) { - _RawTFEager.deleteMemoryCache(handle: handle, deleter: deleter) - } - - /// A container for an iterator resource. - /// - /// - Parameters: - /// - multi_device_iterator: A handle to the multi device iterator to delete. - /// - iterators: A list of iterator handles (unused). This is added so that automatic control dependencies get added during function tracing that ensure this op runs after all the dependent iterators are deleted. - /// - deleter: A variant deleter. - @inlinable @inline(__always) - public static func deleteMultiDeviceIterator( - multiDeviceIterator: ResourceHandle, - iterators: [ResourceHandle], - deleter: VariantHandle - ) { - _RawTFEager.deleteMultiDeviceIterator( - multiDeviceIterator: multiDeviceIterator, iterators: iterators, deleter: deleter) + } + + /// Returns the argument of a complex number. + /// + /// Given a tensor `input` of complex numbers, this operation returns a tensor of + /// type `float` that is the argument of each element in `input`. All elements in + /// `input` must be complex numbers of the form \\(a + bj\\), where *a* + /// is the real part and *b* is the imaginary part. + /// + /// The argument returned by this operation is of the form \\(atan2(b, a)\\). + /// + /// For example: + /// + /// ``` + /// # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] + /// tf.angle(input) ==> [2.0132, 1.056] + /// ``` + /// + /// @compatibility(numpy) + /// Equivalent to np.angle. + /// @end_compatibility + @inlinable @inline(__always) + public static func angle< + T: TensorFlowScalar, + Tout: FloatingPoint & TensorFlowScalar + >( + _ input: Tensor + ) -> Tensor { + switch input.handle.backend { + case .XLA: + let output_device = input.device + let input = Tensor(copying: input, to: .defaultTFEager) + return Tensor(copying: _RawTFEager.angle(input), to: output_device) + case .TF_EAGER: + return _RawTFEager.angle(input) } - @inlinable @inline(__always) - public static func deleteRandomSeedGenerator( - handle: ResourceHandle, - deleter: VariantHandle - ) { - _RawTFEager.deleteRandomSeedGenerator(handle: handle, deleter: deleter) + } + + /// A container for an iterator resource. + /// + /// - Output handle: A handle to the iterator that can be passed to a "MakeIterator" or + /// "IteratorGetNext" op. In contrast to Iterator, AnonymousIterator prevents + /// resource sharing by name, and does not keep a reference to the resource + /// container. + @inlinable @inline(__always) + public static func anonymousIterator( + outputTypes: [TensorDataType], + outputShapes: [TensorShape?] 
+ ) -> ResourceHandle { + _RawTFEager.anonymousIterator(outputTypes: outputTypes, outputShapes: outputShapes) + } + + /// A container for an iterator resource. + /// + /// - Outputs: + /// - handle: A handle to the iterator that can be passed to a "MakeIterator" or + /// "IteratorGetNext" op. In contrast to Iterator, AnonymousIterator prevents + /// resource sharing by name, and does not keep a reference to the resource + /// container. + /// - deleter: A variant deleter that should be passed into the op that deletes the iterator. + @inlinable @inline(__always) + public static func anonymousIteratorV2( + outputTypes: [TensorDataType], + outputShapes: [TensorShape?] + ) -> (handle: ResourceHandle, deleter: VariantHandle) { + _RawTFEager.anonymousIteratorV2(outputTypes: outputTypes, outputShapes: outputShapes) + } + + @inlinable @inline(__always) + public static func anonymousMemoryCache() -> (handle: ResourceHandle, deleter: VariantHandle) { + _RawTFEager.anonymousMemoryCache() + } + + /// A container for a multi device iterator resource. + /// + /// - Outputs: + /// - handle: A handle to a multi device iterator that can be passed to a + /// "MultiDeviceIteratorGetNextFromShard" op. In contrast to MultiDeviceIterator, + /// AnonymousIterator prevents resource sharing by name, and does not keep a + /// reference to the resource container. + /// - deleter: A variant deleter that should be passed into the op that deletes the iterator. + @inlinable @inline(__always) + public static func anonymousMultiDeviceIterator( + devices: [String], + outputTypes: [TensorDataType], + outputShapes: [TensorShape?] + ) -> (handle: ResourceHandle, deleter: VariantHandle) { + _RawTFEager.anonymousMultiDeviceIterator( + devices: devices, outputTypes: outputTypes, outputShapes: outputShapes) + } + + @inlinable @inline(__always) + public static func anonymousRandomSeedGenerator( + seed: Tensor, + seed2: Tensor + ) -> (handle: ResourceHandle, deleter: VariantHandle) { + _RawTFEager.anonymousRandomSeedGenerator(seed: seed, seed2: seed2) + } + + /// Computes the "logical or" of elements across dimensions of a tensor. + /// + /// Reduces `input` along the dimensions given in `axis`. Unless + /// `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in + /// `axis`. If `keep_dims` is true, the reduced dimensions are + /// retained with length 1. + /// + /// - Parameters: + /// - input: The tensor to reduce. + /// - reduction_indices: The dimensions to reduce. Must be in the range + /// `[-rank(input), rank(input))`. + /// + /// - Attr keep_dims: If true, retain reduced dimensions with length 1. + /// + /// - Output output: The reduced tensor. + @inlinable @inline(__always) + public static func any( + _ input: Tensor, + reductionIndices: Tensor, + keepDims: Bool = false + ) -> Tensor { + switch commonBackend(input.handle.backend, reductionIndices.handle.backend) { + case .XLA: + return _RawXLA.any(input, reductionIndices: reductionIndices, keepDims: keepDims) + case .TF_EAGER: + return _RawTFEager.any(input, reductionIndices: reductionIndices, keepDims: keepDims) } - /// Delete the tensor specified by its handle in the session. - /// - /// - Parameter handle: The handle for a tensor stored in the session state. - @inlinable @inline(__always) - public static func deleteSessionTensor( - handle: StringTensor - ) { - _RawTFEager.deleteSessionTensor(handle: handle) - } - - /// Converts a dense tensor to a (possibly batched) CSRSparseMatrix. - /// - /// - Parameters: - /// - dense_input: A Dense tensor. 
- /// - indices: Indices of nonzero elements. - /// - /// - Output sparse_output: A (possibly batched) CSRSparseMatrix. - @inlinable @inline(__always) - public static func denseToCSRSparseMatrix( - denseInput: Tensor, - indices: Tensor - ) -> VariantHandle { - _RawTFEager.denseToCSRSparseMatrix(denseInput: denseInput, indices: indices) - } - - /// Applies set operation along last dimension of 2 `Tensor` inputs. - /// - /// See SetOperationOp::SetOperationFromContext for values of `set_operation`. - /// - /// Output `result` is a `SparseTensor` represented by `result_indices`, - /// `result_values`, and `result_shape`. For `set1` and `set2` ranked `n`, this - /// has rank `n` and the same 1st `n-1` dimensions as `set1` and `set2`. The `nth` - /// dimension contains the result of `set_operation` applied to the corresponding - /// `[0...n-1]` dimension of `set`. - /// - /// - Parameters: - /// - set1: `Tensor` with rank `n`. 1st `n-1` dimensions must be the same as `set2`. - /// Dimension `n` contains values in a set, duplicates are allowed but ignored. - /// - set2: `Tensor` with rank `n`. 1st `n-1` dimensions must be the same as `set1`. - /// Dimension `n` contains values in a set, duplicates are allowed but ignored. - /// - /// - Outputs: - /// - result_indices: 2D indices of a `SparseTensor`. - /// - result_values: 1D values of a `SparseTensor`. - /// - result_shape: 1D `Tensor` shape of a `SparseTensor`. `result_shape[0...n-1]` is - /// the same as the 1st `n-1` dimensions of `set1` and `set2`, `result_shape[n]` - /// is the max result set size across all `0...n-1` dimensions. - @inlinable @inline(__always) - public static func denseToDenseSetOperation( - set1: Tensor, - set2: Tensor, - setOperation: String, - validateIndices: Bool = true - ) -> (resultIndices: Tensor, resultValues: Tensor, resultShape: Tensor) { - _RawTFEager.denseToDenseSetOperation( - set1: set1, set2: set2, setOperation: setOperation, validateIndices: validateIndices) - } - - /// Applies set operation along last dimension of 2 `Tensor` inputs. - /// - /// See SetOperationOp::SetOperationFromContext for values of `set_operation`. - /// - /// Output `result` is a `SparseTensor` represented by `result_indices`, - /// `result_values`, and `result_shape`. For `set1` and `set2` ranked `n`, this - /// has rank `n` and the same 1st `n-1` dimensions as `set1` and `set2`. The `nth` - /// dimension contains the result of `set_operation` applied to the corresponding - /// `[0...n-1]` dimension of `set`. - /// - /// - Parameters: - /// - set1: `Tensor` with rank `n`. 1st `n-1` dimensions must be the same as `set2`. - /// Dimension `n` contains values in a set, duplicates are allowed but ignored. - /// - set2: `Tensor` with rank `n`. 1st `n-1` dimensions must be the same as `set1`. - /// Dimension `n` contains values in a set, duplicates are allowed but ignored. - /// - /// - Outputs: - /// - result_indices: 2D indices of a `SparseTensor`. - /// - result_values: 1D values of a `SparseTensor`. - /// - result_shape: 1D `Tensor` shape of a `SparseTensor`. `result_shape[0...n-1]` is - /// the same as the 1st `n-1` dimensions of `set1` and `set2`, `result_shape[n]` - /// is the max result set size across all `0...n-1` dimensions. 
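Before the `StringTensor` overload is declared, a hedged sketch of the numeric `denseToDenseSetOperation` overload above may help; the `"intersection"` operation name and the sample values are assumptions (TensorFlow's set ops also accept `"a-b"`, `"b-a"`, and `"union"`):

```swift
import TensorFlow

// Row-wise set intersection along the last dimension:
// {1,2,3} ∩ {3,2,9} = {2,3} and {4,5,6} ∩ {7,8,6} = {6}.
let a = Tensor<Int32>(shape: [2, 3], scalars: [1, 2, 3, 4, 5, 6])
let b = Tensor<Int32>(shape: [2, 3], scalars: [3, 2, 9, 7, 8, 6])
let (indices, values, shape) = _Raw.denseToDenseSetOperation(
  set1: a, set2: b, setOperation: "intersection")
print(values)         // expected: [2, 3, 6] — per-row results as SparseTensor values
print(shape)          // expected: [2, 2]    — N rows × max per-row result size
print(indices.shape)  // expected: [3, 2]
```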
- @inlinable @inline(__always) - public static func denseToDenseSetOperation( - set1: StringTensor, - set2: StringTensor, - setOperation: String, - validateIndices: Bool = true - ) -> (resultIndices: Tensor, resultValues: StringTensor, resultShape: Tensor) { - _RawTFEager.denseToDenseSetOperation( - set1: set1, set2: set2, setOperation: setOperation, validateIndices: validateIndices) - } - - /// Creates a dataset that batches input elements into a SparseTensor. - /// - /// - Parameters: - /// - input_dataset: A handle to an input dataset. Must have a single component. - /// - batch_size: A scalar representing the number of elements to accumulate in a - /// batch. - /// - row_shape: A vector representing the dense shape of each row in the produced - /// SparseTensor. The shape may be partially specified, using `-1` to indicate - /// that a particular dimension should use the maximum size of all batch elements. - @inlinable @inline(__always) - public static func denseToSparseBatchDataset( - inputDataset: VariantHandle, - batchSize: Tensor, - rowShape: Tensor, - outputTypes: [TensorDataType], - outputShapes: [TensorShape?] - ) -> VariantHandle { - _RawTFEager.denseToSparseBatchDataset( - inputDataset: inputDataset, batchSize: batchSize, rowShape: rowShape, - outputTypes: outputTypes, outputShapes: outputShapes) - } - - /// Applies set operation along last dimension of `Tensor` and `SparseTensor`. - /// - /// See SetOperationOp::SetOperationFromContext for values of `set_operation`. - /// - /// Input `set2` is a `SparseTensor` represented by `set2_indices`, `set2_values`, - /// and `set2_shape`. For `set2` ranked `n`, 1st `n-1` dimensions must be the same - /// as `set1`. Dimension `n` contains values in a set, duplicates are allowed but - /// ignored. - /// - /// If `validate_indices` is `True`, this op validates the order and range of `set2` - /// indices. - /// - /// Output `result` is a `SparseTensor` represented by `result_indices`, - /// `result_values`, and `result_shape`. For `set1` and `set2` ranked `n`, this - /// has rank `n` and the same 1st `n-1` dimensions as `set1` and `set2`. The `nth` - /// dimension contains the result of `set_operation` applied to the corresponding - /// `[0...n-1]` dimension of `set`. - /// - /// - Parameters: - /// - set1: `Tensor` with rank `n`. 1st `n-1` dimensions must be the same as `set2`. - /// Dimension `n` contains values in a set, duplicates are allowed but ignored. - /// - set2_indices: 2D `Tensor`, indices of a `SparseTensor`. Must be in row-major - /// order. - /// - set2_values: 1D `Tensor`, values of a `SparseTensor`. Must be in row-major - /// order. - /// - set2_shape: 1D `Tensor`, shape of a `SparseTensor`. `set2_shape[0...n-1]` must - /// be the same as the 1st `n-1` dimensions of `set1`, `result_shape[n]` is the - /// max set size across `n-1` dimensions. - /// - /// - Outputs: - /// - result_indices: 2D indices of a `SparseTensor`. - /// - result_values: 1D values of a `SparseTensor`. - /// - result_shape: 1D `Tensor` shape of a `SparseTensor`. `result_shape[0...n-1]` is - /// the same as the 1st `n-1` dimensions of `set1` and `set2`, `result_shape[n]` - /// is the max result set size across all `0...n-1` dimensions. 
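For the dense/sparse variant declared next, the second operand arrives pre-decomposed as an indices/values/shape triple. A hedged companion sketch (again assuming `"intersection"`; the encoded rows are {2, 3} and {6}):

```swift
import TensorFlow

// `set2` encodes the rows {2, 3} and {6} as a SparseTensor triple,
// with indices in row-major order as the doc comment requires.
let set1 = Tensor<Int32>(shape: [2, 3], scalars: [1, 2, 3, 4, 5, 6])
let set2Indices = Tensor<Int64>(shape: [3, 2], scalars: [0, 0, 0, 1, 1, 0])
let set2Values = Tensor<Int32>([2, 3, 6])
let set2Shape = Tensor<Int64>([2, 2])
let (_, values, _) = _Raw.denseToSparseSetOperation(
  set1: set1, set2Indices: set2Indices, set2Values: set2Values,
  set2Shape: set2Shape, setOperation: "intersection")
print(values)  // expected: [2, 3, 6]
```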
- @inlinable @inline(__always) - public static func denseToSparseSetOperation( - set1: Tensor, - set2Indices: Tensor, - set2Values: Tensor, - set2Shape: Tensor, - setOperation: String, - validateIndices: Bool = true - ) -> (resultIndices: Tensor, resultValues: Tensor, resultShape: Tensor) { - _RawTFEager.denseToSparseSetOperation( - set1: set1, set2Indices: set2Indices, set2Values: set2Values, set2Shape: set2Shape, - setOperation: setOperation, validateIndices: validateIndices) - } - - /// Applies set operation along last dimension of `Tensor` and `SparseTensor`. - /// - /// See SetOperationOp::SetOperationFromContext for values of `set_operation`. - /// - /// Input `set2` is a `SparseTensor` represented by `set2_indices`, `set2_values`, - /// and `set2_shape`. For `set2` ranked `n`, 1st `n-1` dimensions must be the same - /// as `set1`. Dimension `n` contains values in a set, duplicates are allowed but - /// ignored. - /// - /// If `validate_indices` is `True`, this op validates the order and range of `set2` - /// indices. - /// - /// Output `result` is a `SparseTensor` represented by `result_indices`, - /// `result_values`, and `result_shape`. For `set1` and `set2` ranked `n`, this - /// has rank `n` and the same 1st `n-1` dimensions as `set1` and `set2`. The `nth` - /// dimension contains the result of `set_operation` applied to the corresponding - /// `[0...n-1]` dimension of `set`. - /// - /// - Parameters: - /// - set1: `Tensor` with rank `n`. 1st `n-1` dimensions must be the same as `set2`. - /// Dimension `n` contains values in a set, duplicates are allowed but ignored. - /// - set2_indices: 2D `Tensor`, indices of a `SparseTensor`. Must be in row-major - /// order. - /// - set2_values: 1D `Tensor`, values of a `SparseTensor`. Must be in row-major - /// order. - /// - set2_shape: 1D `Tensor`, shape of a `SparseTensor`. `set2_shape[0...n-1]` must - /// be the same as the 1st `n-1` dimensions of `set1`, `result_shape[n]` is the - /// max set size across `n-1` dimensions. - /// - /// - Outputs: - /// - result_indices: 2D indices of a `SparseTensor`. - /// - result_values: 1D values of a `SparseTensor`. - /// - result_shape: 1D `Tensor` shape of a `SparseTensor`. `result_shape[0...n-1]` is - /// the same as the 1st `n-1` dimensions of `set1` and `set2`, `result_shape[n]` - /// is the max result set size across all `0...n-1` dimensions. - @inlinable @inline(__always) - public static func denseToSparseSetOperation( - set1: StringTensor, - set2Indices: Tensor, - set2Values: StringTensor, - set2Shape: Tensor, - setOperation: String, - validateIndices: Bool = true - ) -> (resultIndices: Tensor, resultValues: StringTensor, resultShape: Tensor) { - _RawTFEager.denseToSparseSetOperation( - set1: set1, set2Indices: set2Indices, set2Values: set2Values, set2Shape: set2Shape, - setOperation: setOperation, validateIndices: validateIndices) - } - - /// DepthToSpace for tensors of type T. - /// - /// Rearranges data from depth into blocks of spatial data. - /// This is the reverse transformation of SpaceToDepth. More specifically, - /// this op outputs a copy of the input tensor where values from the `depth` - /// dimension are moved in spatial blocks to the `height` and `width` dimensions. - /// The attr `block_size` indicates the input block size and how the data is moved. 
-    ///
-    /// * Chunks of data of size `block_size * block_size` from depth are rearranged
-    ///   into non-overlapping blocks of size `block_size x block_size`
-    /// * The width of the output tensor is `input_width * block_size`, whereas the
-    ///   height is `input_height * block_size`.
-    /// * The Y, X coordinates within each block of the output image are determined
-    ///   by the high order component of the input channel index.
-    /// * The depth of the input tensor must be divisible by
-    ///   `block_size * block_size`.
-    ///
-    /// The `data_format` attr specifies the layout of the input and output tensors
-    /// with the following options:
-    ///   "NHWC": `[ batch, height, width, channels ]`
-    ///   "NCHW": `[ batch, channels, height, width ]`
-    ///   "NCHW_VECT_C":
-    ///     `qint8 [ batch, channels / 4, height, width, 4 ]`
-    ///
-    /// It is useful to consider the operation as transforming a 6-D Tensor.
-    /// e.g. for data_format = NHWC,
-    ///  Each element in the input tensor can be specified via 6 coordinates,
-    ///  ordered by decreasing memory layout significance as:
-    ///  n,iY,iX,bY,bX,oC  (where n=batch index, iX, iY means X or Y coordinates
-    ///                     within the input image, bX, bY means coordinates
-    ///                     within the output block, oC means output channels).
-    ///  The output would be the input transposed to the following layout:
-    ///  n,iY,bY,iX,bX,oC
-    ///
-    /// This operation is useful for resizing the activations between convolutions
-    /// (but keeping all data), e.g. instead of pooling. It is also useful for training
-    /// purely convolutional models.
-    ///
-    /// For example, given an input of shape `[1, 1, 1, 4]`, data_format = "NHWC" and
-    /// block_size = 2:
-    ///
-    /// ```
-    /// x = [[[[1, 2, 3, 4]]]]
-    ///
-    /// ```
-    ///
-    /// This operation will output a tensor of shape `[1, 2, 2, 1]`:
-    ///
-    /// ```
-    ///    [[[[1], [2]],
-    ///      [[3], [4]]]]
-    /// ```
-    ///
-    /// Here, the input has a batch of 1 and each batch element has shape `[1, 1, 4]`,
-    /// the corresponding output will have 2x2 elements and will have a depth of
-    /// 1 channel (1 = `4 / (block_size * block_size)`).
-    /// The output element shape is `[2, 2, 1]`.
-    ///
-    /// For an input tensor with larger depth, here of shape `[1, 1, 1, 12]`, e.g.
-    ///
-    /// ```
-    /// x = [[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]]
-    /// ```
-    ///
-    /// This operation, for block size of 2, will return the following tensor of shape
-    /// `[1, 2, 2, 3]`
-    ///
-    /// ```
-    ///    [[[[1, 2, 3], [4, 5, 6]],
-    ///      [[7, 8, 9], [10, 11, 12]]]]
-    ///
-    /// ```
-    ///
-    /// Similarly, for the following input of shape `[1 2 2 4]`, and a block size of 2:
-    ///
-    /// ```
-    /// x = [[[[1, 2, 3, 4],
-    ///        [5, 6, 7, 8]],
-    ///       [[9, 10, 11, 12],
-    ///        [13, 14, 15, 16]]]]
-    /// ```
-    ///
-    /// the operator will return the following tensor of shape `[1 4 4 1]`:
-    ///
-    /// ```
-    /// x = [[[ [1],   [2],  [5],  [6]],
-    ///       [ [3],   [4],  [7],  [8]],
-    ///       [ [9],  [10], [13],  [14]],
-    ///       [ [11], [12], [15],  [16]]]]
-    ///
-    /// ```
-    ///
-    /// - Attr block_size: The size of the spatial block, same as in Space2Depth.
-    @inlinable @inline(__always)
-    public static func depthToSpace(
-      _ input: Tensor,
-      blockSize: Int64,
-      dataFormat: DataFormat2 = .nhwc
-    ) -> Tensor {
-      switch input.handle.backend {
-      case .XLA:
-        let output_device = input.device
-        let input = Tensor(copying: input, to: .defaultTFEager)
-        return Tensor(
-          copying: _RawTFEager.depthToSpace(input, blockSize: blockSize, dataFormat: dataFormat),
-          to: output_device)
-      case .TF_EAGER:
-        return _RawTFEager.depthToSpace(input, blockSize: blockSize, dataFormat: dataFormat)
-      }
-
-    }
-
-    /// Computes a 2-D depthwise convolution given 4-D `input` and `filter` tensors.
-    ///
-    /// Given an input tensor of shape `[batch, in_height, in_width, in_channels]`
-    /// and a filter / kernel tensor of shape
-    /// `[filter_height, filter_width, in_channels, channel_multiplier]`, containing
-    /// `in_channels` convolutional filters of depth 1, `depthwise_conv2d` applies
-    /// a different filter to each input channel (expanding from 1 channel to
-    /// `channel_multiplier` channels for each), then concatenates the results
-    /// together. Thus, the output has `in_channels * channel_multiplier` channels.
-    ///
-    /// ```
-    /// for k in 0..in_channels-1
-    ///   for q in 0..channel_multiplier-1
-    ///     output[b, i, j, k * channel_multiplier + q] =
-    ///       sum_{di, dj} input[b, strides[1] * i + di, strides[2] * j + dj, k] *
-    ///                         filter[di, dj, k, q]
-    /// ```
-    ///
-    /// Must have `strides[0] = strides[3] = 1`. For the most common case of the same
-    /// horizontal and vertical strides, `strides = [1, stride, stride, 1]`.
-    ///
-    /// - Attrs:
-    ///   - strides: 1-D of length 4. The stride of the sliding window for each dimension
-    ///     of `input`.
-    ///   - padding: The type of padding algorithm to use.
-    ///   - data_format: Specify the data format of the input and output data. With the
-    ///     default format "NHWC", the data is stored in the order of:
-    ///     [batch, height, width, channels].
-    ///     Alternatively, the format could be "NCHW", the data storage order of:
-    ///     [batch, channels, height, width].
-    ///   - dilations: 1-D tensor of length 4. The dilation factor for each dimension of
-    ///     `input`. If set to k > 1, there will be k-1 skipped cells between each filter
-    ///     element on that dimension. The dimension order is determined by the value of
-    ///     `data_format`, see above for details. Dilations in the batch and depth
-    ///     dimensions must be 1.
-    @inlinable @inline(__always)
-    public static func depthwiseConv2dNative(
-      _ input: Tensor,
-      filter: Tensor,
-      strides: [Int32],
-      padding: Padding,
-      dataFormat: DataFormat = .nhwc,
-      dilations: [Int32] = [1, 1, 1, 1]
-    ) -> Tensor {
-      switch commonBackend(input.handle.backend, filter.handle.backend) {
-      case .XLA:
-        return _RawXLA.depthwiseConv2dNative(
-          input, filter: filter, strides: strides, padding: padding, dataFormat: dataFormat,
-          dilations: dilations)
-      case .TF_EAGER:
-        return _RawTFEager.depthwiseConv2dNative(
-          input, filter: filter, strides: strides, padding: padding, dataFormat: dataFormat,
-          dilations: dilations)
-      }
-
-    }
-
-    /// Computes the gradients of depthwise convolution with respect to the filter.
-    ///
-    /// - Parameters:
-    ///   - input: 4-D with shape based on `data_format`. For example, if
-    ///     `data_format` is 'NHWC' then `input` is a 4-D `[batch, in_height,
-    ///     in_width, in_channels]` tensor.
-    ///   - filter_sizes: An integer vector representing the tensor shape of `filter`,
-    ///     where `filter` is a 4-D
-    ///     `[filter_height, filter_width, in_channels, depthwise_multiplier]` tensor.
- /// - out_backprop: 4-D with shape based on `data_format`. - /// For example, if `data_format` is 'NHWC' then - /// out_backprop shape is `[batch, out_height, out_width, out_channels]`. - /// Gradients w.r.t. the output of the convolution. - /// - /// - Attrs: - /// - strides: The stride of the sliding window for each dimension of the input - /// of the convolution. - /// - padding: The type of padding algorithm to use. - /// - data_format: Specify the data format of the input and output data. With the - /// default format "NHWC", the data is stored in the order of: - /// [batch, height, width, channels]. - /// Alternatively, the format could be "NCHW", the data storage order of: - /// [batch, channels, height, width]. - /// - dilations: 1-D tensor of length 4. The dilation factor for each dimension of - /// `input`. If set to k > 1, there will be k-1 skipped cells between each filter - /// element on that dimension. The dimension order is determined by the value of - /// `data_format`, see above for details. Dilations in the batch and depth - /// dimensions must be 1. - /// - /// - Output output: 4-D with shape - /// `[filter_height, filter_width, in_channels, out_channels]`. Gradient w.r.t. - /// the `filter` input of the convolution. - @inlinable @inline(__always) - public static func depthwiseConv2dNativeBackpropFilter( - _ input: Tensor, - filterSizes: Tensor, - outBackprop: Tensor, - strides: [Int32], - padding: Padding, - dataFormat: DataFormat = .nhwc, - dilations: [Int32] = [1, 1, 1, 1] - ) -> Tensor { - switch commonBackend( - commonBackend(input.handle.backend, filterSizes.handle.backend), outBackprop.handle.backend) - { - case .XLA: - return _RawXLA.depthwiseConv2dNativeBackpropFilter( - input, filterSizes: filterSizes, outBackprop: outBackprop, strides: strides, - padding: padding, dataFormat: dataFormat, dilations: dilations) - case .TF_EAGER: - return _RawTFEager.depthwiseConv2dNativeBackpropFilter( - input, filterSizes: filterSizes, outBackprop: outBackprop, strides: strides, - padding: padding, dataFormat: dataFormat, dilations: dilations) - } - - } - - /// Computes the gradients of depthwise convolution with respect to the input. - /// - /// - Parameters: - /// - input_sizes: An integer vector representing the shape of `input`, based - /// on `data_format`. For example, if `data_format` is 'NHWC' then - /// `input` is a 4-D `[batch, height, width, channels]` tensor. - /// - filter: 4-D with shape - /// `[filter_height, filter_width, in_channels, depthwise_multiplier]`. - /// - out_backprop: 4-D with shape based on `data_format`. - /// For example, if `data_format` is 'NHWC' then - /// out_backprop shape is `[batch, out_height, out_width, out_channels]`. - /// Gradients w.r.t. the output of the convolution. - /// - /// - Attrs: - /// - strides: The stride of the sliding window for each dimension of the input - /// of the convolution. - /// - padding: The type of padding algorithm to use. - /// - data_format: Specify the data format of the input and output data. With the - /// default format "NHWC", the data is stored in the order of: - /// [batch, height, width, channels]. - /// Alternatively, the format could be "NCHW", the data storage order of: - /// [batch, channels, height, width]. - /// - dilations: 1-D tensor of length 4. The dilation factor for each dimension of - /// `input`. If set to k > 1, there will be k-1 skipped cells between each filter - /// element on that dimension. 
The dimension order is determined by the value of - /// `data_format`, see above for details. Dilations in the batch and depth - /// dimensions must be 1. - /// - /// - Output output: 4-D with shape according to `data_format`. For example, if - /// `data_format` is 'NHWC', output shape is `[batch, in_height, - /// in_width, in_channels]`. Gradient w.r.t. the input of the - /// convolution. - @inlinable @inline(__always) - public static func depthwiseConv2dNativeBackpropInput( - inputSizes: Tensor, - filter: Tensor, - outBackprop: Tensor, - strides: [Int32], - padding: Padding, - dataFormat: DataFormat = .nhwc, - dilations: [Int32] = [1, 1, 1, 1] - ) -> Tensor { - switch commonBackend( - commonBackend(inputSizes.handle.backend, filter.handle.backend), outBackprop.handle.backend) - { - case .XLA: - return _RawXLA.depthwiseConv2dNativeBackpropInput( - inputSizes: inputSizes, filter: filter, outBackprop: outBackprop, strides: strides, - padding: padding, dataFormat: dataFormat, dilations: dilations) - case .TF_EAGER: - return _RawTFEager.depthwiseConv2dNativeBackpropInput( - inputSizes: inputSizes, filter: filter, outBackprop: outBackprop, strides: strides, - padding: padding, dataFormat: dataFormat, dilations: dilations) - } - - } - - /// - /// - Parameters: - /// - min_range: The minimum scalar value possibly produced for the input. - /// - max_range: The maximum scalar value possibly produced for the input. - @inlinable @inline(__always) - public static func dequantize( - _ input: Tensor, - minRange: Tensor, - maxRange: Tensor, - mode: Mode = .minCombined, - narrowRange: Bool = false, - axis: Int64 = -1 - ) -> Tensor { - switch commonBackend( - commonBackend(input.handle.backend, minRange.handle.backend), maxRange.handle.backend) - { - case .XLA: - let output_device = maxRange.device - let input = Tensor(copying: input, to: .defaultTFEager) - let minRange = Tensor(copying: minRange, to: .defaultTFEager) - let maxRange = Tensor(copying: maxRange, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.dequantize( - input, minRange: minRange, maxRange: maxRange, mode: mode, narrowRange: narrowRange, - axis: axis), to: output_device) - case .TF_EAGER: - return _RawTFEager.dequantize( - input, minRange: minRange, maxRange: maxRange, mode: mode, narrowRange: narrowRange, - axis: axis) - } + } + /// Returns the truth value of abs(x-y) < tolerance element-wise. + @inlinable @inline(__always) + public static func approximateEqual( + _ x: Tensor, + _ y: Tensor, + tolerance: Double = 1e-05 + ) -> Tensor { + switch commonBackend(x.handle.backend, y.handle.backend) { + case .XLA: + return _RawXLA.approximateEqual(x, y, tolerance: tolerance) + case .TF_EAGER: + return _RawTFEager.approximateEqual(x, y, tolerance: tolerance) } - /// Converts the given variant tensor to an iterator and stores it in the given resource. - /// - /// - Parameters: - /// - resource_handle: A handle to an iterator resource. - /// - serialized: A variant tensor storing the state of the iterator contained in the - /// resource. - @inlinable @inline(__always) - public static func deserializeIterator( - resourceHandle: ResourceHandle, - serialized: VariantHandle - ) { - _RawTFEager.deserializeIterator(resourceHandle: resourceHandle, serialized: serialized) - } - - /// Deserialize and concatenate `SparseTensors` from a serialized minibatch. 
- /// - /// The input `serialized_sparse` must be a string matrix of shape `[N x 3]` where - /// `N` is the minibatch size and the rows correspond to packed outputs of - /// `SerializeSparse`. The ranks of the original `SparseTensor` objects - /// must all match. When the final `SparseTensor` is created, it has rank one - /// higher than the ranks of the incoming `SparseTensor` objects - /// (they have been concatenated along a new row dimension). - /// - /// The output `SparseTensor` object's shape values for all dimensions but the - /// first are the max across the input `SparseTensor` objects' shape values - /// for the corresponding dimensions. Its first shape value is `N`, the minibatch - /// size. - /// - /// The input `SparseTensor` objects' indices are assumed ordered in - /// standard lexicographic order. If this is not the case, after this - /// step run `SparseReorder` to restore index ordering. - /// - /// For example, if the serialized input is a `[2 x 3]` matrix representing two - /// original `SparseTensor` objects: - /// - /// index = [ 0] - /// [10] - /// [20] - /// values = [1, 2, 3] - /// shape = [50] - /// - /// and - /// - /// index = [ 2] - /// [10] - /// values = [4, 5] - /// shape = [30] - /// - /// then the final deserialized `SparseTensor` will be: - /// - /// index = [0 0] - /// [0 10] - /// [0 20] - /// [1 2] - /// [1 10] - /// values = [1, 2, 3, 4, 5] - /// shape = [2 50] - /// - /// - Parameter serialized_sparse: 2-D, The `N` serialized `SparseTensor` objects. - /// Must have 3 columns. - /// - /// - Attr dtype: The `dtype` of the serialized `SparseTensor` objects. - @inlinable @inline(__always) - public static func deserializeManySparse( - serializedSparse: StringTensor - ) -> (sparseIndices: Tensor, sparseValues: Tensor, sparseShape: Tensor) { - _RawTFEager.deserializeManySparse(serializedSparse: serializedSparse) - } - - /// Deserialize `SparseTensor` objects. - /// - /// The input `serialized_sparse` must have the shape `[?, ?, ..., ?, 3]` where - /// the last dimension stores serialized `SparseTensor` objects and the other N - /// dimensions (N >= 0) correspond to a batch. The ranks of the original - /// `SparseTensor` objects must all match. When the final `SparseTensor` is - /// created, its rank is the rank of the incoming `SparseTensor` objects plus N; - /// the sparse tensors have been concatenated along new dimensions, one for each - /// batch. - /// - /// The output `SparseTensor` object's shape values for the original dimensions - /// are the max across the input `SparseTensor` objects' shape values for the - /// corresponding dimensions. The new dimensions match the size of the batch. - /// - /// The input `SparseTensor` objects' indices are assumed ordered in - /// standard lexicographic order. If this is not the case, after this - /// step run `SparseReorder` to restore index ordering. - /// - /// For example, if the serialized input is a `[2 x 3]` matrix representing two - /// original `SparseTensor` objects: - /// - /// index = [ 0] - /// [10] - /// [20] - /// values = [1, 2, 3] - /// shape = [50] - /// - /// and - /// - /// index = [ 2] - /// [10] - /// values = [4, 5] - /// shape = [30] - /// - /// then the final deserialized `SparseTensor` will be: - /// - /// index = [0 0] - /// [0 10] - /// [0 20] - /// [1 2] - /// [1 10] - /// values = [1, 2, 3, 4, 5] - /// shape = [2 50] - /// - /// - Parameter serialized_sparse: The serialized `SparseTensor` objects. The last dimension - /// must have 3 columns. 
- /// - /// - Attr dtype: The `dtype` of the serialized `SparseTensor` objects. - @inlinable @inline(__always) - public static func deserializeSparse< - Dtype: TensorFlowScalar, - Tserialized: TensorFlowScalar - >( - serializedSparse: Tensor - ) -> (sparseIndices: Tensor, sparseValues: Tensor, sparseShape: Tensor) { - _RawTFEager.deserializeSparse(serializedSparse: serializedSparse) - } - - /// Deserialize `SparseTensor` objects. - /// - /// The input `serialized_sparse` must have the shape `[?, ?, ..., ?, 3]` where - /// the last dimension stores serialized `SparseTensor` objects and the other N - /// dimensions (N >= 0) correspond to a batch. The ranks of the original - /// `SparseTensor` objects must all match. When the final `SparseTensor` is - /// created, its rank is the rank of the incoming `SparseTensor` objects plus N; - /// the sparse tensors have been concatenated along new dimensions, one for each - /// batch. - /// - /// The output `SparseTensor` object's shape values for the original dimensions - /// are the max across the input `SparseTensor` objects' shape values for the - /// corresponding dimensions. The new dimensions match the size of the batch. - /// - /// The input `SparseTensor` objects' indices are assumed ordered in - /// standard lexicographic order. If this is not the case, after this - /// step run `SparseReorder` to restore index ordering. - /// - /// For example, if the serialized input is a `[2 x 3]` matrix representing two - /// original `SparseTensor` objects: - /// - /// index = [ 0] - /// [10] - /// [20] - /// values = [1, 2, 3] - /// shape = [50] - /// - /// and - /// - /// index = [ 2] - /// [10] - /// values = [4, 5] - /// shape = [30] - /// - /// then the final deserialized `SparseTensor` will be: - /// - /// index = [0 0] - /// [0 10] - /// [0 20] - /// [1 2] - /// [1 10] - /// values = [1, 2, 3, 4, 5] - /// shape = [2 50] - /// - /// - Parameter serialized_sparse: The serialized `SparseTensor` objects. The last dimension - /// must have 3 columns. - /// - /// - Attr dtype: The `dtype` of the serialized `SparseTensor` objects. - @inlinable @inline(__always) - public static func deserializeSparse( - serializedSparse: StringTensor - ) -> (sparseIndices: Tensor, sparseValues: Tensor, sparseShape: Tensor) { - _RawTFEager.deserializeSparse(serializedSparse: serializedSparse) - } - - /// Deletes the resource specified by the handle. - /// - /// All subsequent operations using the resource will result in a NotFound - /// error status. - /// - /// - Parameter resource: handle to the resource to delete. - /// - /// - Attr ignore_lookup_error: whether to ignore the error when the resource - /// doesn't exist. - @inlinable @inline(__always) - public static func destroyResourceOp( - resource: ResourceHandle, - ignoreLookupError: Bool = true - ) { - _RawTFEager.destroyResourceOp(resource: resource, ignoreLookupError: ignoreLookupError) - } - - @inlinable @inline(__always) - public static func devicePlacementOp() -> StringTensor { - _RawTFEager.devicePlacementOp() - } - - /// Returns a diagonal tensor with a given diagonal values. - /// - /// Given a `diagonal`, this operation returns a tensor with the `diagonal` and - /// everything else padded with zeros. The diagonal is computed as follows: - /// - /// Assume `diagonal` has dimensions [D1,..., Dk], then the output is a tensor of - /// rank 2k with dimensions [D1,..., Dk, D1,..., Dk] where: - /// - /// `output[i1,..., ik, i1,..., ik] = diagonal[i1, ..., ik]` and 0 everywhere else. 
- /// - /// For example: - /// - /// ``` - /// # 'diagonal' is [1, 2, 3, 4] - /// tf.diag(diagonal) ==> [[1, 0, 0, 0] - /// [0, 2, 0, 0] - /// [0, 0, 3, 0] - /// [0, 0, 0, 4]] - /// ``` - /// - /// - Parameter diagonal: Rank k tensor where k is at most 1. - @inlinable @inline(__always) - public static func diag( - diagonal: Tensor - ) -> Tensor { - switch diagonal.handle.backend { - case .XLA: - let output_device = diagonal.device - let diagonal = Tensor(copying: diagonal, to: .defaultTFEager) - return Tensor(copying: _RawTFEager.diag(diagonal: diagonal), to: output_device) - case .TF_EAGER: - return _RawTFEager.diag(diagonal: diagonal) - } - - } - - /// Returns the diagonal part of the tensor. - /// - /// This operation returns a tensor with the `diagonal` part - /// of the `input`. The `diagonal` part is computed as follows: - /// - /// Assume `input` has dimensions `[D1,..., Dk, D1,..., Dk]`, then the output is a - /// tensor of rank `k` with dimensions `[D1,..., Dk]` where: - /// - /// `diagonal[i1,..., ik] = input[i1, ..., ik, i1,..., ik]`. - /// - /// For example: - /// - /// ``` - /// # 'input' is [[1, 0, 0, 0] - /// [0, 2, 0, 0] - /// [0, 0, 3, 0] - /// [0, 0, 0, 4]] - /// - /// tf.diag_part(input) ==> [1, 2, 3, 4] - /// ``` - /// - /// - Parameter input: Rank k tensor where k is even and not zero. - /// - /// - Output diagonal: The extracted diagonal. - @inlinable @inline(__always) - public static func diagPart( - _ input: Tensor - ) -> Tensor { - switch input.handle.backend { - case .XLA: - return _RawXLA.diagPart(input) - case .TF_EAGER: - return _RawTFEager.diagPart(input) - } - - } - - /// Computes Psi, the derivative of Lgamma (the log of the absolute value of - /// - /// `Gamma(x)`), element-wise. - @inlinable @inline(__always) - public static func digamma( - _ x: Tensor - ) -> Tensor { - switch x.handle.backend { - case .XLA: - let output_device = x.device - let x = Tensor(copying: x, to: .defaultTFEager) - return Tensor(copying: _RawTFEager.digamma(x), to: output_device) - case .TF_EAGER: - return _RawTFEager.digamma(x) - } - - } - - /// Computes the grayscale dilation of 4-D `input` and 3-D `filter` tensors. - /// - /// The `input` tensor has shape `[batch, in_height, in_width, depth]` and the - /// `filter` tensor has shape `[filter_height, filter_width, depth]`, i.e., each - /// input channel is processed independently of the others with its own structuring - /// function. The `output` tensor has shape - /// `[batch, out_height, out_width, depth]`. The spatial dimensions of the output - /// tensor depend on the `padding` algorithm. We currently only support the default - /// "NHWC" `data_format`. - /// - /// In detail, the grayscale morphological 2-D dilation is the max-sum correlation - /// (for consistency with `conv2d`, we use unmirrored filters): - /// - /// output[b, y, x, c] = - /// max_{dy, dx} input[b, - /// strides[1] * y + rates[1] * dy, - /// strides[2] * x + rates[2] * dx, - /// c] + - /// filter[dy, dx, c] - /// - /// Max-pooling is a special case when the filter has size equal to the pooling - /// kernel size and contains all zeros. - /// - /// Note on duality: The dilation of `input` by the `filter` is equal to the - /// negation of the erosion of `-input` by the reflected `filter`. - /// - /// - Parameters: - /// - input: 4-D with shape `[batch, in_height, in_width, depth]`. - /// - filter: 3-D with shape `[filter_height, filter_width, depth]`. 
- /// - /// - Attrs: - /// - strides: The stride of the sliding window for each dimension of the input - /// tensor. Must be: `[1, stride_height, stride_width, 1]`. - /// - rates: The input stride for atrous morphological dilation. Must be: - /// `[1, rate_height, rate_width, 1]`. - /// - padding: The type of padding algorithm to use. - /// - /// - Output output: 4-D with shape `[batch, out_height, out_width, depth]`. - @inlinable @inline(__always) - public static func dilation2D( - _ input: Tensor, - filter: Tensor, - strides: [Int32], - rates: [Int32], - padding: Padding - ) -> Tensor { - switch commonBackend(input.handle.backend, filter.handle.backend) { - case .XLA: - let output_device = filter.device - let input = Tensor(copying: input, to: .defaultTFEager) - let filter = Tensor(copying: filter, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.dilation2D( - input, filter: filter, strides: strides, rates: rates, padding: padding), - to: output_device) - case .TF_EAGER: - return _RawTFEager.dilation2D( - input, filter: filter, strides: strides, rates: rates, padding: padding) - } - - } - - /// Computes the gradient of morphological 2-D dilation with respect to the filter. - /// - /// - Parameters: - /// - input: 4-D with shape `[batch, in_height, in_width, depth]`. - /// - filter: 3-D with shape `[filter_height, filter_width, depth]`. - /// - out_backprop: 4-D with shape `[batch, out_height, out_width, depth]`. - /// - /// - Attrs: - /// - strides: 1-D of length 4. The stride of the sliding window for each dimension of - /// the input tensor. Must be: `[1, stride_height, stride_width, 1]`. - /// - rates: 1-D of length 4. The input stride for atrous morphological dilation. - /// Must be: `[1, rate_height, rate_width, 1]`. - /// - padding: The type of padding algorithm to use. - /// - /// - Output filter_backprop: 3-D with shape `[filter_height, filter_width, depth]`. - @inlinable @inline(__always) - public static func dilation2DBackpropFilter( - _ input: Tensor, - filter: Tensor, - outBackprop: Tensor, - strides: [Int32], - rates: [Int32], - padding: Padding - ) -> Tensor { - switch commonBackend( - commonBackend(input.handle.backend, filter.handle.backend), outBackprop.handle.backend) - { - case .XLA: - let output_device = outBackprop.device - let input = Tensor(copying: input, to: .defaultTFEager) - let filter = Tensor(copying: filter, to: .defaultTFEager) - let outBackprop = Tensor(copying: outBackprop, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.dilation2DBackpropFilter( - input, filter: filter, outBackprop: outBackprop, strides: strides, rates: rates, - padding: padding), to: output_device) - case .TF_EAGER: - return _RawTFEager.dilation2DBackpropFilter( - input, filter: filter, outBackprop: outBackprop, strides: strides, rates: rates, - padding: padding) - } - - } - - /// Computes the gradient of morphological 2-D dilation with respect to the input. - /// - /// - Parameters: - /// - input: 4-D with shape `[batch, in_height, in_width, depth]`. - /// - filter: 3-D with shape `[filter_height, filter_width, depth]`. - /// - out_backprop: 4-D with shape `[batch, out_height, out_width, depth]`. - /// - /// - Attrs: - /// - strides: 1-D of length 4. The stride of the sliding window for each dimension of - /// the input tensor. Must be: `[1, stride_height, stride_width, 1]`. - /// - rates: 1-D of length 4. The input stride for atrous morphological dilation. - /// Must be: `[1, rate_height, rate_width, 1]`. - /// - padding: The type of padding algorithm to use. 
-    ///
-    /// - Output in_backprop: 4-D with shape `[batch, in_height, in_width, depth]`.
-    @inlinable @inline(__always)
-    public static func dilation2DBackpropInput(
-      _ input: Tensor,
-      filter: Tensor,
-      outBackprop: Tensor,
-      strides: [Int32],
-      rates: [Int32],
-      padding: Padding
-    ) -> Tensor {
-      switch commonBackend(
-        commonBackend(input.handle.backend, filter.handle.backend), outBackprop.handle.backend)
-      {
-      case .XLA:
-        let output_device = outBackprop.device
-        let input = Tensor(copying: input, to: .defaultTFEager)
-        let filter = Tensor(copying: filter, to: .defaultTFEager)
-        let outBackprop = Tensor(copying: outBackprop, to: .defaultTFEager)
-        return Tensor(
-          copying: _RawTFEager.dilation2DBackpropInput(
-            input, filter: filter, outBackprop: outBackprop, strides: strides, rates: rates,
-            padding: padding), to: output_device)
-      case .TF_EAGER:
-        return _RawTFEager.dilation2DBackpropInput(
-          input, filter: filter, outBackprop: outBackprop, strides: strides, rates: rates,
-          padding: padding)
-      }
-
-    }
-
-    /// A substitute for `InterleaveDataset` on a fixed list of `N` datasets.
-    ///
-    /// - Parameters:
-    ///   - selector_input_dataset: A dataset of scalar `DT_INT64` elements that determines which of the
-    ///     `N` data inputs should produce the next output element.
-    ///   - data_input_datasets: `N` datasets with the same type that will be interleaved according to
-    ///     the values of `selector_input_dataset`.
-    @inlinable @inline(__always)
-    public static func directedInterleaveDataset(
-      selectorInputDataset: VariantHandle,
-      dataInputDatasets: [VariantHandle],
-      outputTypes: [TensorDataType],
-      outputShapes: [TensorShape?]
-    ) -> VariantHandle {
-      _RawTFEager.directedInterleaveDataset(
-        selectorInputDataset: selectorInputDataset, dataInputDatasets: dataInputDatasets,
-        outputTypes: outputTypes, outputShapes: outputShapes)
-    }
-
-    /// Returns x / y element-wise.
-    ///
-    /// *NOTE*: `Div` supports broadcasting. More about broadcasting
-    /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
-    @inlinable @inline(__always)
-    public static func div(
-      _ x: Tensor,
-      _ y: Tensor
-    ) -> Tensor {
-      switch commonBackend(x.handle.backend, y.handle.backend) {
-      case .XLA:
-        return _RawXLA.div(x, y)
-      case .TF_EAGER:
-        return _RawTFEager.div(x, y)
-      }
-
-    }
-
-    /// Returns 0 if the denominator is zero.
-    ///
-    ///
-    /// *NOTE*: `DivNoNan` supports broadcasting. More about broadcasting
-    /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
-    @inlinable @inline(__always)
-    public static func divNoNan(
-      _ x: Tensor,
-      _ y: Tensor
-    ) -> Tensor {
-      switch commonBackend(x.handle.backend, y.handle.backend) {
-      case .XLA:
-        let output_device = y.device
-        let x = Tensor(copying: x, to: .defaultTFEager)
-        let y = Tensor(copying: y, to: .defaultTFEager)
-        return Tensor(copying: _RawTFEager.divNoNan(x, y), to: output_device)
-      case .TF_EAGER:
-        return _RawTFEager.divNoNan(x, y)
-      }
-
-    }
-
-    /// Draw bounding boxes on a batch of images.
-    ///
-    /// Outputs a copy of `images` but draws on top of the pixels zero or more bounding
-    /// boxes specified by the locations in `boxes`. The coordinates of each
-    /// bounding box in `boxes` are encoded as `[y_min, x_min, y_max, x_max]`. The
-    /// bounding box coordinates are floats in `[0.0, 1.0]` relative to the width and
-    /// height of the underlying image.
-    ///
-    /// For example, if an image is 100 x 200 pixels (height x width) and the bounding
-    /// box is `[0.1, 0.2, 0.5, 0.9]`, the upper-left and bottom-right coordinates of
-    /// the bounding box will be `(40, 10)` to `(180, 50)` (in (x,y) coordinates).
-    ///
-    /// Parts of the bounding box may fall outside the image.
-    ///
-    /// - Parameters:
-    ///   - images: 4-D with shape `[batch, height, width, depth]`. A batch of images.
-    ///   - boxes: 3-D with shape `[batch, num_bounding_boxes, 4]` containing bounding
-    ///     boxes.
-    ///
-    /// - Output output: 4-D with the same shape as `images`. The batch of input images with
-    ///   bounding boxes drawn on the images.
-    @inlinable @inline(__always)
-    public static func drawBoundingBoxes(
-      images: Tensor,
-      boxes: Tensor
-    ) -> Tensor {
-      switch commonBackend(images.handle.backend, boxes.handle.backend) {
-      case .XLA:
-        let output_device = boxes.device
-        let images = Tensor(copying: images, to: .defaultTFEager)
-        let boxes = Tensor(copying: boxes, to: .defaultTFEager)
-        return Tensor(
-          copying: _RawTFEager.drawBoundingBoxes(images: images, boxes: boxes), to: output_device)
-      case .TF_EAGER:
-        return _RawTFEager.drawBoundingBoxes(images: images, boxes: boxes)
-      }
-
-    }
-
-    /// Draw bounding boxes on a batch of images.
-    ///
-    /// Outputs a copy of `images` but draws on top of the pixels zero or more bounding
-    /// boxes specified by the locations in `boxes`. The coordinates of each
-    /// bounding box in `boxes` are encoded as `[y_min, x_min, y_max, x_max]`. The
-    /// bounding box coordinates are floats in `[0.0, 1.0]` relative to the width and
-    /// height of the underlying image.
-    ///
-    /// For example, if an image is 100 x 200 pixels (height x width) and the bounding
-    /// box is `[0.1, 0.2, 0.5, 0.9]`, the upper-left and bottom-right coordinates of
-    /// the bounding box will be `(40, 10)` to `(180, 50)` (in (x,y) coordinates).
-    ///
-    /// Parts of the bounding box may fall outside the image.
-    ///
-    /// - Parameters:
-    ///   - images: 4-D with shape `[batch, height, width, depth]`. A batch of images.
-    ///   - boxes: 3-D with shape `[batch, num_bounding_boxes, 4]` containing bounding
-    ///     boxes.
-    ///   - colors: 2-D. A list of RGBA colors to cycle through for the boxes.
-    ///
-    /// - Output output: 4-D with the same shape as `images`. The batch of input images with
-    ///   bounding boxes drawn on the images.
-    @inlinable @inline(__always)
-    public static func drawBoundingBoxesV2(
-      images: Tensor,
-      boxes: Tensor,
-      colors: Tensor
-    ) -> Tensor {
-      switch commonBackend(
-        commonBackend(images.handle.backend, boxes.handle.backend), colors.handle.backend)
-      {
-      case .XLA:
-        let output_device = colors.device
-        let images = Tensor(copying: images, to: .defaultTFEager)
-        let boxes = Tensor(copying: boxes, to: .defaultTFEager)
-        let colors = Tensor(copying: colors, to: .defaultTFEager)
-        return Tensor(
-          copying: _RawTFEager.drawBoundingBoxesV2(images: images, boxes: boxes, colors: colors),
-          to: output_device)
-      case .TF_EAGER:
-        return _RawTFEager.drawBoundingBoxesV2(images: images, boxes: boxes, colors: colors)
-      }
-
-    }
-
-    /// Partitions `data` into `num_partitions` tensors using indices from `partitions`.
-    ///
-    /// For each index tuple `js` of size `partitions.ndim`, the slice `data[js, ...]`
-    /// becomes part of `outputs[partitions[js]]`. The slices with `partitions[js] = i`
-    /// are placed in `outputs[i]` in lexicographic order of `js`, and the first
-    /// dimension of `outputs[i]` is the number of entries in `partitions` equal to `i`.
- /// In detail, - /// - /// ```python - /// outputs[i].shape = [sum(partitions == i)] + data.shape[partitions.ndim:] - /// - /// outputs[i] = pack([data[js, ...] for js if partitions[js] == i]) - /// ``` - /// - /// `data.shape` must start with `partitions.shape`. - /// - /// For example: - /// - /// ```python - /// # Scalar partitions. - /// partitions = 1 - /// num_partitions = 2 - /// data = [10, 20] - /// outputs[0] = [] # Empty with shape [0, 2] - /// outputs[1] = [[10, 20]] - /// - /// # Vector partitions. - /// partitions = [0, 0, 1, 1, 0] - /// num_partitions = 2 - /// data = [10, 20, 30, 40, 50] - /// outputs[0] = [10, 20, 50] - /// outputs[1] = [30, 40] - /// ``` - /// - /// See `dynamic_stitch` for an example on how to merge partitions back. - ///
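- /// The vector case, as a minimal Swift sketch over plain arrays (the values are - /// hypothetical, mirroring the example above): - /// - /// ```swift - /// let partitions = [0, 0, 1, 1, 0] - /// let data = [10, 20, 30, 40, 50] - /// var outputs = [[Int]](repeating: [], count: 2) - /// for (p, d) in zip(partitions, data) { outputs[p].append(d) } - /// // outputs[0] == [10, 20, 50], outputs[1] == [30, 40] - /// ``` - ///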
- /// - Parameter partitions: Any shape. Indices in the range `[0, num_partitions)`. - /// - /// - Attr num_partitions: The number of partitions to output. - @inlinable @inline(__always) - public static func dynamicPartition( - data: Tensor, - partitions: Tensor, - numPartitions: Int64 - ) -> [Tensor] { - switch commonBackend(data.handle.backend, partitions.handle.backend) { - case .XLA: - let output_device = partitions.device - let data = Tensor(copying: data, to: .defaultTFEager) - let partitions = Tensor(copying: partitions, to: .defaultTFEager) - return [Tensor]( - copying: _RawTFEager.dynamicPartition( - data: data, partitions: partitions, numPartitions: numPartitions), to: output_device) - case .TF_EAGER: - return _RawTFEager.dynamicPartition( - data: data, partitions: partitions, numPartitions: numPartitions) - } - - } - - /// Interleave the values from the `data` tensors into a single tensor. - /// - /// Builds a merged tensor such that - /// - /// ```python - /// merged[indices[m][i, ..., j], ...] = data[m][i, ..., j, ...] - /// ``` - /// - /// For example, if each `indices[m]` is scalar or vector, we have - /// - /// ```python - /// # Scalar indices: - /// merged[indices[m], ...] = data[m][...] - /// - /// # Vector indices: - /// merged[indices[m][i], ...] = data[m][i, ...] - /// ``` - /// - /// Each `data[i].shape` must start with the corresponding `indices[i].shape`, - /// and the rest of `data[i].shape` must be constant w.r.t. `i`. That is, we - /// must have `data[i].shape = indices[i].shape + constant`. In terms of this - /// `constant`, the output shape is - /// - /// merged.shape = [max(indices)] + constant - /// - /// Values are merged in order, so if an index appears in both `indices[m][i]` and - /// `indices[n][j]` for `(m,i) < (n,j)` the slice `data[n][j]` will appear in the - /// merged result. If you do not need this guarantee, ParallelDynamicStitch might - /// perform better on some devices. - /// - /// For example: - /// - /// ```python - /// indices[0] = 6 - /// indices[1] = [4, 1] - /// indices[2] = [[5, 2], [0, 3]] - /// data[0] = [61, 62] - /// data[1] = [[41, 42], [11, 12]] - /// data[2] = [[[51, 52], [21, 22]], [[1, 2], [31, 32]]] - /// merged = [[1, 2], [11, 12], [21, 22], [31, 32], [41, 42], - /// [51, 52], [61, 62]] - /// ``` - /// - /// This method can be used to merge partitions created by `dynamic_partition` - /// as illustrated in the following example: - /// - /// ```python - /// # Apply function (increments x_i) on elements for which a certain condition - /// # applies (x_i != -1 in this example). - /// x=tf.constant([0.1, -1., 5.2, 4.3, -1., 7.4]) - /// condition_mask=tf.not_equal(x,tf.constant(-1.)) - /// partitioned_data = tf.dynamic_partition( - /// x, tf.cast(condition_mask, tf.int32) , 2) - /// partitioned_data[1] = partitioned_data[1] + 1.0 - /// condition_indices = tf.dynamic_partition( - /// tf.range(tf.shape(x)[0]), tf.cast(condition_mask, tf.int32) , 2) - /// x = tf.dynamic_stitch(condition_indices, partitioned_data) - /// # Here x=[1.1, -1., 6.2, 5.3, -1, 8.4], the -1. values remain - /// # unchanged. - /// ``` - ///
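- /// The same merging logic, as a minimal Swift sketch over plain arrays (a - /// flattened variant of the example above; the values are hypothetical): - /// - /// ```swift - /// let indices: [[Int]] = [[6], [4, 1], [5, 2, 0, 3]] - /// let data: [[Int]] = [[61], [41, 11], [51, 21, 1, 31]] - /// var merged = [Int](repeating: 0, count: 7) - /// for (idx, vals) in zip(indices, data) { - /// for (i, v) in zip(idx, vals) { merged[i] = v } - /// } - /// // merged == [1, 11, 21, 31, 41, 51, 61] - /// ```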
- @inlinable @inline(__always) - public static func dynamicStitch( - indices: [Tensor], - data: [Tensor] - ) -> Tensor { - _RawTFEager.dynamicStitch(indices: indices, data: data) - } - - /// Eagerly executes a python function to compute `func(input)->output`. - /// - /// The semantics of the input, output, and attributes are the same as those for - /// PyFunc. - @inlinable @inline(__always) - public static func eagerPyFunc< - Tin: TensorArrayProtocol, - Tout: TensorGroup - >( - _ input: Tin, - token: String, - isAsync: Bool = false - ) -> Tout { - _RawTFEager.eagerPyFunc(input, token: token, isAsync: isAsync) - } - - /// Computes the (possibly normalized) Levenshtein Edit Distance. - /// - /// The inputs are variable-length sequences provided by SparseTensors - /// (hypothesis_indices, hypothesis_values, hypothesis_shape) - /// and - /// (truth_indices, truth_values, truth_shape). - /// - /// The inputs are: - /// - /// - Parameters: - /// - hypothesis_indices: The indices of the hypothesis list SparseTensor. - /// This is an N x R int64 matrix. - /// - hypothesis_values: The values of the hypothesis list SparseTensor. - /// This is an N-length vector. - /// - hypothesis_shape: The shape of the hypothesis list SparseTensor. - /// This is an R-length vector. - /// - truth_indices: The indices of the truth list SparseTensor. - /// This is an M x R int64 matrix. - /// - truth_values: The values of the truth list SparseTensor. - /// This is an M-length vector. - /// - truth_shape: The shape of the truth list SparseTensor. - /// This is an R-length vector. - /// - /// - Attr normalize: boolean (if true, edit distances are normalized by length of truth). - /// - /// The output is: - /// - /// - Output output: A dense float tensor with rank R - 1. - /// - /// For the example input: - /// - /// // hypothesis represents a 2x1 matrix with variable-length values: - /// // (0,0) = ["a"] - /// // (1,0) = ["b"] - /// hypothesis_indices = [[0, 0, 0], - /// [1, 0, 0]] - /// hypothesis_values = ["a", "b"] - /// hypothesis_shape = [2, 1, 1] - /// - /// // truth represents a 2x2 matrix with variable-length values: - /// // (0,0) = [] - /// // (0,1) = ["a"] - /// // (1,0) = ["b", "c"] - /// // (1,1) = ["a"] - /// truth_indices = [[0, 1, 0], - /// [1, 0, 0], - /// [1, 0, 1], - /// [1, 1, 0]] - /// truth_values = ["a", "b", "c", "a"] - /// truth_shape = [2, 2, 2] - /// normalize = true - /// - /// The output will be: - /// - /// // output is a 2x2 matrix with edit distances normalized by truth lengths.
- /// output = [[inf, 1.0], // (0,0): no truth, (0,1): no hypothesis - /// [0.5, 1.0]] // (1,0): addition, (1,1): no hypothesis - @inlinable @inline(__always) - public static func editDistance( - hypothesisIndices: Tensor, - hypothesisValues: Tensor, - hypothesisShape: Tensor, - truthIndices: Tensor, - truthValues: Tensor, - truthShape: Tensor, - normalize: Bool = true - ) -> Tensor { - switch commonBackend( - commonBackend( - commonBackend( - commonBackend( - commonBackend(hypothesisIndices.handle.backend, hypothesisValues.handle.backend), - hypothesisShape.handle.backend), truthIndices.handle.backend), - truthValues.handle.backend), truthShape.handle.backend) - { - case .XLA: - let output_device = truthShape.device - let hypothesisIndices = Tensor(copying: hypothesisIndices, to: .defaultTFEager) - let hypothesisValues = Tensor(copying: hypothesisValues, to: .defaultTFEager) - let hypothesisShape = Tensor(copying: hypothesisShape, to: .defaultTFEager) - let truthIndices = Tensor(copying: truthIndices, to: .defaultTFEager) - let truthValues = Tensor(copying: truthValues, to: .defaultTFEager) - let truthShape = Tensor(copying: truthShape, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.editDistance( - hypothesisIndices: hypothesisIndices, hypothesisValues: hypothesisValues, - hypothesisShape: hypothesisShape, truthIndices: truthIndices, truthValues: truthValues, - truthShape: truthShape, normalize: normalize), to: output_device) - case .TF_EAGER: - return _RawTFEager.editDistance( - hypothesisIndices: hypothesisIndices, hypothesisValues: hypothesisValues, - hypothesisShape: hypothesisShape, truthIndices: truthIndices, truthValues: truthValues, - truthShape: truthShape, normalize: normalize) - } - - } - - /// Computes the eigen decomposition of one or more square matrices. - /// - /// Computes the eigenvalues and (optionally) right eigenvectors of each inner matrix in - /// `input` such that `input[..., :, :] = v[..., :, :] * diag(e[..., :])`. The eigenvalues - /// are sorted in non-decreasing order. - /// - /// ```python - /// # a is a tensor. - /// # e is a tensor of eigenvalues. - /// # v is a tensor of eigenvectors. - /// e, v = eig(a) - /// e = eig(a, compute_v=False) - /// ``` - /// - /// - Parameter input: `Tensor` input of shape `[N, N]`. - /// - /// - Attr compute_v: If `True` then eigenvectors will be computed and returned in `v`. - /// Otherwise, only the eigenvalues will be computed. - /// - /// - Outputs: - /// - e: Eigenvalues. Shape is `[N]`. - /// - v: Eigenvectors. Shape is `[N, N]`. - @inlinable @inline(__always) - public static func eig< - T: FloatingPoint & TensorFlowScalar, - Tout: TensorFlowScalar - >( - _ input: Tensor, - computeV: Bool = true - ) -> (e: Tensor, v: Tensor) { - _RawTFEager.eig(input, computeV: computeV) - } - - /// Tensor contraction according to Einstein summation convention. - /// - /// Implements generalized Tensor contraction and reduction. Each input Tensor must - /// have a corresponding input subscript appearing in the comma-separated left-hand - /// side of the equation. The right-hand side of the equation consists of the - /// output subscript. The input subscripts and the output subscript should consist - /// of zero or more named axis labels and at most one ellipsis (`...`). - /// - /// The named axis labels may be any single character other than those having - /// special meaning, namely `,.->`. 
The behavior of this Op is undefined if it - /// receives an ill-formatted equation; since the validation is done at - /// graph-building time, we omit format validation checks at runtime. - /// - /// Note: This Op is *not* intended to be called by the user; instead users should - /// call `tf.einsum` directly. It is a hidden Op used by `tf.einsum`. - /// - /// Operations are applied to the input(s) according to the following rules: - /// - /// (a) Generalized Diagonals: For input dimensions corresponding to axis labels - /// appearing more than once in the same input subscript, we take the - /// generalized (`k`-dimensional) diagonal. - /// For example, in the equation `iii->i` with input shape `[3, 3, 3]`, the - /// generalized diagonal would consist of `3` elements at indices `(0, 0, 0)`, - /// `(1, 1, 1)` and `(2, 2, 2)` to create a Tensor of shape `[3]`. - /// - /// (b) Reduction: Axes corresponding to labels appearing only in one input - /// subscript but not in the output subscript are summed over prior to Tensor - /// contraction. - /// For example, in the equation `ab,bc->b`, the axis labels `a` and `c` are - /// the reduction axis labels. - /// - /// (c) Batch Dimensions: Axes corresponding to labels appearing in each of the - /// input subscripts and also in the output subscript make up the batch - /// dimensions in Tensor contraction. Unnamed axis labels corresponding to - /// ellipsis (`...`) also correspond to batch dimensions. - /// For example, for the equation denoting batch matrix multiplication, - /// `bij,bjk->bik`, the axis label `b` corresponds to a batch dimension. - /// - /// (d) Contraction: In case of binary einsum, axes corresponding to labels - /// appearing in two different inputs (and not in the output) are contracted - /// against each other. - /// Considering the batch matrix multiplication equation again - /// (`bij,bjk->bik`), the contracted axis label is `j`. - /// - /// (e) Expand Diagonal: If the output subscripts contain repeated (explicit) axis - /// labels, the opposite operation of (a) is applied. For example, in the - /// equation `i->iii` with input shape `[3]`, the output of shape `[3, 3, 3]` - /// is all zeros, except for the (generalized) diagonal which is populated - /// with values from the input. - /// Note: This operation is not supported by `np.einsum` or `tf.einsum`; it is - /// provided to enable computing the symbolic gradient of `tf.einsum`. - /// - /// The output subscripts must contain only labels appearing in at least one of the - /// input subscripts. Furthermore, all dimensions mapping to the same axis label - /// must be equal. - /// - /// Any of the input and output subscripts may contain at most a single ellipsis - /// (`...`). These ellipses are mapped against dimensions not corresponding to any - /// named axis label. If two inputs contain an ellipsis, then they are broadcasted - /// according to standard NumPy broadcasting - /// [rules](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html). - /// - /// The broadcasted dimensions are placed in the corresponding location of the - /// ellipsis in the output subscript. If the broadcasted dimensions are non-empty - /// and the output subscripts do not contain an ellipsis, then an InvalidArgument error - /// is raised. - /// - /// @compatibility(numpy) - /// Similar to [`numpy.einsum`](https://docs.scipy.org/doc/numpy/reference/generated/numpy.einsum.html). - /// - /// Comparison with `numpy.einsum`: - /// - /// * This Op only supports unary and binary forms of `numpy.einsum`.
- /// * This Op does not support the implicit form (i.e. equations without `->`). - /// * This Op also supports repeated indices in the output subscript, which is not - /// supported by `numpy.einsum`. - /// @end_compatibility - /// - /// - Parameter inputs: List of 1 or 2 Tensors. - /// - /// - Attr equation: String describing the Einstein Summation operation; in the format of np.einsum. - /// - /// - Output output: Output Tensor with shape depending upon `equation`. - @inlinable @inline(__always) - public static func einsum( - inputs: [Tensor], - equation: String - ) -> Tensor { - _RawTFEager.einsum(inputs: inputs, equation: equation) - } - - /// Computes exponential linear: `exp(features) - 1` if < 0, `features` otherwise. - /// - /// See [Fast and Accurate Deep Network Learning by Exponential Linear Units (ELUs) - /// ](http://arxiv.org/abs/1511.07289) - @inlinable @inline(__always) - public static func elu( - features: Tensor - ) -> Tensor { - switch features.handle.backend { - case .XLA: - return _RawXLA.elu(features: features) - case .TF_EAGER: - return _RawTFEager.elu(features: features) - } - - } - - /// Computes gradients for the exponential linear (Elu) operation. - /// - /// - Parameters: - /// - gradients: The backpropagated gradients to the corresponding Elu operation. - /// - outputs: The outputs of the corresponding Elu operation. - /// - /// - Output backprops: The gradients: `gradients * (outputs + 1)` if outputs < 0, - /// `gradients` otherwise. - @inlinable @inline(__always) - public static func eluGrad( - gradients: Tensor, - outputs: Tensor - ) -> Tensor { - switch commonBackend(gradients.handle.backend, outputs.handle.backend) { - case .XLA: - return _RawXLA.eluGrad(gradients: gradients, outputs: outputs) - case .TF_EAGER: - return _RawTFEager.eluGrad(gradients: gradients, outputs: outputs) - } - - } - - /// Creates a tensor with the given shape. - /// - /// This operation creates a tensor of `shape` and `dtype`. - /// - /// - Parameter shape: 1-D. Represents the shape of the output tensor. - /// - /// - Attr init: If True, initialize the returned tensor with the default value of dtype. Otherwise, the implementation is free not to initialize the tensor's content. - /// - /// - Output output: A `Tensor` of type `T`. - @inlinable @inline(__always) - public static func empty( - shape: Tensor, - init_: Bool = false - ) -> Tensor { - switch shape.handle.backend { - case .XLA: - let output_device = shape.device - let shape = Tensor(copying: shape, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.empty(shape: shape, init_: init_), to: output_device) - case .TF_EAGER: - return _RawTFEager.empty(shape: shape, init_: init_) - } - - } - - /// Creates and returns an empty tensor list. - /// - /// All list elements must be tensors of dtype element_dtype and shape compatible - /// with element_shape. - /// - /// handle: an empty tensor list. - /// element_dtype: the type of elements in the list. - /// element_shape: a shape compatible with that of elements in the list. - @inlinable @inline(__always) - public static func emptyTensorList( - elementShape: Tensor, - maxNumElements: Tensor, - elementDtype: TensorDataType - ) -> VariantHandle { - _RawTFEager.emptyTensorList( - elementShape: elementShape, maxNumElements: maxNumElements, elementDtype: elementDtype) - } - - /// Encode strings into web-safe base64 format. - /// - /// Refer to the following article for more information on base64 format: - /// en.wikipedia.org/wiki/Base64.
Base64 strings may have padding with '=' at the - /// end so that the encoded string has a length that is a multiple of 4. See the - /// Padding section of the link above. - /// - /// Web-safe means that the encoder uses - and _ instead of + and /. - /// - /// - Parameter input: Strings to be encoded. - /// - /// - Attr pad: Bool whether padding is applied at the ends. - /// - /// - Output output: Input strings encoded in base64. - @inlinable @inline(__always) - public static func encodeBase64( - _ input: StringTensor, - pad: Bool = false - ) -> StringTensor { - _RawTFEager.encodeBase64(input, pad: pad) - } - - /// JPEG-encode an image. - /// - /// `image` is a 3-D uint8 Tensor of shape `[height, width, channels]`. - /// - /// The attr `format` can be used to override the color format of the encoded - /// output. Values can be: - /// - /// * `''`: Use a default format based on the number of channels in the image. - /// * `grayscale`: Output a grayscale JPEG image. The `channels` dimension - /// of `image` must be 1. - /// * `rgb`: Output an RGB JPEG image. The `channels` dimension - /// of `image` must be 3. - /// - /// If `format` is not specified or is the empty string, a default format is picked - /// based on the number of channels in `image`: - /// - /// * 1: Output a grayscale image. - /// * 3: Output an RGB image. - /// - /// - Parameter image: 3-D with shape `[height, width, channels]`. - /// - /// - Attrs: - /// - format: Per pixel image format. - /// - quality: Quality of the compression from 0 to 100 (higher is better and slower). - /// - progressive: If True, create a JPEG that loads progressively (coarse to fine). - /// - optimize_size: If True, spend CPU/RAM to reduce size with no quality change. - /// - chroma_downsampling: See http://en.wikipedia.org/wiki/Chroma_subsampling. - /// - density_unit: Unit used to specify `x_density` and `y_density`: - /// pixels per inch (`'in'`) or centimeter (`'cm'`). - /// - x_density: Horizontal pixels per density unit. - /// - y_density: Vertical pixels per density unit. - /// - xmp_metadata: If not empty, embed this XMP metadata in the image header. - /// - /// - Output contents: 0-D. JPEG-encoded image. - @inlinable @inline(__always) - public static func encodeJpeg( - image: Tensor, - format: Format, - quality: Int64 = 95, - progressive: Bool = false, - optimizeSize: Bool = false, - chromaDownsampling: Bool = true, - densityUnit: DensityUnit = .in_, - xDensity: Int64 = 300, - yDensity: Int64 = 300, - xmpMetadata: String - ) -> StringTensor { - _RawTFEager.encodeJpeg( - image: image, format: format, quality: quality, progressive: progressive, - optimizeSize: optimizeSize, chromaDownsampling: chromaDownsampling, - densityUnit: densityUnit, xDensity: xDensity, yDensity: yDensity, xmpMetadata: xmpMetadata) - } - - /// JPEG encode input image with provided compression quality. - /// - /// `image` is a 3-D uint8 Tensor of shape `[height, width, channels]`. - /// `quality` is an int32 jpeg compression quality value between 0 and 100. - /// - /// - Parameters: - /// - images: Images to adjust. At least 3-D. - /// - quality: An int quality to encode to. - /// - /// - Output contents: 0-D. JPEG-encoded image. - @inlinable @inline(__always) - public static func encodeJpegVariableQuality( - images: Tensor, - quality: Tensor - ) -> StringTensor { - _RawTFEager.encodeJpegVariableQuality(images: images, quality: quality) - } - - /// PNG-encode an image.
- /// - /// `image` is a 3-D uint8 or uint16 Tensor of shape `[height, width, channels]` - /// where `channels` is: - /// - /// * 1: for grayscale. - /// * 2: for grayscale + alpha. - /// * 3: for RGB. - /// * 4: for RGBA. - /// - /// The ZLIB compression level, `compression`, can be -1 for the PNG-encoder - /// default or a value from 0 to 9. 9 is the highest compression level, generating - /// the smallest output, but is slower. - /// - /// - Parameter image: 3-D with shape `[height, width, channels]`. - /// - /// - Attr compression: Compression level. - /// - /// - Output contents: 0-D. PNG-encoded image. - @inlinable @inline(__always) - public static func encodePng( - image: Tensor, - compression: Int64 = -1 - ) -> StringTensor { - _RawTFEager.encodePng(image: image, compression: compression) - } - - /// The op serializes protobuf messages provided in the input tensors. - /// - /// The types of the tensors in `values` must match the schema for the fields - /// specified in `field_names`. All the tensors in `values` must have a common - /// shape prefix, *batch_shape*. - /// - /// The `sizes` tensor specifies repeat counts for each field. The repeat count - /// (last dimension) of each tensor in `values` must be greater than or equal - /// to the corresponding repeat count in `sizes`. - /// - /// A `message_type` name must be provided to give context for the field names. - /// The actual message descriptor can be looked up either in the linked-in - /// descriptor pool or a filename provided by the caller using the - /// `descriptor_source` attribute. - /// - /// For the most part, the mapping between Proto field types and TensorFlow dtypes - /// is straightforward. However, there are a few special cases: - /// - /// - A proto field that contains a submessage or group can only be converted - /// to `DT_STRING` (the serialized submessage). This is to reduce the complexity - /// of the API. The resulting string can be used as input to another instance of - /// the decode_proto op. - /// - /// - TensorFlow lacks support for unsigned integers. The ops represent uint64 - /// types as a `DT_INT64` with the same twos-complement bit pattern (the obvious - /// way). Unsigned int32 values can be represented exactly by specifying type - /// `DT_INT64`, or using twos-complement if the caller specifies `DT_INT32` in - /// the `output_types` attribute. - /// - /// The `descriptor_source` attribute selects the source of protocol - /// descriptors to consult when looking up `message_type`. This may be: - /// - /// - An empty string or "local://", in which case protocol descriptors are - /// created for C++ (not Python) proto definitions linked to the binary. - /// - /// - A file, in which case protocol descriptors are created from the file, - /// which is expected to contain a `FileDescriptorSet` serialized as a string. - /// NOTE: You can build a `descriptor_source` file using the `--descriptor_set_out` - /// and `--include_imports` options to the protocol compiler `protoc`. - /// - /// - A "bytes://<bytes>", in which protocol descriptors are created from `<bytes>`, - /// which is expected to be a `FileDescriptorSet` serialized as a string. - /// - /// - Parameters: - /// - sizes: Tensor of int32 with shape `[batch_shape, len(field_names)]`. - /// - values: List of tensors containing values for the corresponding field. - /// - /// - Attrs: - /// - field_names: List of strings containing proto field names. - /// - message_type: Name of the proto message type to decode. - /// - Tinput_types: The input types.
- /// - /// - Output bytes: Tensor of serialized protos with shape `batch_shape`. - @inlinable @inline(__always) - public static func encodeProto( - sizes: Tensor, - _ values: TinputTypes, - fieldNames: [String], - messageType: String, - descriptorSource: String = "local://" - ) -> StringTensor { - _RawTFEager.encodeProto( - sizes: sizes, values, fieldNames: fieldNames, messageType: messageType, - descriptorSource: descriptorSource) - } - - /// Encode audio data using the WAV file format. - /// - /// This operation will generate a string suitable to be saved out to create a .wav - /// audio file. It will be encoded in the 16-bit PCM format. It takes in float - /// values in the range -1.0f to 1.0f, and any values outside that range will be - /// clamped to it. - /// - /// `audio` is a 2-D float Tensor of shape `[length, channels]`. - /// `sample_rate` is a scalar Tensor holding the rate to use (e.g. 44100). - /// - /// - Parameters: - /// - audio: 2-D with shape `[length, channels]`. - /// - sample_rate: Scalar containing the sample frequency. - /// - /// - Output contents: 0-D. WAV-encoded file contents. - @inlinable @inline(__always) - public static func encodeWav( - audio: Tensor, - sampleRate: Tensor - ) -> StringTensor { - _RawTFEager.encodeWav(audio: audio, sampleRate: sampleRate) - } - - /// An op that enqueues a list of input batch tensors to TPUEmbedding. - /// - /// - Parameters: - /// - batch: A list of 1D tensors, one for each embedding table, containing the - /// indices into the tables. - /// - mode_override: A string input that overrides the mode specified in the - /// TPUEmbeddingConfiguration. Supported values are {'unspecified', 'inference', - /// 'training', 'backward_pass_only'}. When set to 'unspecified', the mode set - /// in TPUEmbeddingConfiguration is used, otherwise mode_override is used. - /// - /// - Attr device_ordinal: The TPU device to use. Should be >= 0 and less than the number - /// of TPU cores in the task on which the node is placed. - @inlinable @inline(__always) - public static func enqueueTPUEmbeddingIntegerBatch( - batch: [Tensor], - modeOverride: StringTensor, - deviceOrdinal: Int64 = -1 - ) { - _RawTFEager.enqueueTPUEmbeddingIntegerBatch( - batch: batch, modeOverride: modeOverride, deviceOrdinal: deviceOrdinal) - } - - /// An op that enqueues TPUEmbedding input indices from a SparseTensor. - /// - /// This Op eases the porting of code that uses embedding_lookup_sparse(), - /// although some Python preprocessing of the SparseTensor arguments to - /// embedding_lookup_sparse() is required to produce the arguments to this Op, - /// since only a single EnqueueTPUEmbeddingSparseBatch Op is allowed per training - /// step. - /// - /// The tensors at corresponding positions in the three input lists - /// must have the same shape, i.e. rank 1 with dim_size() equal to the total - /// number of lookups into the table described by the corresponding table_id. - /// - /// - Parameters: - /// - sample_indices: A list of rank 1 Tensors specifying the training example and - /// feature to which the corresponding embedding_indices and aggregation_weights - /// values belong. sample_indices[i] must equal b * nf + f, where nf is the - /// number of features from the corresponding table, f is in [0, nf), and - /// b is in [0, batch size). - /// - embedding_indices: A list of rank 1 Tensors, indices into the embedding tables. - /// - aggregation_weights: A list of rank 1 Tensors containing per sample -- i.e.
per - /// (training example, feature) -- aggregation weights. - /// - mode_override: A string input that overrides the mode specified in the - /// TPUEmbeddingConfiguration. Supported values are {'unspecified', 'inference', - /// 'training', 'backward_pass_only'}. When set to 'unspecified', the mode set - /// in TPUEmbeddingConfiguration is used, otherwise mode_override is used. - /// - /// - Attrs: - /// - device_ordinal: The TPU device to use. Should be >= 0 and less than the number - /// of TPU cores in the task on which the node is placed. - /// - combiners: A list of string scalars, one for each embedding table that specify - /// how to normalize the embedding activations after weighted summation. - /// Supported combiners are 'mean', 'sum', or 'sqrtn'. It is invalid to have - /// the sum of the weights be 0 for 'mean' or the sum of the squared weights be - /// 0 for 'sqrtn'. If combiners isn't passed, the default is to use 'sum' for - /// all tables. - @inlinable @inline(__always) - public static func enqueueTPUEmbeddingSparseBatch< - T1: TensorFlowIndex, - T2: TensorFlowIndex, - T3: FloatingPoint & TensorFlowScalar - >( - sampleIndices: [Tensor], - embeddingIndices: [Tensor], - aggregationWeights: [Tensor], - modeOverride: StringTensor, - deviceOrdinal: Int64 = -1, - combiners: [String] - ) { - _RawTFEager.enqueueTPUEmbeddingSparseBatch( - sampleIndices: sampleIndices, embeddingIndices: embeddingIndices, - aggregationWeights: aggregationWeights, modeOverride: modeOverride, - deviceOrdinal: deviceOrdinal, combiners: combiners) - } - - /// Eases the porting of code that uses tf.nn.embedding_lookup_sparse(). - /// - /// sample_indices[i], embedding_indices[i] and aggregation_weights[i] correspond - /// to the ith feature. table_ids[i] indicates which embedding table to look up ith - /// feature. - /// - /// The tensors at corresponding positions in the three input lists (sample_indices, - /// embedding_indices and aggregation_weights) must have the same shape, i.e. rank 1 - /// with dim_size() equal to the total number of lookups into the table described by - /// the corresponding feature. - /// - /// - Parameters: - /// - sample_indices: A list of rank 1 Tensors specifying the training example to - /// which the corresponding embedding_indices and aggregation_weights values - /// belong. It corresponds to sp_ids.indices[:,0] in embedding_lookup_sparse(). - /// - embedding_indices: A list of rank 1 Tensors, indices into the embedding tables. - /// It corresponds to sp_ids.values in embedding_lookup_sparse(). - /// - aggregation_weights: A list of rank 1 Tensors containing per training example - /// aggregation weights. It corresponds to sp_weights.values in - /// embedding_lookup_sparse(). - /// - mode_override: A string input that overrides the mode specified in the - /// TPUEmbeddingConfiguration. Supported values are {'unspecified', 'inference', - /// 'training', 'backward_pass_only'}. When set to 'unspecified', the mode set - /// in TPUEmbeddingConfiguration is used, otherwise mode_override is used. - /// - /// - Attrs: - /// - device_ordinal: The TPU device to use. Should be >= 0 and less than the number - /// of TPU cores in the task on which the node is placed. - /// - combiners: A list of string scalars, one for each embedding table that specify - /// how to normalize the embedding activations after weighted summation. - /// Supported combiners are 'mean', 'sum', or 'sqrtn'. 
It is invalid to have - /// the sum of the weights be 0 for 'mean' or the sum of the squared weights be - /// 0 for 'sqrtn'. If combiners isn't passed, the default is to use 'sum' for - /// all tables. - /// - table_ids: A list of integers specifying the identifier of the embedding table - /// (offset of TableDescriptor in the TPUEmbeddingConfiguration) to lookup the - /// corresponding input. The ith input is looked up using table_ids[i]. The size - /// of the table_ids list must be equal to that of sample_indices, - /// embedding_indices and aggregation_weights. - @inlinable @inline(__always) - public static func enqueueTPUEmbeddingSparseTensorBatch< - T1: TensorFlowIndex, - T2: TensorFlowIndex, - T3: FloatingPoint & TensorFlowScalar - >( - sampleIndices: [Tensor], - embeddingIndices: [Tensor], - aggregationWeights: [Tensor], - modeOverride: StringTensor, - deviceOrdinal: Int64 = -1, - combiners: [String], - tableIds: [Int32], - maxSequenceLengths: [Int32] - ) { - _RawTFEager.enqueueTPUEmbeddingSparseTensorBatch( - sampleIndices: sampleIndices, embeddingIndices: embeddingIndices, - aggregationWeights: aggregationWeights, modeOverride: modeOverride, - deviceOrdinal: deviceOrdinal, combiners: combiners, tableIds: tableIds, - maxSequenceLengths: maxSequenceLengths) - } - - /// Ensures that the tensor's shape matches the expected shape. - /// - /// Raises an error if the input tensor's shape does not match the specified shape. - /// Returns the input tensor otherwise. - /// - /// - Parameter input: A tensor, whose shape is to be validated. - /// - /// - Attr shape: The expected (possibly partially specified) shape of the input tensor. - /// - /// - Output output: A tensor with the same shape and contents as the input tensor or value. - @inlinable @inline(__always) - public static func ensureShape( - _ input: Tensor, - shape: TensorShape? - ) -> Tensor { - switch input.handle.backend { - case .XLA: - let output_device = input.device - let input = Tensor(copying: input, to: .defaultTFEager) - return Tensor(copying: _RawTFEager.ensureShape(input, shape: shape), to: output_device) - case .TF_EAGER: - return _RawTFEager.ensureShape(input, shape: shape) - } - - } - - /// Creates or finds a child frame, and makes `data` available to the child frame. - /// - /// This op is used together with `Exit` to create loops in the graph. - /// The unique `frame_name` is used by the `Executor` to identify frames. If - /// `is_constant` is true, `output` is a constant in the child frame; otherwise - /// it may be changed in the child frame. At most `parallel_iterations` iterations - /// are run in parallel in the child frame. - /// - /// - Parameter data: The tensor to be made available to the child frame. - /// - /// - Attrs: - /// - frame_name: The name of the child frame. - /// - is_constant: If true, the output is constant within the child frame. - /// - parallel_iterations: The number of iterations allowed to run in parallel. - /// - /// - Output output: The same tensor as `data`. 
- @inlinable @inline(__always) - public static func enter( - data: Tensor, - frameName: String, - isConstant: Bool = false, - parallelIterations: Int64 = 10 - ) -> Tensor { - switch data.handle.backend { - case .XLA: - let output_device = data.device - let data = Tensor(copying: data, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.enter( - data: data, frameName: frameName, isConstant: isConstant, - parallelIterations: parallelIterations), to: output_device) - case .TF_EAGER: - return _RawTFEager.enter( - data: data, frameName: frameName, isConstant: isConstant, - parallelIterations: parallelIterations) - } - - } - - /// Returns the truth value of (x == y) element-wise. - /// - /// *NOTE*: `Equal` supports broadcasting. More about broadcasting - /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) - /// - /// ```python - /// x = tf.constant([2, 4]) - /// y = tf.constant(2) - /// tf.math.equal(x, y) ==> array([True, False]) - /// - /// x = tf.constant([2, 4]) - /// y = tf.constant([2, 4]) - /// tf.math.equal(x, y) ==> array([True, True]) - /// ``` - @inlinable @inline(__always) - public static func equal( - _ x: Tensor, - _ y: Tensor, - incompatibleShapeError: Bool = true - ) -> Tensor { - switch commonBackend(x.handle.backend, y.handle.backend) { - case .XLA: - return _RawXLA.equal(x, y, incompatibleShapeError: incompatibleShapeError) - case .TF_EAGER: - return _RawTFEager.equal(x, y, incompatibleShapeError: incompatibleShapeError) - } - - } - - /// Returns the truth value of (x == y) element-wise. - /// - /// *NOTE*: `Equal` supports broadcasting. More about broadcasting - /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) - /// - /// ```python - /// x = tf.constant([2, 4]) - /// y = tf.constant(2) - /// tf.math.equal(x, y) ==> array([True, False]) - /// - /// x = tf.constant([2, 4]) - /// y = tf.constant([2, 4]) - /// tf.math.equal(x, y) ==> array([True, True]) - /// ``` - @inlinable @inline(__always) - public static func equal( - _ x: StringTensor, - _ y: StringTensor, - incompatibleShapeError: Bool = true - ) -> Tensor { - _RawTFEager.equal(x, y, incompatibleShapeError: incompatibleShapeError) - } - - /// Computes the Gauss error function of `x` element-wise. - @inlinable @inline(__always) - public static func erf( - _ x: Tensor - ) -> Tensor { - switch x.handle.backend { - case .XLA: - let output_device = x.device - let x = Tensor(copying: x, to: .defaultTFEager) - return Tensor(copying: _RawTFEager.erf(x), to: output_device) - case .TF_EAGER: - return _RawTFEager.erf(x) - } - - } - - /// Computes the complementary error function of `x` element-wise. - @inlinable @inline(__always) - public static func erfc( - _ x: Tensor - ) -> Tensor { - switch x.handle.backend { - case .XLA: - let output_device = x.device - let x = Tensor(copying: x, to: .defaultTFEager) - return Tensor(copying: _RawTFEager.erfc(x), to: output_device) - case .TF_EAGER: - return _RawTFEager.erfc(x) - } - - } - - @inlinable @inline(__always) - public static func erfinv( - _ x: Tensor - ) -> Tensor { - switch x.handle.backend { - case .XLA: - let output_device = x.device - let x = Tensor(copying: x, to: .defaultTFEager) - return Tensor(copying: _RawTFEager.erfinv(x), to: output_device) - case .TF_EAGER: - return _RawTFEager.erfinv(x) - } - - } - - /// Computes the euclidean norm of elements across dimensions of a tensor. - /// - /// Reduces `input` along the dimensions given in `axis`. 
Unless - /// `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in - /// `axis`. If `keep_dims` is true, the reduced dimensions are - /// retained with length 1. - /// - /// - Parameters: - /// - input: The tensor to reduce. - /// - reduction_indices: The dimensions to reduce. Must be in the range - /// `[-rank(input), rank(input))`. - /// - /// - Attr keep_dims: If true, retain reduced dimensions with length 1. - /// - /// - Output output: The reduced tensor. - @inlinable @inline(__always) - public static func euclideanNorm< - T: TensorFlowNumeric, - Tidx: TensorFlowIndex - >( - _ input: Tensor, - reductionIndices: Tensor, - keepDims: Bool = false - ) -> Tensor { - switch commonBackend(input.handle.backend, reductionIndices.handle.backend) { - case .XLA: - let output_device = reductionIndices.device - let input = Tensor(copying: input, to: .defaultTFEager) - let reductionIndices = Tensor(copying: reductionIndices, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.euclideanNorm( - input, reductionIndices: reductionIndices, keepDims: keepDims), to: output_device) - case .TF_EAGER: - return _RawTFEager.euclideanNorm( - input, reductionIndices: reductionIndices, keepDims: keepDims) - } - - } - - /// Exits the current frame to its parent frame. - /// - /// Exit makes its input `data` available to the parent frame. - /// - /// - Parameter data: The tensor to be made available to the parent frame. - /// - /// - Output output: The same tensor as `data`. - @inlinable @inline(__always) - public static func exit( - data: Tensor - ) -> Tensor { - switch data.handle.backend { - case .XLA: - let output_device = data.device - let data = Tensor(copying: data, to: .defaultTFEager) - return Tensor(copying: _RawTFEager.exit(data: data), to: output_device) - case .TF_EAGER: - return _RawTFEager.exit(data: data) - } - - } - - /// Computes exponential of x element-wise. \\(y = e^x\\). - /// - /// This function computes the exponential of every element in the input tensor. - /// i.e. `exp(x)` or `e^(x)`, where `x` is the input tensor. - /// `e` denotes Euler's number and is approximately equal to 2.718281. - /// Output is positive for any real input. - /// - /// ```python - /// x = tf.constant(2.0) - /// tf.math.exp(x) ==> 7.389056 - /// - /// x = tf.constant([2.0, 8.0]) - /// tf.math.exp(x) ==> array([7.389056, 2980.958], dtype=float32) - /// ``` - /// - /// For complex numbers, the exponential value is calculated as follows: - /// - /// ``` - /// e^(x+iy) = e^x * e^iy = e^x * (cos y + i sin y) - /// ``` - /// - /// Let's consider complex number 1+1j as an example. - /// e^1 * (cos 1 + i sin 1) = 2.7182818284590 * (0.54030230586+0.8414709848j) - /// - /// ```python - /// x = tf.constant(1 + 1j) - /// tf.math.exp(x) ==> 1.4686939399158851+2.2873552871788423j - /// ``` - @inlinable @inline(__always) - public static func exp( - _ x: Tensor - ) -> Tensor { - switch x.handle.backend { - case .XLA: - return _RawXLA.exp(x) - case .TF_EAGER: - return _RawTFEager.exp(x) - } - - } - - /// Inserts a dimension of 1 into a tensor's shape. - /// - /// Given a tensor `input`, this operation inserts a dimension of 1 at the - /// dimension index `axis` of `input`'s shape. The dimension index `axis` starts at - /// zero; if you specify a negative number for `axis` it is counted backward from - /// the end. - /// - /// This operation is useful if you want to add a batch dimension to a single - /// element. 
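- /// - /// In Swift, the batch-dimension case looks like the following minimal sketch (the - /// image size here is hypothetical; the prose example below describes the same - /// transformation): - /// - /// ```swift - /// import TensorFlow - /// - /// let image = Tensor<Float>(zeros: [64, 64, 3]) - /// let batched = _Raw.expandDims(image, dim: Tensor<Int32>(0)) - /// // batched.shape == [1, 64, 64, 3] - /// ``` - ///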
For example, if you have a single image of shape `[height, width, - /// channels]`, you can make it a batch of 1 image with `expand_dims(image, 0)`, - /// which will make the shape `[1, height, width, channels]`. - /// - /// Other examples: - /// - /// ``` - /// # 't' is a tensor of shape [2] - /// shape(expand_dims(t, 0)) ==> [1, 2] - /// shape(expand_dims(t, 1)) ==> [2, 1] - /// shape(expand_dims(t, -1)) ==> [2, 1] - /// - /// # 't2' is a tensor of shape [2, 3, 5] - /// shape(expand_dims(t2, 0)) ==> [1, 2, 3, 5] - /// shape(expand_dims(t2, 2)) ==> [2, 3, 1, 5] - /// shape(expand_dims(t2, 3)) ==> [2, 3, 5, 1] - /// ``` - /// - /// This operation requires that: - /// - /// `-1-input.dims() <= dim <= input.dims()` - /// - /// This operation is related to `squeeze()`, which removes dimensions of - /// size 1. - /// - /// - Parameter dim: 0-D (scalar). Specifies the dimension index at which to - /// expand the shape of `input`. Must be in the range - /// `[-rank(input) - 1, rank(input)]`. - /// - /// - Output output: Contains the same data as `input`, but its shape has an additional - /// dimension of size 1 added. - @inlinable @inline(__always) - public static func expandDims< - T: TensorFlowScalar, - Tdim: TensorFlowIndex - >( - _ input: Tensor, - dim: Tensor - ) -> Tensor { - switch commonBackend(input.handle.backend, dim.handle.backend) { - case .XLA: - return _RawXLA.expandDims(input, dim: dim) - case .TF_EAGER: - return _RawTFEager.expandDims(input, dim: dim) - } - - } - - @inlinable @inline(__always) - public static func experimentalAssertNextDataset( - inputDataset: VariantHandle, - transformations: StringTensor, - outputTypes: [TensorDataType], - outputShapes: [TensorShape?] - ) -> VariantHandle { - _RawTFEager.experimentalAssertNextDataset( - inputDataset: inputDataset, transformations: transformations, outputTypes: outputTypes, - outputShapes: outputShapes) - } - - /// Creates a dataset that shards the input dataset. - /// - /// Creates a dataset that shards the input dataset by num_workers, returning a - /// sharded dataset for the index-th worker. This attempts to automatically shard - /// a dataset by examining the Dataset graph and inserting a shard op before the - /// inputs to a reader Dataset (e.g. CSVDataset, TFRecordDataset). - /// - /// This dataset will throw a NotFound error if we cannot shard the dataset - /// automatically. - /// - /// - Parameters: - /// - input_dataset: A variant tensor representing the input dataset. - /// - num_workers: A scalar representing the number of workers to distribute this dataset across. - /// - index: A scalar representing the index of the current worker out of num_workers. - @inlinable @inline(__always) - public static func experimentalAutoShardDataset( - inputDataset: VariantHandle, - numWorkers: Tensor, - index: Tensor, - autoShardPolicy: Int64 = 0, - outputTypes: [TensorDataType], - outputShapes: [TensorShape?] - ) -> VariantHandle { - _RawTFEager.experimentalAutoShardDataset( - inputDataset: inputDataset, numWorkers: numWorkers, index: index, - autoShardPolicy: autoShardPolicy, outputTypes: outputTypes, outputShapes: outputShapes) - } - - /// Records the bytes size of each element of `input_dataset` in a StatsAggregator. - @inlinable @inline(__always) - public static func experimentalBytesProducedStatsDataset( - inputDataset: VariantHandle, - tag: StringTensor, - outputTypes: [TensorDataType], - outputShapes: [TensorShape?] 
- ) -> VariantHandle { - _RawTFEager.experimentalBytesProducedStatsDataset( - inputDataset: inputDataset, tag: tag, outputTypes: outputTypes, outputShapes: outputShapes) - } - - @inlinable @inline(__always) - public static func experimentalCSVDataset( - filenames: StringTensor, - compressionType: StringTensor, - bufferSize: Tensor, - header: Tensor, - fieldDelim: StringTensor, - useQuoteDelim: Tensor, - naValue: StringTensor, - selectCols: Tensor, - recordDefaults: OutputTypes, - outputShapes: [TensorShape?] - ) -> VariantHandle { - _RawTFEager.experimentalCSVDataset( - filenames: filenames, compressionType: compressionType, bufferSize: bufferSize, - header: header, fieldDelim: fieldDelim, useQuoteDelim: useQuoteDelim, naValue: naValue, - selectCols: selectCols, recordDefaults: recordDefaults, outputShapes: outputShapes) - } - - @inlinable @inline(__always) - public static func experimentalChooseFastestDataset( - inputDatasets: [VariantHandle], - numExperiments: Int64, - outputTypes: [TensorDataType], - outputShapes: [TensorShape?] - ) -> VariantHandle { - _RawTFEager.experimentalChooseFastestDataset( - inputDatasets: inputDatasets, numExperiments: numExperiments, outputTypes: outputTypes, - outputShapes: outputShapes) - } - - /// Returns the cardinality of `input_dataset`. - /// - /// Returns the cardinality of `input_dataset`. - /// - /// - Parameter input_dataset: A variant tensor representing the dataset to return cardinality for. - /// - /// - Output cardinality: The cardinality of `input_dataset`. Named constants are used to represent - /// infinite and unknown cardinality. - @inlinable @inline(__always) - public static func experimentalDatasetCardinality( - inputDataset: VariantHandle - ) -> Tensor { - _RawTFEager.experimentalDatasetCardinality(inputDataset: inputDataset) - } - - /// Writes the given dataset to the given file using the TFRecord format. - /// - /// - Parameters: - /// - input_dataset: A variant tensor representing the dataset to write. - /// - filename: A scalar string tensor representing the filename to use. - /// - compression_type: A scalar string tensor containing either (i) the empty string (no - /// compression), (ii) "ZLIB", or (iii) "GZIP". - @inlinable @inline(__always) - public static func experimentalDatasetToTFRecord( - inputDataset: VariantHandle, - filename: StringTensor, - compressionType: StringTensor - ) { - _RawTFEager.experimentalDatasetToTFRecord( - inputDataset: inputDataset, filename: filename, compressionType: compressionType) - } - - /// Creates a dataset that batches input elements into a SparseTensor. - /// - /// - Parameters: - /// - input_dataset: A handle to an input dataset. Must have a single component. - /// - batch_size: A scalar representing the number of elements to accumulate in a - /// batch. - /// - row_shape: A vector representing the dense shape of each row in the produced - /// SparseTensor. The shape may be partially specified, using `-1` to indicate - /// that a particular dimension should use the maximum size of all batch elements. - @inlinable @inline(__always) - public static func experimentalDenseToSparseBatchDataset( - inputDataset: VariantHandle, - batchSize: Tensor, - rowShape: Tensor, - outputTypes: [TensorDataType], - outputShapes: [TensorShape?] 
- ) -> VariantHandle { - _RawTFEager.experimentalDenseToSparseBatchDataset( - inputDataset: inputDataset, batchSize: batchSize, rowShape: rowShape, - outputTypes: outputTypes, outputShapes: outputShapes) - } - - /// A substitute for `InterleaveDataset` on a fixed list of `N` datasets. - /// - /// - Parameters: - /// - selector_input_dataset: A dataset of scalar `DT_INT64` elements that determines which of the - /// `N` data inputs should produce the next output element. - /// - data_input_datasets: `N` datasets with the same type that will be interleaved according to - /// the values of `selector_input_dataset`. - @inlinable @inline(__always) - public static func experimentalDirectedInterleaveDataset( - selectorInputDataset: VariantHandle, - dataInputDatasets: [VariantHandle], - outputTypes: [TensorDataType], - outputShapes: [TensorShape?] - ) -> VariantHandle { - _RawTFEager.experimentalDirectedInterleaveDataset( - selectorInputDataset: selectorInputDataset, dataInputDatasets: dataInputDatasets, - outputTypes: outputTypes, outputShapes: outputShapes) - } - - /// Creates a dataset that computes a group-by on `input_dataset`. - /// - /// Creates a dataset that computes a group-by on `input_dataset`. - /// - /// - Parameters: - /// - input_dataset: A variant tensor representing the input dataset. - /// - key_func_other_arguments: A list of tensors, typically values that were captured when - /// building a closure for `key_func`. - /// - init_func_other_arguments: A list of tensors, typically values that were captured when - /// building a closure for `init_func`. - /// - reduce_func_other_arguments: A list of tensors, typically values that were captured when - /// building a closure for `reduce_func`. - /// - finalize_func_other_arguments: A list of tensors, typically values that were captured when - /// building a closure for `finalize_func`. - /// - /// - Attrs: - /// - key_func: A function mapping an element of `input_dataset`, concatenated - /// with `key_func_other_arguments` to a scalar value of type DT_INT64. - /// - init_func: A function mapping a key of type DT_INT64, concatenated with - /// `init_func_other_arguments` to the initial reducer state. - /// - reduce_func: A function mapping the current reducer state and an element of `input_dataset`, - /// concatenated with `reduce_func_other_arguments` to a new reducer state. - /// - finalize_func: A function mapping the final reducer state to an output element. - @inlinable @inline(__always) - public static func experimentalGroupByReducerDataset< - KeyfuncIn: TensorGroup, - KeyfuncOut: TensorGroup, - InitfuncIn: TensorGroup, - InitfuncOut: TensorGroup, - ReducefuncIn: TensorGroup, - ReducefuncOut: TensorGroup, - FinalizefuncIn: TensorGroup, - FinalizefuncOut: TensorGroup, - TkeyFuncOtherArguments: TensorArrayProtocol, - TinitFuncOtherArguments: TensorArrayProtocol, - TreduceFuncOtherArguments: TensorArrayProtocol, - TfinalizeFuncOtherArguments: TensorArrayProtocol - >( - inputDataset: VariantHandle, - keyFuncOtherArguments: TkeyFuncOtherArguments, - initFuncOtherArguments: TinitFuncOtherArguments, - reduceFuncOtherArguments: TreduceFuncOtherArguments, - finalizeFuncOtherArguments: TfinalizeFuncOtherArguments, - keyFunc: (KeyfuncIn) -> KeyfuncOut, - initFunc: (InitfuncIn) -> InitfuncOut, - reduceFunc: (ReducefuncIn) -> ReducefuncOut, - finalizeFunc: (FinalizefuncIn) -> FinalizefuncOut, - outputTypes: [TensorDataType], - outputShapes: [TensorShape?] 
- ) -> VariantHandle { - _RawTFEager.experimentalGroupByReducerDataset( - inputDataset: inputDataset, keyFuncOtherArguments: keyFuncOtherArguments, - initFuncOtherArguments: initFuncOtherArguments, - reduceFuncOtherArguments: reduceFuncOtherArguments, - finalizeFuncOtherArguments: finalizeFuncOtherArguments, keyFunc: keyFunc, - initFunc: initFunc, reduceFunc: reduceFunc, finalizeFunc: finalizeFunc, - outputTypes: outputTypes, outputShapes: outputShapes) - } - - /// Creates a dataset that computes a windowed group-by on `input_dataset`. - /// - /// // TODO(mrry): Support non-int64 keys. - /// - /// - Attr key_func: A function mapping an element of `input_dataset`, concatenated - /// with `key_func_other_arguments` to a scalar value of type DT_INT64. - @inlinable @inline(__always) - public static func experimentalGroupByWindowDataset< - KeyfuncIn: TensorGroup, - KeyfuncOut: TensorGroup, - ReducefuncIn: TensorGroup, - ReducefuncOut: TensorGroup, - WindowsizefuncIn: TensorGroup, - WindowsizefuncOut: TensorGroup, - TkeyFuncOtherArguments: TensorArrayProtocol, - TreduceFuncOtherArguments: TensorArrayProtocol, - TwindowSizeFuncOtherArguments: TensorArrayProtocol - >( - inputDataset: VariantHandle, - keyFuncOtherArguments: TkeyFuncOtherArguments, - reduceFuncOtherArguments: TreduceFuncOtherArguments, - windowSizeFuncOtherArguments: TwindowSizeFuncOtherArguments, - keyFunc: (KeyfuncIn) -> KeyfuncOut, - reduceFunc: (ReducefuncIn) -> ReducefuncOut, - windowSizeFunc: (WindowsizefuncIn) -> WindowsizefuncOut, - outputTypes: [TensorDataType], - outputShapes: [TensorShape?] - ) -> VariantHandle { - _RawTFEager.experimentalGroupByWindowDataset( - inputDataset: inputDataset, keyFuncOtherArguments: keyFuncOtherArguments, - reduceFuncOtherArguments: reduceFuncOtherArguments, - windowSizeFuncOtherArguments: windowSizeFuncOtherArguments, keyFunc: keyFunc, - reduceFunc: reduceFunc, windowSizeFunc: windowSizeFunc, outputTypes: outputTypes, - outputShapes: outputShapes) - } - - /// Creates a dataset that contains the elements of `input_dataset` ignoring errors. - @inlinable @inline(__always) - public static func experimentalIgnoreErrorsDataset( - inputDataset: VariantHandle, - outputTypes: [TensorDataType], - outputShapes: [TensorShape?] - ) -> VariantHandle { - _RawTFEager.experimentalIgnoreErrorsDataset( - inputDataset: inputDataset, outputTypes: outputTypes, outputShapes: outputShapes) - } - - /// Returns the name of the device on which `resource` has been placed. - @inlinable @inline(__always) - public static func experimentalIteratorGetDevice( - resource: ResourceHandle - ) -> StringTensor { - _RawTFEager.experimentalIteratorGetDevice(resource: resource) - } - - @inlinable @inline(__always) - public static func experimentalLMDBDataset( - filenames: StringTensor, - outputTypes: [TensorDataType], - outputShapes: [TensorShape?] - ) -> VariantHandle { - _RawTFEager.experimentalLMDBDataset( - filenames: filenames, outputTypes: outputTypes, outputShapes: outputShapes) - } - - /// Records the latency of producing `input_dataset` elements in a StatsAggregator. - @inlinable @inline(__always) - public static func experimentalLatencyStatsDataset( - inputDataset: VariantHandle, - tag: StringTensor, - outputTypes: [TensorDataType], - outputShapes: [TensorShape?] - ) -> VariantHandle { - _RawTFEager.experimentalLatencyStatsDataset( - inputDataset: inputDataset, tag: tag, outputTypes: outputTypes, outputShapes: outputShapes) - } - - /// Creates a dataset that fuses mapping with batching. 
- /// - /// Creates a dataset that applies `f` to the outputs of `input_dataset` and then - /// batches `batch_size` of them. - /// - /// Unlike a "MapDataset", which applies `f` sequentially, this dataset invokes up - /// to `batch_size * num_parallel_batches` copies of `f` in parallel. - /// - /// - Parameters: - /// - input_dataset: A variant tensor representing the input dataset. - /// - other_arguments: A list of tensors, typically values that were captured when building a closure - /// for `f`. - /// - batch_size: A scalar representing the number of elements to accumulate in a - /// batch. It determines the number of concurrent invocations of `f` that process - /// elements from `input_dataset` in parallel. - /// - num_parallel_calls: A scalar representing the maximum number of parallel invocations of the `map_fn` - /// function. Applying the `map_fn` on consecutive input elements in parallel has - /// the potential to improve input pipeline throughput. - /// - drop_remainder: A scalar representing whether the last batch should be dropped in case its size - /// is smaller than desired. - /// - /// - Attr f: A function to apply to the outputs of `input_dataset`. - @inlinable @inline(__always) - public static func experimentalMapAndBatchDataset< - FIn: TensorGroup, - FOut: TensorGroup, - Targuments: TensorArrayProtocol - >( - inputDataset: VariantHandle, - otherArguments: Targuments, - batchSize: Tensor, - numParallelCalls: Tensor, - dropRemainder: Tensor, - f: (FIn) -> FOut, - outputTypes: [TensorDataType], - outputShapes: [TensorShape?], - preserveCardinality: Bool = false - ) -> VariantHandle { - _RawTFEager.experimentalMapAndBatchDataset( - inputDataset: inputDataset, otherArguments: otherArguments, batchSize: batchSize, - numParallelCalls: numParallelCalls, dropRemainder: dropRemainder, f: f, - outputTypes: outputTypes, outputShapes: outputShapes, - preserveCardinality: preserveCardinality) - } - - /// Creates a dataset that applies `f` to the outputs of `input_dataset`. - @inlinable @inline(__always) - public static func experimentalMapDataset< - FIn: TensorGroup, - FOut: TensorGroup, - Targuments: TensorArrayProtocol - >( - inputDataset: VariantHandle, - otherArguments: Targuments, - f: (FIn) -> FOut, - outputTypes: [TensorDataType], - outputShapes: [TensorShape?], - useInterOpParallelism: Bool = true, - preserveCardinality: Bool = false - ) -> VariantHandle { - _RawTFEager.experimentalMapDataset( - inputDataset: inputDataset, otherArguments: otherArguments, f: f, outputTypes: outputTypes, - outputShapes: outputShapes, useInterOpParallelism: useInterOpParallelism, - preserveCardinality: preserveCardinality) - } - - @inlinable @inline(__always) - public static func experimentalMatchingFilesDataset( - patterns: StringTensor - ) -> VariantHandle { - _RawTFEager.experimentalMatchingFilesDataset(patterns: patterns) - } - - /// Creates a dataset that overrides the maximum intra-op parallelism. - /// - /// - Parameter max_intra_op_parallelism: Identifies the maximum intra-op parallelism to use. - @inlinable @inline(__always) - public static func experimentalMaxIntraOpParallelismDataset( - inputDataset: VariantHandle, - maxIntraOpParallelism: Tensor, - outputTypes: [TensorDataType], - outputShapes: [TensorShape?] 
-    ) -> VariantHandle {
-      _RawTFEager.experimentalMaxIntraOpParallelismDataset(
-        inputDataset: inputDataset, maxIntraOpParallelism: maxIntraOpParallelism,
-        outputTypes: outputTypes, outputShapes: outputShapes)
-    }
-
-    @inlinable @inline(__always)
-    public static func experimentalNonSerializableDataset(
-      inputDataset: VariantHandle,
-      outputTypes: [TensorDataType],
-      outputShapes: [TensorShape?]
-    ) -> VariantHandle {
-      _RawTFEager.experimentalNonSerializableDataset(
-        inputDataset: inputDataset, outputTypes: outputTypes, outputShapes: outputShapes)
-    }
-
-    /// Creates a dataset that applies `f` to the outputs of `input_dataset`.
-    ///
-    /// The resulting dataset is similar to the `InterleaveDataset`, with the exception
-    /// that if retrieving the next value from a dataset would cause the requester to
-    /// block, it will skip that input dataset. This dataset is especially useful
-    /// when loading data from variable-latency datastores (e.g. HDFS, GCS), as it
-    /// allows the training step to proceed so long as some data is available.
-    ///
-    /// !! WARNING !! This dataset is not deterministic!
-    ///
-    /// - Attr f: A function mapping elements of `input_dataset`, concatenated with
-    ///   `other_arguments`, to a Dataset variant that contains elements matching
-    ///   `output_types` and `output_shapes`.
-    @inlinable @inline(__always)
-    public static func experimentalParallelInterleaveDataset<
-      FIn: TensorGroup,
-      FOut: TensorGroup,
-      Targuments: TensorArrayProtocol
-    >(
-      inputDataset: VariantHandle,
-      otherArguments: Targuments,
-      cycleLength: Tensor,
-      blockLength: Tensor,
-      sloppy: Tensor,
-      bufferOutputElements: Tensor,
-      prefetchInputElements: Tensor,
-      f: (FIn) -> FOut,
-      outputTypes: [TensorDataType],
-      outputShapes: [TensorShape?]
-    ) -> VariantHandle {
-      _RawTFEager.experimentalParallelInterleaveDataset(
-        inputDataset: inputDataset, otherArguments: otherArguments, cycleLength: cycleLength,
-        blockLength: blockLength, sloppy: sloppy, bufferOutputElements: bufferOutputElements,
-        prefetchInputElements: prefetchInputElements, f: f, outputTypes: outputTypes,
-        outputShapes: outputShapes)
-    }
-
-    /// Transforms `input_dataset` containing `Example` protos as vectors of DT_STRING into a dataset of `Tensor` or `SparseTensor` objects representing the parsed features.
-    ///
-    /// - Parameter dense_defaults: A dict mapping string keys to `Tensor`s.
-    ///   The keys of the dict must match the dense_keys of the feature.
-    ///
-    /// - Attrs:
-    ///   - sparse_keys: A list of string keys in the examples features.
-    ///     The results for these keys will be returned as `SparseTensor` objects.
-    ///   - dense_keys: A list of Ndense string Tensors (scalars).
-    ///     The keys expected in the Examples features associated with dense values.
-    ///   - sparse_types: A list of `DTypes` of the same length as `sparse_keys`.
-    ///     Only `tf.float32` (`FloatList`), `tf.int64` (`Int64List`),
-    ///     and `tf.string` (`BytesList`) are supported.
-    ///   - Tdense: A list of DTypes of the same length as `dense_keys`.
-    ///     Only `tf.float32` (`FloatList`), `tf.int64` (`Int64List`),
-    ///     and `tf.string` (`BytesList`) are supported.
-    ///
-    ///   - dense_shapes: List of tuples with the same length as `dense_keys`.
-    ///     The shape of the data for each dense feature referenced by `dense_keys`.
-    ///     Required for any input tensors identified by `dense_keys`. Must be
-    ///     either fully defined, or may contain an unknown first dimension.
-    /// An unknown first dimension means the feature is treated as having
-    /// a variable number of blocks, and the output shape along this dimension
-    /// is considered unknown at graph build time. Padding is applied for
-    /// minibatch elements smaller than the maximum number of blocks for the
-    /// given feature along this dimension.
-    ///   - output_types: The type list for the return values.
-    ///   - output_shapes: The list of shapes being produced.
-    @inlinable @inline(__always)
-    public static func experimentalParseExampleDataset(
-      inputDataset: VariantHandle,
-      numParallelCalls: Tensor,
-      denseDefaults: Tdense,
-      sparseKeys: [String],
-      denseKeys: [String],
-      sparseTypes: [TensorDataType],
-      denseShapes: [TensorShape?],
-      outputTypes: [TensorDataType],
-      outputShapes: [TensorShape?],
-      sloppy: Bool = false
-    ) -> VariantHandle {
-      _RawTFEager.experimentalParseExampleDataset(
-        inputDataset: inputDataset, numParallelCalls: numParallelCalls,
-        denseDefaults: denseDefaults, sparseKeys: sparseKeys, denseKeys: denseKeys,
-        sparseTypes: sparseTypes, denseShapes: denseShapes, outputTypes: outputTypes,
-        outputShapes: outputShapes, sloppy: sloppy)
-    }
-
-    /// Creates a dataset that uses a custom thread pool to compute `input_dataset`.
-    ///
-    /// - Parameter num_threads: Identifies the number of threads to use for the private threadpool.
-    @inlinable @inline(__always)
-    public static func experimentalPrivateThreadPoolDataset(
-      inputDataset: VariantHandle,
-      numThreads: Tensor,
-      outputTypes: [TensorDataType],
-      outputShapes: [TensorShape?]
-    ) -> VariantHandle {
-      _RawTFEager.experimentalPrivateThreadPoolDataset(
-        inputDataset: inputDataset, numThreads: numThreads, outputTypes: outputTypes,
-        outputShapes: outputShapes)
-    }
-
-    /// Creates a Dataset that returns pseudorandom numbers.
-    ///
-    /// - Parameters:
-    ///   - seed: A scalar seed for the random number generator. If either seed or
-    ///     seed2 is set to be non-zero, the random number generator is seeded
-    ///     by the given seed. Otherwise, a random seed is used.
-    ///   - seed2: A second scalar seed to avoid seed collision.
-    @inlinable @inline(__always)
-    public static func experimentalRandomDataset(
-      seed: Tensor,
-      seed2: Tensor,
-      outputTypes: [TensorDataType],
-      outputShapes: [TensorShape?]
-    ) -> VariantHandle {
-      _RawTFEager.experimentalRandomDataset(
-        seed: seed, seed2: seed2, outputTypes: outputTypes, outputShapes: outputShapes)
-    }
-
-    /// Creates a dataset that changes the batch size.
-    ///
-    /// Creates a dataset that changes the batch size of the dataset to current batch
-    /// size // num_replicas.
-    ///
-    /// - Parameters:
-    ///   - input_dataset: A variant tensor representing the input dataset.
-    ///   - num_replicas: A scalar representing the number of replicas to distribute this batch across. As
-    ///     a result of this transformation the current batch size would end up being
-    ///     divided by this parameter.
-    @inlinable @inline(__always)
-    public static func experimentalRebatchDataset(
-      inputDataset: VariantHandle,
-      numReplicas: Tensor,
-      outputTypes: [TensorDataType],
-      outputShapes: [TensorShape?],
-      useFallback: Bool = true
-    ) -> VariantHandle {
-      _RawTFEager.experimentalRebatchDataset(
-        inputDataset: inputDataset, numReplicas: numReplicas, outputTypes: outputTypes,
-        outputShapes: outputShapes, useFallback: useFallback)
-    }
-
-    /// Creates a dataset that successively reduces `f` over the elements of `input_dataset`.
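The scan transformation summarized above threads an explicit state through the elements of the input dataset, emitting one output per element. A minimal pure-Swift sketch of those semantics (`scan` here is an illustrative stand-in, not part of this file):

```swift
// Pure-Swift sketch of the scan-dataset semantics: `f` maps
// (state, element) to (newState, output); outputs are emitted in order.
func scan<State, Element, Output>(
  _ elements: [Element], initialState: State,
  _ f: (State, Element) -> (State, Output)
) -> [Output] {
  var state = initialState
  var outputs: [Output] = []
  for element in elements {
    let (newState, output) = f(state, element)
    state = newState
    outputs.append(output)
  }
  return outputs
}

// Running sums: each step carries the sum so far as its state.
// scan([1, 2, 3], initialState: 0) { ($0 + $1, $0 + $1) } == [1, 3, 6]
```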
- @inlinable @inline(__always) - public static func experimentalScanDataset< - FIn: TensorGroup, - FOut: TensorGroup, - Tstate: TensorArrayProtocol, - Targuments: TensorArrayProtocol - >( - inputDataset: VariantHandle, - initialState: Tstate, - otherArguments: Targuments, - f: (FIn) -> FOut, - outputTypes: [TensorDataType], - outputShapes: [TensorShape?], - preserveCardinality: Bool = false - ) -> VariantHandle { - _RawTFEager.experimentalScanDataset( - inputDataset: inputDataset, initialState: initialState, otherArguments: otherArguments, - f: f, outputTypes: outputTypes, outputShapes: outputShapes, - preserveCardinality: preserveCardinality) - } - - @inlinable @inline(__always) - public static func experimentalSetStatsAggregatorDataset( - inputDataset: VariantHandle, - statsAggregator: ResourceHandle, - tag: StringTensor, - counterPrefix: StringTensor, - outputTypes: [TensorDataType], - outputShapes: [TensorShape?] - ) -> VariantHandle { - _RawTFEager.experimentalSetStatsAggregatorDataset( - inputDataset: inputDataset, statsAggregator: statsAggregator, tag: tag, - counterPrefix: counterPrefix, outputTypes: outputTypes, outputShapes: outputShapes) - } - - @inlinable @inline(__always) - public static func experimentalSleepDataset( - inputDataset: VariantHandle, - sleepMicroseconds: Tensor, - outputTypes: [TensorDataType], - outputShapes: [TensorShape?] - ) -> VariantHandle { - _RawTFEager.experimentalSleepDataset( - inputDataset: inputDataset, sleepMicroseconds: sleepMicroseconds, outputTypes: outputTypes, - outputShapes: outputShapes) - } - - /// Creates a dataset that passes a sliding window over `input_dataset`. - /// - /// - Parameters: - /// - window_size: A scalar representing the number of elements in the - /// sliding window. - /// - window_shift: A scalar representing the steps moving the sliding window - /// forward in one iteration. It must be positive. - /// - window_stride: A scalar representing the stride of the input elements of the sliding window. - /// It must be positive. - @inlinable @inline(__always) - public static func experimentalSlidingWindowDataset( - inputDataset: VariantHandle, - windowSize: Tensor, - windowShift: Tensor, - windowStride: Tensor, - outputTypes: [TensorDataType], - outputShapes: [TensorShape?] - ) -> VariantHandle { - _RawTFEager.experimentalSlidingWindowDataset( - inputDataset: inputDataset, windowSize: windowSize, windowShift: windowShift, - windowStride: windowStride, outputTypes: outputTypes, outputShapes: outputShapes) - } - - /// Creates a dataset that executes a SQL query and emits rows of the result set. - /// - /// - Parameters: - /// - driver_name: The database type. Currently, the only supported type is 'sqlite'. - /// - data_source_name: A connection string to connect to the database. - /// - query: A SQL query to execute. - @inlinable @inline(__always) - public static func experimentalSqlDataset( - driverName: StringTensor, - dataSourceName: StringTensor, - query: StringTensor, - outputTypes: [TensorDataType], - outputShapes: [TensorShape?] - ) -> VariantHandle { - _RawTFEager.experimentalSqlDataset( - driverName: driverName, dataSourceName: dataSourceName, query: query, - outputTypes: outputTypes, outputShapes: outputShapes) - } - - /// Creates a statistics manager resource. 
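For the sliding-window op above, `window_size`, `window_shift`, and `window_stride` determine which input indices each emitted window covers. A small pure-Swift sketch of the documented semantics (`slidingWindowIndices` is a hypothetical helper for illustration only):

```swift
// Sketch: the input indices covered by each emitted window under the
// window_size / window_shift / window_stride semantics described above.
func slidingWindowIndices(count: Int, size: Int, shift: Int, stride: Int) -> [[Int]] {
  var windows: [[Int]] = []
  var start = 0
  while start + (size - 1) * stride < count {
    windows.append((0..<size).map { start + $0 * stride })
    start += shift
  }
  return windows
}

// slidingWindowIndices(count: 6, size: 3, shift: 1, stride: 1)
// == [[0, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5]]
```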
-    @inlinable @inline(__always)
-    public static func experimentalStatsAggregatorHandle(
-      container: String,
-      sharedName: String
-    ) -> ResourceHandle {
-      _RawTFEager.experimentalStatsAggregatorHandle(container: container, sharedName: sharedName)
-    }
-
-    /// Produces a summary of any statistics recorded by the given statistics manager.
-    @inlinable @inline(__always)
-    public static func experimentalStatsAggregatorSummary(
-      iterator: ResourceHandle
-    ) -> StringTensor {
-      _RawTFEager.experimentalStatsAggregatorSummary(iterator: iterator)
-    }
-
-    /// Creates a dataset that stops iteration when `predicate` is false.
-    ///
-    /// The `predicate` function must return a scalar boolean and accept the
-    /// following arguments:
-    ///
-    /// * One tensor for each component of an element of `input_dataset`.
-    /// * One tensor for each value in `other_arguments`.
-    ///
-    /// - Parameter other_arguments: A list of tensors, typically values that were captured when
-    ///   building a closure for `predicate`.
-    ///
-    /// - Attr predicate: A function returning a scalar boolean.
-    @inlinable @inline(__always)
-    public static func experimentalTakeWhileDataset<
-      PredicateIn: TensorGroup,
-      PredicateOut: TensorGroup,
-      Targuments: TensorArrayProtocol
-    >(
-      inputDataset: VariantHandle,
-      otherArguments: Targuments,
-      predicate: (PredicateIn) -> PredicateOut,
-      outputTypes: [TensorDataType],
-      outputShapes: [TensorShape?]
-    ) -> VariantHandle {
-      _RawTFEager.experimentalTakeWhileDataset(
-        inputDataset: inputDataset, otherArguments: otherArguments, predicate: predicate,
-        outputTypes: outputTypes, outputShapes: outputShapes)
-    }
-
-    /// Creates a dataset that uses a custom thread pool to compute `input_dataset`.
-    ///
-    /// - Parameter thread_pool: A resource produced by the ThreadPoolHandle op.
-    @inlinable @inline(__always)
-    public static func experimentalThreadPoolDataset(
-      inputDataset: VariantHandle,
-      threadPool: ResourceHandle,
-      outputTypes: [TensorDataType],
-      outputShapes: [TensorShape?]
-    ) -> VariantHandle {
-      _RawTFEager.experimentalThreadPoolDataset(
-        inputDataset: inputDataset, threadPool: threadPool, outputTypes: outputTypes,
-        outputShapes: outputShapes)
-    }
-
-    /// Creates a dataset that uses a custom thread pool to compute `input_dataset`.
-    ///
-    /// - Attrs:
-    ///   - num_threads: The number of threads in the thread pool.
-    ///   - max_intra_op_parallelism: The maximum degree of parallelism to use within operations that execute on this
-    ///     threadpool.
-    ///   - display_name: A human-readable name for the threads that may be visible in some
-    ///     visualizations.
-    ///
-    /// - Output handle: A resource that can be consumed by one or more ExperimentalThreadPoolDataset
-    ///   ops.
-    @inlinable @inline(__always)
-    public static func experimentalThreadPoolHandle(
-      numThreads: Int64,
-      maxIntraOpParallelism: Int64 = 1,
-      displayName: String,
-      container: String,
-      sharedName: String
-    ) -> ResourceHandle {
-      _RawTFEager.experimentalThreadPoolHandle(
-        numThreads: numThreads, maxIntraOpParallelism: maxIntraOpParallelism,
-        displayName: displayName, container: container, sharedName: sharedName)
-    }
-
-    /// A dataset that splits the elements of its input into multiple elements.
-    @inlinable @inline(__always)
-    public static func experimentalUnbatchDataset(
-      inputDataset: VariantHandle,
-      outputTypes: [TensorDataType],
-      outputShapes: [TensorShape?]
-    ) -> VariantHandle {
-      _RawTFEager.experimentalUnbatchDataset(
-        inputDataset: inputDataset, outputTypes: outputTypes, outputShapes: outputShapes)
-    }
-
-    /// Creates a dataset that contains the unique elements of `input_dataset`.
-    @inlinable @inline(__always)
-    public static func experimentalUniqueDataset(
-      inputDataset: VariantHandle,
-      outputTypes: [TensorDataType],
-      outputShapes: [TensorShape?]
-    ) -> VariantHandle {
-      _RawTFEager.experimentalUniqueDataset(
-        inputDataset: inputDataset, outputTypes: outputTypes, outputShapes: outputShapes)
-    }
-
-    /// Computes `exp(x) - 1` element-wise.
-    ///
-    ///   i.e. `exp(x) - 1` or `e^(x) - 1`, where `x` is the input tensor.
-    ///   `e` denotes Euler's number and is approximately equal to 2.718281.
-    ///
-    ///   ```python
-    ///   x = tf.constant(2.0)
-    ///   tf.math.expm1(x) ==> 6.389056
-    ///
-    ///   x = tf.constant([2.0, 8.0])
-    ///   tf.math.expm1(x) ==> array([6.389056, 2979.958], dtype=float32)
-    ///
-    ///   x = tf.constant(1 + 1j)
-    ///   tf.math.expm1(x) ==> (0.46869393991588515+2.2873552871788423j)
-    ///   ```
-    @inlinable @inline(__always)
-    public static func expm1(
-      _ x: Tensor
-    ) -> Tensor {
-      switch x.handle.backend {
-      case .XLA:
-        return _RawXLA.expm1(x)
-      case .TF_EAGER:
-        return _RawTFEager.expm1(x)
-      }
-
-    }
-
-    /// Extracts a glimpse from the input tensor.
-    ///
-    /// Returns a set of windows called glimpses extracted at location
-    /// `offsets` from the input tensor. If the windows only partially
-    /// overlap the inputs, the non-overlapping areas will be filled with
-    /// random noise.
-    ///
-    /// The result is a 4-D tensor of shape `[batch_size, glimpse_height,
-    /// glimpse_width, channels]`. The channels and batch dimensions are the
-    /// same as that of the input tensor. The height and width of the output
-    /// windows are specified in the `size` parameter.
-    ///
-    /// The arguments `normalized` and `centered` control how the windows are built:
-    ///
-    /// * If the coordinates are normalized but not centered, 0.0 and 1.0
-    ///   correspond to the minimum and maximum of each height and width
-    ///   dimension.
-    /// * If the coordinates are both normalized and centered, they range from
-    ///   -1.0 to 1.0. The coordinates (-1.0, -1.0) correspond to the upper
-    ///   left corner, the lower right corner is located at (1.0, 1.0) and the
-    ///   center is at (0, 0).
-    /// * If the coordinates are not normalized they are interpreted as
-    ///   numbers of pixels.
-    ///
-    /// - Parameters:
-    ///   - input: A 4-D float tensor of shape `[batch_size, height, width, channels]`.
-    ///   - size: A 1-D tensor of 2 elements containing the size of the glimpses
-    ///     to extract. The glimpse height must be specified first, followed
-    ///     by the glimpse width.
-    ///   - offsets: A 2-D integer tensor of shape `[batch_size, 2]` containing
-    ///     the y, x locations of the center of each window.
-    ///
-    /// - Attrs:
-    ///   - centered: indicates if the offset coordinates are centered relative to
-    ///     the image, in which case the (0, 0) offset is relative to the center
-    ///     of the input images. If false, the (0,0) offset corresponds to the
-    ///     upper left corner of the input images.
-    ///   - normalized: indicates if the offset coordinates are normalized.
-    ///   - uniform_noise: indicates if the noise should be generated using a
-    ///     uniform distribution or a Gaussian distribution.
-    ///   - noise: indicates if the noise should be `uniform`, `gaussian`, or
-    ///     `zero`. The default is `uniform`, which means the noise type
-    ///     will be decided by `uniform_noise`.
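The four combinations of `centered` and `normalized` described above reduce to a simple coordinate mapping. A pure-Swift sketch of that convention (`pixelCoordinate` is a hypothetical helper; the kernel remains the source of truth):

```swift
// Sketch of the documented offset convention: map one glimpse offset
// to a pixel coordinate along an axis of length `extent`.
func pixelCoordinate(offset: Double, extent: Double,
                     centered: Bool, normalized: Bool) -> Double {
  var x = offset
  // Normalized, centered coordinates span [-1, 1]; normalized,
  // non-centered coordinates span [0, 1].
  if normalized { x *= centered ? extent / 2 : extent }
  // Centered coordinates put offset (0, 0) at the image center.
  if centered { x += extent / 2 }
  return x
}

// pixelCoordinate(offset: 0, extent: 100, centered: true, normalized: true) == 50
```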
- /// - /// - Output glimpse: A tensor representing the glimpses `[batch_size, - /// glimpse_height, glimpse_width, channels]`. - @inlinable @inline(__always) - public static func extractGlimpse( - _ input: Tensor, - size: Tensor, - offsets: Tensor, - centered: Bool = true, - normalized: Bool = true, - uniformNoise: Bool = true, - noise: String = "uniform" - ) -> Tensor { - switch commonBackend( - commonBackend(input.handle.backend, size.handle.backend), offsets.handle.backend) - { - case .XLA: - let output_device = offsets.device - let input = Tensor(copying: input, to: .defaultTFEager) - let size = Tensor(copying: size, to: .defaultTFEager) - let offsets = Tensor(copying: offsets, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.extractGlimpse( - input, size: size, offsets: offsets, centered: centered, normalized: normalized, - uniformNoise: uniformNoise, noise: noise), to: output_device) - case .TF_EAGER: - return _RawTFEager.extractGlimpse( - input, size: size, offsets: offsets, centered: centered, normalized: normalized, - uniformNoise: uniformNoise, noise: noise) - } - - } - - /// Extract `patches` from `images` and put them in the "depth" output dimension. - /// - /// - Parameter images: 4-D Tensor with shape `[batch, in_rows, in_cols, depth]`. - /// - /// - Attrs: - /// - ksizes: The size of the sliding window for each dimension of `images`. - /// - strides: How far the centers of two consecutive patches are in - /// the images. Must be: `[1, stride_rows, stride_cols, 1]`. - /// - rates: Must be: `[1, rate_rows, rate_cols, 1]`. This is the - /// input stride, specifying how far two consecutive patch samples are in the - /// input. Equivalent to extracting patches with - /// `patch_sizes_eff = patch_sizes + (patch_sizes - 1) * (rates - 1)`, followed by - /// subsampling them spatially by a factor of `rates`. This is equivalent to - /// `rate` in dilated (a.k.a. Atrous) convolutions. - /// - padding: The type of padding algorithm to use. - /// - /// - Output patches: 4-D Tensor with shape `[batch, out_rows, out_cols, ksize_rows * - /// ksize_cols * depth]` containing image patches with size - /// `ksize_rows x ksize_cols x depth` vectorized in the "depth" dimension. Note - /// `out_rows` and `out_cols` are the dimensions of the output patches. - @inlinable @inline(__always) - public static func extractImagePatches( - images: Tensor, - ksizes: [Int32], - strides: [Int32], - rates: [Int32], - padding: Padding - ) -> Tensor { - switch images.handle.backend { - case .XLA: - let output_device = images.device - let images = Tensor(copying: images, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.extractImagePatches( - images: images, ksizes: ksizes, strides: strides, rates: rates, padding: padding), - to: output_device) - case .TF_EAGER: - return _RawTFEager.extractImagePatches( - images: images, ksizes: ksizes, strides: strides, rates: rates, padding: padding) - } - - } - - /// Extract the shape information of a JPEG-encoded image. - /// - /// This op only parses the image header, so it is much faster than DecodeJpeg. - /// - /// - Parameter contents: 0-D. The JPEG-encoded image. - /// - /// - Attr output_type: (Optional) The output type of the operation (int32 or int64). - /// Defaults to int32. - /// - /// - Output image_shape: 1-D. The image shape with format [height, width, channels]. 
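The effective patch size formula quoted for `rates` above can be checked directly; for example, a 3-wide patch at rate 2 spans 5 input pixels:

```swift
// Worked example of the dilated patch-size formula from the doc comment:
// patch_sizes_eff = patch_sizes + (patch_sizes - 1) * (rates - 1).
let patchSize = 3
let rate = 2
let effectivePatchSize = patchSize + (patchSize - 1) * (rate - 1)  // 5
```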
- @inlinable @inline(__always) - public static func extractJpegShape( - contents: StringTensor - ) -> Tensor { - _RawTFEager.extractJpegShape(contents: contents) - } - - /// Extract `patches` from `input` and put them in the "depth" output dimension. 3D extension of `extract_image_patches`. - /// - /// - Parameter input: 5-D Tensor with shape `[batch, in_planes, in_rows, in_cols, depth]`. - /// - /// - Attrs: - /// - ksizes: The size of the sliding window for each dimension of `input`. - /// - strides: 1-D of length 5. How far the centers of two consecutive patches are in - /// `input`. Must be: `[1, stride_planes, stride_rows, stride_cols, 1]`. - /// - padding: The type of padding algorithm to use. - /// - /// We specify the size-related attributes as: - /// - /// ```python - /// ksizes = [1, ksize_planes, ksize_rows, ksize_cols, 1] - /// strides = [1, stride_planes, strides_rows, strides_cols, 1] - /// ``` - /// - /// - Output patches: 5-D Tensor with shape `[batch, out_planes, out_rows, out_cols, - /// ksize_planes * ksize_rows * ksize_cols * depth]` containing patches - /// with size `ksize_planes x ksize_rows x ksize_cols x depth` vectorized - /// in the "depth" dimension. Note `out_planes`, `out_rows` and `out_cols` - /// are the dimensions of the output patches. - @inlinable @inline(__always) - public static func extractVolumePatches( - _ input: Tensor, - ksizes: [Int32], - strides: [Int32], - padding: Padding - ) -> Tensor { - switch input.handle.backend { - case .XLA: - let output_device = input.device - let input = Tensor(copying: input, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.extractVolumePatches( - input, ksizes: ksizes, strides: strides, padding: padding), to: output_device) - case .TF_EAGER: - return _RawTFEager.extractVolumePatches( - input, ksizes: ksizes, strides: strides, padding: padding) - } - - } - - /// Fast Fourier transform. - /// - /// Computes the 1-dimensional discrete Fourier transform over the inner-most - /// dimension of `input`. - /// - /// - Parameter input: A complex tensor. - /// - /// - Output output: A complex tensor of the same shape as `input`. The inner-most - /// dimension of `input` is replaced with its 1D Fourier transform. - /// - /// @compatibility(numpy) - /// Equivalent to np.fft.fft - /// @end_compatibility - @inlinable @inline(__always) - public static func fFT( - _ input: Tensor - ) -> Tensor { - switch input.handle.backend { - case .XLA: - let output_device = input.device - let input = Tensor(copying: input, to: .defaultTFEager) - return Tensor(copying: _RawTFEager.fFT(input), to: output_device) - case .TF_EAGER: - return _RawTFEager.fFT(input) - } - - } - - /// 2D fast Fourier transform. - /// - /// Computes the 2-dimensional discrete Fourier transform over the inner-most - /// 2 dimensions of `input`. - /// - /// - Parameter input: A complex tensor. - /// - /// - Output output: A complex tensor of the same shape as `input`. The inner-most 2 - /// dimensions of `input` are replaced with their 2D Fourier transform. - /// - /// @compatibility(numpy) - /// Equivalent to np.fft.fft2 - /// @end_compatibility - @inlinable @inline(__always) - public static func fFT2D( - _ input: Tensor - ) -> Tensor { - switch input.handle.backend { - case .XLA: - let output_device = input.device - let input = Tensor(copying: input, to: .defaultTFEager) - return Tensor(copying: _RawTFEager.fFT2D(input), to: output_device) - case .TF_EAGER: - return _RawTFEager.fFT2D(input) - } - - } - - /// 3D fast Fourier transform. 
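The FFT wrappers above all follow the same eager-fallback idiom used throughout this file for ops with no XLA lowering: copy operands to the TF eager device, run the eager kernel, then copy the result back to the original device. Distilled into a generic helper, this is a sketch only; `withTFEagerFallback` is hypothetical and exists just to name the pattern:

```swift
import TensorFlow

// Hypothetical distillation of the fallback idiom above: ops without an
// XLA lowering copy to the TF eager device, run there, and copy back.
func withTFEagerFallback<T: TensorFlowScalar, R: TensorFlowScalar>(
  _ input: Tensor<T>, _ op: (Tensor<T>) -> Tensor<R>
) -> Tensor<R> {
  let outputDevice = input.device
  let eagerInput = Tensor(copying: input, to: .defaultTFEager)
  return Tensor(copying: op(eagerInput), to: outputDevice)
}
```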
- /// - /// Computes the 3-dimensional discrete Fourier transform over the inner-most 3 - /// dimensions of `input`. - /// - /// - Parameter input: A complex tensor. - /// - /// - Output output: A complex tensor of the same shape as `input`. The inner-most 3 - /// dimensions of `input` are replaced with their 3D Fourier transform. - /// - /// @compatibility(numpy) - /// Equivalent to np.fft.fftn with 3 dimensions. - /// @end_compatibility - @inlinable @inline(__always) - public static func fFT3D( - _ input: Tensor - ) -> Tensor { - switch input.handle.backend { - case .XLA: - let output_device = input.device - let input = Tensor(copying: input, to: .defaultTFEager) - return Tensor(copying: _RawTFEager.fFT3D(input), to: output_device) - case .TF_EAGER: - return _RawTFEager.fFT3D(input) - } - - } - - /// A queue that produces elements in first-in first-out order. - /// - /// - Attrs: - /// - component_types: The type of each component in a value. - /// - shapes: The shape of each component in a value. The length of this attr must - /// be either 0 or the same as the length of component_types. If the length of - /// this attr is 0, the shapes of queue elements are not constrained, and - /// only one element may be dequeued at a time. - /// - capacity: The upper bound on the number of elements in this queue. - /// Negative numbers mean no limit. - /// - container: If non-empty, this queue is placed in the given container. - /// Otherwise, a default container is used. - /// - shared_name: If non-empty, this queue will be shared under the given name - /// across multiple sessions. - /// - /// - Output handle: The handle to the queue. - @inlinable @inline(__always) - public static func fIFOQueueV2( - componentTypes: [TensorDataType], - shapes: [TensorShape?], - capacity: Int64 = -1, - container: String, - sharedName: String - ) -> ResourceHandle { - _RawTFEager.fIFOQueueV2( - componentTypes: componentTypes, shapes: shapes, capacity: capacity, container: container, - sharedName: sharedName) - } - - /// Output a fact about factorials. - @inlinable @inline(__always) - public static func fact() -> StringTensor { - _RawTFEager.fact() - } - - /// This op is used as a placeholder in If branch functions. It doesn't provide a - /// valid output when run, so must either be removed (e.g. replaced with a - /// function input) or guaranteed not to be used (e.g. if mirroring an - /// intermediate output needed for the gradient computation of the other branch). - /// - /// - Attrs: - /// - dtype: The type of the output. - /// - shape: The purported shape of the output. This is only used for shape inference; - /// the output will not necessarily have this shape. Can be a partial shape. - /// - /// - Output output: \"Fake\" output value. This should not be consumed by another op. - @inlinable @inline(__always) - public static func fakeParam( - shape: TensorShape? - ) -> Tensor { - _RawTFEager.fakeParam(shape: shape) - } - - /// Fake-quantize the 'inputs' tensor, type float to 'outputs' tensor of same type. - /// - /// Attributes `[min; max]` define the clamping range for the `inputs` data. - /// `inputs` values are quantized into the quantization range (`[0; 2^num_bits - 1]` - /// when `narrow_range` is false and `[1; 2^num_bits - 1]` when it is true) and - /// then de-quantized and output as floats in `[min; max]` interval. - /// `num_bits` is the bitwidth of the quantization; between 2 and 16, inclusive. - /// - /// Before quantization, `min` and `max` values are adjusted with the following - /// logic. 
- /// It is suggested to have `min <= 0 <= max`. If `0` is not in the range of values, - /// the behavior can be unexpected: - /// If `0 < min < max`: `min_adj = 0` and `max_adj = max - min`. - /// If `min < max < 0`: `min_adj = min - max` and `max_adj = 0`. - /// If `min <= 0 <= max`: `scale = (max - min) / (2^num_bits - 1) `, - /// `min_adj = scale * round(min / scale)` and `max_adj = max + min_adj - min`. - /// - /// Quantization is called fake since the output is still in floating point. - @inlinable @inline(__always) - public static func fakeQuantWithMinMaxArgs( - inputs: Tensor, - min: Double = -6, - max: Double = 6, - numBits: Int64 = 8, - narrowRange: Bool = false - ) -> Tensor { - switch inputs.handle.backend { - case .XLA: - let output_device = inputs.device - let inputs = Tensor(copying: inputs, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.fakeQuantWithMinMaxArgs( - inputs: inputs, min: min, max: max, numBits: numBits, narrowRange: narrowRange), - to: output_device) - case .TF_EAGER: - return _RawTFEager.fakeQuantWithMinMaxArgs( - inputs: inputs, min: min, max: max, numBits: numBits, narrowRange: narrowRange) - } - - } - - /// Compute gradients for a FakeQuantWithMinMaxArgs operation. - /// - /// - Parameters: - /// - gradients: Backpropagated gradients above the FakeQuantWithMinMaxArgs operation. - /// - inputs: Values passed as inputs to the FakeQuantWithMinMaxArgs operation. - /// - /// - Output backprops: Backpropagated gradients below the FakeQuantWithMinMaxArgs operation: - /// `gradients * (inputs >= min && inputs <= max)`. - @inlinable @inline(__always) - public static func fakeQuantWithMinMaxArgsGradient( - gradients: Tensor, - inputs: Tensor, - min: Double = -6, - max: Double = 6, - numBits: Int64 = 8, - narrowRange: Bool = false - ) -> Tensor { - switch commonBackend(gradients.handle.backend, inputs.handle.backend) { - case .XLA: - let output_device = inputs.device - let gradients = Tensor(copying: gradients, to: .defaultTFEager) - let inputs = Tensor(copying: inputs, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.fakeQuantWithMinMaxArgsGradient( - gradients: gradients, inputs: inputs, min: min, max: max, numBits: numBits, - narrowRange: narrowRange), to: output_device) - case .TF_EAGER: - return _RawTFEager.fakeQuantWithMinMaxArgsGradient( - gradients: gradients, inputs: inputs, min: min, max: max, numBits: numBits, - narrowRange: narrowRange) - } - - } - - /// Fake-quantize the 'inputs' tensor of type float via global float scalars `min` - /// - /// and `max` to 'outputs' tensor of same shape as `inputs`. - /// - /// `[min; max]` define the clamping range for the `inputs` data. - /// `inputs` values are quantized into the quantization range (`[0; 2^num_bits - 1]` - /// when `narrow_range` is false and `[1; 2^num_bits - 1]` when it is true) and - /// then de-quantized and output as floats in `[min; max]` interval. - /// `num_bits` is the bitwidth of the quantization; between 2 and 16, inclusive. - /// - /// Before quantization, `min` and `max` values are adjusted with the following - /// logic. - /// It is suggested to have `min <= 0 <= max`. If `0` is not in the range of values, - /// the behavior can be unexpected: - /// If `0 < min < max`: `min_adj = 0` and `max_adj = max - min`. - /// If `min < max < 0`: `min_adj = min - max` and `max_adj = 0`. - /// If `min <= 0 <= max`: `scale = (max - min) / (2^num_bits - 1) `, - /// `min_adj = scale * round(min / scale)` and `max_adj = max + min_adj - min`. 
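The `min`/`max` nudging rules quoted above translate directly into code. A pure-Swift sketch of the three cases (`nudgedRange` is illustrative; it mirrors the documented `2^num_bits - 1` formula and ignores `narrow_range`):

```swift
// Illustrative transcription of the three nudging cases quoted above.
func nudgedRange(min: Double, max: Double, numBits: Int) -> (min: Double, max: Double) {
  if min > 0 { return (0, max - min) }                  // 0 < min < max
  if max < 0 { return (min - max, 0) }                  // min < max < 0
  let scale = (max - min) / (Double(1 << numBits) - 1)  // min <= 0 <= max
  let nudgedMin = scale * (min / scale).rounded()
  return (nudgedMin, max + nudgedMin - min)
}
```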
- /// - /// This operation has a gradient and thus allows for training `min` and `max` - /// values. - @inlinable @inline(__always) - public static func fakeQuantWithMinMaxVars( - inputs: Tensor, - min: Tensor, - max: Tensor, - numBits: Int64 = 8, - narrowRange: Bool = false - ) -> Tensor { - switch commonBackend( - commonBackend(inputs.handle.backend, min.handle.backend), max.handle.backend) - { - case .XLA: - let output_device = max.device - let inputs = Tensor(copying: inputs, to: .defaultTFEager) - let min = Tensor(copying: min, to: .defaultTFEager) - let max = Tensor(copying: max, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.fakeQuantWithMinMaxVars( - inputs: inputs, min: min, max: max, numBits: numBits, narrowRange: narrowRange), - to: output_device) - case .TF_EAGER: - return _RawTFEager.fakeQuantWithMinMaxVars( - inputs: inputs, min: min, max: max, numBits: numBits, narrowRange: narrowRange) - } - - } - - /// Compute gradients for a FakeQuantWithMinMaxVars operation. - /// - /// - Parameters: - /// - gradients: Backpropagated gradients above the FakeQuantWithMinMaxVars operation. - /// - inputs: Values passed as inputs to the FakeQuantWithMinMaxVars operation. - /// min, max: Quantization interval, scalar floats. - /// - /// - Attrs: - /// - num_bits: The bitwidth of the quantization; between 2 and 8, inclusive. - /// - narrow_range: Whether to quantize into 2^num_bits - 1 distinct values. - /// - /// - Outputs: - /// - backprops_wrt_input: Backpropagated gradients w.r.t. inputs: - /// `gradients * (inputs >= min && inputs <= max)`. - /// - backprop_wrt_min: Backpropagated gradients w.r.t. min parameter: - /// `sum(gradients * (inputs < min))`. - /// - backprop_wrt_max: Backpropagated gradients w.r.t. max parameter: - /// `sum(gradients * (inputs > max))`. - @inlinable @inline(__always) - public static func fakeQuantWithMinMaxVarsGradient( - gradients: Tensor, - inputs: Tensor, - min: Tensor, - max: Tensor, - numBits: Int64 = 8, - narrowRange: Bool = false - ) -> ( - backpropsWrtInput: Tensor, backpropWrtMin: Tensor, backpropWrtMax: Tensor - ) { - _RawTFEager.fakeQuantWithMinMaxVarsGradient( - gradients: gradients, inputs: inputs, min: min, max: max, numBits: numBits, - narrowRange: narrowRange) + } + + /// Returns the index with the largest value across dimensions of a tensor. + /// + /// Note that in case of ties the identity of the return value is not guaranteed. + /// + /// Usage: + /// ```python + /// import tensorflow as tf + /// a = [1, 10, 26.9, 2.8, 166.32, 62.3] + /// b = tf.math.argmax(input = a) + /// c = tf.keras.backend.eval(b) + /// # c = 4 + /// # here a[4] = 166.32 which is the largest element of a across axis 0 + /// ``` + /// + /// - Parameter dimension: int32 or int64, must be in the range `[-rank(input), rank(input))`. + /// Describes which dimension of the input Tensor to reduce across. For vectors, + /// use dimension = 0. 
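A Swift analogue of the Python usage above, assuming the public `argmax(squeezingAxis:)` convenience from swift-apis, which dispatches to this raw op:

```swift
import TensorFlow

let a = Tensor<Float>([1, 10, 26.9, 2.8, 166.32, 62.3])
let c = a.argmax(squeezingAxis: 0)  // 4; a[4] == 166.32, the largest element
```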
+ @inlinable @inline(__always) + public static func argMax< + T: TensorFlowNumeric, + Tidx: TensorFlowIndex, + OutputType: TensorFlowIndex + >( + _ input: Tensor, + dimension: Tensor + ) -> Tensor { + switch commonBackend(input.handle.backend, dimension.handle.backend) { + case .XLA: + return _RawXLA.argMax(input, dimension: dimension) + case .TF_EAGER: + return _RawTFEager.argMax(input, dimension: dimension) } - /// Fake-quantize the 'inputs' tensor of type float and one of the shapes: `[d]`, - /// - /// `[b, d]` `[b, h, w, d]` via per-channel floats `min` and `max` of shape `[d]` - /// to 'outputs' tensor of same shape as `inputs`. - /// - /// `[min; max]` define the clamping range for the `inputs` data. - /// `inputs` values are quantized into the quantization range (`[0; 2^num_bits - 1]` - /// when `narrow_range` is false and `[1; 2^num_bits - 1]` when it is true) and - /// then de-quantized and output as floats in `[min; max]` interval. - /// `num_bits` is the bitwidth of the quantization; between 2 and 16, inclusive. - /// - /// Before quantization, `min` and `max` values are adjusted with the following - /// logic. - /// It is suggested to have `min <= 0 <= max`. If `0` is not in the range of values, - /// the behavior can be unexpected: - /// If `0 < min < max`: `min_adj = 0` and `max_adj = max - min`. - /// If `min < max < 0`: `min_adj = min - max` and `max_adj = 0`. - /// If `min <= 0 <= max`: `scale = (max - min) / (2^num_bits - 1) `, - /// `min_adj = scale * round(min / scale)` and `max_adj = max + min_adj - min`. - /// - /// This operation has a gradient and thus allows for training `min` and `max` - /// values. - @inlinable @inline(__always) - public static func fakeQuantWithMinMaxVarsPerChannel( - inputs: Tensor, - min: Tensor, - max: Tensor, - numBits: Int64 = 8, - narrowRange: Bool = false - ) -> Tensor { - switch commonBackend( - commonBackend(inputs.handle.backend, min.handle.backend), max.handle.backend) - { - case .XLA: - let output_device = max.device - let inputs = Tensor(copying: inputs, to: .defaultTFEager) - let min = Tensor(copying: min, to: .defaultTFEager) - let max = Tensor(copying: max, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.fakeQuantWithMinMaxVarsPerChannel( - inputs: inputs, min: min, max: max, numBits: numBits, narrowRange: narrowRange), - to: output_device) - case .TF_EAGER: - return _RawTFEager.fakeQuantWithMinMaxVarsPerChannel( - inputs: inputs, min: min, max: max, numBits: numBits, narrowRange: narrowRange) - } - - } - - /// Compute gradients for a FakeQuantWithMinMaxVarsPerChannel operation. - /// - /// - Parameters: - /// - gradients: Backpropagated gradients above the FakeQuantWithMinMaxVars operation, - /// shape one of: `[d]`, `[b, d]`, `[b, h, w, d]`. - /// - inputs: Values passed as inputs to the FakeQuantWithMinMaxVars operation, shape - /// same as `gradients`. - /// min, max: Quantization interval, floats of shape `[d]`. - /// - /// - Attrs: - /// - num_bits: The bitwidth of the quantization; between 2 and 16, inclusive. - /// - narrow_range: Whether to quantize into 2^num_bits - 1 distinct values. - /// - /// - Outputs: - /// - backprops_wrt_input: Backpropagated gradients w.r.t. inputs, shape same as - /// `inputs`: - /// `gradients * (inputs >= min && inputs <= max)`. - /// - backprop_wrt_min: Backpropagated gradients w.r.t. min parameter, shape `[d]`: - /// `sum_per_d(gradients * (inputs < min))`. - /// - backprop_wrt_max: Backpropagated gradients w.r.t. 
max parameter, shape `[d]`: - /// `sum_per_d(gradients * (inputs > max))`. - @inlinable @inline(__always) - public static func fakeQuantWithMinMaxVarsPerChannelGradient( - gradients: Tensor, - inputs: Tensor, - min: Tensor, - max: Tensor, - numBits: Int64 = 8, - narrowRange: Bool = false - ) -> ( - backpropsWrtInput: Tensor, backpropWrtMin: Tensor, backpropWrtMax: Tensor - ) { - _RawTFEager.fakeQuantWithMinMaxVarsPerChannelGradient( - gradients: gradients, inputs: inputs, min: min, max: max, numBits: numBits, - narrowRange: narrowRange) + } + + /// Returns the index with the smallest value across dimensions of a tensor. + /// + /// Note that in case of ties the identity of the return value is not guaranteed. + /// + /// Usage: + /// ```python + /// import tensorflow as tf + /// a = [1, 10, 26.9, 2.8, 166.32, 62.3] + /// b = tf.math.argmin(input = a) + /// c = tf.keras.backend.eval(b) + /// # c = 0 + /// # here a[0] = 1 which is the smallest element of a across axis 0 + /// ``` + /// + /// - Parameter dimension: int32 or int64, must be in the range `[-rank(input), rank(input))`. + /// Describes which dimension of the input Tensor to reduce across. For vectors, + /// use dimension = 0. + @inlinable @inline(__always) + public static func argMin< + T: TensorFlowNumeric, + Tidx: TensorFlowIndex, + OutputType: TensorFlowIndex + >( + _ input: Tensor, + dimension: Tensor + ) -> Tensor { + switch commonBackend(input.handle.backend, dimension.handle.backend) { + case .XLA: + return _RawXLA.argMin(input, dimension: dimension) + case .TF_EAGER: + return _RawTFEager.argMin(input, dimension: dimension) } - /// Creates a tensor filled with a scalar value. - /// - /// This operation creates a tensor of shape `dims` and fills it with `value`. - /// - /// For example: - /// - /// ``` - /// # Output tensor has shape [2, 3]. - /// fill([2, 3], 9) ==> [[9, 9, 9] - /// [9, 9, 9]] - /// ``` - /// - /// `tf.fill` differs from `tf.constant` in a few ways: - /// - /// * `tf.fill` only supports scalar contents, whereas `tf.constant` supports - /// Tensor values. - /// * `tf.fill` creates an Op in the computation graph that constructs the actual - /// Tensor value at runtime. This is in contrast to `tf.constant` which embeds - /// the entire Tensor into the graph with a `Const` node. - /// * Because `tf.fill` evaluates at graph runtime, it supports dynamic shapes - /// based on other runtime Tensors, unlike `tf.constant`. - /// - /// - Parameters: - /// - dims: 1-D. Represents the shape of the output tensor. - /// - value: 0-D (scalar). Value to fill the returned tensor. - /// - /// @compatibility(numpy) - /// Equivalent to np.full - /// @end_compatibility - @inlinable @inline(__always) - public static func fill< - T: TensorFlowScalar, - IndexType: TensorFlowIndex - >( - dims: Tensor, - value: Tensor - ) -> Tensor { - switch commonBackend(dims.handle.backend, value.handle.backend) { - case .XLA: - return _RawXLA.fill(dims: dims, value: value) - case .TF_EAGER: - return _RawTFEager.fill(dims: dims, value: value) - } - - } - - /// Creates a dataset containing elements of first component of `input_dataset` having true in the last component. - @inlinable @inline(__always) - public static func filterByLastComponentDataset( - inputDataset: VariantHandle, - outputTypes: [TensorDataType], - outputShapes: [TensorShape?] 
-    ) -> VariantHandle {
-      _RawTFEager.filterByLastComponentDataset(
-        inputDataset: inputDataset, outputTypes: outputTypes, outputShapes: outputShapes)
-    }
-
-    /// Creates a dataset containing elements of `input_dataset` matching `predicate`.
-    ///
-    /// The `predicate` function must return a scalar boolean and accept the
-    /// following arguments:
-    ///
-    /// * One tensor for each component of an element of `input_dataset`.
-    /// * One tensor for each value in `other_arguments`.
-    ///
-    /// - Parameter other_arguments: A list of tensors, typically values that were captured when
-    ///   building a closure for `predicate`.
-    ///
-    /// - Attr predicate: A function returning a scalar boolean.
-    @inlinable @inline(__always)
-    public static func filterDataset<
-      PredicateIn: TensorGroup,
-      PredicateOut: TensorGroup,
-      Targuments: TensorArrayProtocol
-    >(
-      inputDataset: VariantHandle,
-      otherArguments: Targuments,
-      predicate: (PredicateIn) -> PredicateOut,
-      outputTypes: [TensorDataType],
-      outputShapes: [TensorShape?]
-    ) -> VariantHandle {
-      _RawTFEager.filterDataset(
-        inputDataset: inputDataset, otherArguments: otherArguments, predicate: predicate,
-        outputTypes: outputTypes, outputShapes: outputShapes)
-    }
-
-    /// Generates fingerprint values.
-    ///
-    /// Generates fingerprint values of `data`.
-    ///
-    /// Fingerprint op considers the first dimension of `data` as the batch dimension,
-    /// and `output[i]` contains the fingerprint value generated from contents in
-    /// `data[i, ...]` for all `i`.
-    ///
-    /// Fingerprint op writes fingerprint values as byte arrays. For example, the
-    /// default method `farmhash64` generates a 64-bit fingerprint value at a time.
-    /// This 8-byte value is written out as an `uint8` array of size 8, in little-endian
-    /// order.
-    ///
-    /// For example, suppose that `data` has data type `DT_INT32` and shape (2, 3, 4),
-    /// and that the fingerprint method is `farmhash64`. In this case, the output shape
-    /// is (2, 8), where 2 is the batch dimension size of `data`, and 8 is the size of
-    /// each fingerprint value in bytes. `output[0, :]` is generated from 12 integers in
-    /// `data[0, :, :]` and similarly `output[1, :]` is generated from the other 12 integers
-    /// in `data[1, :, :]`.
-    ///
-    /// Note that this op fingerprints the raw underlying buffer, and it does not
-    /// fingerprint Tensor's metadata such as data type and/or shape. For example, the
-    /// fingerprint values are invariant under reshapes and bitcasts as long as the
-    /// batch dimension remains the same:
-    ///
-    /// ```
-    /// Fingerprint(data) == Fingerprint(Reshape(data, ...))
-    /// Fingerprint(data) == Fingerprint(Bitcast(data, ...))
-    /// ```
-    ///
-    /// For string data, one should expect `Fingerprint(data) !=
-    /// Fingerprint(ReduceJoin(data))` in general.
-    ///
-    /// - Parameters:
-    ///   - data: Must have rank 1 or higher.
-    ///   - method: Fingerprint method used by this op. Currently available method is
-    ///     `farmhash::fingerprint64`.
-    ///
-    /// - Attr T: This can be a POD-type or string type.
-    ///
-    /// - Output fingerprint: A two-dimensional `Tensor` of type `tf.uint8`. The first dimension equals
-    ///   `data`'s first dimension, and the second dimension size depends on the
-    ///   fingerprint algorithm.
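The shape contract described above (batch dimension preserved, 8 bytes per 64-bit fingerprint) can be exercised through the wrapper that follows; a sketch assuming a TF-eager-capable build and the op's `farmhash64` method string:

```swift
import TensorFlow

// Shape contract: one 8-byte little-endian farmhash64 value per batch
// element, so a [2, 3, 4] input yields a [2, 8] uint8 output.
let data = Tensor<Int32>(repeating: 0, shape: [2, 3, 4])
let fp = _Raw.fingerprint(data: data, method: StringTensor("farmhash64"))
// fp.shape == [2, 8]; invariant under reshapes that keep the batch dimension.
```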
- @inlinable @inline(__always) - public static func fingerprint( - data: Tensor, - method: StringTensor - ) -> Tensor { - switch data.handle.backend { - case .XLA: - let output_device = data.device - let data = Tensor(copying: data, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.fingerprint(data: data, method: method), to: output_device) - case .TF_EAGER: - return _RawTFEager.fingerprint(data: data, method: method) - } - - } - - @inlinable @inline(__always) - public static func fiveFloatOutputs() -> ( - a: Tensor, b: Tensor, c: Tensor, d: Tensor, e: Tensor - ) { - _RawTFEager.fiveFloatOutputs() - } - - /// Creates a dataset that emits the records from one or more binary files. - /// - /// - Parameters: - /// - filenames: A scalar or a vector containing the name(s) of the file(s) to be - /// read. - /// - header_bytes: A scalar representing the number of bytes to skip at the - /// beginning of a file. - /// - record_bytes: A scalar representing the number of bytes in each record. - /// - footer_bytes: A scalar representing the number of bytes to skip at the end - /// of a file. - /// - buffer_size: A scalar representing the number of bytes to buffer. Must be > 0. - @inlinable @inline(__always) - public static func fixedLengthRecordDataset( - filenames: StringTensor, - headerBytes: Tensor, - recordBytes: Tensor, - footerBytes: Tensor, - bufferSize: Tensor - ) -> VariantHandle { - _RawTFEager.fixedLengthRecordDataset( - filenames: filenames, headerBytes: headerBytes, recordBytes: recordBytes, - footerBytes: footerBytes, bufferSize: bufferSize) - } - - @inlinable @inline(__always) - public static func fixedLengthRecordDatasetV2( - filenames: StringTensor, - headerBytes: Tensor, - recordBytes: Tensor, - footerBytes: Tensor, - bufferSize: Tensor, - compressionType: StringTensor - ) -> VariantHandle { - _RawTFEager.fixedLengthRecordDatasetV2( - filenames: filenames, headerBytes: headerBytes, recordBytes: recordBytes, - footerBytes: footerBytes, bufferSize: bufferSize, compressionType: compressionType) - } - - /// A Reader that outputs fixed-length records from a file. - /// - /// - Attrs: - /// - header_bytes: Number of bytes in the header, defaults to 0. - /// - record_bytes: Number of bytes in the record. - /// - footer_bytes: Number of bytes in the footer, defaults to 0. - /// - hop_bytes: Number of bytes to hop before each read. Default of 0 means using - /// record_bytes. - /// - container: If non-empty, this reader is placed in the given container. - /// Otherwise, a default container is used. - /// - shared_name: If non-empty, this reader is named in the given bucket - /// with this shared_name. Otherwise, the node name is used instead. - /// - encoding: The type of encoding for the file. Currently ZLIB and GZIP - /// are supported. Defaults to none. - /// - /// - Output reader_handle: The handle to reference the Reader. - @inlinable @inline(__always) - public static func fixedLengthRecordReaderV2( - headerBytes: Int64 = 0, - recordBytes: Int64, - footerBytes: Int64 = 0, - hopBytes: Int64 = 0, - container: String, - sharedName: String, - encoding: String - ) -> ResourceHandle { - _RawTFEager.fixedLengthRecordReaderV2( - headerBytes: headerBytes, recordBytes: recordBytes, footerBytes: footerBytes, - hopBytes: hopBytes, container: container, sharedName: sharedName, encoding: encoding) - } - - /// Generates labels for candidate sampling with a learned unigram distribution. 
-    ///
-    /// A unigram sampler could use a fixed unigram distribution read from a
-    /// file or passed in as an in-memory array instead of building up the distribution
-    /// from data on the fly. There is also an option to skew the distribution by
-    /// applying a distortion power to the weights.
-    ///
-    /// The vocabulary file should be in CSV-like format, with the last field
-    /// being the weight associated with the word.
-    ///
-    /// For each batch, this op picks a single set of sampled candidate labels.
-    ///
-    /// The advantages of sampling candidates per-batch are simplicity and the
-    /// possibility of efficient dense matrix multiplication. The disadvantage is that
-    /// the sampled candidates must be chosen independently of the context and of the
-    /// true labels.
-    ///
-    /// - Parameter true_classes: A batch_size * num_true matrix, in which each row contains the
-    ///   IDs of the num_true target_classes in the corresponding original label.
-    ///
-    /// - Attrs:
-    ///   - num_true: Number of true labels per context.
-    ///   - num_sampled: Number of candidates to randomly sample.
-    ///   - unique: If unique is true, we sample with rejection, so that all sampled
-    ///     candidates in a batch are unique. This requires some approximation to
-    ///     estimate the post-rejection sampling probabilities.
-    ///   - range_max: The sampler will sample integers from the interval [0, range_max).
-    ///   - vocab_file: Each valid line in this file (which should have a CSV-like format)
-    ///     corresponds to a valid word ID. IDs are in sequential order, starting from
-    ///     num_reserved_ids. The last entry in each line is expected to be a value
-    ///     corresponding to the count or relative probability. Exactly one of vocab_file
-    ///     and unigrams needs to be passed to this op.
-    ///   - distortion: The distortion is used to skew the unigram probability distribution.
-    ///     Each weight is first raised to the distortion's power before adding to the
-    ///     internal unigram distribution. As a result, distortion = 1.0 gives regular
-    ///     unigram sampling (as defined by the vocab file), and distortion = 0.0 gives
-    ///     a uniform distribution.
-    ///   - num_reserved_ids: Optionally some reserved IDs can be added in the range [0,
-    ///     ..., num_reserved_ids) by the users. One use case is that a special unknown
-    ///     word token is used as ID 0. These IDs will have a sampling probability of 0.
-    ///   - num_shards: A sampler can be used to sample from a subset of the original range
-    ///     in order to speed up the whole computation through parallelism. This parameter
-    ///     (together with 'shard') indicates the number of partitions that are being
-    ///     used in the overall computation.
-    ///   - shard: A sampler can be used to sample from a subset of the original range
-    ///     in order to speed up the whole computation through parallelism. This parameter
-    ///     (together with 'num_shards') indicates the particular partition number of a
-    ///     sampler op, when partitioning is being used.
-    ///   - unigrams: A list of unigram counts or probabilities, one per ID in sequential
-    ///     order. Exactly one of vocab_file and unigrams should be passed to this op.
-    ///   - seed: If either seed or seed2 are set to be non-zero, the random number
-    ///     generator is seeded by the given seed. Otherwise, it is seeded by a
-    ///     random seed.
-    ///   - seed2: A second seed to avoid seed collision.
-    ///
-    /// - Outputs:
-    ///   - sampled_candidates: A vector of length num_sampled, in which each element is
-    ///     the ID of a sampled candidate.
-    ///   - true_expected_count: A batch_size * num_true matrix, representing
-    ///     the number of times each candidate is expected to occur in a batch
-    ///     of sampled candidates. If unique=true, then this is a probability.
-    ///   - sampled_expected_count: A vector of length num_sampled, for each sampled
-    ///     candidate representing the number of times the candidate is expected
-    ///     to occur in a batch of sampled candidates. If unique=true, then this is a
-    ///     probability.
-    @inlinable @inline(__always)
-    public static func fixedUnigramCandidateSampler(
-      trueClasses: Tensor,
-      numTrue: Int64,
-      numSampled: Int64,
-      unique: Bool,
-      rangeMax: Int64,
-      vocabFile: String,
-      distortion: Double = 1,
-      numReservedIds: Int64 = 0,
-      numShards: Int64 = 1,
-      shard: Int64 = 0,
-      unigrams: [Double],
-      seed: Int64 = 0,
-      seed2: Int64 = 0
-    ) -> (
-      sampledCandidates: Tensor, trueExpectedCount: Tensor,
-      sampledExpectedCount: Tensor
-    ) {
-      _RawTFEager.fixedUnigramCandidateSampler(
-        trueClasses: trueClasses, numTrue: numTrue, numSampled: numSampled, unique: unique,
-        rangeMax: rangeMax, vocabFile: vocabFile, distortion: distortion,
-        numReservedIds: numReservedIds, numShards: numShards, shard: shard, unigrams: unigrams,
-        seed: seed, seed2: seed2)
+  }
+
+  /// Converts each entry in the given tensor to strings.
+  ///
+  /// Supports many numeric types and boolean.
+  ///
+  /// For Unicode, see the
+  /// [Working with Unicode text](https://www.tensorflow.org/tutorials/representation/unicode)
+  /// tutorial.
+  ///
+  /// Examples:
+  ///
+  /// >>> tf.strings.as_string([3, 2])
+  ///
+  /// >>> tf.strings.as_string([3.1415926, 2.71828], precision=2).numpy()
+  /// array([b'3.14', b'2.72'], dtype=object)
+  ///
+  /// - Attrs:
+  ///   - precision: The post-decimal precision to use for floating point numbers.
+  ///     Only used if precision > -1.
+  ///   - scientific: Use scientific notation for floating point numbers.
+  ///   - shortest: Use shortest representation (either scientific or standard) for
+  ///     floating point numbers.
+  ///   - width: Pad pre-decimal numbers to this width.
+  ///     Applies to both floating point and integer numbers.
+  ///     Only used if width > -1.
+  ///   - fill: The value to pad if width > -1. If empty, pads with spaces.
+  ///     Another typical value is '0'. String cannot be longer than 1 character.
+  @inlinable @inline(__always)
+  public static func asString(
+    _ input: Tensor,
+    precision: Int64 = -1,
+    scientific: Bool = false,
+    shortest: Bool = false,
+    width: Int64 = -1,
+    fill: String
+  ) -> StringTensor {
+    _RawTFEager.asString(
+      input, precision: precision, scientific: scientific, shortest: shortest, width: width,
+      fill: fill)
+  }
+
+  /// Computes the trigonometric inverse sine of x element-wise.
+  ///
+  /// The `tf.math.asin` operation returns the inverse of `tf.math.sin`, such that
+  /// if `y = tf.math.sin(x)` then `x = tf.math.asin(y)`.
+  ///
+  /// **Note**: The output of `tf.math.asin` will lie within the invertible range
+  /// of sine, i.e. [-pi/2, pi/2].
+  ///
+  /// For example:
+  ///
+  /// ```python
+  /// # Note: [1.047, 0.785] ~= [(pi/3), (pi/4)]
+  /// x = tf.constant([1.047, 0.785])
+  /// y = tf.math.sin(x) # [0.8659266, 0.7068252]
+  ///
+  /// tf.math.asin(y) # [1.047, 0.785] = x
+  /// ```
+  ///
+  @inlinable @inline(__always)
+  public static func asin(
+    _ x: Tensor
+  ) -> Tensor {
+    switch x.handle.backend {
+    case .XLA:
+      return _RawXLA.asin(x)
+    case .TF_EAGER:
+      return _RawTFEager.asin(x)
     }

-    /// Creates a dataset that applies `f` to the outputs of `input_dataset`.
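A Swift analogue of the `asin` round trip above, using the public `sin`/`asin` wrappers from swift-apis that dispatch through these raw ops:

```swift
import TensorFlow

let x = Tensor<Float>([1.047, 0.785])  // ~[pi/3, pi/4]
let y = sin(x)                         // [0.8659266, 0.7068252]
let z = asin(y)                        // recovers [1.047, 0.785]
```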
- /// - /// Unlike MapDataset, the `f` in FlatMapDataset is expected to return a - /// Dataset variant, and FlatMapDataset will flatten successive results - /// into a single Dataset. - /// - /// - Attr f: A function mapping elements of `input_dataset`, concatenated with - /// `other_arguments`, to a Dataset variant that contains elements matching - /// `output_types` and `output_shapes`. - @inlinable @inline(__always) - public static func flatMapDataset< - FIn: TensorGroup, - FOut: TensorGroup, - Targuments: TensorArrayProtocol - >( - inputDataset: VariantHandle, - otherArguments: Targuments, - f: (FIn) -> FOut, - outputTypes: [TensorDataType], - outputShapes: [TensorShape?] - ) -> VariantHandle { - _RawTFEager.flatMapDataset( - inputDataset: inputDataset, otherArguments: otherArguments, f: f, outputTypes: outputTypes, - outputShapes: outputShapes) - } - - @inlinable @inline(__always) - public static func floatInput( - _ a: Tensor - ) { - _RawTFEager.floatInput(a) - } - - @inlinable @inline(__always) - public static func floatOutput() -> Tensor { - _RawTFEager.floatOutput() - } - - @inlinable @inline(__always) - public static func floatOutputStringOutput() -> (a: Tensor, b: StringTensor) { - _RawTFEager.floatOutputStringOutput() - } - - /// Returns element-wise largest integer not greater than x. - @inlinable @inline(__always) - public static func floor( - _ x: Tensor - ) -> Tensor { - switch x.handle.backend { - case .XLA: - return _RawXLA.floor(x) - case .TF_EAGER: - return _RawTFEager.floor(x) - } - - } - - /// Returns x // y element-wise. - /// - /// *NOTE*: `FloorDiv` supports broadcasting. More about broadcasting - /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) - @inlinable @inline(__always) - public static func floorDiv( - _ x: Tensor, - _ y: Tensor - ) -> Tensor { - switch commonBackend(x.handle.backend, y.handle.backend) { - case .XLA: - let output_device = y.device - let x = Tensor(copying: x, to: .defaultTFEager) - let y = Tensor(copying: y, to: .defaultTFEager) - return Tensor(copying: _RawTFEager.floorDiv(x, y), to: output_device) - case .TF_EAGER: - return _RawTFEager.floorDiv(x, y) - } - - } - - /// Returns element-wise remainder of division. When `x < 0` xor `y < 0` is - /// - /// true, this follows Python semantics in that the result here is consistent - /// with a flooring divide. E.g. `floor(x / y) * y + mod(x, y) = x`. - /// - /// *NOTE*: `FloorMod` supports broadcasting. 
More about broadcasting - /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) - @inlinable @inline(__always) - public static func floorMod( - _ x: Tensor, - _ y: Tensor - ) -> Tensor { - switch commonBackend(x.handle.backend, y.handle.backend) { - case .XLA: - let output_device = y.device - let x = Tensor(copying: x, to: .defaultTFEager) - let y = Tensor(copying: y, to: .defaultTFEager) - return Tensor(copying: _RawTFEager.floorMod(x, y), to: output_device) - case .TF_EAGER: - return _RawTFEager.floorMod(x, y) - } - - } - - @inlinable @inline(__always) - public static func flushSummaryWriter( - writer: ResourceHandle - ) { - _RawTFEager.flushSummaryWriter(writer: writer) - } - - @inlinable @inline(__always) - public static func foo1( - _ a: Tensor, - _ b: Tensor, - c: Tensor - ) -> (d: Tensor, e: Tensor) { - _RawTFEager.foo1(a, b, c: c) - } - - @inlinable @inline(__always) - public static func foo2( - _ a: Tensor, - _ b: StringTensor, - c: StringTensor - ) -> (d: Tensor, e: Tensor) { - _RawTFEager.foo2(a, b, c: c) - } - - @inlinable @inline(__always) - public static func foo3( - _ a: Tensor, - _ b: StringTensor, - c: Tensor - ) -> (d: Tensor, e: Tensor) { - _RawTFEager.foo3(a, b, c: c) - } - - /// ```python - /// output = input; - /// for i in range(start, limit, delta) - /// output = body(i, output); - /// ``` - /// - /// - Parameters: - /// - start: The lower bound. An int32 - /// - limit: The upper bound. An int32 - /// - delta: The increment. An int32 - /// - input: A list of input tensors whose types are T. - /// - /// - Attrs: - /// - T: A list of dtypes. - /// - body: A function that takes a list of tensors (int32, T) and returns another - /// list of tensors (T). - /// - /// - Output output: A list of output tensors whose types are T. - @inlinable @inline(__always) - public static func for_< - T: TensorArrayProtocol, - BodyIn: TensorGroup, - BodyOut: TensorGroup - >( - start: Tensor, - limit: Tensor, - delta: Tensor, - _ input: T, - body: (BodyIn) -> BodyOut - ) -> T { - _RawTFEager.for_(start: start, limit: limit, delta: delta, input, body: body) - } - - /// Performs fractional average pooling on the input. - /// - /// Fractional average pooling is similar to Fractional max pooling in the pooling - /// region generation step. The only difference is that after pooling regions are - /// generated, a mean operation is performed instead of a max operation in each - /// pooling region. - /// - /// - Parameter value: 4-D with shape `[batch, height, width, channels]`. - /// - /// - Attrs: - /// - pooling_ratio: Pooling ratio for each dimension of `value`, currently only - /// supports row and col dimension and should be >= 1.0. For example, a valid - /// pooling ratio looks like [1.0, 1.44, 1.73, 1.0]. The first and last elements - /// must be 1.0 because we don't allow pooling on batch and channels - /// dimensions. 1.44 and 1.73 are pooling ratio on height and width dimensions - /// respectively. - /// - pseudo_random: When set to True, generates the pooling sequence in a - /// pseudorandom fashion, otherwise, in a random fashion. Check paper [Benjamin - /// Graham, Fractional Max-Pooling](http://arxiv.org/abs/1412.6071) for - /// difference between pseudorandom and random. - /// - overlapping: When set to True, it means when pooling, the values at the boundary - /// of adjacent pooling cells are used by both cells. 
For example:
-    ///
-    ///     `index  0  1  2  3  4`
-    ///
-    ///     `value  20 5  16 3  7`
-    ///
-    ///     If the pooling sequence is [0, 2, 4], then 16, at index 2 will be used twice.
-    ///     The result would be [41/3, 26/3] for fractional avg pooling.
-    ///   - deterministic: When set to True, a fixed pooling region will be used when
-    ///     iterating over a FractionalAvgPool node in the computation graph. Mainly used
-    ///     in unit test to make FractionalAvgPool deterministic.
-    ///   - seed: If either seed or seed2 are set to be non-zero, the random number
-    ///     generator is seeded by the given seed. Otherwise, it is seeded by a
-    ///     random seed.
-    ///   - seed2: A second seed to avoid seed collision.
-    ///
-    /// - Outputs:
-    ///   - output: output tensor after fractional avg pooling.
-    ///   - row_pooling_sequence: row pooling sequence, needed to calculate gradient.
-    ///   - col_pooling_sequence: column pooling sequence, needed to calculate gradient.
-    @inlinable @inline(__always)
-    public static func fractionalAvgPool<T: TensorFlowNumeric>(
-      value: Tensor<T>,
-      poolingRatio: [Double],
-      pseudoRandom: Bool = false,
-      overlapping: Bool = false,
-      deterministic: Bool = false,
-      seed: Int64 = 0,
-      seed2: Int64 = 0
-    ) -> (output: Tensor<T>, rowPoolingSequence: Tensor<Int64>, colPoolingSequence: Tensor<Int64>) {
-      _RawTFEager.fractionalAvgPool(
-        value: value, poolingRatio: poolingRatio, pseudoRandom: pseudoRandom,
-        overlapping: overlapping, deterministic: deterministic, seed: seed, seed2: seed2)
-    }
-
-    /// Computes gradient of the FractionalAvgPool function.
-    ///
-    /// Unlike FractionalMaxPoolGrad, we don't need to find arg_max for
-    /// FractionalAvgPoolGrad, we just need to evenly back-propagate each element of
-    /// out_backprop to those indices that form the same pooling cell. Therefore, we
-    /// just need to know the shape of original input tensor, instead of the whole
-    /// tensor.
-    ///
-    /// - Parameters:
-    ///   - orig_input_tensor_shape: Original input tensor shape for `fractional_avg_pool`
-    ///   - out_backprop: 4-D with shape `[batch, height, width, channels]`. Gradients
-    ///     w.r.t. the output of `fractional_avg_pool`.
-    ///   - row_pooling_sequence: row pooling sequence, form pooling region with
-    ///     col_pooling_sequence.
-    ///   - col_pooling_sequence: column pooling sequence, form pooling region with
-    ///     row_pooling sequence.
-    ///
-    /// - Attr overlapping: When set to True, it means when pooling, the values at the boundary
-    ///   of adjacent pooling cells are used by both cells. For example:
-    ///
-    ///   `index  0  1  2  3  4`
-    ///
-    ///   `value  20 5  16 3  7`
-    ///
-    ///   If the pooling sequence is [0, 2, 4], then 16, at index 2 will be used twice.
-    ///   The result would be [41/3, 26/3] for fractional avg pooling.
-    ///
-    /// - Output output: 4-D. Gradients w.r.t. the input of `fractional_avg_pool`.
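The overlapping example in the doc comment above is easy to verify by hand. A minimal sketch in plain Swift, using the values and pooling sequence from the doc comment (the `overlappingAvg` helper is hypothetical, written only to illustrate the arithmetic):

```swift
// Sketch: verifying the overlapping fractional-avg-pool example by hand.
// With values [20, 5, 16, 3, 7] and pooling sequence [0, 2, 4], overlapping
// pooling makes the boundary element at index 2 (16) contribute to both cells.
let value: [Double] = [20, 5, 16, 3, 7]
let poolingSequence = [0, 2, 4]

// Hypothetical helper: averages value[lo...hi] inclusively, so adjacent
// cells share the boundary element, as the `overlapping` attr describes.
func overlappingAvg(_ v: [Double], _ seq: [Int]) -> [Double] {
  zip(seq, seq.dropFirst()).map { lo, hi in
    let cell = v[lo...hi]  // inclusive upper bound: this is the overlap
    return cell.reduce(0, +) / Double(cell.count)
  }
}

print(overlappingAvg(value, poolingSequence))
// [13.666..., 8.666...], i.e. [41/3, 26/3], matching the doc comment.
```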
- @inlinable @inline(__always) - public static func fractionalAvgPoolGrad( - origInputTensorShape: Tensor, - outBackprop: Tensor, - rowPoolingSequence: Tensor, - colPoolingSequence: Tensor, - overlapping: Bool = false - ) -> Tensor { - switch commonBackend( - commonBackend( - commonBackend(origInputTensorShape.handle.backend, outBackprop.handle.backend), - rowPoolingSequence.handle.backend), colPoolingSequence.handle.backend) - { - case .XLA: - let output_device = colPoolingSequence.device - let origInputTensorShape = Tensor(copying: origInputTensorShape, to: .defaultTFEager) - let outBackprop = Tensor(copying: outBackprop, to: .defaultTFEager) - let rowPoolingSequence = Tensor(copying: rowPoolingSequence, to: .defaultTFEager) - let colPoolingSequence = Tensor(copying: colPoolingSequence, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.fractionalAvgPoolGrad( - origInputTensorShape: origInputTensorShape, outBackprop: outBackprop, - rowPoolingSequence: rowPoolingSequence, colPoolingSequence: colPoolingSequence, - overlapping: overlapping), to: output_device) - case .TF_EAGER: - return _RawTFEager.fractionalAvgPoolGrad( - origInputTensorShape: origInputTensorShape, outBackprop: outBackprop, - rowPoolingSequence: rowPoolingSequence, colPoolingSequence: colPoolingSequence, - overlapping: overlapping) - } - - } - - /// Performs fractional max pooling on the input. - /// - /// Fractional max pooling is slightly different than regular max pooling. In - /// regular max pooling, you downsize an input set by taking the maximum value of - /// smaller N x N subsections of the set (often 2x2), and try to reduce the set by - /// a factor of N, where N is an integer. Fractional max pooling, as you might - /// expect from the word "fractional", means that the overall reduction ratio N - /// does not have to be an integer. - /// - /// The sizes of the pooling regions are generated randomly but are fairly uniform. - /// For example, let's look at the height dimension, and the constraints on the - /// list of rows that will be pool boundaries. - /// - /// First we define the following: - /// - /// 1. input_row_length : the number of rows from the input set - /// 2. output_row_length : which will be smaller than the input - /// 3. alpha = input_row_length / output_row_length : our reduction ratio - /// 4. K = floor(alpha) - /// 5. row_pooling_sequence : this is the result list of pool boundary rows - /// - /// Then, row_pooling_sequence should satisfy: - /// - /// 1. a[0] = 0 : the first value of the sequence is 0 - /// 2. a[end] = input_row_length : the last value of the sequence is the size - /// 3. K <= (a[i+1] - a[i]) <= K+1 : all intervals are K or K+1 size - /// 4. length(row_pooling_sequence) = output_row_length+1 - /// - /// For more details on fractional max pooling, see this paper: - /// [Benjamin Graham, Fractional Max-Pooling](http://arxiv.org/abs/1412.6071) - /// - /// - Parameter value: 4-D with shape `[batch, height, width, channels]`. - /// - /// - Attrs: - /// - pooling_ratio: Pooling ratio for each dimension of `value`, currently only - /// supports row and col dimension and should be >= 1.0. For example, a valid - /// pooling ratio looks like [1.0, 1.44, 1.73, 1.0]. The first and last elements - /// must be 1.0 because we don't allow pooling on batch and channels - /// dimensions. 1.44 and 1.73 are pooling ratio on height and width dimensions - /// respectively. 
-    ///   - pseudo_random: When set to True, generates the pooling sequence in a
-    ///     pseudorandom fashion, otherwise, in a random fashion. Check paper [Benjamin
-    ///     Graham, Fractional Max-Pooling](http://arxiv.org/abs/1412.6071) for
-    ///     difference between pseudorandom and random.
-    ///   - overlapping: When set to True, it means when pooling, the values at the boundary
-    ///     of adjacent pooling cells are used by both cells. For example:
-    ///
-    ///     `index  0  1  2  3  4`
-    ///
-    ///     `value  20 5  16 3  7`
-    ///
-    ///     If the pooling sequence is [0, 2, 4], then 16, at index 2 will be used twice.
-    ///     The result would be [20, 16] for fractional max pooling.
-    ///   - deterministic: When set to True, a fixed pooling region will be used when
-    ///     iterating over a FractionalMaxPool node in the computation graph. Mainly used
-    ///     in unit test to make FractionalMaxPool deterministic.
-    ///   - seed: If either seed or seed2 are set to be non-zero, the random number
-    ///     generator is seeded by the given seed. Otherwise, it is seeded by a
-    ///     random seed.
-    ///   - seed2: A second seed to avoid seed collision.
-    ///
-    /// - Outputs:
-    ///   - output: output tensor after fractional max pooling.
-    ///   - row_pooling_sequence: row pooling sequence, needed to calculate gradient.
-    ///   - col_pooling_sequence: column pooling sequence, needed to calculate gradient.
-    @inlinable @inline(__always)
-    public static func fractionalMaxPool<T: TensorFlowNumeric>(
-      value: Tensor<T>,
-      poolingRatio: [Double],
-      pseudoRandom: Bool = false,
-      overlapping: Bool = false,
-      deterministic: Bool = false,
-      seed: Int64 = 0,
-      seed2: Int64 = 0
-    ) -> (output: Tensor<T>, rowPoolingSequence: Tensor<Int64>, colPoolingSequence: Tensor<Int64>) {
-      _RawTFEager.fractionalMaxPool(
-        value: value, poolingRatio: poolingRatio, pseudoRandom: pseudoRandom,
-        overlapping: overlapping, deterministic: deterministic, seed: seed, seed2: seed2)
-    }
-
-    /// Computes gradient of the FractionalMaxPool function.
-    ///
-    /// - Parameters:
-    ///   - orig_input: Original input for `fractional_max_pool`
-    ///   - orig_output: Original output for `fractional_max_pool`
-    ///   - out_backprop: 4-D with shape `[batch, height, width, channels]`. Gradients
-    ///     w.r.t. the output of `fractional_max_pool`.
-    ///   - row_pooling_sequence: row pooling sequence, form pooling region with
-    ///     col_pooling_sequence.
-    ///   - col_pooling_sequence: column pooling sequence, form pooling region with
-    ///     row_pooling sequence.
-    ///
-    /// - Attr overlapping: When set to True, it means when pooling, the values at the boundary
-    ///   of adjacent pooling cells are used by both cells. For example:
-    ///
-    ///   `index  0  1  2  3  4`
-    ///
-    ///   `value  20 5  16 3  7`
-    ///
-    ///   If the pooling sequence is [0, 2, 4], then 16, at index 2 will be used twice.
-    ///   The result would be [20, 16] for fractional max pooling.
-    ///
-    /// - Output output: 4-D. Gradients w.r.t. the input of `fractional_max_pool`.
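For reference, a minimal call-site sketch for the wrapper above; the input shape, pooling ratios, and seed are chosen arbitrarily for illustration:

```swift
import TensorFlow

// Sketch: fractional max pooling on a small NHWC tensor. The pooling ratio
// must be 1.0 on the batch and channel dimensions, per the attr docs above.
let value = Tensor<Float>(randomUniform: [1, 8, 8, 1])
let (output, rowSeq, colSeq) = _Raw.fractionalMaxPool(
  value: value,
  poolingRatio: [1.0, 1.44, 1.73, 1.0],
  pseudoRandom: true,
  seed: 42)

// The pooling sequences are the extra outputs the gradient op consumes later.
print(output.shape)  // around [1, 5, 4, 1]; exact sizes depend on the sequences
print(rowSeq.scalars.count, colSeq.scalars.count)
```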
-    @inlinable @inline(__always)
-    public static func fractionalMaxPoolGrad<T: TensorFlowNumeric>(
-      origInput: Tensor<T>,
-      origOutput: Tensor<T>,
-      outBackprop: Tensor<T>,
-      rowPoolingSequence: Tensor<Int64>,
-      colPoolingSequence: Tensor<Int64>,
-      overlapping: Bool = false
-    ) -> Tensor<T> {
-      switch commonBackend(
-        commonBackend(
-          commonBackend(
-            commonBackend(origInput.handle.backend, origOutput.handle.backend),
-            outBackprop.handle.backend), rowPoolingSequence.handle.backend),
-        colPoolingSequence.handle.backend)
-      {
-      case .XLA:
-        let output_device = colPoolingSequence.device
-        let origInput = Tensor(copying: origInput, to: .defaultTFEager)
-        let origOutput = Tensor(copying: origOutput, to: .defaultTFEager)
-        let outBackprop = Tensor(copying: outBackprop, to: .defaultTFEager)
-        let rowPoolingSequence = Tensor(copying: rowPoolingSequence, to: .defaultTFEager)
-        let colPoolingSequence = Tensor(copying: colPoolingSequence, to: .defaultTFEager)
-        return Tensor(
-          copying: _RawTFEager.fractionalMaxPoolGrad(
-            origInput: origInput, origOutput: origOutput, outBackprop: outBackprop,
-            rowPoolingSequence: rowPoolingSequence, colPoolingSequence: colPoolingSequence,
-            overlapping: overlapping), to: output_device)
-      case .TF_EAGER:
-        return _RawTFEager.fractionalMaxPoolGrad(
-          origInput: origInput, origOutput: origOutput, outBackprop: outBackprop,
-          rowPoolingSequence: rowPoolingSequence, colPoolingSequence: colPoolingSequence,
-          overlapping: overlapping)
-      }
-
-    }
+  }
+
+  /// Computes inverse hyperbolic sine of x element-wise.
+  ///
+  /// Given an input tensor, this function computes inverse hyperbolic sine
+  /// for every element in the tensor. Both input and output have a range of
+  /// `[-inf, inf]`.
+  ///
+  /// ```python
+  /// x = tf.constant([-float("inf"), -2, -0.5, 1, 1.2, 200, 10000, float("inf")])
+  /// tf.math.asinh(x) ==> [-inf -1.4436355 -0.4812118 0.8813736 1.0159732 5.991471 9.903487 inf]
+  /// ```
+  @inlinable @inline(__always)
+  public static func asinh<T: FloatingPoint & TensorFlowScalar>(
+    _ x: Tensor<T>
+  ) -> Tensor<T> {
+    switch x.handle.backend {
+    case .XLA:
+      return _RawXLA.asinh(x)
+    case .TF_EAGER:
+      return _RawTFEager.asinh(x)
+    }
+
+  }
-    @inlinable @inline(__always)
-    public static func funcAttr<
-      FIn: TensorGroup,
-      FOut: TensorGroup
-    >(
-      f: (FIn) -> FOut
-    ) {
-      _RawTFEager.funcAttr(f: f)
-    }
-
-    /// Batch normalization.
-    ///
-    /// Note that the sizes of 4D Tensors are defined by either "NHWC" or "NCHW".
-    /// The size of 1D Tensors matches the dimension C of the 4D Tensors.
-    ///
-    /// - Parameters:
-    ///   - x: A 4D Tensor for input data.
-    ///   - scale: A 1D Tensor for scaling factor, to scale the normalized x.
-    ///   - offset: A 1D Tensor for offset, to shift to the normalized x.
-    ///   - mean: A 1D Tensor for population mean. Used for inference only;
-    ///     must be empty for training.
-    ///   - variance: A 1D Tensor for population variance. Used for inference only;
-    ///     must be empty for training.
-    ///
-    /// - Attrs:
-    ///   - T: The data type for the elements of input and output Tensors.
-    ///   - epsilon: A small float number added to the variance of x.
-    ///   - data_format: The data format for x and y. Either "NHWC" (default) or "NCHW".
-    ///   - is_training: A bool value to indicate the operation is for training (default)
-    ///     or inference.
-    ///
-    /// - Outputs:
-    ///   - y: A 4D Tensor for output data.
-    ///   - batch_mean: A 1D Tensor for the computed batch mean, to be used by TensorFlow
-    ///     to compute the running mean.
-    ///   - batch_variance: A 1D Tensor for the computed batch variance, to be used by
-    ///     TensorFlow to compute the running variance.
- /// - reserve_space_1: A 1D Tensor for the computed batch mean, to be reused - /// in the gradient computation. - /// - reserve_space_2: A 1D Tensor for the computed batch variance (inverted variance - /// in the cuDNN case), to be reused in the gradient computation. - @inlinable @inline(__always) - public static func fusedBatchNorm( - _ x: Tensor, - scale: Tensor, - offset: Tensor, - mean: Tensor, - variance: Tensor, - epsilon: Double = 0.0001, - dataFormat: DataFormat = .nhwc, - isTraining: Bool = true - ) -> ( - y: Tensor, batchMean: Tensor, batchVariance: Tensor, reserveSpace1: Tensor, - reserveSpace2: Tensor - ) { - _RawTFEager.fusedBatchNorm( - x, scale: scale, offset: offset, mean: mean, variance: variance, epsilon: epsilon, - dataFormat: dataFormat, isTraining: isTraining) - } - - /// Gradient for batch normalization. - /// - /// Note that the size of 4D Tensors are defined by either "NHWC" or "NCHW". - /// The size of 1D Tensors matches the dimension C of the 4D Tensors. - /// - /// - Parameters: - /// - y_backprop: A 4D Tensor for the gradient with respect to y. - /// - x: A 4D Tensor for input data. - /// - scale: A 1D Tensor for scaling factor, to scale the normalized x. - /// - reserve_space_1: When is_training is True, a 1D Tensor for the computed batch - /// mean to be reused in gradient computation. When is_training is - /// False, a 1D Tensor for the population mean to be reused in both - /// 1st and 2nd order gradient computation. - /// - reserve_space_2: When is_training is True, a 1D Tensor for the computed batch - /// variance (inverted variance in the cuDNN case) to be reused in - /// gradient computation. When is_training is False, a 1D Tensor - /// for the population variance to be reused in both 1st and 2nd - /// order gradient computation. - /// - /// - Attrs: - /// - T: The data type for the elements of input and output Tensors. - /// - epsilon: A small float number added to the variance of x. - /// - data_format: The data format for y_backprop, x, x_backprop. - /// Either "NHWC" (default) or "NCHW". - /// - is_training: A bool value to indicate the operation is for training (default) - /// or inference. - /// - /// - Outputs: - /// - x_backprop: A 4D Tensor for the gradient with respect to x. - /// - scale_backprop: A 1D Tensor for the gradient with respect to scale. - /// - offset_backprop: A 1D Tensor for the gradient with respect to offset. - /// - reserve_space_3: Unused placeholder to match the mean input in FusedBatchNorm. - /// - reserve_space_4: Unused placeholder to match the variance input - /// in FusedBatchNorm. - @inlinable @inline(__always) - public static func fusedBatchNormGrad( - yBackprop: Tensor, - _ x: Tensor, - scale: Tensor, - reserveSpace1: Tensor, - reserveSpace2: Tensor, - epsilon: Double = 0.0001, - dataFormat: DataFormat = .nhwc, - isTraining: Bool = true - ) -> ( - xBackprop: Tensor, scaleBackprop: Tensor, offsetBackprop: Tensor, - reserveSpace3: Tensor, reserveSpace4: Tensor - ) { - _RawTFEager.fusedBatchNormGrad( - yBackprop: yBackprop, x, scale: scale, reserveSpace1: reserveSpace1, - reserveSpace2: reserveSpace2, epsilon: epsilon, dataFormat: dataFormat, - isTraining: isTraining) - } - - /// Gradient for batch normalization. - /// - /// Note that the size of 4D Tensors are defined by either "NHWC" or "NCHW". - /// The size of 1D Tensors matches the dimension C of the 4D Tensors. - /// - /// - Parameters: - /// - y_backprop: A 4D Tensor for the gradient with respect to y. - /// - x: A 4D Tensor for input data. 
- /// - scale: A 1D Tensor for scaling factor, to scale the normalized x. - /// - reserve_space_1: When is_training is True, a 1D Tensor for the computed batch - /// mean to be reused in gradient computation. When is_training is - /// False, a 1D Tensor for the population mean to be reused in both - /// 1st and 2nd order gradient computation. - /// - reserve_space_2: When is_training is True, a 1D Tensor for the computed batch - /// variance (inverted variance in the cuDNN case) to be reused in - /// gradient computation. When is_training is False, a 1D Tensor - /// for the population variance to be reused in both 1st and 2nd - /// order gradient computation. - /// - /// - Attrs: - /// - T: The data type for the elements of input and output Tensors. - /// - U: The data type for the scale, offset, mean, and variance. - /// - epsilon: A small float number added to the variance of x. - /// - data_format: The data format for y_backprop, x, x_backprop. - /// Either "NHWC" (default) or "NCHW". - /// - is_training: A bool value to indicate the operation is for training (default) - /// or inference. - /// - /// - Outputs: - /// - x_backprop: A 4D Tensor for the gradient with respect to x. - /// - scale_backprop: A 1D Tensor for the gradient with respect to scale. - /// - offset_backprop: A 1D Tensor for the gradient with respect to offset. - /// - reserve_space_3: Unused placeholder to match the mean input in FusedBatchNorm. - /// - reserve_space_4: Unused placeholder to match the variance input - /// in FusedBatchNorm. - @inlinable @inline(__always) - public static func fusedBatchNormGradV2< - T: FloatingPoint & TensorFlowScalar, - U: FloatingPoint & TensorFlowScalar - >( - yBackprop: Tensor, - _ x: Tensor, - scale: Tensor, - reserveSpace1: Tensor, - reserveSpace2: Tensor, - epsilon: Double = 0.0001, - dataFormat: DataFormat = .nhwc, - isTraining: Bool = true - ) -> ( - xBackprop: Tensor, scaleBackprop: Tensor, offsetBackprop: Tensor, - reserveSpace3: Tensor, reserveSpace4: Tensor - ) { - _RawTFEager.fusedBatchNormGradV2( - yBackprop: yBackprop, x, scale: scale, reserveSpace1: reserveSpace1, - reserveSpace2: reserveSpace2, epsilon: epsilon, dataFormat: dataFormat, - isTraining: isTraining) - } - - /// Gradient for batch normalization. - /// - /// Note that the size of 4D Tensors are defined by either "NHWC" or "NCHW". - /// The size of 1D Tensors matches the dimension C of the 4D Tensors. - /// - /// - Parameters: - /// - y_backprop: A 4D Tensor for the gradient with respect to y. - /// - x: A 4D Tensor for input data. - /// - scale: A 1D Tensor for scaling factor, to scale the normalized x. - /// - reserve_space_1: When is_training is True, a 1D Tensor for the computed batch - /// mean to be reused in gradient computation. When is_training is - /// False, a 1D Tensor for the population mean to be reused in both - /// 1st and 2nd order gradient computation. - /// - reserve_space_2: When is_training is True, a 1D Tensor for the computed batch - /// variance (inverted variance in the cuDNN case) to be reused in - /// gradient computation. When is_training is False, a 1D Tensor - /// for the population variance to be reused in both 1st and 2nd - /// order gradient computation. - /// - reserve_space_3: When is_training is True, a 1D Tensor for some intermediate results to be reused - /// in gradient computation. When is_training is False, a dummy empty Tensor will be - /// created. - /// - /// - Attrs: - /// - T: The data type for the elements of input and output Tensors. 
- /// - U: The data type for the scale, offset, mean, and variance. - /// - epsilon: A small float number added to the variance of x. - /// - data_format: The data format for y_backprop, x, x_backprop. - /// Either "NHWC" (default) or "NCHW". - /// - is_training: A bool value to indicate the operation is for training (default) - /// or inference. - /// - /// - Outputs: - /// - x_backprop: A 4D Tensor for the gradient with respect to x. - /// - scale_backprop: A 1D Tensor for the gradient with respect to scale. - /// - offset_backprop: A 1D Tensor for the gradient with respect to offset. - /// - reserve_space_4: Unused placeholder to match the mean input in FusedBatchNorm. - /// - reserve_space_5: Unused placeholder to match the variance input - /// in FusedBatchNorm. - @inlinable @inline(__always) - public static func fusedBatchNormGradV3< - T: FloatingPoint & TensorFlowScalar, - U: FloatingPoint & TensorFlowScalar - >( - yBackprop: Tensor, - _ x: Tensor, - scale: Tensor, - reserveSpace1: Tensor, - reserveSpace2: Tensor, - reserveSpace3: Tensor, - epsilon: Double = 0.0001, - dataFormat: DataFormat = .nhwc, - isTraining: Bool = true - ) -> ( - xBackprop: Tensor, scaleBackprop: Tensor, offsetBackprop: Tensor, - reserveSpace4: Tensor, reserveSpace5: Tensor - ) { - _RawTFEager.fusedBatchNormGradV3( - yBackprop: yBackprop, x, scale: scale, reserveSpace1: reserveSpace1, - reserveSpace2: reserveSpace2, reserveSpace3: reserveSpace3, epsilon: epsilon, - dataFormat: dataFormat, isTraining: isTraining) - } - - /// Batch normalization. - /// - /// Note that the size of 4D Tensors are defined by either "NHWC" or "NCHW". - /// The size of 1D Tensors matches the dimension C of the 4D Tensors. - /// - /// - Parameters: - /// - x: A 4D Tensor for input data. - /// - scale: A 1D Tensor for scaling factor, to scale the normalized x. - /// - offset: A 1D Tensor for offset, to shift to the normalized x. - /// - mean: A 1D Tensor for population mean. Used for inference only; - /// must be empty for training. - /// - variance: A 1D Tensor for population variance. Used for inference only; - /// must be empty for training. - /// - /// - Attrs: - /// - T: The data type for the elements of input and output Tensors. - /// - U: The data type for the scale, offset, mean, and variance. - /// - epsilon: A small float number added to the variance of x. - /// - data_format: The data format for x and y. Either "NHWC" (default) or "NCHW". - /// - is_training: A bool value to indicate the operation is for training (default) - /// or inference. - /// - /// - Outputs: - /// - y: A 4D Tensor for output data. - /// - batch_mean: A 1D Tensor for the computed batch mean, to be used by TensorFlow - /// to compute the running mean. - /// - batch_variance: A 1D Tensor for the computed batch variance, to be used by - /// TensorFlow to compute the running variance. - /// - reserve_space_1: A 1D Tensor for the computed batch mean, to be reused - /// in the gradient computation. - /// - reserve_space_2: A 1D Tensor for the computed batch variance (inverted variance - /// in the cuDNN case), to be reused in the gradient computation. 
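As a usage sketch for the fused batch norm family documented above: in training mode the population statistics must be passed as empty tensors, per the parameter notes. The shapes here are illustrative only:

```swift
import TensorFlow

// Sketch: training-mode fused batch norm over an NHWC tensor with C == 3.
let x = Tensor<Float>(randomNormal: [2, 4, 4, 3])
let scale = Tensor<Float>(ones: [3])
let offset = Tensor<Float>(zeros: [3])

// In training mode the population mean/variance inputs must be empty; the
// op returns freshly computed batch statistics instead.
let empty = Tensor<Float>(zeros: [0])
let (y, batchMean, batchVariance, _, _) = _Raw.fusedBatchNorm(
  x, scale: scale, offset: offset, mean: empty, variance: empty,
  epsilon: 0.001, isTraining: true)

print(y.shape)              // [2, 4, 4, 3]
print(batchMean.shape)      // [3]
print(batchVariance.shape)  // [3]
```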
- @inlinable @inline(__always) - public static func fusedBatchNormV2< - T: FloatingPoint & TensorFlowScalar, - U: FloatingPoint & TensorFlowScalar - >( - _ x: Tensor, - scale: Tensor, - offset: Tensor, - mean: Tensor, - variance: Tensor, - epsilon: Double = 0.0001, - dataFormat: DataFormat = .nhwc, - isTraining: Bool = true - ) -> ( - y: Tensor, batchMean: Tensor, batchVariance: Tensor, reserveSpace1: Tensor, - reserveSpace2: Tensor - ) { - _RawTFEager.fusedBatchNormV2( - x, scale: scale, offset: offset, mean: mean, variance: variance, epsilon: epsilon, - dataFormat: dataFormat, isTraining: isTraining) - } - - /// Batch normalization. - /// - /// Note that the size of 4D Tensors are defined by either "NHWC" or "NCHW". - /// The size of 1D Tensors matches the dimension C of the 4D Tensors. - /// - /// - Parameters: - /// - x: A 4D Tensor for input data. - /// - scale: A 1D Tensor for scaling factor, to scale the normalized x. - /// - offset: A 1D Tensor for offset, to shift to the normalized x. - /// - mean: A 1D Tensor for population mean. Used for inference only; - /// must be empty for training. - /// - variance: A 1D Tensor for population variance. Used for inference only; - /// must be empty for training. - /// - /// - Attrs: - /// - T: The data type for the elements of input and output Tensors. - /// - U: The data type for the scale, offset, mean, and variance. - /// - epsilon: A small float number added to the variance of x. - /// - data_format: The data format for x and y. Either "NHWC" (default) or "NCHW". - /// - is_training: A bool value to indicate the operation is for training (default) - /// or inference. - /// - /// - Outputs: - /// - y: A 4D Tensor for output data. - /// - batch_mean: A 1D Tensor for the computed batch mean, to be used by TensorFlow - /// to compute the running mean. - /// - batch_variance: A 1D Tensor for the computed batch variance, to be used by - /// TensorFlow to compute the running variance. - /// - reserve_space_1: A 1D Tensor for the computed batch mean, to be reused - /// in the gradient computation. - /// - reserve_space_2: A 1D Tensor for the computed batch variance (inverted variance - /// in the cuDNN case), to be reused in the gradient computation. - /// - reserve_space_3: A 1D Tensor for some intermediate results, to be reused in the gradient - /// computation for better efficiency. - @inlinable @inline(__always) - public static func fusedBatchNormV3< - T: FloatingPoint & TensorFlowScalar, - U: FloatingPoint & TensorFlowScalar - >( - _ x: Tensor, - scale: Tensor, - offset: Tensor, - mean: Tensor, - variance: Tensor, - epsilon: Double = 0.0001, - dataFormat: DataFormat = .nhwc, - isTraining: Bool = true - ) -> ( - y: Tensor, batchMean: Tensor, batchVariance: Tensor, reserveSpace1: Tensor, - reserveSpace2: Tensor, reserveSpace3: Tensor - ) { - _RawTFEager.fusedBatchNormV3( - x, scale: scale, offset: offset, mean: mean, variance: variance, epsilon: epsilon, - dataFormat: dataFormat, isTraining: isTraining) - } - - /// Performs a padding as a preprocess during a convolution. - /// - /// Similar to FusedResizeAndPadConv2d, this op allows for an optimized - /// implementation where the spatial padding transformation stage is fused with the - /// im2col lookup, but in this case without the bilinear filtering required for - /// resizing. Fusing the padding prevents the need to write out the intermediate - /// results as whole tensors, reducing memory pressure, and we can get some latency - /// gains by merging the transformation calculations. 
- /// The data_format attribute for Conv2D isn't supported by this op, and 'NHWC' - /// order is used instead. - /// Internally this op uses a single per-graph scratch buffer, which means that it - /// will block if multiple versions are being run in parallel. This is because this - /// operator is primarily an optimization to minimize memory usage. - /// - /// - Parameters: - /// - input: 4-D with shape `[batch, in_height, in_width, in_channels]`. - /// - paddings: A two-column matrix specifying the padding sizes. The number of - /// rows must be the same as the rank of `input`. - /// - filter: 4-D with shape - /// `[filter_height, filter_width, in_channels, out_channels]`. - /// - /// - Attrs: - /// - strides: 1-D of length 4. The stride of the sliding window for each dimension - /// of `input`. Must be in the same order as the dimension specified with format. - /// - padding: The type of padding algorithm to use. - @inlinable @inline(__always) - public static func fusedPadConv2D( - _ input: Tensor, - paddings: Tensor, - filter: Tensor, - mode: Mode1, - strides: [Int32], - padding: Padding - ) -> Tensor { - switch commonBackend( - commonBackend(input.handle.backend, paddings.handle.backend), filter.handle.backend) - { - case .XLA: - let output_device = filter.device - let input = Tensor(copying: input, to: .defaultTFEager) - let paddings = Tensor(copying: paddings, to: .defaultTFEager) - let filter = Tensor(copying: filter, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.fusedPadConv2D( - input, paddings: paddings, filter: filter, mode: mode, strides: strides, - padding: padding), to: output_device) - case .TF_EAGER: - return _RawTFEager.fusedPadConv2D( - input, paddings: paddings, filter: filter, mode: mode, strides: strides, padding: padding) - } - - } - - /// Performs a resize and padding as a preprocess during a convolution. - /// - /// It's often possible to do spatial transformations more efficiently as part of - /// the packing stage of a convolution, so this op allows for an optimized - /// implementation where these stages are fused together. This prevents the need to - /// write out the intermediate results as whole tensors, reducing memory pressure, - /// and we can get some latency gains by merging the transformation calculations. - /// The data_format attribute for Conv2D isn't supported by this op, and defaults to - /// 'NHWC' order. - /// Internally this op uses a single per-graph scratch buffer, which means that it - /// will block if multiple versions are being run in parallel. This is because this - /// operator is primarily an optimization to minimize memory usage. - /// - /// - Parameters: - /// - input: 4-D with shape `[batch, in_height, in_width, in_channels]`. - /// - size: A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The - /// new size for the images. - /// - paddings: A two-column matrix specifying the padding sizes. The number of - /// rows must be the same as the rank of `input`. - /// - filter: 4-D with shape - /// `[filter_height, filter_width, in_channels, out_channels]`. - /// - /// - Attrs: - /// - resize_align_corners: If true, the centers of the 4 corner pixels of the input and output tensors are - /// aligned, preserving the values at the corner pixels. Defaults to false. - /// - strides: 1-D of length 4. The stride of the sliding window for each dimension - /// of `input`. Must be in the same order as the dimension specified with format. - /// - padding: The type of padding algorithm to use. 
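Dispatching bodies like `fusedPadConv2D` above use this file's standard fallback for ops that have no `_RawXLA` kernel: copy XLA-resident inputs to the default eager device, run the eager kernel, and copy the result back. A distilled sketch of that pattern, with a hypothetical `eagerOp` standing in for any `_RawTFEager` call:

```swift
import TensorFlow

// Sketch of the eager-fallback dispatch pattern used throughout this file:
// when an input lives on an XLA device and the op has no XLA kernel, inputs
// move to the default eager device, the eager kernel runs, and the result
// moves back to the original device.
func dispatchWithEagerFallback<T: TensorFlowScalar>(
  _ input: Tensor<T>,
  eagerOp: (Tensor<T>) -> Tensor<T>  // hypothetical stand-in for an _RawTFEager op
) -> Tensor<T> {
  switch input.handle.backend {
  case .XLA:
    let outputDevice = input.device
    let eagerInput = Tensor(copying: input, to: .defaultTFEager)
    return Tensor(copying: eagerOp(eagerInput), to: outputDevice)
  case .TF_EAGER:
    return eagerOp(input)
  }
}
```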
- @inlinable @inline(__always) - public static func fusedResizeAndPadConv2D( - _ input: Tensor, - size: Tensor, - paddings: Tensor, - filter: Tensor, - resizeAlignCorners: Bool = false, - mode: Mode1, - strides: [Int32], - padding: Padding - ) -> Tensor { - switch commonBackend( - commonBackend( - commonBackend(input.handle.backend, size.handle.backend), paddings.handle.backend), - filter.handle.backend) - { - case .XLA: - let output_device = filter.device - let input = Tensor(copying: input, to: .defaultTFEager) - let size = Tensor(copying: size, to: .defaultTFEager) - let paddings = Tensor(copying: paddings, to: .defaultTFEager) - let filter = Tensor(copying: filter, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.fusedResizeAndPadConv2D( - input, size: size, paddings: paddings, filter: filter, - resizeAlignCorners: resizeAlignCorners, mode: mode, strides: strides, padding: padding), - to: output_device) - case .TF_EAGER: - return _RawTFEager.fusedResizeAndPadConv2D( - input, size: size, paddings: paddings, filter: filter, - resizeAlignCorners: resizeAlignCorners, mode: mode, strides: strides, padding: padding) - } - - } - - /// Computes the GRU cell forward propagation for 1 time step. - /// - /// Args - /// x: Input to the GRU cell. - /// h_prev: State input from the previous GRU cell. - /// w_ru: Weight matrix for the reset and update gate. - /// w_c: Weight matrix for the cell connection gate. - /// b_ru: Bias vector for the reset and update gate. - /// b_c: Bias vector for the cell connection gate. - /// - /// Returns - /// r: Output of the reset gate. - /// u: Output of the update gate. - /// c: Output of the cell connection gate. - /// h: Current state of the GRU cell. - /// - /// Note on notation of the variables: - /// - /// Concatenation of a and b is represented by a_b - /// Element-wise dot product of a and b is represented by ab - /// Element-wise dot product is represented by \circ - /// Matrix multiplication is represented by * - /// - /// Biases are initialized with : - /// `b_ru` - constant_initializer(1.0) - /// `b_c` - constant_initializer(0.0) - /// - /// This kernel op implements the following mathematical equations: - /// - /// ``` - /// x_h_prev = [x, h_prev] - /// - /// [r_bar u_bar] = x_h_prev * w_ru + b_ru - /// - /// r = sigmoid(r_bar) - /// u = sigmoid(u_bar) - /// - /// h_prevr = h_prev \circ r - /// - /// x_h_prevr = [x h_prevr] - /// - /// c_bar = x_h_prevr * w_c + b_c - /// c = tanh(c_bar) - /// - /// h = (1-u) \circ c + u \circ h_prev - /// ``` - @inlinable @inline(__always) - public static func gRUBlockCell( - _ x: Tensor, - hPrev: Tensor, - wRu: Tensor, - wC: Tensor, - bRu: Tensor, - bC: Tensor - ) -> (r: Tensor, u: Tensor, c: Tensor, h: Tensor) { - _RawTFEager.gRUBlockCell(x, hPrev: hPrev, wRu: wRu, wC: wC, bRu: bRu, bC: bC) - } - - /// Computes the GRU cell back-propagation for 1 time step. - /// - /// Args - /// x: Input to the GRU cell. - /// h_prev: State input from the previous GRU cell. - /// w_ru: Weight matrix for the reset and update gate. - /// w_c: Weight matrix for the cell connection gate. - /// b_ru: Bias vector for the reset and update gate. - /// b_c: Bias vector for the cell connection gate. - /// r: Output of the reset gate. - /// u: Output of the update gate. - /// c: Output of the cell connection gate. - /// d_h: Gradients of the h_new wrt to objective function. - /// - /// Returns - /// d_x: Gradients of the x wrt to objective function. - /// d_h_prev: Gradients of the h wrt to objective function. 
- /// d_c_bar Gradients of the c_bar wrt to objective function. - /// d_r_bar_u_bar Gradients of the r_bar & u_bar wrt to objective function. - /// - /// This kernel op implements the following mathematical equations: - /// - /// Note on notation of the variables: - /// - /// Concatenation of a and b is represented by a_b - /// Element-wise dot product of a and b is represented by ab - /// Element-wise dot product is represented by \circ - /// Matrix multiplication is represented by * - /// - /// Additional notes for clarity: - /// - /// `w_ru` can be segmented into 4 different matrices. - /// ``` - /// w_ru = [w_r_x w_u_x - /// w_r_h_prev w_u_h_prev] - /// ``` - /// Similarly, `w_c` can be segmented into 2 different matrices. - /// ``` - /// w_c = [w_c_x w_c_h_prevr] - /// ``` - /// Same goes for biases. - /// ``` - /// b_ru = [b_ru_x b_ru_h] - /// b_c = [b_c_x b_c_h] - /// ``` - /// Another note on notation: - /// ``` - /// d_x = d_x_component_1 + d_x_component_2 - /// - /// where d_x_component_1 = d_r_bar * w_r_x^T + d_u_bar * w_r_x^T - /// and d_x_component_2 = d_c_bar * w_c_x^T - /// - /// d_h_prev = d_h_prev_component_1 + d_h_prevr \circ r + d_h \circ u - /// where d_h_prev_componenet_1 = d_r_bar * w_r_h_prev^T + d_u_bar * w_r_h_prev^T - /// ``` - /// - /// Mathematics behind the Gradients below: - /// ``` - /// d_c_bar = d_h \circ (1-u) \circ (1-c \circ c) - /// d_u_bar = d_h \circ (h-c) \circ u \circ (1-u) - /// - /// d_r_bar_u_bar = [d_r_bar d_u_bar] - /// - /// [d_x_component_1 d_h_prev_component_1] = d_r_bar_u_bar * w_ru^T - /// - /// [d_x_component_2 d_h_prevr] = d_c_bar * w_c^T - /// - /// d_x = d_x_component_1 + d_x_component_2 - /// - /// d_h_prev = d_h_prev_component_1 + d_h_prevr \circ r + u - /// ``` - /// Below calculation is performed in the python wrapper for the Gradients - /// (not in the gradient kernel.) - /// ``` - /// d_w_ru = x_h_prevr^T * d_c_bar - /// - /// d_w_c = x_h_prev^T * d_r_bar_u_bar - /// - /// d_b_ru = sum of d_r_bar_u_bar along axis = 0 - /// - /// d_b_c = sum of d_c_bar along axis = 0 - /// ``` - @inlinable @inline(__always) - public static func gRUBlockCellGrad( - _ x: Tensor, - hPrev: Tensor, - wRu: Tensor, - wC: Tensor, - bRu: Tensor, - bC: Tensor, - r: Tensor, - u: Tensor, - c: Tensor, - dH: Tensor - ) -> (dX: Tensor, dHPrev: Tensor, dCBar: Tensor, dRBarUBar: Tensor) { - _RawTFEager.gRUBlockCellGrad( - x, hPrev: hPrev, wRu: wRu, wC: wC, bRu: bRu, bC: bC, r: r, u: u, c: c, dH: dH) - } - - /// Gather slices from `params` according to `indices`. - /// - /// `indices` must be an integer tensor of any dimension (usually 0-D or 1-D). - /// Produces an output tensor with shape `indices.shape + params.shape[1:]` where: - /// - /// ```python - /// # Scalar indices - /// output[:, ..., :] = params[indices, :, ... :] - /// - /// # Vector indices - /// output[i, :, ..., :] = params[indices[i], :, ... :] - /// - /// # Higher rank indices - /// output[i, ..., j, :, ... :] = params[indices[i, ..., j], :, ..., :] - /// ``` - /// - /// If `indices` is a permutation and `len(indices) == params.shape[0]` then - /// this operation will permute `params` accordingly. - /// - /// `validate_indices`: DEPRECATED. If this operation is assigned to CPU, values in - /// `indices` are always validated to be within range. If assigned to GPU, - /// out-of-bound indices result in safe but unspecified behavior, which may include - /// raising an error. - /// - ///
- /// - ///
- @inlinable @inline(__always) - public static func gather< - Tparams: TensorFlowScalar, - Tindices: TensorFlowIndex - >( - params: Tensor, - indices: Tensor, - validateIndices: Bool = true - ) -> Tensor { - switch commonBackend(params.handle.backend, indices.handle.backend) { - case .XLA: - return _RawXLA.gather(params: params, indices: indices, validateIndices: validateIndices) - case .TF_EAGER: - return _RawTFEager.gather( - params: params, indices: indices, validateIndices: validateIndices) - } - - } - - /// Gather slices from `params` into a Tensor with shape specified by `indices`. - /// - /// `indices` is a K-dimensional integer tensor, best thought of as a - /// (K-1)-dimensional tensor of indices into `params`, where each element defines a - /// slice of `params`: - /// - /// output[\\(i_0, ..., i_{K-2}\\)] = params[indices[\\(i_0, ..., i_{K-2}\\)]] - /// - /// Whereas in `tf.gather` `indices` defines slices into the `axis` - /// dimension of `params`, in `tf.gather_nd`, `indices` defines slices into the - /// first `N` dimensions of `params`, where `N = indices.shape[-1]`. - /// - /// The last dimension of `indices` can be at most the rank of - /// `params`: - /// - /// indices.shape[-1] <= params.rank - /// - /// The last dimension of `indices` corresponds to elements - /// (if `indices.shape[-1] == params.rank`) or slices - /// (if `indices.shape[-1] < params.rank`) along dimension `indices.shape[-1]` - /// of `params`. The output tensor has shape - /// - /// indices.shape[:-1] + params.shape[indices.shape[-1]:] - /// - /// Note that on CPU, if an out of bound index is found, an error is returned. - /// On GPU, if an out of bound index is found, a 0 is stored in the - /// corresponding output value. - /// - /// Some examples below. - /// - /// Simple indexing into a matrix: - /// - /// ```python - /// indices = [[0, 0], [1, 1]] - /// params = [['a', 'b'], ['c', 'd']] - /// output = ['a', 'd'] - /// ``` - /// - /// Slice indexing into a matrix: - /// - /// ```python - /// indices = [[1], [0]] - /// params = [['a', 'b'], ['c', 'd']] - /// output = [['c', 'd'], ['a', 'b']] - /// ``` - /// - /// Indexing into a 3-tensor: - /// - /// ```python - /// indices = [[1]] - /// params = [[['a0', 'b0'], ['c0', 'd0']], - /// [['a1', 'b1'], ['c1', 'd1']]] - /// output = [[['a1', 'b1'], ['c1', 'd1']]] - /// - /// - /// indices = [[0, 1], [1, 0]] - /// params = [[['a0', 'b0'], ['c0', 'd0']], - /// [['a1', 'b1'], ['c1', 'd1']]] - /// output = [['c0', 'd0'], ['a1', 'b1']] - /// - /// - /// indices = [[0, 0, 1], [1, 0, 1]] - /// params = [[['a0', 'b0'], ['c0', 'd0']], - /// [['a1', 'b1'], ['c1', 'd1']]] - /// output = ['b0', 'b1'] - /// ``` - /// - /// Batched indexing into a matrix: - /// - /// ```python - /// indices = [[[0, 0]], [[0, 1]]] - /// params = [['a', 'b'], ['c', 'd']] - /// output = [['a'], ['b']] - /// ``` - /// - /// Batched slice indexing into a matrix: - /// - /// ```python - /// indices = [[[1]], [[0]]] - /// params = [['a', 'b'], ['c', 'd']] - /// output = [[['c', 'd']], [['a', 'b']]] - /// ``` - /// - /// Batched indexing into a 3-tensor: - /// - /// ```python - /// indices = [[[1]], [[0]]] - /// params = [[['a0', 'b0'], ['c0', 'd0']], - /// [['a1', 'b1'], ['c1', 'd1']]] - /// output = [[[['a1', 'b1'], ['c1', 'd1']]], - /// [[['a0', 'b0'], ['c0', 'd0']]]] - /// - /// indices = [[[0, 1], [1, 0]], [[0, 0], [1, 1]]] - /// params = [[['a0', 'b0'], ['c0', 'd0']], - /// [['a1', 'b1'], ['c1', 'd1']]] - /// output = [[['c0', 'd0'], ['a1', 'b1']], - /// [['a0', 'b0'], ['c1', 'd1']]] - 
/// - /// - /// indices = [[[0, 0, 1], [1, 0, 1]], [[0, 1, 1], [1, 1, 0]]] - /// params = [[['a0', 'b0'], ['c0', 'd0']], - /// [['a1', 'b1'], ['c1', 'd1']]] - /// output = [['b0', 'b1'], ['d0', 'c1']] - /// ``` - /// - /// See also `tf.gather` and `tf.batch_gather`. - /// - /// - Parameters: - /// - params: The tensor from which to gather values. - /// - indices: Index tensor. - /// - /// - Output output: Values from `params` gathered from indices given by `indices`, with - /// shape `indices.shape[:-1] + params.shape[indices.shape[-1]:]`. - @inlinable @inline(__always) - public static func gatherNd< - Tparams: TensorFlowScalar, - Tindices: TensorFlowIndex - >( - params: Tensor, - indices: Tensor - ) -> Tensor { - switch commonBackend(params.handle.backend, indices.handle.backend) { - case .XLA: - let output_device = indices.device - let params = Tensor(copying: params, to: .defaultTFEager) - let indices = Tensor(copying: indices, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.gatherNd(params: params, indices: indices), to: output_device) - case .TF_EAGER: - return _RawTFEager.gatherNd(params: params, indices: indices) - } - - } - - /// Gather slices from `params` axis `axis` according to `indices`. - /// - /// `indices` must be an integer tensor of any dimension (usually 0-D or 1-D). - /// Produces an output tensor with shape `params.shape[:axis] + indices.shape + - /// params.shape[axis + 1:]` where: - /// - /// ```python - /// # Scalar indices (output is rank(params) - 1). - /// output[a_0, ..., a_n, b_0, ..., b_n] = - /// params[a_0, ..., a_n, indices, b_0, ..., b_n] - /// - /// # Vector indices (output is rank(params)). - /// output[a_0, ..., a_n, i, b_0, ..., b_n] = - /// params[a_0, ..., a_n, indices[i], b_0, ..., b_n] - /// - /// # Higher rank indices (output is rank(params) + rank(indices) - 1). - /// output[a_0, ..., a_n, i, ..., j, b_0, ... b_n] = - /// params[a_0, ..., a_n, indices[i, ..., j], b_0, ..., b_n] - /// ``` - /// - ///
- /// - ///
- /// - /// Note that on CPU, if an out of bound index is found, an error is returned. - /// On GPU, if an out of bound index is found, a 0 is stored in the - /// corresponding output value. - /// - /// See also `tf.batch_gather` and `tf.gather_nd`. - /// - /// - Parameters: - /// - params: The tensor from which to gather values. Must be at least rank - /// `axis + 1`. - /// - indices: Index tensor. Must be in range `[0, params.shape[axis])`. - /// - axis: The axis in `params` to gather `indices` from. Defaults to the first - /// dimension. Supports negative indexes. - /// - /// - Output output: Values from `params` gathered from indices given by `indices`, with - /// shape `params.shape[:axis] + indices.shape + params.shape[axis + 1:]`. - @inlinable @inline(__always) - public static func gatherV2< - Tparams: TensorFlowScalar, - Tindices: TensorFlowIndex, - Taxis: TensorFlowIndex - >( - params: Tensor, - indices: Tensor, - axis: Tensor, - batchDims: Int64 = 0 - ) -> Tensor { - switch commonBackend( - commonBackend(params.handle.backend, indices.handle.backend), axis.handle.backend) - { - case .XLA: - return _RawXLA.gatherV2(params: params, indices: indices, axis: axis, batchDims: batchDims) - case .TF_EAGER: - return _RawTFEager.gatherV2( - params: params, indices: indices, axis: axis, batchDims: batchDims) - } - - } - - /// This op produces Region of Interests from given bounding boxes(bbox_deltas) encoded wrt anchors according to eq.2 in arXiv:1506.01497 - /// - /// The op selects top `pre_nms_topn` scoring boxes, decodes them with respect to anchors, - /// applies non-maximal suppression on overlapping boxes with higher than - /// `nms_threshold` intersection-over-union (iou) value, discarding boxes where shorter - /// side is less than `min_size`. - /// Inputs: - /// `scores`: A 4D tensor of shape [Batch, Height, Width, Num Anchors] containing the scores per anchor at given postion - /// `bbox_deltas`: is a tensor of shape [Batch, Height, Width, 4 x Num Anchors] boxes encoded to each anchor - /// `anchors`: A 1D tensor of shape [4 x Num Anchors], representing the anchors. - /// Outputs: - /// `rois`: output RoIs, a 3D tensor of shape [Batch, post_nms_topn, 4], padded by 0 if less than post_nms_topn candidates found. - /// `roi_probabilities`: probability scores of each roi in 'rois', a 2D tensor of shape [Batch,post_nms_topn], padded with 0 if needed, sorted by scores. - /// - /// - Parameters: - /// - scores: A 4-D float tensor of shape `[num_images, height, width, num_achors]` containing scores of the boxes for given anchors, can be unsorted. - /// - bbox_deltas: A 4-D float tensor of shape `[num_images, height, width, 4 x num_anchors]`. encoding boxes with respec to each anchor. - /// Coordinates are given in the form [dy, dx, dh, dw]. - /// - image_info: A 2-D float tensor of shape `[num_images, 5]` containing image information Height, Width, Scale. - /// - anchors: A 2-D float tensor of shape `[num_anchors, 4]` describing the anchor boxes. Boxes are formatted in the form [y1, x1, y2, x2]. - /// - nms_threshold: A scalar float tensor for non-maximal-suppression threshold. - /// - pre_nms_topn: A scalar int tensor for the number of top scoring boxes to be used as input. - /// - min_size: A scalar float tensor. Any box that has a smaller size than min_size will be discarded. - /// - /// - Attr post_nms_topn: An integer. Maximum number of rois in the output. 
- /// - /// - Outputs: - /// - rois: A 3-D float tensor of shape `[num_images,post_nms_topn,4]` representing the selected - /// region of interest boxes. Sorted in descending order in scores. - /// - roi_probabilities: A 2-D float tensor of shape `[num_images, post_nms_topn]` representing the score of the - /// region of interest box in `rois` tensor at the same index. - @inlinable @inline(__always) - public static func generateBoundingBoxProposals( - scores: Tensor, - bboxDeltas: Tensor, - imageInfo: Tensor, - anchors: Tensor, - nmsThreshold: Tensor, - preNmsTopn: Tensor, - minSize: Tensor, - postNmsTopn: Int64 = 300 - ) -> (rois: Tensor, roiProbabilities: Tensor) { - _RawTFEager.generateBoundingBoxProposals( - scores: scores, bboxDeltas: bboxDeltas, imageInfo: imageInfo, anchors: anchors, - nmsThreshold: nmsThreshold, preNmsTopn: preNmsTopn, minSize: minSize, - postNmsTopn: postNmsTopn) - } - - /// Given a path to new and old vocabulary files, returns a remapping Tensor of - /// - /// length `num_new_vocab`, where `remapping[i]` contains the row number in the old - /// vocabulary that corresponds to row `i` in the new vocabulary (starting at line - /// `new_vocab_offset` and up to `num_new_vocab` entities), or `-1` if entry `i` - /// in the new vocabulary is not in the old vocabulary. The old vocabulary is - /// constrained to the first `old_vocab_size` entries if `old_vocab_size` is not the - /// default value of -1. - /// - /// `num_vocab_offset` enables - /// use in the partitioned variable case, and should generally be set through - /// examining partitioning info. The format of the files should be a text file, - /// with each line containing a single entity within the vocabulary. - /// - /// For example, with `new_vocab_file` a text file containing each of the following - /// elements on a single line: `[f0, f1, f2, f3]`, old_vocab_file = [f1, f0, f3], - /// `num_new_vocab = 3, new_vocab_offset = 1`, the returned remapping would be - /// `[0, -1, 2]`. - /// - /// The op also returns a count of how many entries in the new vocabulary - /// were present in the old vocabulary, which is used to calculate the number of - /// values to initialize in a weight matrix remapping - /// - /// This functionality can be used to remap both row vocabularies (typically, - /// features) and column vocabularies (typically, classes) from TensorFlow - /// checkpoints. Note that the partitioning logic relies on contiguous vocabularies - /// corresponding to div-partitioned variables. Moreover, the underlying remapping - /// uses an IndexTable (as opposed to an inexact CuckooTable), so client code should - /// use the corresponding index_table_from_file() as the FeatureColumn framework - /// does (as opposed to tf.feature_to_id(), which uses a CuckooTable). - /// - /// - Parameters: - /// - new_vocab_file: Path to the new vocab file. - /// - old_vocab_file: Path to the old vocab file. - /// - /// - Attrs: - /// - new_vocab_offset: How many entries into the new vocab file to start reading. - /// - num_new_vocab: Number of entries in the new vocab file to remap. - /// - old_vocab_size: Number of entries in the old vocab file to consider. If -1, - /// use the entire old vocabulary. - /// - /// - Outputs: - /// - remapping: A Tensor of length num_new_vocab where the element at index i - /// is equal to the old ID that maps to the new ID i. This element is -1 for any - /// new ID that is not found in the old vocabulary. - /// - num_present: Number of new vocab entries found in old vocab. 
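The remapping rule described in the doc comment above can be reproduced in a few lines of plain Swift. This is a sketch of the semantics only, using the doc comment's own example, not of the op's implementation (which reads the vocabularies from files):

```swift
// Sketch: the vocabulary-remapping semantics, on the doc comment's example.
let newVocab = ["f0", "f1", "f2", "f3"]
let oldVocab = ["f1", "f0", "f3"]
let newVocabOffset = 1
let numNewVocab = 3

// Each new-vocab entry maps to its row in the old vocabulary, or -1 if absent.
let remapping = newVocab[newVocabOffset..<(newVocabOffset + numNewVocab)].map {
  Int64(oldVocab.firstIndex(of: $0) ?? -1)
}
let numPresent = remapping.filter { $0 != -1 }.count

print(remapping)   // [0, -1, 2]
print(numPresent)  // 2
```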
- @inlinable @inline(__always) - public static func generateVocabRemapping( - newVocabFile: StringTensor, - oldVocabFile: StringTensor, - newVocabOffset: Int64, - numNewVocab: Int64, - oldVocabSize: Int64 = -1 - ) -> (remapping: Tensor, numPresent: Tensor) { - _RawTFEager.generateVocabRemapping( - newVocabFile: newVocabFile, oldVocabFile: oldVocabFile, newVocabOffset: newVocabOffset, - numNewVocab: numNewVocab, oldVocabSize: oldVocabSize) - } - - /// Creates a dataset that invokes a function to generate elements. - @inlinable @inline(__always) - public static func generatorDataset< - InitfuncIn: TensorGroup, - InitfuncOut: TensorGroup, - NextfuncIn: TensorGroup, - NextfuncOut: TensorGroup, - FinalizefuncIn: TensorGroup, - FinalizefuncOut: TensorGroup, - TinitFuncArgs: TensorArrayProtocol, - TnextFuncArgs: TensorArrayProtocol, - TfinalizeFuncArgs: TensorArrayProtocol - >( - initFuncOtherArgs: TinitFuncArgs, - nextFuncOtherArgs: TnextFuncArgs, - finalizeFuncOtherArgs: TfinalizeFuncArgs, - initFunc: (InitfuncIn) -> InitfuncOut, - nextFunc: (NextfuncIn) -> NextfuncOut, - finalizeFunc: (FinalizefuncIn) -> FinalizefuncOut, - outputTypes: [TensorDataType], - outputShapes: [TensorShape?] - ) -> VariantHandle { - _RawTFEager.generatorDataset( - initFuncOtherArgs: initFuncOtherArgs, nextFuncOtherArgs: nextFuncOtherArgs, - finalizeFuncOtherArgs: finalizeFuncOtherArgs, initFunc: initFunc, nextFunc: nextFunc, - finalizeFunc: finalizeFunc, outputTypes: outputTypes, outputShapes: outputShapes) - } - - /// Returns calibration data for the given resource name - @inlinable @inline(__always) - public static func getCalibrationDataOp( - resourceName: StringTensor - ) -> StringTensor { - _RawTFEager.getCalibrationDataOp(resourceName: resourceName) - } - - /// Store the input tensor in the state of the current session. - /// - /// - Parameter value: The tensor to be stored. - /// - /// - Output handle: The handle for the tensor stored in the session state, represented - /// as a string. - @inlinable @inline(__always) - public static func getSessionHandle( - value: Tensor - ) -> StringTensor { - _RawTFEager.getSessionHandle(value: value) - } - - /// Store the input tensor in the state of the current session. - /// - /// - Parameter value: The tensor to be stored. - /// - /// - Output handle: The handle for the tensor stored in the session state, represented - /// as a ResourceHandle object. - @inlinable @inline(__always) - public static func getSessionHandleV2( - value: Tensor - ) -> ResourceHandle { - _RawTFEager.getSessionHandleV2(value: value) - } - - /// Get the value of the tensor specified by its handle. - /// - /// - Parameter handle: The handle for a tensor stored in the session state. - /// - /// - Attr dtype: The type of the output value. - /// - /// - Output value: The tensor for the given handle. - @inlinable @inline(__always) - public static func getSessionTensor( - handle: StringTensor - ) -> Tensor { - _RawTFEager.getSessionTensor(handle: handle) - } - - @inlinable @inline(__always) - public static func graphDefVersion() -> Tensor { - _RawTFEager.graphDefVersion() - } - - /// Returns the truth value of (x > y) element-wise. - /// - /// *NOTE*: `Greater` supports broadcasting. 
More about broadcasting - /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) - /// - /// Example: - /// - /// ```python - /// x = tf.constant([5, 4, 6]) - /// y = tf.constant([5, 2, 5]) - /// tf.math.greater(x, y) ==> [False, True, True] - /// - /// x = tf.constant([5, 4, 6]) - /// y = tf.constant([5]) - /// tf.math.greater(x, y) ==> [False, False, True] - /// ``` - @inlinable @inline(__always) - public static func greater( - _ x: Tensor, - _ y: Tensor - ) -> Tensor { - switch commonBackend(x.handle.backend, y.handle.backend) { - case .XLA: - return _RawXLA.greater(x, y) - case .TF_EAGER: - return _RawTFEager.greater(x, y) - } - - } - - /// Returns the truth value of (x >= y) element-wise. - /// - /// *NOTE*: `GreaterEqual` supports broadcasting. More about broadcasting - /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) - /// - /// Example: - /// - /// ```python - /// x = tf.constant([5, 4, 6, 7]) - /// y = tf.constant([5, 2, 5, 10]) - /// tf.math.greater_equal(x, y) ==> [True, True, True, False] - /// - /// x = tf.constant([5, 4, 6, 7]) - /// y = tf.constant([5]) - /// tf.math.greater_equal(x, y) ==> [True, False, True, True] - /// ``` - @inlinable @inline(__always) - public static func greaterEqual( - _ x: Tensor, - _ y: Tensor - ) -> Tensor { - switch commonBackend(x.handle.backend, y.handle.backend) { - case .XLA: - return _RawXLA.greaterEqual(x, y) - case .TF_EAGER: - return _RawTFEager.greaterEqual(x, y) - } - - } - - /// Creates a dataset that computes a group-by on `input_dataset`. - /// - /// Creates a dataset that computes a group-by on `input_dataset`. - /// - /// - Parameters: - /// - input_dataset: A variant tensor representing the input dataset. - /// - key_func_other_arguments: A list of tensors, typically values that were captured when - /// building a closure for `key_func`. - /// - init_func_other_arguments: A list of tensors, typically values that were captured when - /// building a closure for `init_func`. - /// - reduce_func_other_arguments: A list of tensors, typically values that were captured when - /// building a closure for `reduce_func`. - /// - finalize_func_other_arguments: A list of tensors, typically values that were captured when - /// building a closure for `finalize_func`. - /// - /// - Attrs: - /// - key_func: A function mapping an element of `input_dataset`, concatenated - /// with `key_func_other_arguments` to a scalar value of type DT_INT64. - /// - init_func: A function mapping a key of type DT_INT64, concatenated with - /// `init_func_other_arguments` to the initial reducer state. - /// - reduce_func: A function mapping the current reducer state and an element of `input_dataset`, - /// concatenated with `reduce_func_other_arguments` to a new reducer state. - /// - finalize_func: A function mapping the final reducer state to an output element. 
- @inlinable @inline(__always) - public static func groupByReducerDataset< - KeyfuncIn: TensorGroup, - KeyfuncOut: TensorGroup, - InitfuncIn: TensorGroup, - InitfuncOut: TensorGroup, - ReducefuncIn: TensorGroup, - ReducefuncOut: TensorGroup, - FinalizefuncIn: TensorGroup, - FinalizefuncOut: TensorGroup, - TkeyFuncOtherArguments: TensorArrayProtocol, - TinitFuncOtherArguments: TensorArrayProtocol, - TreduceFuncOtherArguments: TensorArrayProtocol, - TfinalizeFuncOtherArguments: TensorArrayProtocol - >( - inputDataset: VariantHandle, - keyFuncOtherArguments: TkeyFuncOtherArguments, - initFuncOtherArguments: TinitFuncOtherArguments, - reduceFuncOtherArguments: TreduceFuncOtherArguments, - finalizeFuncOtherArguments: TfinalizeFuncOtherArguments, - keyFunc: (KeyfuncIn) -> KeyfuncOut, - initFunc: (InitfuncIn) -> InitfuncOut, - reduceFunc: (ReducefuncIn) -> ReducefuncOut, - finalizeFunc: (FinalizefuncIn) -> FinalizefuncOut, - outputTypes: [TensorDataType], - outputShapes: [TensorShape?] - ) -> VariantHandle { - _RawTFEager.groupByReducerDataset( - inputDataset: inputDataset, keyFuncOtherArguments: keyFuncOtherArguments, - initFuncOtherArguments: initFuncOtherArguments, - reduceFuncOtherArguments: reduceFuncOtherArguments, - finalizeFuncOtherArguments: finalizeFuncOtherArguments, keyFunc: keyFunc, - initFunc: initFunc, reduceFunc: reduceFunc, finalizeFunc: finalizeFunc, - outputTypes: outputTypes, outputShapes: outputShapes) - } - - /// Creates a dataset that computes a windowed group-by on `input_dataset`. - /// - /// // TODO(mrry): Support non-int64 keys. - /// - /// - Attr key_func: A function mapping an element of `input_dataset`, concatenated - /// with `key_func_other_arguments` to a scalar value of type DT_INT64. - @inlinable @inline(__always) - public static func groupByWindowDataset< - KeyfuncIn: TensorGroup, - KeyfuncOut: TensorGroup, - ReducefuncIn: TensorGroup, - ReducefuncOut: TensorGroup, - WindowsizefuncIn: TensorGroup, - WindowsizefuncOut: TensorGroup, - TkeyFuncOtherArguments: TensorArrayProtocol, - TreduceFuncOtherArguments: TensorArrayProtocol, - TwindowSizeFuncOtherArguments: TensorArrayProtocol - >( - inputDataset: VariantHandle, - keyFuncOtherArguments: TkeyFuncOtherArguments, - reduceFuncOtherArguments: TreduceFuncOtherArguments, - windowSizeFuncOtherArguments: TwindowSizeFuncOtherArguments, - keyFunc: (KeyfuncIn) -> KeyfuncOut, - reduceFunc: (ReducefuncIn) -> ReducefuncOut, - windowSizeFunc: (WindowsizefuncIn) -> WindowsizefuncOut, - outputTypes: [TensorDataType], - outputShapes: [TensorShape?] - ) -> VariantHandle { - _RawTFEager.groupByWindowDataset( - inputDataset: inputDataset, keyFuncOtherArguments: keyFuncOtherArguments, - reduceFuncOtherArguments: reduceFuncOtherArguments, - windowSizeFuncOtherArguments: windowSizeFuncOtherArguments, keyFunc: keyFunc, - reduceFunc: reduceFunc, windowSizeFunc: windowSizeFunc, outputTypes: outputTypes, - outputShapes: outputShapes) - } - - /// Gives a guarantee to the TF runtime that the input tensor is a constant. - /// - /// The runtime is then free to make optimizations based on this. - /// - /// Only accepts value typed tensors as inputs and rejects resource variable handles - /// as input. - /// - /// Returns the input tensor without modification. 
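A minimal Swift sketch of calling this binding (illustrative tensor values; assumes `import TensorFlow` from swift-apis):

```swift
import TensorFlow

// Hypothetical values: hint to the runtime that `weights` will never change,
// so it may constant-fold ops that consume it. The result is value-identical.
let weights = Tensor<Float>([0.1, 0.2, 0.3])
let frozen = _Raw.guaranteeConst(weights)
```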
-    @inlinable @inline(__always)
-    public static func guaranteeConst<T: TensorFlowScalar>(
-      _ input: Tensor<T>
-    ) -> Tensor<T> {
-      switch input.handle.backend {
-      case .XLA:
-        let output_device = input.device
-        let input = Tensor(copying: input, to: .defaultTFEager)
-        return Tensor(copying: _RawTFEager.guaranteeConst(input), to: output_device)
-      case .TF_EAGER:
-        return _RawTFEager.guaranteeConst(input)
-      }
-
-    }
-
-    /// Convert one or more images from HSV to RGB.
-    ///
-    /// Outputs a tensor of the same shape as the `images` tensor, containing the RGB
-    /// value of the pixels. The output is only well defined if the values in `images`
-    /// are in `[0,1]`.
-    ///
-    /// See `rgb_to_hsv` for a description of the HSV encoding.
-    ///
-    /// - Parameter images: 1-D or higher rank. HSV data to convert. Last dimension must be size 3.
-    ///
-    /// - Output output: `images` converted to RGB.
-    @inlinable @inline(__always)
-    public static func hSVToRGB<T: FloatingPoint & TensorFlowScalar>(
-      images: Tensor<T>
-    ) -> Tensor<T> {
-      switch images.handle.backend {
-      case .XLA:
-        let output_device = images.device
-        let images = Tensor(copying: images, to: .defaultTFEager)
-        return Tensor(copying: _RawTFEager.hSVToRGB(images: images), to: output_device)
-      case .TF_EAGER:
-        return _RawTFEager.hSVToRGB(images: images)
-      }
-
-    }
-
-    /// Creates a non-initialized hash table.
-    ///
-    /// This op creates a hash table, specifying the type of its keys and values.
-    /// Before using the table you will have to initialize it. After initialization the
-    /// table will be immutable.
-    ///
-    /// - Attrs:
-    ///   - container: If non-empty, this table is placed in the given container.
-    ///     Otherwise, a default container is used.
-    ///   - shared_name: If non-empty, this table is shared under the given name across
-    ///     multiple sessions.
-    ///   - use_node_name_sharing: If true and shared_name is empty, the table is shared
-    ///     using the node name.
-    ///   - key_dtype: Type of the table keys.
-    ///   - value_dtype: Type of the table values.
-    ///
-    /// - Output table_handle: Handle to a table.
-    @inlinable @inline(__always)
-    public static func hashTableV2(
-      container: String,
-      sharedName: String,
-      useNodeNameSharing: Bool = false,
-      keyDtype: TensorDataType,
-      valueDtype: TensorDataType
-    ) -> ResourceHandle {
-      _RawTFEager.hashTableV2(
-        container: container, sharedName: sharedName, useNodeNameSharing: useNodeNameSharing,
-        keyDtype: keyDtype, valueDtype: valueDtype)
-    }
-
-    /// Return histogram of values.
-    ///
-    /// Given the tensor `values`, this operation returns a rank 1 histogram counting
-    /// the number of entries in `values` that fall into every bin. The bins are
-    /// equal width and determined by the arguments `value_range` and `nbins`.
-    ///
-    /// ```python
-    /// # Bins will be:  (-inf, 1), [1, 2), [2, 3), [3, 4), [4, inf)
-    /// nbins = 5
-    /// value_range = [0.0, 5.0]
-    /// new_values = [-1.0, 0.0, 1.5, 2.0, 5.0, 15]
-    ///
-    /// with tf.get_default_session() as sess:
-    ///   hist = tf.histogram_fixed_width(new_values, value_range, nbins=5)
-    ///   variables.global_variables_initializer().run()
-    ///   sess.run(hist) => [2, 1, 1, 0, 2]
-    /// ```
-    ///
-    /// - Parameters:
-    ///   - values: Numeric `Tensor`.
-    ///   - value_range: Shape [2] `Tensor` of same `dtype` as `values`.
-    ///     values <= value_range[0] will be mapped to hist[0],
-    ///     values >= value_range[1] will be mapped to hist[-1].
-    ///   - nbins: Scalar `int32 Tensor`. Number of histogram bins.
-    ///
-    /// - Output out: A 1-D `Tensor` holding histogram of values.
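The Python example above translates to the Swift binding roughly as follows (a sketch with the same illustrative values; assumes `import TensorFlow`):

```swift
import TensorFlow

// Five equal-width bins over [0, 5): (-inf, 1), [1, 2), [2, 3), [3, 4), [4, inf).
let values = Tensor<Float>([-1.0, 0.0, 1.5, 2.0, 5.0, 15.0])
let valueRange = Tensor<Float>([0.0, 5.0])
let hist: Tensor<Int32> = _Raw.histogramFixedWidth(
  values, valueRange: valueRange, nbins: Tensor<Int32>(5))
// hist == [2, 1, 1, 0, 2]
```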
- @inlinable @inline(__always) - public static func histogramFixedWidth< - T: TensorFlowNumeric, - Dtype: TensorFlowIndex - >( - _ values: Tensor, - valueRange: Tensor, - nbins: Tensor - ) -> Tensor { - switch commonBackend( - commonBackend(values.handle.backend, valueRange.handle.backend), nbins.handle.backend) - { - case .XLA: - let output_device = nbins.device - let values = Tensor(copying: values, to: .defaultTFEager) - let valueRange = Tensor(copying: valueRange, to: .defaultTFEager) - let nbins = Tensor(copying: nbins, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.histogramFixedWidth(values, valueRange: valueRange, nbins: nbins), - to: output_device) - case .TF_EAGER: - return _RawTFEager.histogramFixedWidth(values, valueRange: valueRange, nbins: nbins) - } - - } - - /// Outputs a `Summary` protocol buffer with a histogram. - /// - /// The generated - /// [`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto) - /// has one summary value containing a histogram for `values`. - /// - /// This op reports an `InvalidArgument` error if any value is not finite. - /// - /// - Parameters: - /// - tag: Scalar. Tag to use for the `Summary.Value`. - /// - values: Any shape. Values to use to build the histogram. - /// - /// - Output summary: Scalar. Serialized `Summary` protocol buffer. - @inlinable @inline(__always) - public static func histogramSummary( - tag: StringTensor, - _ values: Tensor - ) -> StringTensor { - _RawTFEager.histogramSummary(tag: tag, values) - } - - /// Inverse fast Fourier transform. - /// - /// Computes the inverse 1-dimensional discrete Fourier transform over the - /// inner-most dimension of `input`. - /// - /// - Parameter input: A complex tensor. - /// - /// - Output output: A complex tensor of the same shape as `input`. The inner-most - /// dimension of `input` is replaced with its inverse 1D Fourier transform. - /// - /// @compatibility(numpy) - /// Equivalent to np.fft.ifft - /// @end_compatibility - @inlinable @inline(__always) - public static func iFFT( - _ input: Tensor - ) -> Tensor { - switch input.handle.backend { - case .XLA: - let output_device = input.device - let input = Tensor(copying: input, to: .defaultTFEager) - return Tensor(copying: _RawTFEager.iFFT(input), to: output_device) - case .TF_EAGER: - return _RawTFEager.iFFT(input) - } - - } - - /// Inverse 2D fast Fourier transform. - /// - /// Computes the inverse 2-dimensional discrete Fourier transform over the - /// inner-most 2 dimensions of `input`. - /// - /// - Parameter input: A complex tensor. - /// - /// - Output output: A complex tensor of the same shape as `input`. The inner-most 2 - /// dimensions of `input` are replaced with their inverse 2D Fourier transform. - /// - /// @compatibility(numpy) - /// Equivalent to np.fft.ifft2 - /// @end_compatibility - @inlinable @inline(__always) - public static func iFFT2D( - _ input: Tensor - ) -> Tensor { - switch input.handle.backend { - case .XLA: - let output_device = input.device - let input = Tensor(copying: input, to: .defaultTFEager) - return Tensor(copying: _RawTFEager.iFFT2D(input), to: output_device) - case .TF_EAGER: - return _RawTFEager.iFFT2D(input) - } - - } - - /// Inverse 3D fast Fourier transform. - /// - /// Computes the inverse 3-dimensional discrete Fourier transform over the - /// inner-most 3 dimensions of `input`. - /// - /// - Parameter input: A complex tensor. - /// - /// - Output output: A complex tensor of the same shape as `input`. 
The inner-most 3 - /// dimensions of `input` are replaced with their inverse 3D Fourier transform. - /// - /// @compatibility(numpy) - /// Equivalent to np.fft.ifftn with 3 dimensions. - /// @end_compatibility - @inlinable @inline(__always) - public static func iFFT3D( - _ input: Tensor - ) -> Tensor { - switch input.handle.backend { - case .XLA: - let output_device = input.device - let input = Tensor(copying: input, to: .defaultTFEager) - return Tensor(copying: _RawTFEager.iFFT3D(input), to: output_device) - case .TF_EAGER: - return _RawTFEager.iFFT3D(input) - } - - } - - /// Inverse real-valued fast Fourier transform. - /// - /// Computes the inverse 1-dimensional discrete Fourier transform of a real-valued - /// signal over the inner-most dimension of `input`. - /// - /// The inner-most dimension of `input` is assumed to be the result of `RFFT`: the - /// `fft_length / 2 + 1` unique components of the DFT of a real-valued signal. If - /// `fft_length` is not provided, it is computed from the size of the inner-most - /// dimension of `input` (`fft_length = 2 * (inner - 1)`). If the FFT length used to - /// compute `input` is odd, it should be provided since it cannot be inferred - /// properly. - /// - /// Along the axis `IRFFT` is computed on, if `fft_length / 2 + 1` is smaller - /// than the corresponding dimension of `input`, the dimension is cropped. If it is - /// larger, the dimension is padded with zeros. - /// - /// - Parameters: - /// - input: A complex tensor. - /// - fft_length: An int32 tensor of shape [1]. The FFT length. - /// - /// - Output output: A float32 tensor of the same rank as `input`. The inner-most - /// dimension of `input` is replaced with the `fft_length` samples of its inverse - /// 1D Fourier transform. - /// - /// @compatibility(numpy) - /// Equivalent to np.fft.irfft - /// @end_compatibility - @inlinable @inline(__always) - public static func iRFFT< - Treal: FloatingPoint & TensorFlowScalar, - Tcomplex: TensorFlowScalar - >( - _ input: Tensor, - fftLength: Tensor - ) -> Tensor { - switch commonBackend(input.handle.backend, fftLength.handle.backend) { - case .XLA: - let output_device = fftLength.device - let input = Tensor(copying: input, to: .defaultTFEager) - let fftLength = Tensor(copying: fftLength, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.iRFFT(input, fftLength: fftLength), to: output_device) - case .TF_EAGER: - return _RawTFEager.iRFFT(input, fftLength: fftLength) - } - - } - - /// Inverse 2D real-valued fast Fourier transform. - /// - /// Computes the inverse 2-dimensional discrete Fourier transform of a real-valued - /// signal over the inner-most 2 dimensions of `input`. - /// - /// The inner-most 2 dimensions of `input` are assumed to be the result of `RFFT2D`: - /// The inner-most dimension contains the `fft_length / 2 + 1` unique components of - /// the DFT of a real-valued signal. If `fft_length` is not provided, it is computed - /// from the size of the inner-most 2 dimensions of `input`. If the FFT length used - /// to compute `input` is odd, it should be provided since it cannot be inferred - /// properly. - /// - /// Along each axis `IRFFT2D` is computed on, if `fft_length` (or - /// `fft_length / 2 + 1` for the inner-most dimension) is smaller than the - /// corresponding dimension of `input`, the dimension is cropped. If it is larger, - /// the dimension is padded with zeros. - /// - /// - Parameters: - /// - input: A complex tensor. - /// - fft_length: An int32 tensor of shape [2]. The FFT length for each dimension. 
-    ///
-    /// - Output output: A float32 tensor of the same rank as `input`. The inner-most 2
-    ///   dimensions of `input` are replaced with the `fft_length` samples of their
-    ///   inverse 2D Fourier transform.
-    ///
-    /// @compatibility(numpy)
-    /// Equivalent to np.fft.irfft2
-    /// @end_compatibility
-    @inlinable @inline(__always)
-    public static func iRFFT2D<
-      Treal: FloatingPoint & TensorFlowScalar,
-      Tcomplex: TensorFlowScalar
-    >(
-      _ input: Tensor<Tcomplex>,
-      fftLength: Tensor<Int32>
-    ) -> Tensor<Treal> {
-      switch commonBackend(input.handle.backend, fftLength.handle.backend) {
-      case .XLA:
-        let output_device = fftLength.device
-        let input = Tensor(copying: input, to: .defaultTFEager)
-        let fftLength = Tensor(copying: fftLength, to: .defaultTFEager)
-        return Tensor(
-          copying: _RawTFEager.iRFFT2D(input, fftLength: fftLength), to: output_device)
-      case .TF_EAGER:
-        return _RawTFEager.iRFFT2D(input, fftLength: fftLength)
-      }
-
-    }
-
-    /// Inverse 3D real-valued fast Fourier transform.
-    ///
-    /// Computes the inverse 3-dimensional discrete Fourier transform of a real-valued
-    /// signal over the inner-most 3 dimensions of `input`.
-    ///
-    /// The inner-most 3 dimensions of `input` are assumed to be the result of `RFFT3D`:
-    /// The inner-most dimension contains the `fft_length / 2 + 1` unique components of
-    /// the DFT of a real-valued signal. If `fft_length` is not provided, it is computed
-    /// from the size of the inner-most 3 dimensions of `input`. If the FFT length used
-    /// to compute `input` is odd, it should be provided since it cannot be inferred
-    /// properly.
-    ///
-    /// Along each axis `IRFFT3D` is computed on, if `fft_length` (or
-    /// `fft_length / 2 + 1` for the inner-most dimension) is smaller than the
-    /// corresponding dimension of `input`, the dimension is cropped. If it is larger,
-    /// the dimension is padded with zeros.
-    ///
-    /// - Parameters:
-    ///   - input: A complex tensor.
-    ///   - fft_length: An int32 tensor of shape [3]. The FFT length for each dimension.
-    ///
-    /// - Output output: A float32 tensor of the same rank as `input`. The inner-most 3
-    ///   dimensions of `input` are replaced with the `fft_length` samples of their
-    ///   inverse 3D real Fourier transform.
-    ///
-    /// @compatibility(numpy)
-    /// Equivalent to np.fft.irfftn with 3 dimensions.
-    /// @end_compatibility
-    @inlinable @inline(__always)
-    public static func iRFFT3D<
-      Treal: FloatingPoint & TensorFlowScalar,
-      Tcomplex: TensorFlowScalar
-    >(
-      _ input: Tensor<Tcomplex>,
-      fftLength: Tensor<Int32>
-    ) -> Tensor<Treal> {
-      switch commonBackend(input.handle.backend, fftLength.handle.backend) {
-      case .XLA:
-        let output_device = fftLength.device
-        let input = Tensor(copying: input, to: .defaultTFEager)
-        let fftLength = Tensor(copying: fftLength, to: .defaultTFEager)
-        return Tensor(
-          copying: _RawTFEager.iRFFT3D(input, fftLength: fftLength), to: output_device)
-      case .TF_EAGER:
-        return _RawTFEager.iRFFT3D(input, fftLength: fftLength)
-      }
-
-    }
-
-    /// Return a tensor with the same shape and contents as the input tensor or value.
-    @inlinable @inline(__always)
-    public static func identity<T: TensorFlowScalar>(
-      _ input: Tensor<T>
-    ) -> Tensor<T> {
-      switch input.handle.backend {
-      case .XLA:
-        let output_device = input.device
-        let input = Tensor(copying: input, to: .defaultTFEager)
-        return Tensor(copying: _RawTFEager.identity(input), to: output_device)
-      case .TF_EAGER:
-        return _RawTFEager.identity(input)
-      }
-
-    }
-
-    /// Returns a list of tensors with the same shapes and contents as the input
-    ///
-    /// tensors.
- /// - /// This op can be used to override the gradient for complicated functions. For - /// example, suppose y = f(x) and we wish to apply a custom function g for backprop - /// such that dx = g(dy). In Python, - /// - /// ```python - /// with tf.get_default_graph().gradient_override_map( - /// {'IdentityN': 'OverrideGradientWithG'}): - /// y, _ = identity_n([f(x), x]) - /// - /// @tf.RegisterGradient('OverrideGradientWithG') - /// def ApplyG(op, dy, _): - /// return [None, g(dy)] # Do not backprop to f(x). - /// ``` - @inlinable @inline(__always) - public static func identityN( - _ input: T - ) -> T { - _RawTFEager.identityN(input) - } - - /// A Reader that outputs the queued work as both the key and value. - /// - /// To use, enqueue strings in a Queue. ReaderRead will take the front - /// work string and output (work, work). - /// - /// - Attrs: - /// - container: If non-empty, this reader is placed in the given container. - /// Otherwise, a default container is used. - /// - shared_name: If non-empty, this reader is named in the given bucket - /// with this shared_name. Otherwise, the node name is used instead. - /// - /// - Output reader_handle: The handle to reference the Reader. - @inlinable @inline(__always) - public static func identityReaderV2( - container: String, - sharedName: String - ) -> ResourceHandle { - _RawTFEager.identityReaderV2(container: container, sharedName: sharedName) - } - - /// output = cond ? then_branch(input) : else_branch(input) - /// - /// - Parameters: - /// - cond: A Tensor. If the tensor is a scalar of non-boolean type, the - /// scalar is converted to a boolean according to the - /// following rule: if the scalar is a numerical value, non-zero means - /// `True` and zero means False; if the scalar is a string, non-empty - /// means `True` and empty means `False`. If the tensor is not a scalar, - /// being empty means False and being non-empty means True. - /// - input: A list of input tensors. - /// - /// - Attrs: - /// - Tin: A list of input types. - /// - Tout: A list of output types. - /// - then_branch: A function that takes 'inputs' and returns a list of tensors, whose - /// types are the same as what else_branch returns. - /// - else_branch: A function that takes 'inputs' and returns a list of tensors, whose - /// types are the same as what then_branch returns. - /// - /// - Output output: A list of return values. - @inlinable @inline(__always) - public static func if_< - Tcond: TensorFlowScalar, - Tin: TensorArrayProtocol, - Tout: TensorGroup, - ThenbranchIn: TensorGroup, - ThenbranchOut: TensorGroup, - ElsebranchIn: TensorGroup, - ElsebranchOut: TensorGroup - >( - cond: Tensor, - _ input: Tin, - thenBranch: (ThenbranchIn) -> ThenbranchOut, - elseBranch: (ElsebranchIn) -> ElsebranchOut, - outputShapes: [TensorShape?] - ) -> Tout { - _RawTFEager.if_( - cond: cond, input, thenBranch: thenBranch, elseBranch: elseBranch, - outputShapes: outputShapes) - } - - /// Compute the lower regularized incomplete Gamma function `P(a, x)`. - /// - /// The lower regularized incomplete Gamma function is defined as: - /// - /// - /// \\(P(a, x) = gamma(a, x) / Gamma(a) = 1 - Q(a, x)\\) - /// - /// where - /// - /// \\(gamma(a, x) = \\int_{0}^{x} t^{a-1} exp(-t) dt\\) - /// - /// is the lower incomplete Gamma function. - /// - /// Note, above `Q(a, x)` (`Igammac`) is the upper regularized complete - /// Gamma function. 
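As a sanity check on the definition, `P(a, x)` and `Q(a, x)` (from `igammac` below) should sum to one elementwise; a minimal sketch with illustrative values, assuming `import TensorFlow`:

```swift
import TensorFlow

let a = Tensor<Float>([0.5, 1.0, 2.0])
let x = Tensor<Float>([0.5, 1.0, 2.0])
let p = _Raw.igamma(a, x)   // lower regularized incomplete Gamma, P(a, x)
let q = _Raw.igammac(a, x)  // upper regularized incomplete Gamma, Q(a, x)
// p + q ≈ [1, 1, 1] elementwise, since P(a, x) + Q(a, x) = 1.
```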
-    @inlinable @inline(__always)
-    public static func igamma<T: FloatingPoint & TensorFlowScalar>(
-      _ a: Tensor<T>,
-      _ x: Tensor<T>
-    ) -> Tensor<T> {
-      switch commonBackend(a.handle.backend, x.handle.backend) {
-      case .XLA:
-        let output_device = x.device
-        let a = Tensor(copying: a, to: .defaultTFEager)
-        let x = Tensor(copying: x, to: .defaultTFEager)
-        return Tensor(copying: _RawTFEager.igamma(a, x), to: output_device)
-      case .TF_EAGER:
-        return _RawTFEager.igamma(a, x)
-      }
-
-    }
-
-    /// Computes the gradient of `igamma(a, x)` wrt `a`.
-    @inlinable @inline(__always)
-    public static func igammaGradA<T: FloatingPoint & TensorFlowScalar>(
-      _ a: Tensor<T>,
-      _ x: Tensor<T>
-    ) -> Tensor<T> {
-      switch commonBackend(a.handle.backend, x.handle.backend) {
-      case .XLA:
-        let output_device = x.device
-        let a = Tensor(copying: a, to: .defaultTFEager)
-        let x = Tensor(copying: x, to: .defaultTFEager)
-        return Tensor(copying: _RawTFEager.igammaGradA(a, x), to: output_device)
-      case .TF_EAGER:
-        return _RawTFEager.igammaGradA(a, x)
-      }
-
-    }
-
-    /// Compute the upper regularized incomplete Gamma function `Q(a, x)`.
-    ///
-    /// The upper regularized incomplete Gamma function is defined as:
-    ///
-    /// \\(Q(a, x) = Gamma(a, x) / Gamma(a) = 1 - P(a, x)\\)
-    ///
-    /// where
-    ///
-    /// \\(Gamma(a, x) = \\int_{x}^{\\infty} t^{a-1} exp(-t) dt\\)
-    ///
-    /// is the upper incomplete Gamma function.
-    ///
-    /// Note, above `P(a, x)` (`Igamma`) is the lower regularized complete
-    /// Gamma function.
-    @inlinable @inline(__always)
-    public static func igammac<T: FloatingPoint & TensorFlowScalar>(
-      _ a: Tensor<T>,
-      _ x: Tensor<T>
-    ) -> Tensor<T> {
-      switch commonBackend(a.handle.backend, x.handle.backend) {
-      case .XLA:
-        let output_device = x.device
-        let a = Tensor(copying: a, to: .defaultTFEager)
-        let x = Tensor(copying: x, to: .defaultTFEager)
-        return Tensor(copying: _RawTFEager.igammac(a, x), to: output_device)
-      case .TF_EAGER:
-        return _RawTFEager.igammac(a, x)
-      }
-
-    }
-
-    /// Creates a dataset that contains the elements of `input_dataset` ignoring errors.
-    @inlinable @inline(__always)
-    public static func ignoreErrorsDataset(
-      inputDataset: VariantHandle,
-      outputTypes: [TensorDataType],
-      outputShapes: [TensorShape?]
-    ) -> VariantHandle {
-      _RawTFEager.ignoreErrorsDataset(
-        inputDataset: inputDataset, outputTypes: outputTypes, outputShapes: outputShapes)
-    }
-
-    /// Returns the imaginary part of a complex number.
-    ///
-    /// Given a tensor `input` of complex numbers, this operation returns a tensor of
-    /// type `float` that is the imaginary part of each element in `input`. All
-    /// elements in `input` must be complex numbers of the form \\(a + bj\\), where *a*
-    /// is the real part and *b* is the imaginary part returned by this operation.
-    ///
-    /// For example:
-    ///
-    /// ```
-    /// # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
-    /// tf.imag(input) ==> [4.75, 5.75]
-    /// ```
-    @inlinable @inline(__always)
-    public static func imag<
-      T: TensorFlowScalar,
-      Tout: FloatingPoint & TensorFlowScalar
-    >(
-      _ input: Tensor<T>
-    ) -> Tensor<Tout> {
-      switch input.handle.backend {
-      case .XLA:
-        let output_device = input.device
-        let input = Tensor(copying: input, to: .defaultTFEager)
-        return Tensor(copying: _RawTFEager.imag(input), to: output_device)
-      case .TF_EAGER:
-        return _RawTFEager.imag(input)
-      }
-
-    }
-
-    /// Returns immutable tensor from memory region.
-    ///
-    /// The current implementation memmaps the tensor from a file.
-    ///
-    /// - Attrs:
-    ///   - dtype: Type of the returned tensor.
-    ///   - shape: Shape of the returned tensor.
-    ///   - memory_region_name: Name of readonly memory region used by the tensor, see
-    ///     NewReadOnlyMemoryRegionFromFile in tensorflow::Env.
-    @inlinable @inline(__always)
-    public static func immutableConst<Dtype: TensorFlowScalar>(
-      shape: TensorShape?,
-      memoryRegionName: String
-    ) -> Tensor<Dtype> {
-      _RawTFEager.immutableConst(shape: shape, memoryRegionName: memoryRegionName)
-    }
-
-    @inlinable @inline(__always)
-    public static func importEvent(
-      writer: ResourceHandle,
-      event: StringTensor
-    ) {
-      _RawTFEager.importEvent(writer: writer, event: event)
+  }
+
+  /// Asserts that the given condition is true.
+  ///
+  /// If `condition` evaluates to false, print the list of tensors in `data`.
+  /// `summarize` determines how many entries of the tensors to print.
+  ///
+  /// - Parameters:
+  ///   - condition: The condition to evaluate.
+  ///   - data: The tensors to print out when condition is false.
+  ///
+  /// - Attr summarize: Print this many entries of each tensor.
+  @inlinable @inline(__always)
+  public static func assert<T: TensorArrayProtocol>(
+    condition: Tensor<Bool>,
+    data: T,
+    summarize: Int64 = 3
+  ) {
+    _RawTFEager.assert(condition: condition, data: data, summarize: summarize)
+  }
+
+  /// A transformation that asserts which transformations happen next.
+  ///
+  /// This transformation checks whether the camel-case names (i.e. "FlatMap", not
+  /// "flat_map") of the transformations following this transformation match the list
+  /// of names in the `transformations` argument. If there is a mismatch, the
+  /// transformation raises an exception.
+  ///
+  /// The check occurs when iterating over the contents of the dataset, which
+  /// means that the check happens *after* any static optimizations are applied
+  /// to the dataset graph.
+  ///
+  /// - Parameters:
+  ///   - input_dataset: A variant tensor representing the input dataset.
+  ///     `AssertNextDataset` passes through the outputs of its input dataset.
+  ///   - transformations: A `tf.string` vector `tf.Tensor` identifying the transformations that are
+  ///     expected to happen next.
+  @inlinable @inline(__always)
+  public static func assertNextDataset(
+    inputDataset: VariantHandle,
+    transformations: StringTensor,
+    outputTypes: [TensorDataType],
+    outputShapes: [TensorShape?]
+  ) -> VariantHandle {
+    _RawTFEager.assertNextDataset(
+      inputDataset: inputDataset, transformations: transformations, outputTypes: outputTypes,
+      outputShapes: outputShapes)
+  }
+
+  /// Adds a value to the current value of a variable.
+  ///
+  /// Any ReadVariableOp with a control dependency on this op is guaranteed to
+  /// see the incremented value or a subsequent newer one.
+  ///
+  /// - Parameters:
+  ///   - resource: handle to the resource in which to store the variable.
+  ///   - value: the value by which the variable will be incremented.
+  ///
+  /// - Attr dtype: the dtype of the value.
+  @inlinable @inline(__always)
+  public static func assignAddVariableOp<Dtype: TensorFlowScalar>(
+    resource: ResourceHandle,
+    value: Tensor<Dtype>
+  ) {
+    _RawTFEager.assignAddVariableOp(resource: resource, value: value)
+  }
+
+  /// Subtracts a value from the current value of a variable.
+  ///
+  /// Any ReadVariableOp with a control dependency on this op is guaranteed to
+  /// see the decremented value or a subsequent newer one.
+  ///
+  /// - Parameters:
+  ///   - resource: handle to the resource in which to store the variable.
+  ///   - value: the value by which the variable will be decremented.
+  ///
+  /// - Attr dtype: the dtype of the value.
+  @inlinable @inline(__always)
+  public static func assignSubVariableOp<Dtype: TensorFlowScalar>(
+    resource: ResourceHandle,
+    value: Tensor<Dtype>
+  ) {
+    _RawTFEager.assignSubVariableOp(resource: resource, value: value)
+  }
+
+  /// Assigns a new value to a variable.
+  ///
+  /// Any ReadVariableOp with a control dependency on this op is guaranteed to return
+  /// this value or a subsequent newer value of the variable.
+  ///
+  /// - Parameters:
+  ///   - resource: handle to the resource in which to store the variable.
+  ///   - value: the value to set the new tensor to use.
+  ///
+  /// - Attr dtype: the dtype of the value.
+  @inlinable @inline(__always)
+  public static func assignVariableOp<Dtype: TensorFlowScalar>(
+    resource: ResourceHandle,
+    value: Tensor<Dtype>
+  ) {
+    _RawTFEager.assignVariableOp(resource: resource, value: value)
+  }
+
+  /// Computes the trigonometric inverse tangent of x element-wise.
+  ///
+  /// The `tf.math.atan` operation returns the inverse of `tf.math.tan`, such that
+  /// if `y = tf.math.tan(x)` then, `x = tf.math.atan(y)`.
+  ///
+  /// **Note**: The output of `tf.math.atan` will lie within the invertible range
+  /// of tan, i.e (-pi/2, pi/2).
+  ///
+  /// For example:
+  ///
+  /// ```python
+  /// # Note: [1.047, 0.785] ~= [(pi/3), (pi/4)]
+  /// x = tf.constant([1.047, 0.785])
+  /// y = tf.math.tan(x) # [1.731261, 0.99920404]
+  ///
+  /// tf.math.atan(y) # [1.047, 0.785] = x
+  /// ```
+  ///
+  @inlinable @inline(__always)
+  public static func atan<T: FloatingPoint & TensorFlowScalar>(
+    _ x: Tensor<T>
+  ) -> Tensor<T> {
+    switch x.handle.backend {
+    case .XLA:
+      return _RawXLA.atan(x)
+    case .TF_EAGER:
+      return _RawTFEager.atan(x)
     }
-    @inlinable @inline(__always)
-    public static func inPolymorphicTwice<T: TensorFlowScalar>(
-      _ a: [Tensor<T>],
-      _ b: [Tensor<T>]
-    ) {
-      _RawTFEager.inPolymorphicTwice(a, b)
-    }
-
-    /// Says whether the targets are in the top `K` predictions.
-    ///
-    /// This outputs a `batch_size` bool array, an entry `out[i]` is `true` if the
-    /// prediction for the target class is among the top `k` predictions among
-    /// all predictions for example `i`. Note that the behavior of `InTopK` differs
-    /// from the `TopK` op in its handling of ties; if multiple classes have the
-    /// same prediction value and straddle the top-`k` boundary, all of those
-    /// classes are considered to be in the top `k`.
-    ///
-    /// More formally, let
-    ///
-    ///   \\(predictions_i\\) be the predictions for all classes for example `i`,
-    ///   \\(targets_i\\) be the target class for example `i`,
-    ///   \\(out_i\\) be the output for example `i`,
-    ///
-    /// $$out_i = predictions_{i, targets_i} \in TopKIncludingTies(predictions_i)$$
-    ///
-    /// - Parameters:
-    ///   - predictions: A `batch_size` x `classes` tensor.
-    ///   - targets: A `batch_size` vector of class ids.
-    ///
-    /// - Attr k: Number of top elements to look at for computing precision.
-    ///
-    /// - Output precision: Computed Precision at `k` as a `bool Tensor`.
-    @inlinable @inline(__always)
-    public static func inTopK<T: TensorFlowIndex>(
-      predictions: Tensor<Float>,
-      targets: Tensor<T>,
-      k: Int64
-    ) -> Tensor<Bool> {
-      switch commonBackend(predictions.handle.backend, targets.handle.backend) {
-      case .XLA:
-        let output_device = targets.device
-        let predictions = Tensor(copying: predictions, to: .defaultTFEager)
-        let targets = Tensor(copying: targets, to: .defaultTFEager)
-        return Tensor(
-          copying: _RawTFEager.inTopK(predictions: predictions, targets: targets, k: k),
-          to: output_device)
-      case .TF_EAGER:
-        return _RawTFEager.inTopK(predictions: predictions, targets: targets, k: k)
-      }
-
-    }
-
-    /// Says whether the targets are in the top `K` predictions.
- /// - /// This outputs a `batch_size` bool array, an entry `out[i]` is `true` if the - /// prediction for the target class is among the top `k` predictions among - /// all predictions for example `i`. Note that the behavior of `InTopK` differs - /// from the `TopK` op in its handling of ties; if multiple classes have the - /// same prediction value and straddle the top-`k` boundary, all of those - /// classes are considered to be in the top `k`. - /// - /// More formally, let - /// - /// \\(predictions_i\\) be the predictions for all classes for example `i`, - /// \\(targets_i\\) be the target class for example `i`, - /// \\(out_i\\) be the output for example `i`, - /// - /// $$out_i = predictions_{i, targets_i} \in TopKIncludingTies(predictions_i)$$ - /// - /// - Parameters: - /// - predictions: A `batch_size` x `classes` tensor. - /// - targets: A `batch_size` vector of class ids. - /// - k: Number of top elements to look at for computing precision. - /// - /// - Output precision: Computed precision at `k` as a `bool Tensor`. - @inlinable @inline(__always) - public static func inTopKV2( - predictions: Tensor, - targets: Tensor, - k: Tensor - ) -> Tensor { - switch commonBackend( - commonBackend(predictions.handle.backend, targets.handle.backend), k.handle.backend) - { - case .XLA: - let output_device = k.device - let predictions = Tensor(copying: predictions, to: .defaultTFEager) - let targets = Tensor(copying: targets, to: .defaultTFEager) - let k = Tensor(copying: k, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.inTopKV2(predictions: predictions, targets: targets, k: k), - to: output_device) - case .TF_EAGER: - return _RawTFEager.inTopKV2(predictions: predictions, targets: targets, k: k) - } - - } - - /// A placeholder op for a value that will be fed into the computation. - /// - /// - Attrs: - /// - dtype: The type of elements in the tensor. - /// - shape: The shape of the tensor. - /// - /// - Output output: A tensor that will be provided using the infeed mechanism. - @inlinable @inline(__always) - public static func infeedDequeue( - shape: TensorShape? - ) -> Tensor { - _RawTFEager.infeedDequeue(shape: shape) - } - - /// Fetches multiple values from infeed as an XLA tuple. - /// - /// - Attrs: - /// - dtypes: The element types of each element in `outputs`. - /// - shapes: The shapes of each tensor in `outputs`. - /// - /// - Output outputs: A list of tensors that will be provided using the infeed mechanism. - @inlinable @inline(__always) - public static func infeedDequeueTuple( - shapes: [TensorShape?] - ) -> Dtypes { - _RawTFEager.infeedDequeueTuple(shapes: shapes) - } - - /// An op which feeds a single Tensor value into the computation. - /// - /// - Parameter input: A tensor that will be provided using the infeed mechanism. - /// - /// - Attrs: - /// - dtype: The type of elements in the tensor. - /// - shape: The shape of the tensor. - /// - layout: A vector holding the requested layout in minor-to-major sequence. - /// If a layout attribute is passed, but its values are all -1, the layout will - /// be computed by the infeed operation. - /// - device_ordinal: The TPU device to use. This should be -1 when the Op - /// is running on a TPU device, and >= 0 when the Op is running on the CPU - /// device. 
-    @inlinable @inline(__always)
-    public static func infeedEnqueue<Dtype: TensorFlowScalar>(
-      _ input: Tensor<Dtype>,
-      shape: TensorShape?,
-      layout: [Int32],
-      deviceOrdinal: Int64 = -1
-    ) {
-      _RawTFEager.infeedEnqueue(input, shape: shape, layout: layout, deviceOrdinal: deviceOrdinal)
-    }
-
-    /// An op which enqueues prelinearized buffer into TPU infeed.
-    ///
-    /// - Parameter input: A variant tensor representing linearized output.
-    ///
-    /// - Attr device_ordinal: The TPU device to use. This should be -1 when the Op is running on a TPU device
-    ///   and >= 0 when the Op is running on the CPU device.
-    @inlinable @inline(__always)
-    public static func infeedEnqueuePrelinearizedBuffer(
-      _ input: VariantHandle,
-      deviceOrdinal: Int64 = -1
-    ) {
-      _RawTFEager.infeedEnqueuePrelinearizedBuffer(input, deviceOrdinal: deviceOrdinal)
-    }
-
-    /// Feeds multiple Tensor values into the computation as an XLA tuple.
-    ///
-    /// - Parameter inputs: A list of tensors that will be provided using the infeed mechanism.
-    ///
-    /// - Attrs:
-    ///   - dtypes: The element types of each element in `inputs`.
-    ///   - shapes: The shapes of each tensor in `inputs`.
-    ///   - layouts: A vector holding the requested layout in minor-to-major sequence for
-    ///     all the tuple shapes, in the order the shapes appear in the "shapes" input.
-    ///     The layout elements for a sub-shape can be set to -1, in which case the
-    ///     corresponding layout will be computed by the infeed operation.
-    ///   - device_ordinal: The TPU device to use. This should be -1 when the Op
-    ///     is running on a TPU device, and >= 0 when the Op is running on the CPU
-    ///     device.
-    @inlinable @inline(__always)
-    public static func infeedEnqueueTuple<Dtypes: TensorArrayProtocol>(
-      inputs: Dtypes,
-      shapes: [TensorShape?],
-      layouts: [Int32],
-      deviceOrdinal: Int64 = -1
-    ) {
-      _RawTFEager.infeedEnqueueTuple(
-        inputs: inputs, shapes: shapes, layouts: layouts, deviceOrdinal: deviceOrdinal)
+  }
+
+  /// Computes arctangent of `y/x` element-wise, respecting signs of the arguments.
+  ///
+  /// This is the angle \( \theta \in [-\pi, \pi] \) such that
+  /// \[ x = r \cos(\theta) \]
+  /// and
+  /// \[ y = r \sin(\theta) \]
+  /// where \(r = \sqrt{x^2 + y^2} \).
+  @inlinable @inline(__always)
+  public static func atan2<T: FloatingPoint & TensorFlowScalar>(
+    _ y: Tensor<T>,
+    _ x: Tensor<T>
+  ) -> Tensor<T> {
+    switch commonBackend(y.handle.backend, x.handle.backend) {
+    case .XLA:
+      let output_device = x.device
+      let y = Tensor(copying: y, to: .defaultTFEager)
+      let x = Tensor(copying: x, to: .defaultTFEager)
+      return Tensor(copying: _RawTFEager.atan2(y, x), to: output_device)
+    case .TF_EAGER:
+      return _RawTFEager.atan2(y, x)
     }
-    @inlinable @inline(__always)
-    public static func initializeTRTResource(
-      resourceHandle: ResourceHandle,
-      filename: StringTensor,
-      maxCachedEnginesCount: Int64 = 1
-    ) {
-      _RawTFEager.initializeTRTResource(
-        resourceHandle: resourceHandle, filename: filename,
-        maxCachedEnginesCount: maxCachedEnginesCount)
-    }
-
-    /// Initializes a table from a text file.
-    ///
-    /// It inserts one key-value pair into the table for each line of the file.
-    /// The key and value are extracted from the whole line content, elements from the
-    /// split line based on `delimiter` or the line number (starting from zero).
-    /// Where to extract the key and value from a line is specified by `key_index` and
-    /// `value_index`.
-    ///
-    /// - A value of -1 means use the line number (starting from zero), expects `int64`.
-    /// - A value of -2 means use the whole line content, expects `string`.
-    /// - A value >= 0 means use the index (starting at zero) of the split line based
-    ///   on `delimiter`.
-    ///
-    /// - Parameters:
-    ///   - table_handle: Handle to a table which will be initialized.
-    ///   - filename: Filename of a vocabulary text file.
-    ///
-    /// - Attrs:
-    ///   - key_index: Column index in a line to get the table `key` values from.
-    ///   - value_index: Column index that represents information of a line to get the table
-    ///     `value` values from.
-    ///   - vocab_size: Number of elements of the file, use -1 if unknown.
-    ///   - delimiter: Delimiter to separate fields in a line.
-    @inlinable @inline(__always)
-    public static func initializeTableFromTextFileV2(
-      tableHandle: ResourceHandle,
-      filename: StringTensor,
-      keyIndex: Int64,
-      valueIndex: Int64,
-      vocabSize: Int64 = -1,
-      delimiter: String = "\t"
-    ) {
-      _RawTFEager.initializeTableFromTextFileV2(
-        tableHandle: tableHandle, filename: filename, keyIndex: keyIndex, valueIndex: valueIndex,
-        vocabSize: vocabSize, delimiter: delimiter)
-    }
-
-    /// Table initializer that takes two tensors for keys and values respectively.
-    ///
-    /// - Parameters:
-    ///   - table_handle: Handle to a table which will be initialized.
-    ///   - keys: Keys of type Tkey.
-    ///   - values: Values of type Tval.
-    @inlinable @inline(__always)
-    public static func initializeTableV2<
-      Tkey: TensorFlowScalar,
-      Tval: TensorFlowScalar
-    >(
-      tableHandle: ResourceHandle,
-      keys: Tensor<Tkey>,
-      _ values: Tensor<Tval>
-    ) {
-      _RawTFEager.initializeTableV2(tableHandle: tableHandle, keys: keys, values)
-    }
-
-    /// Adds v into specified rows of x.
-    ///
-    /// Computes y = x; y[i, :] += v; return y.
-    ///
-    /// - Parameters:
-    ///   - x: A `Tensor` of type T.
-    ///   - i: A vector. Indices into the left-most dimension of `x`.
-    ///   - v: A `Tensor` of type T. Same dimension sizes as x except the first dimension, which must be the same as i's size.
-    ///
-    /// - Output y: A `Tensor` of type T. An alias of `x`. The content of `y` is undefined if there are duplicates in `i`.
-    @inlinable @inline(__always)
-    public static func inplaceAdd<T: TensorFlowScalar>(
-      _ x: Tensor<T>,
-      i: Tensor<Int32>,
-      v: Tensor<T>
-    ) -> Tensor<T> {
-      switch commonBackend(commonBackend(x.handle.backend, i.handle.backend), v.handle.backend) {
-      case .XLA:
-        let output_device = v.device
-        let x = Tensor(copying: x, to: .defaultTFEager)
-        let i = Tensor(copying: i, to: .defaultTFEager)
-        let v = Tensor(copying: v, to: .defaultTFEager)
-        return Tensor(copying: _RawTFEager.inplaceAdd(x, i: i, v: v), to: output_device)
-      case .TF_EAGER:
-        return _RawTFEager.inplaceAdd(x, i: i, v: v)
-      }
-
-    }
-
-    /// Subtracts `v` from specified rows of `x`.
-    ///
-    /// Computes y = x; y[i, :] -= v; return y.
-    ///
-    /// - Parameters:
-    ///   - x: A `Tensor` of type T.
-    ///   - i: A vector. Indices into the left-most dimension of `x`.
-    ///   - v: A `Tensor` of type T. Same dimension sizes as x except the first dimension, which must be the same as i's size.
-    ///
-    /// - Output y: A `Tensor` of type T. An alias of `x`. The content of `y` is undefined if there are duplicates in `i`.
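A small worked example of the row-update semantics (`y = x; y[i, :] -= v`), with made-up values and assuming `import TensorFlow`:

```swift
import TensorFlow

let x = Tensor<Float>(shape: [3, 2], scalars: [1, 2, 3, 4, 5, 6])
let i = Tensor<Int32>([1])                               // update row 1 only
let v = Tensor<Float>(shape: [1, 2], scalars: [10, 20])  // one row per index in `i`
let y = _Raw.inplaceSub(x, i: i, v: v)
// y == [[1, 2], [-7, -16], [5, 6]]: row 1 is [3, 4] - [10, 20].
```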
- @inlinable @inline(__always) - public static func inplaceSub( - _ x: Tensor, - i: Tensor, - v: Tensor - ) -> Tensor { - switch commonBackend(commonBackend(x.handle.backend, i.handle.backend), v.handle.backend) { - case .XLA: - let output_device = v.device - let x = Tensor(copying: x, to: .defaultTFEager) - let i = Tensor(copying: i, to: .defaultTFEager) - let v = Tensor(copying: v, to: .defaultTFEager) - return Tensor(copying: _RawTFEager.inplaceSub(x, i: i, v: v), to: output_device) - case .TF_EAGER: - return _RawTFEager.inplaceSub(x, i: i, v: v) - } - - } - - /// Updates specified rows with values in `v`. - /// - /// Computes `x[i, :] = v; return x`. - /// - /// - Parameters: - /// - x: A tensor of type `T`. - /// - i: A vector. Indices into the left-most dimension of `x`. - /// - v: A `Tensor` of type T. Same dimension sizes as x except the first dimension, which must be the same as i's size. - /// - /// - Output y: A `Tensor` of type T. An alias of `x`. The content of `y` is undefined if there are duplicates in `i`. - @inlinable @inline(__always) - public static func inplaceUpdate( - _ x: Tensor, - i: Tensor, - v: Tensor - ) -> Tensor { - switch commonBackend(commonBackend(x.handle.backend, i.handle.backend), v.handle.backend) { - case .XLA: - let output_device = v.device - let x = Tensor(copying: x, to: .defaultTFEager) - let i = Tensor(copying: i, to: .defaultTFEager) - let v = Tensor(copying: v, to: .defaultTFEager) - return Tensor(copying: _RawTFEager.inplaceUpdate(x, i: i, v: v), to: output_device) - case .TF_EAGER: - return _RawTFEager.inplaceUpdate(x, i: i, v: v) - } - - } - - @inlinable @inline(__always) - public static func int64Output() -> Tensor { - _RawTFEager.int64Output() - } - - @inlinable @inline(__always) - public static func intAttr( - foo: Int64 = 1 - ) -> Tensor { - _RawTFEager.intAttr(foo: foo) - } - - @inlinable @inline(__always) - public static func intInput( - _ a: Tensor - ) { - _RawTFEager.intInput(a) + } + + /// Computes inverse hyperbolic tangent of x element-wise. + /// + /// Given an input tensor, this function computes inverse hyperbolic tangent + /// for every element in the tensor. Input range is `[-1,1]` and output range is + /// `[-inf, inf]`. If input is `-1`, output will be `-inf` and if the + /// input is `1`, output will be `inf`. Values outside the range will have + /// `nan` as output. + /// + /// ```python + /// x = tf.constant([-float("inf"), -1, -0.5, 1, 0, 0.5, 10, float("inf")]) + /// tf.math.atanh(x) ==> [nan -inf -0.54930615 inf 0. 
0.54930615 nan nan] + /// ``` + @inlinable @inline(__always) + public static func atanh( + _ x: Tensor + ) -> Tensor { + switch x.handle.backend { + case .XLA: + return _RawXLA.atanh(x) + case .TF_EAGER: + return _RawTFEager.atanh(x) } - @inlinable @inline(__always) - public static func intInputFloatInput( - _ a: Tensor, - _ b: Tensor - ) { - _RawTFEager.intInputFloatInput(a, b) - } - - @inlinable @inline(__always) - public static func intInputIntOutput( - _ a: Tensor - ) -> Tensor { - switch a.handle.backend { - case .XLA: - let output_device = a.device - let a = Tensor(copying: a, to: .defaultTFEager) - return Tensor(copying: _RawTFEager.intInputIntOutput(a), to: output_device) - case .TF_EAGER: - return _RawTFEager.intInputIntOutput(a) - } - - } - - @inlinable @inline(__always) - public static func intOutput() -> Tensor { - _RawTFEager.intOutput() - } - - @inlinable @inline(__always) - public static func intOutputFloatOutput() -> (a: Tensor, b: Tensor) { - _RawTFEager.intOutputFloatOutput() - } - - /// Creates a dataset that applies `f` to the outputs of `input_dataset`. - /// - /// Unlike MapDataset, the `f` in InterleaveDataset is expected to return - /// a Dataset variant, and InterleaveDataset will flatten successive - /// results into a single Dataset. Unlike FlatMapDataset, - /// InterleaveDataset will interleave sequences of up to `block_length` - /// consecutive elements from `cycle_length` input elements. - /// - /// - Attr f: A function mapping elements of `input_dataset`, concatenated with - /// `other_arguments`, to a Dataset variant that contains elements matching - /// `output_types` and `output_shapes`. - @inlinable @inline(__always) - public static func interleaveDataset< - FIn: TensorGroup, - FOut: TensorGroup, - Targuments: TensorArrayProtocol - >( - inputDataset: VariantHandle, - otherArguments: Targuments, - cycleLength: Tensor, - blockLength: Tensor, - f: (FIn) -> FOut, - outputTypes: [TensorDataType], - outputShapes: [TensorShape?] - ) -> VariantHandle { - _RawTFEager.interleaveDataset( - inputDataset: inputDataset, otherArguments: otherArguments, cycleLength: cycleLength, - blockLength: blockLength, f: f, outputTypes: outputTypes, outputShapes: outputShapes) - } - - /// Computes the reciprocal of x element-wise. - /// - /// I.e., \\(y = 1 / x\\). - @inlinable @inline(__always) - public static func inv( - _ x: Tensor - ) -> Tensor { - switch x.handle.backend { - case .XLA: - let output_device = x.device - let x = Tensor(copying: x, to: .defaultTFEager) - return Tensor(copying: _RawTFEager.inv(x), to: output_device) - case .TF_EAGER: - return _RawTFEager.inv(x) - } - - } - - /// Computes the gradient for the inverse of `x` wrt its input. - /// - /// Specifically, `grad = -dy * y*y`, where `y = 1/x`, and `dy` - /// is the corresponding input gradient. - @inlinable @inline(__always) - public static func invGrad( - _ y: Tensor, - dy: Tensor - ) -> Tensor { - switch commonBackend(y.handle.backend, dy.handle.backend) { - case .XLA: - let output_device = dy.device - let y = Tensor(copying: y, to: .defaultTFEager) - let dy = Tensor(copying: dy, to: .defaultTFEager) - return Tensor(copying: _RawTFEager.invGrad(y, dy: dy), to: output_device) - case .TF_EAGER: - return _RawTFEager.invGrad(y, dy: dy) - } - - } - - /// Invert (flip) each bit of supported types; for example, type `uint8` value 01010101 becomes 10101010. - /// - /// Flip each bit of supported types. For example, type `int8` (decimal 2) binary 00000010 becomes (decimal -3) binary 11111101. 
- /// This operation is performed on each element of the tensor argument `x`. - /// - /// Example: - /// ```python - /// import tensorflow as tf - /// from tensorflow.python.ops import bitwise_ops - /// - /// # flip 2 (00000010) to -3 (11111101) - /// tf.assert_equal(-3, bitwise_ops.invert(2)) - /// - /// dtype_list = [dtypes.int8, dtypes.int16, dtypes.int32, dtypes.int64, - /// dtypes.uint8, dtypes.uint16, dtypes.uint32, dtypes.uint64] - /// - /// inputs = [0, 5, 3, 14] - /// for dtype in dtype_list: - /// # Because of issues with negative numbers, let's test this indirectly. - /// # 1. invert(a) and a = 0 - /// # 2. invert(a) or a = invert(0) - /// input_tensor = tf.constant([0, 5, 3, 14], dtype=dtype) - /// not_a_and_a, not_a_or_a, not_0 = [bitwise_ops.bitwise_and( - /// input_tensor, bitwise_ops.invert(input_tensor)), - /// bitwise_ops.bitwise_or( - /// input_tensor, bitwise_ops.invert(input_tensor)), - /// bitwise_ops.invert( - /// tf.constant(0, dtype=dtype))] - /// - /// expected = tf.constant([0, 0, 0, 0], dtype=tf.float32) - /// tf.assert_equal(tf.cast(not_a_and_a, tf.float32), expected) - /// - /// expected = tf.cast([not_0] * 4, tf.float32) - /// tf.assert_equal(tf.cast(not_a_or_a, tf.float32), expected) - /// - /// # For unsigned dtypes let's also check the result directly. - /// if dtype.is_unsigned: - /// inverted = bitwise_ops.invert(input_tensor) - /// expected = tf.constant([dtype.max - x for x in inputs], dtype=tf.float32) - /// tf.assert_equal(tf.cast(inverted, tf.float32), tf.cast(expected, tf.float32)) - /// ``` - @inlinable @inline(__always) - public static func invert( - _ x: Tensor - ) -> Tensor { - switch x.handle.backend { - case .XLA: - let output_device = x.device - let x = Tensor(copying: x, to: .defaultTFEager) - return Tensor(copying: _RawTFEager.invert(x), to: output_device) - case .TF_EAGER: - return _RawTFEager.invert(x) - } - - } - - /// Computes the inverse permutation of a tensor. - /// - /// This operation computes the inverse of an index permutation. It takes a 1-D - /// integer tensor `x`, which represents the indices of a zero-based array, and - /// swaps each value with its index position. In other words, for an output tensor - /// `y` and an input tensor `x`, this operation computes the following: - /// - /// `y[x[i]] = i for i in [0, 1, ..., len(x) - 1]` - /// - /// The values must include 0. There can be no duplicate values or negative values. - /// - /// For example: - /// - /// ``` - /// # tensor `x` is [3, 4, 0, 2, 1] - /// invert_permutation(x) ==> [2, 4, 3, 0, 1] - /// ``` - /// - /// - Parameter x: 1-D. - /// - /// - Output y: 1-D. - @inlinable @inline(__always) - public static func invertPermutation( - _ x: Tensor - ) -> Tensor { - switch x.handle.backend { - case .XLA: - return _RawXLA.invertPermutation(x) - case .TF_EAGER: - return _RawTFEager.invertPermutation(x) - } - - } - - /// Checks whether a tree ensemble has been initialized. - /// - /// - Parameter tree_ensemble_handle: Handle to the tree ensemble resource. - /// - /// - Output is_initialized: output boolean on whether it is initialized or not. - @inlinable @inline(__always) - public static func isBoostedTreesEnsembleInitialized( - treeEnsembleHandle: ResourceHandle - ) -> Tensor { - _RawTFEager.isBoostedTreesEnsembleInitialized(treeEnsembleHandle: treeEnsembleHandle) - } - - /// Checks whether a quantile stream has been initialized. - /// - /// An Op that checks if quantile stream resource is initialized. 
- /// - /// - Parameter quantile_stream_resource_handle: resource; The reference to quantile stream resource handle. - /// - /// - Output is_initialized: bool; True if the resource is initialized, False otherwise. - @inlinable @inline(__always) - public static func isBoostedTreesQuantileStreamResourceInitialized( - quantileStreamResourceHandle: ResourceHandle - ) -> Tensor { - _RawTFEager.isBoostedTreesQuantileStreamResourceInitialized( - quantileStreamResourceHandle: quantileStreamResourceHandle) - } - - /// Returns which elements of x are finite. - /// - /// @compatibility(numpy) - /// Equivalent to np.isfinite - /// @end_compatibility - /// - /// Example: - /// - /// ```python - /// x = tf.constant([5.0, 4.8, 6.8, np.inf, np.nan]) - /// tf.math.is_finite(x) ==> [True, True, True, False, False] - /// ``` - @inlinable @inline(__always) - public static func isFinite( - _ x: Tensor - ) -> Tensor { - switch x.handle.backend { - case .XLA: - return _RawXLA.isFinite(x) - case .TF_EAGER: - return _RawTFEager.isFinite(x) - } - - } - - /// Returns which elements of x are Inf. - /// - /// @compatibility(numpy) - /// Equivalent to np.isinf - /// @end_compatibility - /// - /// Example: - /// - /// ```python - /// x = tf.constant([5.0, np.inf, 6.8, np.inf]) - /// tf.math.is_inf(x) ==> [False, True, False, True] - /// ``` - @inlinable @inline(__always) - public static func isInf( - _ x: Tensor - ) -> Tensor { - switch x.handle.backend { - case .XLA: - return _RawXLA.isInf(x) - case .TF_EAGER: - return _RawTFEager.isInf(x) - } - - } - - /// Returns which elements of x are NaN. - /// - /// @compatibility(numpy) - /// Equivalent to np.isnan - /// @end_compatibility - /// - /// Example: - /// - /// ```python - /// x = tf.constant([5.0, np.nan, 6.8, np.nan, np.inf]) - /// tf.math.is_nan(x) ==> [False, True, False, True, False] - /// ``` - @inlinable @inline(__always) - public static func isNan( - _ x: Tensor - ) -> Tensor { - switch x.handle.backend { - case .XLA: - return _RawXLA.isNan(x) - case .TF_EAGER: - return _RawTFEager.isNan(x) - } - - } - - /// A container for an iterator resource. - /// - /// - Output handle: A handle to the iterator that can be passed to a "MakeIterator" - /// or "IteratorGetNext" op. - @inlinable @inline(__always) - public static func iterator( - sharedName: String, - container: String, - outputTypes: [TensorDataType], - outputShapes: [TensorShape?] - ) -> ResourceHandle { - _RawTFEager.iterator( - sharedName: sharedName, container: container, outputTypes: outputTypes, - outputShapes: outputShapes) - } - - /// Converts the given string representing a handle to an iterator to a resource. - /// - /// - Parameter string_handle: A string representation of the given handle. - /// - /// - Attrs: - /// - output_types: If specified, defines the type of each tuple component in an - /// element produced by the resulting iterator. - /// - output_shapes: If specified, defines the shape of each tuple component in an - /// element produced by the resulting iterator. - /// - /// - Output resource_handle: A handle to an iterator resource. - @inlinable @inline(__always) - public static func iteratorFromStringHandle( - stringHandle: StringTensor, - outputTypes: [TensorDataType], - outputShapes: [TensorShape?] 
-    ) -> ResourceHandle {
-      _RawTFEager.iteratorFromStringHandle(
-        stringHandle: stringHandle, outputTypes: outputTypes, outputShapes: outputShapes)
-    }
-
-    @inlinable @inline(__always)
-    public static func iteratorFromStringHandleV2(
-      stringHandle: StringTensor,
-      outputTypes: [TensorDataType],
-      outputShapes: [TensorShape?]
-    ) -> ResourceHandle {
-      _RawTFEager.iteratorFromStringHandleV2(
-        stringHandle: stringHandle, outputTypes: outputTypes, outputShapes: outputShapes)
-    }
-
-    /// Returns the name of the device on which `resource` has been placed.
-    @inlinable @inline(__always)
-    public static func iteratorGetDevice(
-      resource: ResourceHandle
-    ) -> StringTensor {
-      _RawTFEager.iteratorGetDevice(resource: resource)
-    }
-
-    /// Gets the next output from the given iterator.
-    @inlinable @inline(__always)
-    public static func iteratorGetNext<OutputTypes: TensorGroup>(
-      iterator: ResourceHandle,
-      outputShapes: [TensorShape?]
-    ) -> OutputTypes {
-      _RawTFEager.iteratorGetNext(iterator: iterator, outputShapes: outputShapes)
-    }
-
-    /// Gets the next output from the given iterator as an Optional variant.
-    @inlinable @inline(__always)
-    public static func iteratorGetNextAsOptional(
-      iterator: ResourceHandle,
-      outputTypes: [TensorDataType],
-      outputShapes: [TensorShape?]
-    ) -> VariantHandle {
-      _RawTFEager.iteratorGetNextAsOptional(
-        iterator: iterator, outputTypes: outputTypes, outputShapes: outputShapes)
-    }
-
-    /// Gets the next output from the given iterator.
-    ///
-    /// This operation is a synchronous version of IteratorGetNext. It should only be used
-    /// in situations where the iterator does not block the calling thread, or where
-    /// the calling thread is not a member of the thread pool used to execute parallel
-    /// operations (e.g. in eager mode).
-    @inlinable @inline(__always)
-    public static func iteratorGetNextSync<OutputTypes: TensorGroup>(
-      iterator: ResourceHandle,
-      outputShapes: [TensorShape?]
-    ) -> OutputTypes {
-      _RawTFEager.iteratorGetNextSync(iterator: iterator, outputShapes: outputShapes)
-    }
-
-    /// Converts the given `resource_handle` representing an iterator to a string.
-    ///
-    /// - Parameter resource_handle: A handle to an iterator resource.
-    ///
-    /// - Output string_handle: A string representation of the given handle.
-    @inlinable @inline(__always)
-    public static func iteratorToStringHandle(
-      resourceHandle: ResourceHandle
-    ) -> StringTensor {
-      _RawTFEager.iteratorToStringHandle(resourceHandle: resourceHandle)
-    }
-
-    @inlinable @inline(__always)
-    public static func iteratorV2(
-      sharedName: String,
-      container: String,
-      outputTypes: [TensorDataType],
-      outputShapes: [TensorShape?]
-    ) -> ResourceHandle {
-      _RawTFEager.iteratorV2(
-        sharedName: sharedName, container: container, outputTypes: outputTypes,
-        outputShapes: outputShapes)
-    }
-
-    /// Returns the index of a data point that should be added to the seed set.
-    ///
-    /// Entries in distances are assumed to be squared distances of candidate points to
-    /// the already sampled centers in the seed set. The op constructs one Markov chain
-    /// of the k-MC^2 algorithm and returns the index of one candidate point to be added
-    /// as an additional cluster center.
-    ///
-    /// - Parameters:
-    ///   - distances: Vector with squared distances to the closest previously sampled cluster center
-    ///     for each candidate point.
-    ///   - seed: Scalar. Seed for initializing the random number generator.
-    ///
-    /// - Output index: Scalar with the index of the sampled point.
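A minimal sketch of one sampling step through this binding (distances and seed are made up; assumes `import TensorFlow`):

```swift
import TensorFlow

// Squared distances from each candidate point to its nearest already-chosen center.
let distances = Tensor<Float>([4.0, 1.0, 9.0, 0.25])
let seed = Tensor<Int64>(42)
let index = _Raw.kMC2ChainInitialization(distances: distances, seed: seed)
// `index` is a scalar Int64 tensor: candidates with larger squared distance
// are more likely to be chosen as the next cluster center.
```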
- @inlinable @inline(__always) - public static func kMC2ChainInitialization( - distances: Tensor, - seed: Tensor - ) -> Tensor { - switch commonBackend(distances.handle.backend, seed.handle.backend) { - case .XLA: - let output_device = seed.device - let distances = Tensor(copying: distances, to: .defaultTFEager) - let seed = Tensor(copying: seed, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.kMC2ChainInitialization(distances: distances, seed: seed), - to: output_device) - case .TF_EAGER: - return _RawTFEager.kMC2ChainInitialization(distances: distances, seed: seed) - } - - } - - @inlinable @inline(__always) - public static func kernelLabel() -> StringTensor { - _RawTFEager.kernelLabel() - } - - @inlinable @inline(__always) - public static func kernelLabelRequired( - _ input: Tensor - ) -> StringTensor { - _RawTFEager.kernelLabelRequired(input) - } - - /// Selects num_to_sample rows of input using the KMeans++ criterion. - /// - /// Rows of points are assumed to be input points. One row is selected at random. - /// Subsequent rows are sampled with probability proportional to the squared L2 - /// distance from the nearest row selected thus far till num_to_sample rows have - /// been sampled. - /// - /// - Parameters: - /// - points: Matrix of shape (n, d). Rows are assumed to be input points. - /// - num_to_sample: Scalar. The number of rows to sample. This value must not be larger than n. - /// - seed: Scalar. Seed for initializing the random number generator. - /// - num_retries_per_sample: Scalar. For each row that is sampled, this parameter - /// specifies the number of additional points to draw from the current - /// distribution before selecting the best. If a negative value is specified, a - /// heuristic is used to sample O(log(num_to_sample)) additional points. - /// - /// - Output samples: Matrix of shape (num_to_sample, d). The sampled rows. - @inlinable @inline(__always) - public static func kmeansPlusPlusInitialization( - points: Tensor, - numToSample: Tensor, - seed: Tensor, - numRetriesPerSample: Tensor - ) -> Tensor { - switch commonBackend( - commonBackend( - commonBackend(points.handle.backend, numToSample.handle.backend), seed.handle.backend), - numRetriesPerSample.handle.backend) - { - case .XLA: - let output_device = numRetriesPerSample.device - let points = Tensor(copying: points, to: .defaultTFEager) - let numToSample = Tensor(copying: numToSample, to: .defaultTFEager) - let seed = Tensor(copying: seed, to: .defaultTFEager) - let numRetriesPerSample = Tensor(copying: numRetriesPerSample, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.kmeansPlusPlusInitialization( - points: points, numToSample: numToSample, seed: seed, - numRetriesPerSample: numRetriesPerSample), to: output_device) - case .TF_EAGER: - return _RawTFEager.kmeansPlusPlusInitialization( - points: points, numToSample: numToSample, seed: seed, - numRetriesPerSample: numRetriesPerSample) - } - - } - - /// L2 Loss. - /// - /// Computes half the L2 norm of a tensor without the `sqrt`: - /// - /// output = sum(t ** 2) / 2 - /// - /// - Parameter t: Typically 2-D, but may have any dimensions. - /// - /// - Output output: 0-D. 
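Ops like `kMC2ChainInitialization` and `kmeansPlusPlusInitialization` above have no XLA lowering, so under the XLA backend they copy their operands to the default eager device, run the eager kernel, and copy the result back. A minimal sketch of that fallback as a standalone helper; the helper name is hypothetical, not part of the library:

```swift
import TensorFlow

// Hypothetical helper mirroring the XLA fallback pattern used above:
// round-trip the tensor through .defaultTFEager to run an eager-only kernel.
func runOnEager<T: TensorFlowScalar, R: TensorFlowScalar>(
  _ input: Tensor<T>, _ op: (Tensor<T>) -> Tensor<R>
) -> Tensor<R> {
  let outputDevice = input.device  // remember the original (XLA) device
  let eagerInput = Tensor(copying: input, to: .defaultTFEager)
  return Tensor(copying: op(eagerInput), to: outputDevice)  // copy result back
}
```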
- @inlinable @inline(__always) - public static func l2Loss( - t: Tensor - ) -> Tensor { - switch t.handle.backend { - case .XLA: - let output_device = t.device - let t = Tensor(copying: t, to: .defaultTFEager) - return Tensor(copying: _RawTFEager.l2Loss(t: t), to: output_device) - case .TF_EAGER: - return _RawTFEager.l2Loss(t: t) - } - - } - - /// Creates a dataset that emits the key-value pairs in one or more LMDB files. - /// - /// The Lightning Memory-Mapped Database Manager, or LMDB, is an embedded binary - /// key-value database. This dataset can read the contents of LMDB database files, - /// the names of which generally have the `.mdb` suffix. - /// - /// Each output element consists of a key-value pair represented as a pair of - /// scalar string `Tensor`s, where the first `Tensor` contains the key and the - /// second `Tensor` contains the value. - /// - /// LMDB uses different file formats on big- and little-endian machines. - /// `LMDBDataset` can only read files in the format of the host machine. - /// - /// - Parameter filenames: A scalar or a vector containing the name(s) of the binary file(s) to be - /// read. - @inlinable @inline(__always) - public static func lMDBDataset( - filenames: StringTensor, - outputTypes: [TensorDataType], - outputShapes: [TensorShape?] - ) -> VariantHandle { - _RawTFEager.lMDBDataset( - filenames: filenames, outputTypes: outputTypes, outputShapes: outputShapes) - } - - /// Local Response Normalization. - /// - /// The 4-D `input` tensor is treated as a 3-D array of 1-D vectors (along the last - /// dimension), and each vector is normalized independently. Within a given vector, - /// each component is divided by the weighted, squared sum of inputs within - /// `depth_radius`. In detail, - /// - /// sqr_sum[a, b, c, d] = - /// sum(input[a, b, c, d - depth_radius : d + depth_radius + 1] ** 2) - /// output = input / (bias + alpha * sqr_sum) ** beta - /// - /// For details, see [Krizhevsky et al., ImageNet classification with deep - /// convolutional neural networks (NIPS 2012)](http://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks). - /// - /// - Parameter input: 4-D. - /// - /// - Attrs: - /// - depth_radius: 0-D. Half-width of the 1-D normalization window. - /// - bias: An offset (usually positive to avoid dividing by 0). - /// - alpha: A scale factor, usually positive. - /// - beta: An exponent. - @inlinable @inline(__always) - public static func lRN( - _ input: Tensor, - depthRadius: Int64 = 5, - bias: Double = 1, - alpha: Double = 1, - beta: Double = 0.5 - ) -> Tensor { - switch input.handle.backend { - case .XLA: - let output_device = input.device - let input = Tensor(copying: input, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.lRN( - input, depthRadius: depthRadius, bias: bias, alpha: alpha, beta: beta), - to: output_device) - case .TF_EAGER: - return _RawTFEager.lRN( - input, depthRadius: depthRadius, bias: bias, alpha: alpha, beta: beta) - } - - } - - /// Gradients for Local Response Normalization. - /// - /// - Parameters: - /// - input_grads: 4-D with shape `[batch, height, width, channels]`. - /// - input_image: 4-D with shape `[batch, height, width, channels]`. - /// - output_image: 4-D with shape `[batch, height, width, channels]`. - /// - /// - Attrs: - /// - depth_radius: A depth radius. - /// - bias: An offset (usually > 0 to avoid dividing by 0). - /// - alpha: A scale factor, usually positive. - /// - beta: An exponent. - /// - /// - Output output: The gradients for LRN. 
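A usage sketch for the raw L2 loss above; the expected value follows directly from `output = sum(t ** 2) / 2`:

```swift
import TensorFlow

// sum(t ** 2) / 2 == (1 + 4 + 9 + 16) / 2 == 15.
let t = Tensor<Float>([[1, 2], [3, 4]])
let loss = _Raw.l2Loss(t: t)  // falls back to eager under XLA
print(loss)  // 15.0
```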
- @inlinable @inline(__always) - public static func lRNGrad( - inputGrads: Tensor, - inputImage: Tensor, - outputImage: Tensor, - depthRadius: Int64 = 5, - bias: Double = 1, - alpha: Double = 1, - beta: Double = 0.5 - ) -> Tensor { - switch commonBackend( - commonBackend(inputGrads.handle.backend, inputImage.handle.backend), - outputImage.handle.backend) - { - case .XLA: - let output_device = outputImage.device - let inputGrads = Tensor(copying: inputGrads, to: .defaultTFEager) - let inputImage = Tensor(copying: inputImage, to: .defaultTFEager) - let outputImage = Tensor(copying: outputImage, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.lRNGrad( - inputGrads: inputGrads, inputImage: inputImage, outputImage: outputImage, - depthRadius: depthRadius, bias: bias, alpha: alpha, beta: beta), to: output_device) - case .TF_EAGER: - return _RawTFEager.lRNGrad( - inputGrads: inputGrads, inputImage: inputImage, outputImage: outputImage, - depthRadius: depthRadius, bias: bias, alpha: alpha, beta: beta) - } - - } - - /// Computes the LSTM cell forward propagation for 1 time step. - /// - /// This implementation uses 1 weight matrix and 1 bias vector, and there's an - /// optional peephole connection. - /// - /// This kernel op implements the following mathematical equations: - /// - /// ```python - /// xh = [x, h_prev] - /// [i, f, ci, o] = xh * w + b - /// f = f + forget_bias - /// - /// if not use_peephole: - /// wci = wcf = wco = 0 - /// - /// i = sigmoid(cs_prev * wci + i) - /// f = sigmoid(cs_prev * wcf + f) - /// ci = tanh(ci) - /// - /// cs = ci .* i + cs_prev .* f - /// cs = clip(cs, cell_clip) - /// - /// o = sigmoid(cs * wco + o) - /// co = tanh(cs) - /// h = co .* o - /// ``` - /// - /// - Parameters: - /// - x: The input to the LSTM cell, shape (batch_size, num_inputs). - /// - cs_prev: Value of the cell state at previous time step. - /// - h_prev: Output of the previous cell at previous time step. - /// - w: The weight matrix. - /// - wci: The weight matrix for input gate peephole connection. - /// - wcf: The weight matrix for forget gate peephole connection. - /// - wco: The weight matrix for output gate peephole connection. - /// - b: The bias vector. - /// - /// - Attrs: - /// - forget_bias: The forget gate bias. - /// - cell_clip: Value to clip the 'cs' value to. - /// - use_peephole: Whether to use peephole weights. - /// - /// - Outputs: - /// - i: The input gate. - /// - cs: The cell state before the tanh. - /// - f: The forget gate. - /// - o: The output gate. - /// - ci: The cell input. - /// - co: The cell after the tanh. - /// - h: The output h vector. - @inlinable @inline(__always) - public static func lSTMBlockCell( - _ x: Tensor, - csPrev: Tensor, - hPrev: Tensor, - w: Tensor, - wci: Tensor, - wcf: Tensor, - wco: Tensor, - _ b: Tensor, - forgetBias: Double = 1, - cellClip: Double = 3, - usePeephole: Bool = false - ) -> ( - i: Tensor, cs: Tensor, f: Tensor, o: Tensor, ci: Tensor, co: Tensor, - h: Tensor - ) { - _RawTFEager.lSTMBlockCell( - x, csPrev: csPrev, hPrev: hPrev, w: w, wci: wci, wcf: wcf, wco: wco, b, - forgetBias: forgetBias, cellClip: cellClip, usePeephole: usePeephole) - } - - /// Computes the LSTM cell backward propagation for 1 timestep. - /// - /// This implementation is to be used in conjunction of LSTMBlockCell. - /// - /// - Parameters: - /// - x: The input to the LSTM cell, shape (batch_size, num_inputs). - /// - cs_prev: The previous cell state. - /// - h_prev: The previous h state. - /// - w: The weight matrix. 
- /// - wci: The weight matrix for input gate peephole connection. - /// - wcf: The weight matrix for forget gate peephole connection. - /// - wco: The weight matrix for output gate peephole connection. - /// - b: The bias vector. - /// - i: The input gate. - /// - cs: The cell state before the tanh. - /// - f: The forget gate. - /// - o: The output gate. - /// - ci: The cell input. - /// - co: The cell after the tanh. - /// - cs_grad: The current gradient of cs. - /// - h_grad: The gradient of h vector. - /// - /// - Attr use_peephole: Whether the cell uses peephole connections. - /// - /// - Outputs: - /// - cs_prev_grad: The gradient of cs to be back-propped. - /// - dicfo: The derivative wrt to [i, cs, f, o]. - /// - wci_grad: The gradient for wci to be back-propped. - /// - wcf_grad: The gradient for wcf to be back-propped. - /// - wco_grad: The gradient for wco to be back-propped. - @inlinable @inline(__always) - public static func lSTMBlockCellGrad( - _ x: Tensor, - csPrev: Tensor, - hPrev: Tensor, - w: Tensor, - wci: Tensor, - wcf: Tensor, - wco: Tensor, - _ b: Tensor, - i: Tensor, - cs: Tensor, - f: Tensor, - o: Tensor, - ci: Tensor, - co: Tensor, - csGrad: Tensor, - hGrad: Tensor, - usePeephole: Bool - ) -> ( - csPrevGrad: Tensor, dicfo: Tensor, wciGrad: Tensor, wcfGrad: Tensor, - wcoGrad: Tensor - ) { - _RawTFEager.lSTMBlockCellGrad( - x, csPrev: csPrev, hPrev: hPrev, w: w, wci: wci, wcf: wcf, wco: wco, b, i: i, cs: cs, f: f, - o: o, ci: ci, co: co, csGrad: csGrad, hGrad: hGrad, usePeephole: usePeephole) - } - - /// Records the latency of producing `input_dataset` elements in a StatsAggregator. - @inlinable @inline(__always) - public static func latencyStatsDataset( - inputDataset: VariantHandle, - tag: StringTensor, - outputTypes: [TensorDataType], - outputShapes: [TensorShape?] - ) -> VariantHandle { - _RawTFEager.latencyStatsDataset( - inputDataset: inputDataset, tag: tag, outputTypes: outputTypes, outputShapes: outputShapes) - } - - /// Computes rectified linear: `max(features, features * alpha)`. - @inlinable @inline(__always) - public static func leakyRelu( - features: Tensor, - alpha: Double = 0.2 - ) -> Tensor { - switch features.handle.backend { - case .XLA: - return _RawXLA.leakyRelu(features: features, alpha: alpha) - case .TF_EAGER: - return _RawTFEager.leakyRelu(features: features, alpha: alpha) - } - - } - - /// Computes rectified linear gradients for a LeakyRelu operation. - /// - /// - Parameters: - /// - gradients: The backpropagated gradients to the corresponding LeakyRelu operation. - /// - features: The features passed as input to the corresponding LeakyRelu operation, - /// OR the outputs of that operation (both work equivalently). - /// - /// - Output backprops: `gradients * (features > 0) + alpha * gradients * (features <= 0)`. - @inlinable @inline(__always) - public static func leakyReluGrad( - gradients: Tensor, - features: Tensor, - alpha: Double = 0.2 - ) -> Tensor { - switch commonBackend(gradients.handle.backend, features.handle.backend) { - case .XLA: - return _RawXLA.leakyReluGrad(gradients: gradients, features: features, alpha: alpha) - case .TF_EAGER: - return _RawTFEager.leakyReluGrad(gradients: gradients, features: features, alpha: alpha) - } - - } - - /// Generates labels for candidate sampling with a learned unigram distribution. - /// - /// See explanations of candidate sampling and the data formats at - /// go/candidate-sampling. - /// - /// For each batch, this op picks a single set of sampled candidate labels. 
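A usage sketch for `leakyRelu` above: negative inputs are scaled by `alpha`, positive inputs pass through unchanged. The values are illustrative:

```swift
import TensorFlow

// max(features, features * alpha) with alpha = 0.2.
let features = Tensor<Float>([-2, -0.5, 0, 1, 3])
let activated = _Raw.leakyRelu(features: features, alpha: 0.2)
print(activated)  // approximately [-0.4, -0.1, 0.0, 1.0, 3.0]
```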
-    ///
-    /// The advantages of sampling candidates per-batch are simplicity and the
-    /// possibility of efficient dense matrix multiplication. The disadvantage is that
-    /// the sampled candidates must be chosen independently of the context and of the
-    /// true labels.
-    ///
-    /// - Parameter true_classes: A batch_size * num_true matrix, in which each row contains the
-    ///   IDs of the num_true target_classes in the corresponding original label.
-    ///
-    /// - Attrs:
-    ///   - num_true: Number of true labels per context.
-    ///   - num_sampled: Number of candidates to randomly sample.
-    ///   - unique: If unique is true, we sample with rejection, so that all sampled
-    ///     candidates in a batch are unique. This requires some approximation to
-    ///     estimate the post-rejection sampling probabilities.
-    ///   - range_max: The sampler will sample integers from the interval [0, range_max).
-    ///   - seed: If either seed or seed2 are set to be non-zero, the random number
-    ///     generator is seeded by the given seed. Otherwise, it is seeded by a
-    ///     random seed.
-    ///   - seed2: A second seed to avoid seed collision.
-    ///
-    /// - Outputs:
-    ///   - sampled_candidates: A vector of length num_sampled, in which each element is
-    ///     the ID of a sampled candidate.
-    ///   - true_expected_count: A batch_size * num_true matrix, representing
-    ///     the number of times each candidate is expected to occur in a batch
-    ///     of sampled candidates. If unique=true, then this is a probability.
-    ///   - sampled_expected_count: A vector of length num_sampled, for each sampled
-    ///     candidate representing the number of times the candidate is expected
-    ///     to occur in a batch of sampled candidates. If unique=true, then this is a
-    ///     probability.
-    @inlinable @inline(__always)
-    public static func learnedUnigramCandidateSampler(
-      trueClasses: Tensor<Int64>,
-      numTrue: Int64,
-      numSampled: Int64,
-      unique: Bool,
-      rangeMax: Int64,
-      seed: Int64 = 0,
-      seed2: Int64 = 0
-    ) -> (
-      sampledCandidates: Tensor<Int64>, trueExpectedCount: Tensor<Float>,
-      sampledExpectedCount: Tensor<Float>
-    ) {
-      _RawTFEager.learnedUnigramCandidateSampler(
-        trueClasses: trueClasses, numTrue: numTrue, numSampled: numSampled, unique: unique,
-        rangeMax: rangeMax, seed: seed, seed2: seed2)
-    }
-
-    /// Elementwise computes the bitwise left-shift of `x` and `y`.
-    ///
-    /// If `y` is negative, or greater than or equal to the width of `x` in bits, the
-    /// result is implementation defined.
-    ///
-    /// Example:
-    ///
-    /// ```python
-    /// import tensorflow as tf
-    /// from tensorflow.python.ops import bitwise_ops
-    /// import numpy as np
-    /// dtype_list = [tf.int8, tf.int16, tf.int32, tf.int64]
-    ///
-    /// for dtype in dtype_list:
-    ///   lhs = tf.constant([-1, -5, -3, -14], dtype=dtype)
-    ///   rhs = tf.constant([5, 0, 7, 11], dtype=dtype)
-    ///
-    ///   left_shift_result = bitwise_ops.left_shift(lhs, rhs)
-    ///
-    ///   print(left_shift_result)
-    ///
-    /// # This will print:
-    /// # tf.Tensor([ -32 -5 -128 0], shape=(4,), dtype=int8)
-    /// # tf.Tensor([ -32 -5 -384 -28672], shape=(4,), dtype=int16)
-    /// # tf.Tensor([ -32 -5 -384 -28672], shape=(4,), dtype=int32)
-    /// # tf.Tensor([ -32 -5 -384 -28672], shape=(4,), dtype=int64)
-    ///
-    /// lhs = np.array([-2, 64, 101, 32], dtype=np.int8)
-    /// rhs = np.array([-1, -5, -3, -14], dtype=np.int8)
-    /// bitwise_ops.left_shift(lhs, rhs)
-    /// # <tf.Tensor: shape=(4,), dtype=int8, numpy=array([ -2, 64, 101, 32], dtype=int8)>
-    /// ```
-    ///
-    @inlinable @inline(__always)
-    public static func leftShift<T: TensorFlowInteger>(
-      _ x: Tensor<T>,
-      _ y: Tensor<T>
-    ) -> Tensor<T> {
-      switch commonBackend(x.handle.backend, y.handle.backend) {
-      case .XLA:
-        let output_device = y.device
-        let x = Tensor(copying: x, to: .defaultTFEager)
-        let y = Tensor(copying: y, to: .defaultTFEager)
-        return Tensor(copying: _RawTFEager.leftShift(x, y), to: output_device)
-      case .TF_EAGER:
-        return _RawTFEager.leftShift(x, y)
-      }
-
-    }
-
-    /// Returns the truth value of (x < y) element-wise.
-    ///
-    /// *NOTE*: `Less` supports broadcasting. More about broadcasting
-    /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
-    ///
-    /// Example:
-    ///
-    /// ```python
-    /// x = tf.constant([5, 4, 6])
-    /// y = tf.constant([5])
-    /// tf.math.less(x, y) ==> [False, True, False]
-    ///
-    /// x = tf.constant([5, 4, 6])
-    /// y = tf.constant([5, 6, 7])
-    /// tf.math.less(x, y) ==> [False, True, True]
-    /// ```
-    @inlinable @inline(__always)
-    public static func less<T: TensorFlowNumeric>(
-      _ x: Tensor<T>,
-      _ y: Tensor<T>
-    ) -> Tensor<Bool> {
-      switch commonBackend(x.handle.backend, y.handle.backend) {
-      case .XLA:
-        return _RawXLA.less(x, y)
-      case .TF_EAGER:
-        return _RawTFEager.less(x, y)
-      }
-
-    }
-
-    /// Returns the truth value of (x <= y) element-wise.
-    ///
-    /// *NOTE*: `LessEqual` supports broadcasting. More about broadcasting
-    /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
-    ///
-    /// Example:
-    ///
-    /// ```python
-    /// x = tf.constant([5, 4, 6])
-    /// y = tf.constant([5])
-    /// tf.math.less_equal(x, y) ==> [True, True, False]
-    ///
-    /// x = tf.constant([5, 4, 6])
-    /// y = tf.constant([5, 6, 6])
-    /// tf.math.less_equal(x, y) ==> [True, True, True]
-    /// ```
-    @inlinable @inline(__always)
-    public static func lessEqual<T: TensorFlowNumeric>(
-      _ x: Tensor<T>,
-      _ y: Tensor<T>
-    ) -> Tensor<Bool> {
-      switch commonBackend(x.handle.backend, y.handle.backend) {
-      case .XLA:
-        return _RawXLA.lessEqual(x, y)
-      case .TF_EAGER:
-        return _RawTFEager.lessEqual(x, y)
-      }
-
-    }
-
-    /// Computes the log of the absolute value of `Gamma(x)` element-wise.
-    ///
-    /// For positive numbers, this function computes log((input - 1)!) for every element in the tensor.
-    /// `lgamma(5) = log((5-1)!) = log(4!) = log(24) = 3.1780539`
-    ///
-    /// Example:
-    ///
-    /// ```python
-    /// x = tf.constant([0, 0.5, 1, 4.5, -4, -5.6])
-    /// tf.math.lgamma(x) ==> [inf, 0.5723649, 0., 2.4537368, inf, -4.6477685]
-    /// ```
-    @inlinable @inline(__always)
-    public static func lgamma<T: FloatingPoint & TensorFlowScalar>(
-      _ x: Tensor<T>
-    ) -> Tensor<T> {
-      switch x.handle.backend {
-      case .XLA:
-        let output_device = x.device
-        let x = Tensor(copying: x, to: .defaultTFEager)
-        return Tensor(copying: _RawTFEager.lgamma(x), to: output_device)
-      case .TF_EAGER:
-        return _RawTFEager.lgamma(x)
-      }
-
-    }
-
-    /// Generates values in an interval.
-    ///
-    /// A sequence of `num` evenly-spaced values is generated beginning at `start`.
-    /// If `num > 1`, the values in the sequence increase by `(stop - start) / (num - 1)`,
-    /// so that the last one is exactly `stop`.
-    ///
-    /// For example:
-    ///
-    /// ```
-    /// tf.linspace(10.0, 12.0, 3, name="linspace") => [ 10.0 11.0 12.0]
-    /// ```
-    ///
-    /// - Parameters:
-    ///   - start: 0-D tensor. First entry in the range.
-    ///   - stop: 0-D tensor. Last entry in the range.
-    ///   - num: 0-D tensor. Number of values to generate.
-    ///
-    /// - Output output: 1-D. The generated values.
-    @inlinable @inline(__always)
-    public static func linSpace<
-      T: FloatingPoint & TensorFlowScalar,
-      Tidx: TensorFlowIndex
-    >(
-      start: Tensor<T>,
-      stop: Tensor<T>,
-      num: Tensor<Tidx>
-    ) -> Tensor<T> {
-      switch commonBackend(
-        commonBackend(start.handle.backend, stop.handle.backend), num.handle.backend)
-      {
-      case .XLA:
-        return _RawXLA.linSpace(start: start, stop: stop, num: num)
-      case .TF_EAGER:
-        return _RawTFEager.linSpace(start: start, stop: stop, num: num)
-      }
-
-    }
-
-    /// Computes the difference between two lists of numbers or strings.
-    ///
-    /// Given a list `x` and a list `y`, this operation returns a list `out` that
-    /// represents all values that are in `x` but not in `y`. The returned list `out`
-    /// is sorted in the same order that the numbers appear in `x` (duplicates are
-    /// preserved). This operation also returns a list `idx` that represents the
-    /// position of each `out` element in `x`. In other words:
-    ///
-    /// `out[i] = x[idx[i]] for i in [0, 1, ..., len(out) - 1]`
-    ///
-    /// For example, given this input:
-    ///
-    /// ```
-    /// x = [1, 2, 3, 4, 5, 6]
-    /// y = [1, 3, 5]
-    /// ```
-    ///
-    /// This operation would return:
-    ///
-    /// ```
-    /// out ==> [2, 4, 6]
-    /// idx ==> [1, 3, 5]
-    /// ```
-    ///
-    /// - Parameters:
-    ///   - x: 1-D. Values to keep.
-    ///   - y: 1-D. Values to remove.
-    ///
-    /// - Outputs:
-    ///   - out: 1-D. Values present in `x` but not in `y`.
-    ///   - idx: 1-D. Positions of `x` values preserved in `out`.
-    @inlinable @inline(__always)
-    public static func listDiff<
-      T: TensorFlowScalar,
-      OutIdx: TensorFlowIndex
-    >(
-      _ x: Tensor<T>,
-      _ y: Tensor<T>
-    ) -> (out: Tensor<T>, idx: Tensor<OutIdx>) {
-      _RawTFEager.listDiff(x, y)
-    }
-
-    @inlinable @inline(__always)
-    public static func listInput<T: TensorFlowScalar>(
-      _ a: [Tensor<T>]
-    ) {
-      _RawTFEager.listInput(a)
-    }
-
-    @inlinable @inline(__always)
-    public static func listOutput<T: TensorGroup>() -> T {
-      _RawTFEager.listOutput()
-    }
-
-    /// Loads a 2-D (matrix) `Tensor` with name `old_tensor_name` from the checkpoint
-    ///
-    /// at `ckpt_path` and potentially reorders its rows and columns using the
-    /// specified remappings.
-    ///
-    /// Most users should use one of the wrapper initializers (such as
-    /// `tf.contrib.framework.load_and_remap_matrix_initializer`) instead of this
-    /// function directly.
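A usage sketch for `linSpace` above, matching its docstring's example; the step between consecutive values is `(stop - start) / (num - 1)`:

```swift
import TensorFlow

// Three evenly spaced values from 10 to 12 inclusive: step = 2 / 2 = 1.
let ramp = _Raw.linSpace(
  start: Tensor<Float>(10), stop: Tensor<Float>(12), num: Tensor<Int32>(3))
print(ramp)  // [10.0, 11.0, 12.0]
```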
- /// - /// The remappings are 1-D tensors with the following properties: - /// - /// * `row_remapping` must have exactly `num_rows` entries. Row `i` of the output - /// matrix will be initialized from the row corresponding to index - /// `row_remapping[i]` in the old `Tensor` from the checkpoint. - /// * `col_remapping` must have either 0 entries (indicating that no column - /// reordering is needed) or `num_cols` entries. If specified, column `j` of the - /// output matrix will be initialized from the column corresponding to index - /// `col_remapping[j]` in the old `Tensor` from the checkpoint. - /// * A value of -1 in either of the remappings signifies a "missing" entry. In that - /// case, values from the `initializing_values` tensor will be used to fill that - /// missing row or column. If `row_remapping` has `r` missing entries and - /// `col_remapping` has `c` missing entries, then the following condition must be - /// true: - /// - /// `(r * num_cols) + (c * num_rows) - (r * c) == len(initializing_values)` - /// - /// The remapping tensors can be generated using the GenerateVocabRemapping op. - /// - /// As an example, with row_remapping = [1, 0, -1], col_remapping = [0, 2, -1], - /// initializing_values = [0.5, -0.5, 0.25, -0.25, 42], and w(i, j) representing - /// the value from row i, column j of the old tensor in the checkpoint, the output - /// matrix will look like the following: - /// - /// [[w(1, 0), w(1, 2), 0.5], - /// [w(0, 0), w(0, 2), -0.5], - /// [0.25, -0.25, 42]] - /// - /// - Parameters: - /// - ckpt_path: Path to the TensorFlow checkpoint (version 2, `TensorBundle`) from - /// which the old matrix `Tensor` will be loaded. - /// - old_tensor_name: Name of the 2-D `Tensor` to load from checkpoint. - /// - row_remapping: An int `Tensor` of row remappings (generally created by - /// `generate_vocab_remapping`). Even if no row remapping is needed, this must - /// still be an index-valued Tensor (e.g. [0, 1, 2, ...]), or a shifted - /// index-valued `Tensor` (e.g. [8, 9, 10, ...], for partitioned `Variables`). - /// - col_remapping: An int `Tensor` of column remappings (generally created by - /// `generate_vocab_remapping`). May be a size-0 `Tensor` if only row remapping - /// is to be done (e.g. column ordering is the same). - /// - initializing_values: A float `Tensor` containing values to fill in for cells - /// in the output matrix that are not loaded from the checkpoint. Length must be - /// exactly the same as the number of missing / new cells. - /// - /// - Attrs: - /// - num_rows: Number of rows (length of the 1st dimension) in the output matrix. - /// - num_cols: Number of columns (length of the 2nd dimension) in the output matrix. - /// - max_rows_in_memory: The maximum number of rows to load from the checkpoint at - /// once. If less than or equal to 0, the entire matrix will be loaded into - /// memory. Setting this arg trades increased disk reads for lower memory usage. - /// - /// - Output output_matrix: Output matrix containing existing values loaded from the - /// checkpoint, and with any missing values filled in from initializing_values. 
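The sizing rule quoted above for `initializing_values` is inclusion-exclusion over the missing cells. A small sketch that checks it against the docstring's example; the helper function is hypothetical:

```swift
// r missing rows contribute r * numCols cells, c missing columns contribute
// c * numRows, and the r * c corner cells would otherwise be counted twice.
func requiredInitializingValues(numRows: Int, numCols: Int, r: Int, c: Int) -> Int {
  r * numCols + c * numRows - r * c
}

// row_remapping = [1, 0, -1], col_remapping = [0, 2, -1]: one missing row,
// one missing column, 3x3 output => 5 values ([0.5, -0.5, 0.25, -0.25, 42]).
print(requiredInitializingValues(numRows: 3, numCols: 3, r: 1, c: 1))  // 5
```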
- @inlinable @inline(__always) - public static func loadAndRemapMatrix( - ckptPath: StringTensor, - oldTensorName: StringTensor, - rowRemapping: Tensor, - colRemapping: Tensor, - initializingValues: Tensor, - numRows: Int64, - numCols: Int64, - maxRowsInMemory: Int64 = -1 - ) -> Tensor { - switch commonBackend( - commonBackend(rowRemapping.handle.backend, colRemapping.handle.backend), - initializingValues.handle.backend) - { - case .XLA: - let output_device = initializingValues.device - let rowRemapping = Tensor(copying: rowRemapping, to: .defaultTFEager) - let colRemapping = Tensor(copying: colRemapping, to: .defaultTFEager) - let initializingValues = Tensor(copying: initializingValues, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.loadAndRemapMatrix( - ckptPath: ckptPath, oldTensorName: oldTensorName, rowRemapping: rowRemapping, - colRemapping: colRemapping, initializingValues: initializingValues, numRows: numRows, - numCols: numCols, maxRowsInMemory: maxRowsInMemory), to: output_device) - case .TF_EAGER: - return _RawTFEager.loadAndRemapMatrix( - ckptPath: ckptPath, oldTensorName: oldTensorName, rowRemapping: rowRemapping, - colRemapping: colRemapping, initializingValues: initializingValues, numRows: numRows, - numCols: numCols, maxRowsInMemory: maxRowsInMemory) - } - - } - - /// Load ADAM embedding parameters. - /// - /// An op that loads optimization parameters into HBM for embedding. Must be - /// preceded by a ConfigureTPUEmbeddingHost op that sets up the correct - /// embedding table configuration. For example, this op is used to install - /// parameters that are loaded from a checkpoint before a training loop is - /// executed. - /// - /// - Parameters: - /// - parameters: Value of parameters used in the ADAM optimization algorithm. - /// - momenta: Value of momenta used in the ADAM optimization algorithm. - /// - velocities: Value of velocities used in the ADAM optimization algorithm. - @inlinable @inline(__always) - public static func loadTPUEmbeddingADAMParameters( - parameters: Tensor, - momenta: Tensor, - velocities: Tensor, - tableId: Int64 = -1, - tableName: String, - numShards: Int64, - shardId: Int64, - config: String - ) { - _RawTFEager.loadTPUEmbeddingADAMParameters( - parameters: parameters, momenta: momenta, velocities: velocities, tableId: tableId, - tableName: tableName, numShards: numShards, shardId: shardId, config: config) - } - - /// Load ADAM embedding parameters with debug support. - /// - /// An op that loads optimization parameters into HBM for embedding. Must be - /// preceded by a ConfigureTPUEmbeddingHost op that sets up the correct - /// embedding table configuration. For example, this op is used to install - /// parameters that are loaded from a checkpoint before a training loop is - /// executed. - /// - /// - Parameters: - /// - parameters: Value of parameters used in the ADAM optimization algorithm. - /// - momenta: Value of momenta used in the ADAM optimization algorithm. - /// - velocities: Value of velocities used in the ADAM optimization algorithm. - /// - gradient_accumulators: Value of gradient_accumulators used in the ADAM optimization algorithm. 
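All of the `loadTPUEmbedding*` loaders in this family are eager-only and share the same shape: one tensor per optimizer slot plus the table and shard attributes. A usage sketch for the ADAM variant; the table name, shapes, and shard layout are illustrative:

```swift
import TensorFlow

// Install zero-initialized ADAM slot variables for a single-shard table.
let shape = TensorShape([1024, 64])
_Raw.loadTPUEmbeddingADAMParameters(
  parameters: Tensor<Float>(zeros: shape),
  momenta: Tensor<Float>(zeros: shape),
  velocities: Tensor<Float>(zeros: shape),
  tableName: "table_0", numShards: 1, shardId: 0, config: "")
```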
- @inlinable @inline(__always) - public static func loadTPUEmbeddingADAMParametersGradAccumDebug( - parameters: Tensor, - momenta: Tensor, - velocities: Tensor, - gradientAccumulators: Tensor, - tableId: Int64 = -1, - tableName: String, - numShards: Int64, - shardId: Int64, - config: String - ) { - _RawTFEager.loadTPUEmbeddingADAMParametersGradAccumDebug( - parameters: parameters, momenta: momenta, velocities: velocities, - gradientAccumulators: gradientAccumulators, tableId: tableId, tableName: tableName, - numShards: numShards, shardId: shardId, config: config) - } - - /// Load Adadelta embedding parameters. - /// - /// An op that loads optimization parameters into HBM for embedding. Must be - /// preceded by a ConfigureTPUEmbeddingHost op that sets up the correct - /// embedding table configuration. For example, this op is used to install - /// parameters that are loaded from a checkpoint before a training loop is - /// executed. - /// - /// - Parameters: - /// - parameters: Value of parameters used in the Adadelta optimization algorithm. - /// - accumulators: Value of accumulators used in the Adadelta optimization algorithm. - /// - updates: Value of updates used in the Adadelta optimization algorithm. - @inlinable @inline(__always) - public static func loadTPUEmbeddingAdadeltaParameters( - parameters: Tensor, - accumulators: Tensor, - updates: Tensor, - tableId: Int64 = -1, - tableName: String, - numShards: Int64, - shardId: Int64, - config: String - ) { - _RawTFEager.loadTPUEmbeddingAdadeltaParameters( - parameters: parameters, accumulators: accumulators, updates: updates, tableId: tableId, - tableName: tableName, numShards: numShards, shardId: shardId, config: config) - } - - /// Load Adadelta parameters with debug support. - /// - /// An op that loads optimization parameters into HBM for embedding. Must be - /// preceded by a ConfigureTPUEmbeddingHost op that sets up the correct - /// embedding table configuration. For example, this op is used to install - /// parameters that are loaded from a checkpoint before a training loop is - /// executed. - /// - /// - Parameters: - /// - parameters: Value of parameters used in the Adadelta optimization algorithm. - /// - accumulators: Value of accumulators used in the Adadelta optimization algorithm. - /// - updates: Value of updates used in the Adadelta optimization algorithm. - /// - gradient_accumulators: Value of gradient_accumulators used in the Adadelta optimization algorithm. - @inlinable @inline(__always) - public static func loadTPUEmbeddingAdadeltaParametersGradAccumDebug( - parameters: Tensor, - accumulators: Tensor, - updates: Tensor, - gradientAccumulators: Tensor, - tableId: Int64 = -1, - tableName: String, - numShards: Int64, - shardId: Int64, - config: String - ) { - _RawTFEager.loadTPUEmbeddingAdadeltaParametersGradAccumDebug( - parameters: parameters, accumulators: accumulators, updates: updates, - gradientAccumulators: gradientAccumulators, tableId: tableId, tableName: tableName, - numShards: numShards, shardId: shardId, config: config) - } - - /// Load Adagrad embedding parameters. - /// - /// An op that loads optimization parameters into HBM for embedding. Must be - /// preceded by a ConfigureTPUEmbeddingHost op that sets up the correct - /// embedding table configuration. For example, this op is used to install - /// parameters that are loaded from a checkpoint before a training loop is - /// executed. - /// - /// - Parameters: - /// - parameters: Value of parameters used in the Adagrad optimization algorithm. 
- /// - accumulators: Value of accumulators used in the Adagrad optimization algorithm. - @inlinable @inline(__always) - public static func loadTPUEmbeddingAdagradParameters( - parameters: Tensor, - accumulators: Tensor, - tableId: Int64 = -1, - tableName: String, - numShards: Int64, - shardId: Int64, - config: String - ) { - _RawTFEager.loadTPUEmbeddingAdagradParameters( - parameters: parameters, accumulators: accumulators, tableId: tableId, tableName: tableName, - numShards: numShards, shardId: shardId, config: config) - } - - /// Load Adagrad embedding parameters with debug support. - /// - /// An op that loads optimization parameters into HBM for embedding. Must be - /// preceded by a ConfigureTPUEmbeddingHost op that sets up the correct - /// embedding table configuration. For example, this op is used to install - /// parameters that are loaded from a checkpoint before a training loop is - /// executed. - /// - /// - Parameters: - /// - parameters: Value of parameters used in the Adagrad optimization algorithm. - /// - accumulators: Value of accumulators used in the Adagrad optimization algorithm. - /// - gradient_accumulators: Value of gradient_accumulators used in the Adagrad optimization algorithm. - @inlinable @inline(__always) - public static func loadTPUEmbeddingAdagradParametersGradAccumDebug( - parameters: Tensor, - accumulators: Tensor, - gradientAccumulators: Tensor, - tableId: Int64 = -1, - tableName: String, - numShards: Int64, - shardId: Int64, - config: String - ) { - _RawTFEager.loadTPUEmbeddingAdagradParametersGradAccumDebug( - parameters: parameters, accumulators: accumulators, - gradientAccumulators: gradientAccumulators, tableId: tableId, tableName: tableName, - numShards: numShards, shardId: shardId, config: config) - } - - /// Load centered RMSProp embedding parameters. - /// - /// An op that loads optimization parameters into HBM for embedding. Must be - /// preceded by a ConfigureTPUEmbeddingHost op that sets up the correct - /// embedding table configuration. For example, this op is used to install - /// parameters that are loaded from a checkpoint before a training loop is - /// executed. - /// - /// - Parameters: - /// - parameters: Value of parameters used in the centered RMSProp optimization algorithm. - /// - ms: Value of ms used in the centered RMSProp optimization algorithm. - /// - mom: Value of mom used in the centered RMSProp optimization algorithm. - /// - mg: Value of mg used in the centered RMSProp optimization algorithm. - @inlinable @inline(__always) - public static func loadTPUEmbeddingCenteredRMSPropParameters( - parameters: Tensor, - ms: Tensor, - mom: Tensor, - mg: Tensor, - tableId: Int64 = -1, - tableName: String, - numShards: Int64, - shardId: Int64, - config: String - ) { - _RawTFEager.loadTPUEmbeddingCenteredRMSPropParameters( - parameters: parameters, ms: ms, mom: mom, mg: mg, tableId: tableId, tableName: tableName, - numShards: numShards, shardId: shardId, config: config) - } - - /// Load FTRL embedding parameters. - /// - /// An op that loads optimization parameters into HBM for embedding. Must be - /// preceded by a ConfigureTPUEmbeddingHost op that sets up the correct - /// embedding table configuration. For example, this op is used to install - /// parameters that are loaded from a checkpoint before a training loop is - /// executed. - /// - /// - Parameters: - /// - parameters: Value of parameters used in the FTRL optimization algorithm. - /// - accumulators: Value of accumulators used in the FTRL optimization algorithm. 
- /// - linears: Value of linears used in the FTRL optimization algorithm. - @inlinable @inline(__always) - public static func loadTPUEmbeddingFTRLParameters( - parameters: Tensor, - accumulators: Tensor, - linears: Tensor, - tableId: Int64 = -1, - tableName: String, - numShards: Int64, - shardId: Int64, - config: String - ) { - _RawTFEager.loadTPUEmbeddingFTRLParameters( - parameters: parameters, accumulators: accumulators, linears: linears, tableId: tableId, - tableName: tableName, numShards: numShards, shardId: shardId, config: config) - } - - /// Load FTRL embedding parameters with debug support. - /// - /// An op that loads optimization parameters into HBM for embedding. Must be - /// preceded by a ConfigureTPUEmbeddingHost op that sets up the correct - /// embedding table configuration. For example, this op is used to install - /// parameters that are loaded from a checkpoint before a training loop is - /// executed. - /// - /// - Parameters: - /// - parameters: Value of parameters used in the FTRL optimization algorithm. - /// - accumulators: Value of accumulators used in the FTRL optimization algorithm. - /// - linears: Value of linears used in the FTRL optimization algorithm. - /// - gradient_accumulators: Value of gradient_accumulators used in the FTRL optimization algorithm. - @inlinable @inline(__always) - public static func loadTPUEmbeddingFTRLParametersGradAccumDebug( - parameters: Tensor, - accumulators: Tensor, - linears: Tensor, - gradientAccumulators: Tensor, - tableId: Int64 = -1, - tableName: String, - numShards: Int64, - shardId: Int64, - config: String - ) { - _RawTFEager.loadTPUEmbeddingFTRLParametersGradAccumDebug( - parameters: parameters, accumulators: accumulators, linears: linears, - gradientAccumulators: gradientAccumulators, tableId: tableId, tableName: tableName, - numShards: numShards, shardId: shardId, config: config) - } - - /// Load MDL Adagrad Light embedding parameters. - /// - /// An op that loads optimization parameters into HBM for embedding. Must be - /// preceded by a ConfigureTPUEmbeddingHost op that sets up the correct - /// embedding table configuration. For example, this op is used to install - /// parameters that are loaded from a checkpoint before a training loop is - /// executed. - /// - /// - Parameters: - /// - parameters: Value of parameters used in the MDL Adagrad Light optimization algorithm. - /// - accumulators: Value of accumulators used in the MDL Adagrad Light optimization algorithm. - /// - weights: Value of weights used in the MDL Adagrad Light optimization algorithm. - /// - benefits: Value of benefits used in the MDL Adagrad Light optimization algorithm. - @inlinable @inline(__always) - public static func loadTPUEmbeddingMDLAdagradLightParameters( - parameters: Tensor, - accumulators: Tensor, - weights: Tensor, - benefits: Tensor, - tableId: Int64 = -1, - tableName: String, - numShards: Int64, - shardId: Int64, - config: String - ) { - _RawTFEager.loadTPUEmbeddingMDLAdagradLightParameters( - parameters: parameters, accumulators: accumulators, weights: weights, benefits: benefits, - tableId: tableId, tableName: tableName, numShards: numShards, shardId: shardId, - config: config) - } - - /// Load Momentum embedding parameters. - /// - /// An op that loads optimization parameters into HBM for embedding. Must be - /// preceded by a ConfigureTPUEmbeddingHost op that sets up the correct - /// embedding table configuration. 
For example, this op is used to install - /// parameters that are loaded from a checkpoint before a training loop is - /// executed. - /// - /// - Parameters: - /// - parameters: Value of parameters used in the Momentum optimization algorithm. - /// - momenta: Value of momenta used in the Momentum optimization algorithm. - @inlinable @inline(__always) - public static func loadTPUEmbeddingMomentumParameters( - parameters: Tensor, - momenta: Tensor, - tableId: Int64 = -1, - tableName: String, - numShards: Int64, - shardId: Int64, - config: String - ) { - _RawTFEager.loadTPUEmbeddingMomentumParameters( - parameters: parameters, momenta: momenta, tableId: tableId, tableName: tableName, - numShards: numShards, shardId: shardId, config: config) - } - - /// Load Momentum embedding parameters with debug support. - /// - /// An op that loads optimization parameters into HBM for embedding. Must be - /// preceded by a ConfigureTPUEmbeddingHost op that sets up the correct - /// embedding table configuration. For example, this op is used to install - /// parameters that are loaded from a checkpoint before a training loop is - /// executed. - /// - /// - Parameters: - /// - parameters: Value of parameters used in the Momentum optimization algorithm. - /// - momenta: Value of momenta used in the Momentum optimization algorithm. - /// - gradient_accumulators: Value of gradient_accumulators used in the Momentum optimization algorithm. - @inlinable @inline(__always) - public static func loadTPUEmbeddingMomentumParametersGradAccumDebug( - parameters: Tensor, - momenta: Tensor, - gradientAccumulators: Tensor, - tableId: Int64 = -1, - tableName: String, - numShards: Int64, - shardId: Int64, - config: String - ) { - _RawTFEager.loadTPUEmbeddingMomentumParametersGradAccumDebug( - parameters: parameters, momenta: momenta, gradientAccumulators: gradientAccumulators, - tableId: tableId, tableName: tableName, numShards: numShards, shardId: shardId, - config: config) - } - - /// Load proximal Adagrad embedding parameters. - /// - /// An op that loads optimization parameters into HBM for embedding. Must be - /// preceded by a ConfigureTPUEmbeddingHost op that sets up the correct - /// embedding table configuration. For example, this op is used to install - /// parameters that are loaded from a checkpoint before a training loop is - /// executed. - /// - /// - Parameters: - /// - parameters: Value of parameters used in the proximal Adagrad optimization algorithm. - /// - accumulators: Value of accumulators used in the proximal Adagrad optimization algorithm. - @inlinable @inline(__always) - public static func loadTPUEmbeddingProximalAdagradParameters( - parameters: Tensor, - accumulators: Tensor, - tableId: Int64 = -1, - tableName: String, - numShards: Int64, - shardId: Int64, - config: String - ) { - _RawTFEager.loadTPUEmbeddingProximalAdagradParameters( - parameters: parameters, accumulators: accumulators, tableId: tableId, tableName: tableName, - numShards: numShards, shardId: shardId, config: config) - } - - /// Load proximal Adagrad embedding parameters with debug support. - /// - /// An op that loads optimization parameters into HBM for embedding. Must be - /// preceded by a ConfigureTPUEmbeddingHost op that sets up the correct - /// embedding table configuration. For example, this op is used to install - /// parameters that are loaded from a checkpoint before a training loop is - /// executed. - /// - /// - Parameters: - /// - parameters: Value of parameters used in the proximal Adagrad optimization algorithm. 
- /// - accumulators: Value of accumulators used in the proximal Adagrad optimization algorithm. - /// - gradient_accumulators: Value of gradient_accumulators used in the proximal Adagrad optimization algorithm. - @inlinable @inline(__always) - public static func loadTPUEmbeddingProximalAdagradParametersGradAccumDebug( - parameters: Tensor, - accumulators: Tensor, - gradientAccumulators: Tensor, - tableId: Int64 = -1, - tableName: String, - numShards: Int64, - shardId: Int64, - config: String - ) { - _RawTFEager.loadTPUEmbeddingProximalAdagradParametersGradAccumDebug( - parameters: parameters, accumulators: accumulators, - gradientAccumulators: gradientAccumulators, tableId: tableId, tableName: tableName, - numShards: numShards, shardId: shardId, config: config) - } - - /// Load RMSProp embedding parameters. - /// - /// An op that loads optimization parameters into HBM for embedding. Must be - /// preceded by a ConfigureTPUEmbeddingHost op that sets up the correct - /// embedding table configuration. For example, this op is used to install - /// parameters that are loaded from a checkpoint before a training loop is - /// executed. - /// - /// - Parameters: - /// - parameters: Value of parameters used in the RMSProp optimization algorithm. - /// - ms: Value of ms used in the RMSProp optimization algorithm. - /// - mom: Value of mom used in the RMSProp optimization algorithm. - @inlinable @inline(__always) - public static func loadTPUEmbeddingRMSPropParameters( - parameters: Tensor, - ms: Tensor, - mom: Tensor, - tableId: Int64 = -1, - tableName: String, - numShards: Int64, - shardId: Int64, - config: String - ) { - _RawTFEager.loadTPUEmbeddingRMSPropParameters( - parameters: parameters, ms: ms, mom: mom, tableId: tableId, tableName: tableName, - numShards: numShards, shardId: shardId, config: config) - } - - /// Load RMSProp embedding parameters with debug support. - /// - /// An op that loads optimization parameters into HBM for embedding. Must be - /// preceded by a ConfigureTPUEmbeddingHost op that sets up the correct - /// embedding table configuration. For example, this op is used to install - /// parameters that are loaded from a checkpoint before a training loop is - /// executed. - /// - /// - Parameters: - /// - parameters: Value of parameters used in the RMSProp optimization algorithm. - /// - ms: Value of ms used in the RMSProp optimization algorithm. - /// - mom: Value of mom used in the RMSProp optimization algorithm. - /// - gradient_accumulators: Value of gradient_accumulators used in the RMSProp optimization algorithm. - @inlinable @inline(__always) - public static func loadTPUEmbeddingRMSPropParametersGradAccumDebug( - parameters: Tensor, - ms: Tensor, - mom: Tensor, - gradientAccumulators: Tensor, - tableId: Int64 = -1, - tableName: String, - numShards: Int64, - shardId: Int64, - config: String - ) { - _RawTFEager.loadTPUEmbeddingRMSPropParametersGradAccumDebug( - parameters: parameters, ms: ms, mom: mom, gradientAccumulators: gradientAccumulators, - tableId: tableId, tableName: tableName, numShards: numShards, shardId: shardId, - config: config) - } - - /// Load SGD embedding parameters. - /// - /// An op that loads optimization parameters into HBM for embedding. Must be - /// preceded by a ConfigureTPUEmbeddingHost op that sets up the correct - /// embedding table configuration. For example, this op is used to install - /// parameters that are loaded from a checkpoint before a training loop is - /// executed. 
- /// - /// - Parameter parameters: Value of parameters used in the stochastic gradient descent optimization algorithm. - @inlinable @inline(__always) - public static func loadTPUEmbeddingStochasticGradientDescentParameters( - parameters: Tensor, - tableId: Int64 = -1, - tableName: String, - numShards: Int64, - shardId: Int64, - config: String - ) { - _RawTFEager.loadTPUEmbeddingStochasticGradientDescentParameters( - parameters: parameters, tableId: tableId, tableName: tableName, numShards: numShards, - shardId: shardId, config: config) - } - - /// Computes natural logarithm of x element-wise. - /// - /// I.e., \\(y = \log_e x\\). - /// - /// Example: - /// - /// ```python - /// x = tf.constant([0, 0.5, 1, 5]) - /// tf.math.log(x) ==> [-inf, -0.6931472, 0. , 1.609438] - /// ``` - @inlinable @inline(__always) - public static func log( - _ x: Tensor - ) -> Tensor { - switch x.handle.backend { - case .XLA: - return _RawXLA.log(x) - case .TF_EAGER: - return _RawTFEager.log(x) - } - - } - - /// Computes natural logarithm of (1 + x) element-wise. - /// - /// I.e., \\(y = \log_e (1 + x)\\). - /// - /// Example: - /// - /// ```python - /// x = tf.constant([0, 0.5, 1, 5]) - /// tf.math.log1p(x) ==> [0., 0.4054651, 0.6931472, 1.7917595] - /// ``` - @inlinable @inline(__always) - public static func log1p( - _ x: Tensor - ) -> Tensor { - switch x.handle.backend { - case .XLA: - return _RawXLA.log1p(x) - case .TF_EAGER: - return _RawTFEager.log1p(x) - } - - } - - /// Computes the sign and the log of the absolute value of the determinant of - /// - /// one or more square matrices. - /// - /// The input is a tensor of shape `[N, M, M]` whose inner-most 2 dimensions - /// form square matrices. The outputs are two tensors containing the signs and - /// absolute values of the log determinants for all N input submatrices - /// `[..., :, :]` such that the determinant = sign*exp(log_abs_determinant). - /// The log_abs_determinant is computed as det(P)*sum(log(diag(LU))) where LU - /// is the LU decomposition of the input and P is the corresponding - /// permutation matrix. - /// - /// - Parameter input: Shape is `[N, M, M]`. - /// - /// - Outputs: - /// - sign: The signs of the log determinants of the inputs. Shape is `[N]`. - /// - log_abs_determinant: The logs of the absolute values of the determinants - /// of the N input matrices. Shape is `[N]`. - @inlinable @inline(__always) - public static func logMatrixDeterminant( - _ input: Tensor - ) -> (sign: Tensor, logAbsDeterminant: Tensor) { - _RawTFEager.logMatrixDeterminant(input) - } - - /// Computes log softmax activations. - /// - /// For each batch `i` and class `j` we have - /// - /// logsoftmax[i, j] = logits[i, j] - log(sum(exp(logits[i]))) - /// - /// - Parameter logits: 2-D with shape `[batch_size, num_classes]`. - /// - /// - Output logsoftmax: Same shape as `logits`. - @inlinable @inline(__always) - public static func logSoftmax( - logits: Tensor - ) -> Tensor { - switch logits.handle.backend { - case .XLA: - return _RawXLA.logSoftmax(logits: logits) - case .TF_EAGER: - return _RawTFEager.logSoftmax(logits: logits) - } - - } - - /// Generates labels for candidate sampling with a log-uniform distribution. - /// - /// See explanations of candidate sampling and the data formats at - /// go/candidate-sampling. - /// - /// For each batch, this op picks a single set of sampled candidate labels. - /// - /// The advantages of sampling candidates per-batch are simplicity and the - /// possibility of efficient dense matrix multiplication. 
The disadvantage is that
-    /// the sampled candidates must be chosen independently of the context and of the
-    /// true labels.
-    ///
-    /// - Parameter true_classes: A batch_size * num_true matrix, in which each row contains the
-    ///   IDs of the num_true target_classes in the corresponding original label.
-    ///
-    /// - Attrs:
-    ///   - num_true: Number of true labels per context.
-    ///   - num_sampled: Number of candidates to randomly sample.
-    ///   - unique: If unique is true, we sample with rejection, so that all sampled
-    ///     candidates in a batch are unique. This requires some approximation to
-    ///     estimate the post-rejection sampling probabilities.
-    ///   - range_max: The sampler will sample integers from the interval [0, range_max).
-    ///   - seed: If either seed or seed2 are set to be non-zero, the random number
-    ///     generator is seeded by the given seed. Otherwise, it is seeded by a
-    ///     random seed.
-    ///   - seed2: A second seed to avoid seed collision.
-    ///
-    /// - Outputs:
-    ///   - sampled_candidates: A vector of length num_sampled, in which each element is
-    ///     the ID of a sampled candidate.
-    ///   - true_expected_count: A batch_size * num_true matrix, representing
-    ///     the number of times each candidate is expected to occur in a batch
-    ///     of sampled candidates. If unique=true, then this is a probability.
-    ///   - sampled_expected_count: A vector of length num_sampled, for each sampled
-    ///     candidate representing the number of times the candidate is expected
-    ///     to occur in a batch of sampled candidates. If unique=true, then this is a
-    ///     probability.
-    @inlinable @inline(__always)
-    public static func logUniformCandidateSampler(
-      trueClasses: Tensor<Int64>,
-      numTrue: Int64,
-      numSampled: Int64,
-      unique: Bool,
-      rangeMax: Int64,
-      seed: Int64 = 0,
-      seed2: Int64 = 0
-    ) -> (
-      sampledCandidates: Tensor<Int64>, trueExpectedCount: Tensor<Float>,
-      sampledExpectedCount: Tensor<Float>
-    ) {
-      _RawTFEager.logUniformCandidateSampler(
-        trueClasses: trueClasses, numTrue: numTrue, numSampled: numSampled, unique: unique,
-        rangeMax: rangeMax, seed: seed, seed2: seed2)
-    }
-
-    /// Returns the truth value of x AND y element-wise.
-    ///
-    /// *NOTE*: `LogicalAnd` supports broadcasting. More about broadcasting
-    /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
-    @inlinable @inline(__always)
-    public static func logicalAnd(
-      _ x: Tensor<Bool>,
-      _ y: Tensor<Bool>
-    ) -> Tensor<Bool> {
-      switch commonBackend(x.handle.backend, y.handle.backend) {
-      case .XLA:
-        return _RawXLA.logicalAnd(x, y)
-      case .TF_EAGER:
-        return _RawTFEager.logicalAnd(x, y)
-      }
-
-    }
-
-    /// Returns the truth value of `NOT x` element-wise.
-    ///
-    /// - Parameter x: A `Tensor` of type `bool`.
-    ///
-    /// - Output y: A `Tensor` of type `bool` with the same shape as `x`. The logical negation of `x`.
-    @inlinable @inline(__always)
-    public static func logicalNot(
-      _ x: Tensor<Bool>
-    ) -> Tensor<Bool> {
-      switch x.handle.backend {
-      case .XLA:
-        return _RawXLA.logicalNot(x)
-      case .TF_EAGER:
-        return _RawTFEager.logicalNot(x)
-      }
-
-    }
-
-    /// Returns the truth value of x OR y element-wise.
-    ///
-    /// *NOTE*: `LogicalOr` supports broadcasting. More about broadcasting
-    /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
-    @inlinable @inline(__always)
-    public static func logicalOr(
-      _ x: Tensor<Bool>,
-      _ y: Tensor<Bool>
-    ) -> Tensor<Bool> {
-      switch commonBackend(x.handle.backend, y.handle.backend) {
-      case .XLA:
-        return _RawXLA.logicalOr(x, y)
-      case .TF_EAGER:
-        return _RawTFEager.logicalOr(x, y)
-      }
-
-    }
-
-    /// Outputs all keys and values in the table.
-    ///
-    /// - Parameter table_handle: Handle to the table.
-    ///
-    /// - Outputs:
-    ///   - keys: Vector of all keys present in the table.
-    ///   - values: Tensor of all values in the table. Indexed in parallel with `keys`.
-    @inlinable @inline(__always)
-    public static func lookupTableExportV2<
-      Tkeys: TensorFlowScalar,
-      Tvalues: TensorFlowScalar
-    >(
-      tableHandle: ResourceHandle
-    ) -> (keys: Tensor<Tkeys>, values: Tensor<Tvalues>) {
-      _RawTFEager.lookupTableExportV2(tableHandle: tableHandle)
-    }
-
-    /// Looks up keys in a table, outputs the corresponding values.
-    ///
-    /// The tensor `keys` must be of the same type as the keys of the table.
-    /// The output `values` is of the type of the table values.
-    ///
-    /// The scalar `default_value` is the value output for keys not present in the
-    /// table. It must also be of the same type as the table values.
-    ///
-    /// - Parameters:
-    ///   - table_handle: Handle to the table.
-    ///   - keys: Any shape. Keys to look up.
-    ///
-    /// - Output values: Same shape as `keys`. Values found in the table, or `default_values`
-    ///   for missing keys.
-    @inlinable @inline(__always)
-    public static func lookupTableFindV2<
-      Tin: TensorFlowScalar,
-      Tout: TensorFlowScalar
-    >(
-      tableHandle: ResourceHandle,
-      keys: Tensor<Tin>,
-      defaultValue: Tensor<Tout>
-    ) -> Tensor<Tout> {
-      switch commonBackend(keys.handle.backend, defaultValue.handle.backend) {
-      case .XLA:
-        let output_device = defaultValue.device
-        let keys = Tensor(copying: keys, to: .defaultTFEager)
-        let defaultValue = Tensor(copying: defaultValue, to: .defaultTFEager)
-        return Tensor(
-          copying: _RawTFEager.lookupTableFindV2(
-            tableHandle: tableHandle, keys: keys, defaultValue: defaultValue), to: output_device)
-      case .TF_EAGER:
-        return _RawTFEager.lookupTableFindV2(
-          tableHandle: tableHandle, keys: keys, defaultValue: defaultValue)
-      }
-
-    }
-
-    /// Replaces the contents of the table with the specified keys and values.
-    ///
-    /// The tensor `keys` must be of the same type as the keys of the table.
-    /// The tensor `values` must be of the type of the table values.
-    ///
-    /// - Parameters:
-    ///   - table_handle: Handle to the table.
-    ///   - keys: Any shape. Keys to look up.
-    ///   - values: Values to associate with keys.
-    @inlinable @inline(__always)
-    public static func lookupTableImportV2<
-      Tin: TensorFlowScalar,
-      Tout: TensorFlowScalar
-    >(
-      tableHandle: ResourceHandle,
-      keys: Tensor<Tin>,
-      _ values: Tensor<Tout>
-    ) {
-      _RawTFEager.lookupTableImportV2(tableHandle: tableHandle, keys: keys, values)
-    }
-
-    /// Updates the table to associate keys with values.
-    ///
-    /// The tensor `keys` must be of the same type as the keys of the table.
-    /// The tensor `values` must be of the type of the table values.
-    ///
-    /// - Parameters:
-    ///   - table_handle: Handle to the table.
-    ///   - keys: Any shape. Keys to look up.
-    ///   - values: Values to associate with keys.
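A usage sketch for the table ops above, assuming a hash-table resource created with the raw `hashTableV2` op; that op's signature is assumed from the same generated bindings, not shown in this patch:

```swift
import TensorFlow

// Build an Int64 -> Float table, bulk-load it, then look keys up with a
// scalar default value for misses.
let table = _Raw.hashTableV2(
  container: "", sharedName: "", useNodeNameSharing: false,
  keyDtype: Int64.tensorFlowDataType, valueDtype: Float.tensorFlowDataType)
_Raw.lookupTableImportV2(
  tableHandle: table, keys: Tensor<Int64>([1, 2]), Tensor<Float>([10, 20]))
let values: Tensor<Float> = _Raw.lookupTableFindV2(
  tableHandle: table, keys: Tensor<Int64>([2, 3]), defaultValue: Tensor<Float>(-1))
print(values)  // [20.0, -1.0]
```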
-    @inlinable @inline(__always)
-    public static func lookupTableInsertV2<
-      Tin: TensorFlowScalar,
-      Tout: TensorFlowScalar
-    >(
-      tableHandle: ResourceHandle,
-      keys: Tensor<Tin>,
-      _ values: Tensor<Tout>
-    ) {
-      _RawTFEager.lookupTableInsertV2(tableHandle: tableHandle, keys: keys, values)
-    }
-
-    /// Removes keys and their associated values from a table.
-    ///
-    /// The tensor `keys` must be of the same type as the keys of the table. Keys not
-    /// already in the table are silently ignored.
-    ///
-    /// - Parameters:
-    ///   - table_handle: Handle to the table.
-    ///   - keys: Any shape. Keys of the elements to remove.
-    @inlinable @inline(__always)
-    public static func lookupTableRemoveV2<Tin: TensorFlowScalar>(
-      tableHandle: ResourceHandle,
-      keys: Tensor<Tin>
-    ) {
-      _RawTFEager.lookupTableRemoveV2(tableHandle: tableHandle, keys: keys)
-    }
-
-    /// Computes the number of elements in the given table.
-    ///
-    /// - Parameter table_handle: Handle to the table.
-    ///
-    /// - Output size: Scalar that contains number of elements in the table.
-    @inlinable @inline(__always)
-    public static func lookupTableSizeV2(
-      tableHandle: ResourceHandle
-    ) -> Tensor<Int64> {
-      _RawTFEager.lookupTableSizeV2(tableHandle: tableHandle)
-    }
-
-    /// Forwards the input to the output.
-    ///
-    /// This operator represents the loop termination condition used by the
-    /// "pivot" switches of a loop.
-    ///
-    /// - Parameter input: A boolean scalar, representing the branch predicate of the Switch op.
-    ///
-    /// - Output output: The same tensor as `input`.
-    @inlinable @inline(__always)
-    public static func loopCond(
-      _ input: Tensor<Bool>
-    ) -> Tensor<Bool> {
-      switch input.handle.backend {
-      case .XLA:
-        let output_device = input.device
-        let input = Tensor(copying: input, to: .defaultTFEager)
-        return Tensor(copying: _RawTFEager.loopCond(input), to: output_device)
-      case .TF_EAGER:
-        return _RawTFEager.loopCond(input)
-      }
-
-    }
-
-    /// Applies lower_bound(sorted_search_values, values) along each row.
-    ///
-    /// Each set of rows with the same index in (sorted_inputs, values) is treated
-    /// independently. The resulting row is the equivalent of calling
-    /// `np.searchsorted(sorted_inputs, values, side='left')`.
-    ///
-    /// The result is not a global index to the entire
-    /// `Tensor`, but rather just the index in the last dimension.
-    ///
-    /// A 2-D example:
-    ///   sorted_sequence = [[0, 3, 9, 9, 10],
-    ///                      [1, 2, 3, 4, 5]]
-    ///   values = [[2, 4, 9],
-    ///             [0, 2, 6]]
-    ///
-    ///   result = LowerBound(sorted_sequence, values)
-    ///
-    ///   result == [[1, 2, 2],
-    ///              [0, 1, 5]]
-    ///
-    /// - Parameters:
-    ///   - sorted_inputs: 2-D Tensor where each row is ordered.
-    ///   - values: 2-D Tensor with the same number of rows as `sorted_search_values`. Contains
-    ///     the values that will be searched for in `sorted_search_values`.
-    ///
-    /// - Output output: A `Tensor` with the same shape as `values`. It contains the first scalar index
-    ///   into the last dimension where values can be inserted without changing the
-    ///   ordered property.
-    @inlinable @inline(__always)
-    public static func lowerBound<
-      T: TensorFlowScalar,
-      OutType: TensorFlowIndex
-    >(
-      sortedInputs: Tensor<T>,
-      _ values: Tensor<T>
-    ) -> Tensor<OutType> {
-      switch commonBackend(sortedInputs.handle.backend, values.handle.backend) {
-      case .XLA:
-        let output_device = values.device
-        let sortedInputs = Tensor(copying: sortedInputs, to: .defaultTFEager)
-        let values = Tensor(copying: values, to: .defaultTFEager)
-        return Tensor(
-          copying: _RawTFEager.lowerBound(sortedInputs: sortedInputs, values), to: output_device)
-      case .TF_EAGER:
-        return _RawTFEager.lowerBound(sortedInputs: sortedInputs, values)
-      }
-
-    }
-
-    /// Computes the LU decomposition of one or more square matrices.
-    ///
-    /// The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions
-    /// form square matrices.
-    ///
-    /// The input has to be invertible.
-    ///
-    /// The output consists of two tensors LU and P containing the LU decomposition
-    /// of all input submatrices `[..., :, :]`. LU encodes the lower triangular and
-    /// upper triangular factors.
-    ///
-    /// For each input submatrix of shape `[M, M]`, L is a lower triangular matrix of
-    /// shape `[M, M]` with unit diagonal whose entries correspond to the strictly lower
-    /// triangular part of LU. U is an upper triangular matrix of shape `[M, M]` whose
-    /// entries correspond to the upper triangular part, including the diagonal, of LU.
-    ///
-    /// P represents a permutation matrix encoded as a list of indices each between `0`
-    /// and `M-1`, inclusive. If P_mat denotes the permutation matrix corresponding to
-    /// P, then L, U and P satisfy P_mat * input = L * U.
-    ///
-    /// - Parameter input: A tensor of shape `[..., M, M]` whose inner-most 2 dimensions form matrices of
-    ///   size `[M, M]`.
-    ///
-    /// - Outputs:
-    ///   - lu: A tensor of shape `[..., M, M]` whose strictly lower triangular part denotes the
-    ///     lower triangular factor `L` with unit diagonal, and whose upper triangular part
-    ///     denotes the upper triangular factor `U`.
-    ///   - p: Permutation of the rows encoded as a list of indices in `0..M-1`. Shape is
-    ///     `[..., M]`.
-    ///     @compatibility(scipy)
-    ///     Similar to `scipy.linalg.lu`, except the triangular factors `L` and `U` are
-    ///     packed into a single tensor, the permutation is applied to `input` instead of
-    ///     the right hand side and the permutation `P` is returned as a list of indices
-    ///     instead of a permutation matrix.
-    ///     @end_compatibility
-    @inlinable @inline(__always)
-    public static func lu<
-      T: FloatingPoint & TensorFlowScalar,
-      OutputIdxType: TensorFlowIndex
-    >(
-      _ input: Tensor<T>
-    ) -> (lu: Tensor<T>, p: Tensor<OutputIdxType>) {
-      _RawTFEager.lu(input)
-    }
-
-    /// Makes a new iterator from the given `dataset` and stores it in `iterator`.
-    ///
-    /// This operation may be executed multiple times. Each execution will reset the
-    /// iterator in `iterator` to the first element of `dataset`.
-    @inlinable @inline(__always)
-    public static func makeIterator(
-      dataset: VariantHandle,
-      iterator: ResourceHandle
-    ) {
-      _RawTFEager.makeIterator(dataset: dataset, iterator: iterator)
-    }
-
-    /// Creates a dataset that fuses mapping with batching.
-    ///
-    /// Creates a dataset that applies `f` to the outputs of `input_dataset` and then
-    /// batches `batch_size` of them.
-    ///
-    /// Unlike a "MapDataset", which applies `f` sequentially, this dataset invokes up
-    /// to `batch_size * num_parallel_batches` copies of `f` in parallel.
- /// - /// - Parameters: - /// - input_dataset: A variant tensor representing the input dataset. - /// - other_arguments: A list of tensors, typically values that were captured when building a closure - /// for `f`. - /// - batch_size: A scalar representing the number of elements to accumulate in a - /// batch. It determines the number of concurrent invocations of `f` that process - /// elements from `input_dataset` in parallel. - /// - num_parallel_calls: A scalar representing the maximum number of parallel invocations of the `map_fn` - /// function. Applying the `map_fn` on consecutive input elements in parallel has - /// the potential to improve input pipeline throughput. - /// - drop_remainder: A scalar representing whether the last batch should be dropped in case its size - /// is smaller than desired. - /// - /// - Attr f: A function to apply to the outputs of `input_dataset`. - @inlinable @inline(__always) - public static func mapAndBatchDataset< - FIn: TensorGroup, - FOut: TensorGroup, - Targuments: TensorArrayProtocol - >( - inputDataset: VariantHandle, - otherArguments: Targuments, - batchSize: Tensor, - numParallelCalls: Tensor, - dropRemainder: Tensor, - f: (FIn) -> FOut, - outputTypes: [TensorDataType], - outputShapes: [TensorShape?], - preserveCardinality: Bool = false - ) -> VariantHandle { - _RawTFEager.mapAndBatchDataset( - inputDataset: inputDataset, otherArguments: otherArguments, batchSize: batchSize, - numParallelCalls: numParallelCalls, dropRemainder: dropRemainder, f: f, - outputTypes: outputTypes, outputShapes: outputShapes, - preserveCardinality: preserveCardinality) - } - - /// Op removes all elements in the underlying container. - @inlinable @inline(__always) - public static func mapClear( - capacity: Int64 = 0, - memoryLimit: Int64 = 0, - dtypes: [TensorDataType], - container: String, - sharedName: String - ) { - _RawTFEager.mapClear( - capacity: capacity, memoryLimit: memoryLimit, dtypes: dtypes, container: container, - sharedName: sharedName) - } - - /// Creates a dataset that applies `f` to the outputs of `input_dataset`. - @inlinable @inline(__always) - public static func mapDataset< - FIn: TensorGroup, - FOut: TensorGroup, - Targuments: TensorArrayProtocol - >( - inputDataset: VariantHandle, - otherArguments: Targuments, - f: (FIn) -> FOut, - outputTypes: [TensorDataType], - outputShapes: [TensorShape?], - useInterOpParallelism: Bool = true, - preserveCardinality: Bool = false - ) -> VariantHandle { - _RawTFEager.mapDataset( - inputDataset: inputDataset, otherArguments: otherArguments, f: f, outputTypes: outputTypes, - outputShapes: outputShapes, useInterOpParallelism: useInterOpParallelism, - preserveCardinality: preserveCardinality) - } - - /// Maps a function on the list of tensors unpacked from arguments on dimension 0. - /// The function given by `f` is assumed to be stateless, and is executed - /// concurrently on all the slices; up to batch_size (i.e. the size of the 0th - /// dimension of each argument) functions will be scheduled at once. - /// - /// The `max_intra_op_parallelism` attr, which defaults to 1, can be used to - /// limit the intra op parallelism. To limit inter-op parallelism, a user can - /// set a private threadpool on the dataset using `tf.data.Options`'s - /// `ThreadingOptions`. - /// - /// Note that this op is not exposed to users directly, but is invoked in tf.data - /// rewrites. 
- /// - /// - Parameters: - /// - arguments: A list of tensors whose types are `Targuments`, corresponding to the inputs - /// the function should be mapped over. - /// - captured_inputs: A list of tensors whose types are `Tcaptured`, corresponding to the captured - /// inputs of the defun. - /// - /// - Attrs: - /// - Targuments: A list of types. - /// - Tcaptured: A list of types. - /// - output_types: A list of types. - /// - output_shapes: A list of shapes. - /// - /// - Output output: A list of output tensors whose types are `output_types` and whose dimensions - /// 0 are the same as the dimensions 0 of the tensors in `arguments`, and whose - /// remaining dimensions correspond to those in `output_shapes`. - @inlinable @inline(__always) - public static func mapDefun< - Targuments: TensorArrayProtocol, - Tcaptured: TensorArrayProtocol, - OutputTypes: TensorGroup, - FIn: TensorGroup, - FOut: TensorGroup - >( - arguments: Targuments, - capturedInputs: Tcaptured, - outputShapes: [TensorShape?], - f: (FIn) -> FOut, - maxIntraOpParallelism: Int64 = 1 - ) -> OutputTypes { - _RawTFEager.mapDefun( - arguments: arguments, capturedInputs: capturedInputs, outputShapes: outputShapes, f: f, - maxIntraOpParallelism: maxIntraOpParallelism) - } - - /// Op returns the number of incomplete elements in the underlying container. - @inlinable @inline(__always) - public static func mapIncompleteSize( - capacity: Int64 = 0, - memoryLimit: Int64 = 0, - dtypes: [TensorDataType], - container: String, - sharedName: String - ) -> Tensor { - _RawTFEager.mapIncompleteSize( - capacity: capacity, memoryLimit: memoryLimit, dtypes: dtypes, container: container, - sharedName: sharedName) - } - - /// Op peeks at the values at the specified key. If the - /// - /// underlying container does not contain this key - /// this op will block until it does. 
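The `mapDefun` entry documented earlier in this hunk maps a stateless function over the dimension-0 slices of its arguments. The sketch below restates those semantics in plain Swift; it is serial, whereas the op schedules slices concurrently, and the helper name is hypothetical.

```swift
import TensorFlow

// Serial restatement of the MapDefun semantics: apply `f` to each
// dimension-0 slice independently, then restack the results. The real op
// runs up to batch_size of these invocations at once.
func mapOverLeadingDimension(
  _ argument: Tensor<Float>, _ f: (Tensor<Float>) -> Tensor<Float>
) -> Tensor<Float> {
  Tensor(stacking: argument.unstacked().map(f))
}

let batch = Tensor<Float>(shape: [2, 3], scalars: [1, 2, 3, 4, 5, 6])
let squared = mapOverLeadingDimension(batch) { $0 * $0 }  // [[1, 4, 9], [16, 25, 36]]
```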
- @inlinable @inline(__always) - public static func mapPeek( - key: Tensor, - indices: Tensor, - capacity: Int64 = 0, - memoryLimit: Int64 = 0, - container: String, - sharedName: String - ) -> Dtypes { - _RawTFEager.mapPeek( - key: key, indices: indices, capacity: capacity, memoryLimit: memoryLimit, - container: container, sharedName: sharedName) + } + + @inlinable @inline(__always) + public static func attr( + _ a: Int64 + ) { + _RawTFEager.attr(a) + } + + @inlinable @inline(__always) + public static func attrBool( + _ a: Bool + ) { + _RawTFEager.attrBool(a) + } + + @inlinable @inline(__always) + public static func attrBoolList( + _ a: [Bool] + ) { + _RawTFEager.attrBoolList(a) + } + + @inlinable @inline(__always) + public static func attrDefault( + _ a: String = "banana" + ) { + _RawTFEager.attrDefault(a) + } + + @inlinable @inline(__always) + public static func attrEmptyListDefault( + _ a: [Double] + ) { + _RawTFEager.attrEmptyListDefault(a) + } + + @inlinable @inline(__always) + public static func attrEnum( + _ a: A + ) { + _RawTFEager.attrEnum(a) + } + + @inlinable @inline(__always) + public static func attrEnumList( + _ a: [String] + ) { + _RawTFEager.attrEnumList(a) + } + + @inlinable @inline(__always) + public static func attrFloat( + _ a: Double + ) { + _RawTFEager.attrFloat(a) + } + + @inlinable @inline(__always) + public static func attrListDefault( + _ a: [Int32] = [5, 15] + ) { + _RawTFEager.attrListDefault(a) + } + + @inlinable @inline(__always) + public static func attrListMin( + _ a: [Int32] + ) { + _RawTFEager.attrListMin(a) + } + + @inlinable @inline(__always) + public static func attrListTypeDefault( + _ a: [Tensor], + _ b: [Tensor] + ) { + _RawTFEager.attrListTypeDefault(a, b) + } + + @inlinable @inline(__always) + public static func attrMin( + _ a: Int64 + ) { + _RawTFEager.attrMin(a) + } + + @inlinable @inline(__always) + public static func attrPartialShape( + _ a: TensorShape? + ) { + _RawTFEager.attrPartialShape(a) + } + + @inlinable @inline(__always) + public static func attrPartialShapeList( + _ a: [TensorShape?] + ) { + _RawTFEager.attrPartialShapeList(a) + } + + @inlinable @inline(__always) + public static func attrShape( + _ a: TensorShape? + ) { + _RawTFEager.attrShape(a) + } + + @inlinable @inline(__always) + public static func attrShapeList( + _ a: [TensorShape?] + ) { + _RawTFEager.attrShapeList(a) + } + + @inlinable @inline(__always) + public static func attrTypeDefault( + _ a: Tensor + ) { + _RawTFEager.attrTypeDefault(a) + } + + /// Audio Microfrontend Op. + /// + /// This Op converts a sequence of audio data into one or more + /// feature vectors containing filterbanks of the input. The + /// conversion process uses a lightweight library to perform: + /// + /// 1. A slicing window function + /// 2. Short-time FFTs + /// 3. Filterbank calculations + /// 4. Noise reduction + /// 5. PCAN Auto Gain Control + /// 6. Logarithmic scaling + /// + /// Arguments + /// audio: 1D Tensor, int16 audio data in temporal ordering. + /// sample_rate: Integer, the sample rate of the audio in Hz. + /// window_size: Integer, length of desired time frames in ms. + /// window_step: Integer, length of step size for the next frame in ms. + /// num_channels: Integer, the number of filterbank channels to use. + /// upper_band_limit: Float, the highest frequency included in the filterbanks. + /// lower_band_limit: Float, the lowest frequency included in the filterbanks. + /// smoothing_bits: Int, scale up signal by 2^(smoothing_bits) before reduction. 
+  ///   even_smoothing: Float, smoothing coefficient for even-numbered channels.
+  ///   odd_smoothing: Float, smoothing coefficient for odd-numbered channels.
+  ///   min_signal_remaining: Float, fraction of signal to preserve in smoothing.
+  ///   enable_pcan: Bool, enable PCAN auto gain control.
+  ///   pcan_strength: Float, gain normalization exponent.
+  ///   pcan_offset: Float, positive value added in the normalization denominator.
+  ///   gain_bits: Int, number of fractional bits in the gain.
+  ///   enable_log: Bool, enable logarithmic scaling of filterbanks.
+  ///   scale_shift: Integer, scale filterbanks by 2^(scale_shift).
+  ///   left_context: Integer, number of preceding frames to attach to each frame.
+  ///   right_context: Integer, number of following frames to attach to each frame.
+  ///   frame_stride: Integer, M frames to skip over, where output[n] = frame[n*M].
+  ///   zero_padding: Bool, if left/right context is out-of-bounds, attach frame of
+  ///     zeroes. Otherwise, frame[0] or frame[size-1] will be copied.
+  ///   out_scale: Integer, divide all filterbanks by this number.
+  ///   out_type: DType, type of the output Tensor, defaults to UINT16.
+  ///
+  /// Returns
+  ///   filterbanks: 2D Tensor, each row is a time frame, each column is a channel.
+  @inlinable @inline(__always)
+  public static func audioMicrofrontend<OutType: TensorFlowScalar>(
+    audio: Tensor<Int16>,
+    sampleRate: Int64 = 16000,
+    windowSize: Int64 = 25,
+    windowStep: Int64 = 10,
+    numChannels: Int64 = 32,
+    upperBandLimit: Double = 7500,
+    lowerBandLimit: Double = 125,
+    smoothingBits: Int64 = 10,
+    evenSmoothing: Double = 0.025,
+    oddSmoothing: Double = 0.06,
+    minSignalRemaining: Double = 0.05,
+    enablePcan: Bool = false,
+    pcanStrength: Double = 0.95,
+    pcanOffset: Double = 80,
+    gainBits: Int64 = 21,
+    enableLog: Bool = true,
+    scaleShift: Int64 = 6,
+    leftContext: Int64 = 0,
+    rightContext: Int64 = 0,
+    frameStride: Int64 = 1,
+    zeroPadding: Bool = false,
+    outScale: Int64 = 1
+  ) -> Tensor<OutType> {
+    switch audio.handle.backend {
+    case .XLA:
+      let output_device = audio.device
+      let audio = Tensor(copying: audio, to: .defaultTFEager)
+      return Tensor(
+        copying: _RawTFEager.audioMicrofrontend(
+          audio: audio, sampleRate: sampleRate, windowSize: windowSize, windowStep: windowStep,
+          numChannels: numChannels, upperBandLimit: upperBandLimit,
+          lowerBandLimit: lowerBandLimit, smoothingBits: smoothingBits,
+          evenSmoothing: evenSmoothing, oddSmoothing: oddSmoothing,
+          minSignalRemaining: minSignalRemaining, enablePcan: enablePcan,
+          pcanStrength: pcanStrength, pcanOffset: pcanOffset, gainBits: gainBits,
+          enableLog: enableLog, scaleShift: scaleShift, leftContext: leftContext,
+          rightContext: rightContext, frameStride: frameStride, zeroPadding: zeroPadding,
+          outScale: outScale), to: output_device)
+    case .TF_EAGER:
+      return _RawTFEager.audioMicrofrontend(
+        audio: audio, sampleRate: sampleRate, windowSize: windowSize, windowStep: windowStep,
+        numChannels: numChannels, upperBandLimit: upperBandLimit, lowerBandLimit: lowerBandLimit,
+        smoothingBits: smoothingBits, evenSmoothing: evenSmoothing, oddSmoothing: oddSmoothing,
+        minSignalRemaining: minSignalRemaining, enablePcan: enablePcan,
+        pcanStrength: pcanStrength, pcanOffset: pcanOffset, gainBits: gainBits,
+        enableLog: enableLog, scaleShift: scaleShift, leftContext: leftContext,
+        rightContext: rightContext, frameStride: frameStride, zeroPadding: zeroPadding,
+        outScale: outScale)
+    }
+
+  }
+
+  /// Produces a visualization of audio data over time.
+  ///
+  /// Spectrograms are a standard way of representing audio information as a series of
+  /// slices of frequency information, one slice for each window of time. By joining
+  /// these together into a sequence, they form a distinctive fingerprint of the sound
+  /// over time.
+  ///
+  /// This op expects to receive audio data as an input, stored as floats in the range
+  /// -1 to 1, together with a window width in samples, and a stride specifying how
+  /// far to move the window between slices. From this it generates a three
+  /// dimensional output. The first dimension is for the channels in the input, so a
+  /// stereo audio input would have two here for example. The second dimension is time,
+  /// with successive frequency slices. The third dimension has an amplitude value for
+  /// each frequency during that time slice.
+  ///
+  /// This means the layout when converted and saved as an image is rotated 90 degrees
+  /// clockwise from a typical spectrogram. Time is descending down the Y axis, and
+  /// the frequency decreases from left to right.
+  ///
+  /// Each value in the result represents the square root of the sum of the squares of
+  /// the real and imaginary parts of an FFT on the current window of samples. In this
+  /// way, the lowest dimension represents the power of each frequency in the current
+  /// window, and adjacent windows are concatenated in the next dimension.
+  ///
+  /// To get a more intuitive and visual look at what this operation does, you can run
+  /// tensorflow/examples/wav_to_spectrogram to read in an audio file and save out the
+  /// resulting spectrogram as a PNG image.
+  ///
+  /// - Parameter input: Float representation of audio data.
+  ///
+  /// - Attrs:
+  ///   - window_size: How wide the input window is in samples. For the highest efficiency
+  ///     this should be a power of two, but other values are accepted.
+  ///   - stride: How widely apart the center of adjacent sample windows should be.
+  ///   - magnitude_squared: Whether to return the squared magnitude or just the
+  ///     magnitude. Using squared magnitude can avoid extra calculations.
+  ///
+  /// - Output spectrogram: 3D representation of the audio frequencies as an image.
+  @inlinable @inline(__always)
+  public static func audioSpectrogram(
+    _ input: Tensor<Float>,
+    windowSize: Int64,
+    stride: Int64,
+    magnitudeSquared: Bool = false
+  ) -> Tensor<Float> {
+    switch input.handle.backend {
+    case .XLA:
+      let output_device = input.device
+      let input = Tensor(copying: input, to: .defaultTFEager)
+      return Tensor(
+        copying: _RawTFEager.audioSpectrogram(
+          input, windowSize: windowSize, stride: stride, magnitudeSquared: magnitudeSquared),
+        to: output_device)
+    case .TF_EAGER:
+      return _RawTFEager.audioSpectrogram(
+        input, windowSize: windowSize, stride: stride, magnitudeSquared: magnitudeSquared)
+    }
+
+  }
+
+  /// Outputs a `Summary` protocol buffer with audio.
+  ///
+  /// The summary has up to `max_outputs` summary values containing audio. The
+  /// audio is built from `tensor` which must be 3-D with shape `[batch_size,
+  /// frames, channels]` or 2-D with shape `[batch_size, frames]`. The values are
+  /// assumed to be in the range of `[-1.0, 1.0]` with a sample rate of `sample_rate`.
+  ///
+  /// The `tag` argument is a scalar `Tensor` of type `string`. It is used to
+  /// build the `tag` of the summary values:
+  ///
+  /// *  If `max_outputs` is 1, the summary value tag is '*tag*/audio'.
+  /// *  If `max_outputs` is greater than 1, the summary value tags are
+  ///    generated sequentially as '*tag*/audio/0', '*tag*/audio/1', etc.
+ /// + /// - Parameters: + /// - tag: Scalar. Used to build the `tag` attribute of the summary values. + /// - tensor: 2-D of shape `[batch_size, frames]`. + /// + /// - Attrs: + /// - sample_rate: The sample rate of the signal in hertz. + /// - max_outputs: Max number of batch elements to generate audio for. + /// + /// - Output summary: Scalar. Serialized `Summary` protocol buffer. + @inlinable @inline(__always) + public static func audioSummary( + tag: StringTensor, + _ tensor: Tensor, + sampleRate: Double, + maxOutputs: Int64 = 3 + ) -> StringTensor { + _RawTFEager.audioSummary(tag: tag, tensor, sampleRate: sampleRate, maxOutputs: maxOutputs) + } + + /// Outputs a `Summary` protocol buffer with audio. + /// + /// The summary has up to `max_outputs` summary values containing audio. The + /// audio is built from `tensor` which must be 3-D with shape `[batch_size, + /// frames, channels]` or 2-D with shape `[batch_size, frames]`. The values are + /// assumed to be in the range of `[-1.0, 1.0]` with a sample rate of `sample_rate`. + /// + /// The `tag` argument is a scalar `Tensor` of type `string`. It is used to + /// build the `tag` of the summary values: + /// + /// * If `max_outputs` is 1, the summary value tag is '*tag*/audio'. + /// * If `max_outputs` is greater than 1, the summary value tags are + /// generated sequentially as '*tag*/audio/0', '*tag*/audio/1', etc. + /// + /// - Parameters: + /// - tag: Scalar. Used to build the `tag` attribute of the summary values. + /// - tensor: 2-D of shape `[batch_size, frames]`. + /// - sample_rate: The sample rate of the signal in hertz. + /// + /// - Attr max_outputs: Max number of batch elements to generate audio for. + /// + /// - Output summary: Scalar. Serialized `Summary` protocol buffer. + @inlinable @inline(__always) + public static func audioSummaryV2( + tag: StringTensor, + _ tensor: Tensor, + sampleRate: Tensor, + maxOutputs: Int64 = 3 + ) -> StringTensor { + _RawTFEager.audioSummaryV2(tag: tag, tensor, sampleRate: sampleRate, maxOutputs: maxOutputs) + } + + /// Creates a dataset that shards the input dataset. + /// + /// Creates a dataset that shards the input dataset by num_workers, returning a + /// sharded dataset for the index-th worker. This attempts to automatically shard + /// a dataset by examining the Dataset graph and inserting a shard op before the + /// inputs to a reader Dataset (e.g. CSVDataset, TFRecordDataset). + /// + /// This dataset will throw a NotFound error if we cannot shard the dataset + /// automatically. + /// + /// - Parameters: + /// - input_dataset: A variant tensor representing the input dataset. + /// - num_workers: A scalar representing the number of workers to distribute this dataset across. + /// - index: A scalar representing the index of the current worker out of num_workers. + @inlinable @inline(__always) + public static func autoShardDataset( + inputDataset: VariantHandle, + numWorkers: Tensor, + index: Tensor, + autoShardPolicy: Int64 = 0, + outputTypes: [TensorDataType], + outputShapes: [TensorShape?] + ) -> VariantHandle { + _RawTFEager.autoShardDataset( + inputDataset: inputDataset, numWorkers: numWorkers, index: index, + autoShardPolicy: autoShardPolicy, outputTypes: outputTypes, outputShapes: outputShapes) + } + + /// Performs average pooling on the input. + /// + /// Each entry in `output` is the mean of the corresponding size `ksize` + /// window in `value`. + /// + /// - Parameter value: 4-D with shape `[batch, height, width, channels]`. 
+ /// + /// - Attrs: + /// - ksize: The size of the sliding window for each dimension of `value`. + /// - strides: The stride of the sliding window for each dimension of `value`. + /// - padding: The type of padding algorithm to use. + /// - data_format: Specify the data format of the input and output data. With the + /// default format "NHWC", the data is stored in the order of: + /// [batch, in_height, in_width, in_channels]. + /// Alternatively, the format could be "NCHW", the data storage order of: + /// [batch, in_channels, in_height, in_width]. + /// + /// - Output output: The average pooled output tensor. + @inlinable @inline(__always) + public static func avgPool( + value: Tensor, + ksize: [Int32], + strides: [Int32], + padding: Padding, + dataFormat: DataFormat = .nhwc + ) -> Tensor { + switch value.handle.backend { + case .XLA: + return _RawXLA.avgPool( + value: value, ksize: ksize, strides: strides, padding: padding, dataFormat: dataFormat) + case .TF_EAGER: + return _RawTFEager.avgPool( + value: value, ksize: ksize, strides: strides, padding: padding, dataFormat: dataFormat) + } + + } + + /// Performs 3D average pooling on the input. + /// + /// - Parameter input: Shape `[batch, depth, rows, cols, channels]` tensor to pool over. + /// + /// - Attrs: + /// - ksize: 1-D tensor of length 5. The size of the window for each dimension of + /// the input tensor. Must have `ksize[0] = ksize[4] = 1`. + /// - strides: 1-D tensor of length 5. The stride of the sliding window for each + /// dimension of `input`. Must have `strides[0] = strides[4] = 1`. + /// - padding: The type of padding algorithm to use. + /// - data_format: The data format of the input and output data. With the + /// default format "NDHWC", the data is stored in the order of: + /// [batch, in_depth, in_height, in_width, in_channels]. + /// Alternatively, the format could be "NCDHW", the data storage order is: + /// [batch, in_channels, in_depth, in_height, in_width]. + /// + /// - Output output: The average pooled output tensor. + @inlinable @inline(__always) + public static func avgPool3D( + _ input: Tensor, + ksize: [Int32], + strides: [Int32], + padding: Padding, + dataFormat: DataFormat1 = .ndhwc + ) -> Tensor { + switch input.handle.backend { + case .XLA: + return _RawXLA.avgPool3D( + input, ksize: ksize, strides: strides, padding: padding, dataFormat: dataFormat) + case .TF_EAGER: + return _RawTFEager.avgPool3D( + input, ksize: ksize, strides: strides, padding: padding, dataFormat: dataFormat) + } + + } + + /// Computes gradients of average pooling function. + /// + /// - Parameters: + /// - orig_input_shape: The original input dimensions. + /// - grad: Output backprop of shape `[batch, depth, rows, cols, channels]`. + /// + /// - Attrs: + /// - ksize: 1-D tensor of length 5. The size of the window for each dimension of + /// the input tensor. Must have `ksize[0] = ksize[4] = 1`. + /// - strides: 1-D tensor of length 5. The stride of the sliding window for each + /// dimension of `input`. Must have `strides[0] = strides[4] = 1`. + /// - padding: The type of padding algorithm to use. + /// - data_format: The data format of the input and output data. With the + /// default format "NDHWC", the data is stored in the order of: + /// [batch, in_depth, in_height, in_width, in_channels]. + /// Alternatively, the format could be "NCDHW", the data storage order is: + /// [batch, in_channels, in_depth, in_height, in_width]. + /// + /// - Output output: The backprop for input. 
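Since the pooling wrappers in this hunk all share the same `ksize`/`strides` shape conventions, a short usage sketch of `_Raw.avgPool` (defined above) may help; the input values are illustrative only.

```swift
import TensorFlow

// Usage sketch for `_Raw.avgPool` with the default NHWC layout: a
// [1, 4, 4, 1] input, 2x2 windows, and stride 2 yield a [1, 2, 2, 1]
// output whose entries are the window means.
let image = Tensor<Float>(shape: [1, 4, 4, 1], scalars: (0..<16).map(Float.init))
let pooled = _Raw.avgPool(
  value: image,
  ksize: [1, 2, 2, 1],    // window size per dimension of `value`
  strides: [1, 2, 2, 1],  // stride per dimension of `value`
  padding: .valid)        // dataFormat defaults to .nhwc
// First output entry: mean of the window {0, 1, 4, 5} = 2.5.
```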
+  @inlinable @inline(__always)
+  public static func avgPool3DGrad<T: FloatingPoint & TensorFlowScalar>(
+    origInputShape: Tensor<Int32>,
+    grad: Tensor<T>,
+    ksize: [Int32],
+    strides: [Int32],
+    padding: Padding,
+    dataFormat: DataFormat1 = .ndhwc
+  ) -> Tensor<T> {
+    switch commonBackend(origInputShape.handle.backend, grad.handle.backend) {
+    case .XLA:
+      return _RawXLA.avgPool3DGrad(
+        origInputShape: origInputShape, grad: grad, ksize: ksize, strides: strides,
+        padding: padding, dataFormat: dataFormat)
+    case .TF_EAGER:
+      return _RawTFEager.avgPool3DGrad(
+        origInputShape: origInputShape, grad: grad, ksize: ksize, strides: strides,
+        padding: padding, dataFormat: dataFormat)
+    }
+
+  }
+
+  /// Computes gradients of the average pooling function.
+  ///
+  /// - Parameters:
+  ///   - orig_input_shape: 1-D. Shape of the original input to `avg_pool`.
+  ///   - grad: 4-D with shape `[batch, height, width, channels]`. Gradients w.r.t.
+  ///     the output of `avg_pool`.
+  ///
+  /// - Attrs:
+  ///   - ksize: The size of the sliding window for each dimension of the input.
+  ///   - strides: The stride of the sliding window for each dimension of the input.
+  ///   - padding: The type of padding algorithm to use.
+  ///   - data_format: Specify the data format of the input and output data. With the
+  ///     default format "NHWC", the data is stored in the order of:
+  ///     [batch, in_height, in_width, in_channels].
+  ///     Alternatively, the format could be "NCHW", the data storage order of:
+  ///     [batch, in_channels, in_height, in_width].
+  ///
+  /// - Output output: 4-D. Gradients w.r.t. the input of `avg_pool`.
+  @inlinable @inline(__always)
+  public static func avgPoolGrad<T: FloatingPoint & TensorFlowScalar>(
+    origInputShape: Tensor<Int32>,
+    grad: Tensor<T>,
+    ksize: [Int32],
+    strides: [Int32],
+    padding: Padding,
+    dataFormat: DataFormat = .nhwc
+  ) -> Tensor<T> {
+    switch commonBackend(origInputShape.handle.backend, grad.handle.backend) {
+    case .XLA:
+      return _RawXLA.avgPoolGrad(
+        origInputShape: origInputShape, grad: grad, ksize: ksize, strides: strides,
+        padding: padding, dataFormat: dataFormat)
+    case .TF_EAGER:
+      return _RawTFEager.avgPoolGrad(
+        origInputShape: origInputShape, grad: grad, ksize: ksize, strides: strides,
+        padding: padding, dataFormat: dataFormat)
+    }
+
+  }
+
+  @inlinable @inline(__always)
+  public static func b() -> Tensor<Float> {
+    _RawTFEager.b()
+  }
+
+  /// Batches all input tensors nondeterministically.
+  ///
+  /// When many instances of this Op are being run concurrently with the same
+  /// container/shared_name in the same device, some will output zero-shaped Tensors
+  /// and others will output Tensors of size up to max_batch_size.
+  ///
+  /// All Tensors in in_tensors are batched together (so, for example, labels and
+  /// features should be batched with a single instance of this operation).
+  ///
+  /// Each invocation of batch emits an `id` scalar which will be used to identify
+  /// this particular invocation when doing unbatch or its gradient.
+  ///
+  /// Each op which emits a non-empty batch will also emit a non-empty batch_index
+  /// Tensor, which is a [K, 3] matrix where each row contains the invocation's id,
+  /// start, and length of elements of each set of Tensors present in batched_tensors.
+  ///
+  /// Batched tensors are concatenated along the first dimension, and all tensors in
+  /// in_tensors must have the first dimension of the same size.
+  ///
+  /// in_tensors: The tensors to be batched.
+  /// num_batch_threads: Number of scheduling threads for processing batches of work.
+  ///   Determines the number of batches processed in parallel.
+ /// max_batch_size: Batch sizes will never be bigger than this. + /// batch_timeout_micros: Maximum number of microseconds to wait before outputting + /// an incomplete batch. + /// allowed_batch_sizes: Optional list of allowed batch sizes. If left empty, does + /// nothing. Otherwise, supplies a list of batch sizes, causing the op to pad + /// batches up to one of those sizes. The entries must increase monotonically, and + /// the final entry must equal max_batch_size. + /// grad_timeout_micros: The timeout to use for the gradient. See Unbatch. + /// batched_tensors: Either empty tensors or a batch of concatenated Tensors. + /// batch_index: If out_tensors is non-empty, has information to invert it. + /// container: Controls the scope of sharing of this batch. + /// id: always contains a scalar with a unique ID for this invocation of Batch. + /// shared_name: Concurrently running instances of batch in the same device with the + /// same container and shared_name will batch their elements together. If left + /// empty, the op name will be used as the shared name. + /// T: the types of tensors to be batched. + @inlinable @inline(__always) + public static func batch( + inTensors: T, + numBatchThreads: Int64, + maxBatchSize: Int64, + maxEnqueuedBatches: Int64 = 10, + batchTimeoutMicros: Int64, + allowedBatchSizes: [Int32], + gradTimeoutMicros: Int64, + container: String, + sharedName: String, + batchingQueue: String + ) -> (batchedTensors: T, batchIndex: Tensor, id: Tensor) { + _RawTFEager.batch( + inTensors: inTensors, numBatchThreads: numBatchThreads, maxBatchSize: maxBatchSize, + maxEnqueuedBatches: maxEnqueuedBatches, batchTimeoutMicros: batchTimeoutMicros, + allowedBatchSizes: allowedBatchSizes, gradTimeoutMicros: gradTimeoutMicros, + container: container, sharedName: sharedName, batchingQueue: batchingQueue) + } + + @inlinable @inline(__always) + public static func batchCholesky( + _ input: Tensor + ) -> Tensor { + switch input.handle.backend { + case .XLA: + let output_device = input.device + let input = Tensor(copying: input, to: .defaultTFEager) + return Tensor(copying: _RawTFEager.batchCholesky(input), to: output_device) + case .TF_EAGER: + return _RawTFEager.batchCholesky(input) + } + + } + + @inlinable @inline(__always) + public static func batchCholeskyGrad( + l: Tensor, + grad: Tensor + ) -> Tensor { + switch commonBackend(l.handle.backend, grad.handle.backend) { + case .XLA: + let output_device = grad.device + let l = Tensor(copying: l, to: .defaultTFEager) + let grad = Tensor(copying: grad, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.batchCholeskyGrad(l: l, grad: grad), to: output_device) + case .TF_EAGER: + return _RawTFEager.batchCholeskyGrad(l: l, grad: grad) + } + + } + + /// Creates a dataset that batches `batch_size` elements from `input_dataset`. + /// + /// - Parameter batch_size: A scalar representing the number of elements to accumulate in a + /// batch. + @inlinable @inline(__always) + public static func batchDataset( + inputDataset: VariantHandle, + batchSize: Tensor, + outputTypes: [TensorDataType], + outputShapes: [TensorShape?] + ) -> VariantHandle { + _RawTFEager.batchDataset( + inputDataset: inputDataset, batchSize: batchSize, outputTypes: outputTypes, + outputShapes: outputShapes) + } + + /// Creates a dataset that batches `batch_size` elements from `input_dataset`. + /// + /// - Parameters: + /// - batch_size: A scalar representing the number of elements to accumulate in a batch. 
+  ///   - drop_remainder: A scalar representing whether the last batch should be dropped in case its size
+  ///     is smaller than desired.
+  @inlinable @inline(__always)
+  public static func batchDatasetV2(
+    inputDataset: VariantHandle,
+    batchSize: Tensor<Int64>,
+    dropRemainder: Tensor<Bool>,
+    parallelCopy: Bool = false,
+    outputTypes: [TensorDataType],
+    outputShapes: [TensorShape?]
+  ) -> VariantHandle {
+    _RawTFEager.batchDatasetV2(
+      inputDataset: inputDataset, batchSize: batchSize, dropRemainder: dropRemainder,
+      parallelCopy: parallelCopy, outputTypes: outputTypes, outputShapes: outputShapes)
+  }
+
+  /// Batches all the input tensors to the computation done by the function.
+  ///
+  /// So, for example, in the following code
+  ///
+  /// ```python
+  ///
+  /// # This input will be captured.
+  /// y = tf.placeholder_with_default(1.0, shape=[])
+  ///
+  /// @tf.Defun(tf.float32)
+  /// def computation(a):
+  ///   return tf.matmul(a, a) + y
+  ///
+  /// b = gen_batch_ops.batch_function(
+  ///         f=computation,
+  ///         in_tensors=[a],
+  ///         captured_tensors=computation.captured_inputs,
+  ///         Tout=[o.type for o in computation.definition.signature.output_arg],
+  ///         num_batch_threads=1,
+  ///         max_batch_size=10,
+  ///         batch_timeout_micros=100000,  # 100ms
+  ///         allowed_batch_sizes=[3, 10],
+  ///         batching_queue="")
+  /// ```
+  ///
+  /// If more than one session.run call is simultaneously trying to compute `b`,
+  /// the values of `a` will be gathered, non-deterministically concatenated
+  /// along the first axis, and only one thread will run the computation.
+  ///
+  /// Assumes that all arguments of the function are Tensors which will be batched
+  /// along their first dimension.
+  ///
+  /// Arguments that are captured are not batched. The session.run call which does
+  /// the concatenation will use the values of the captured tensors available to it.
+  /// Therefore, typical uses of captured tensors should involve values which remain
+  /// unchanged across session.run calls. Inference is a good example of this.
+  ///
+  /// SparseTensor is not supported. The return value of the decorated function
+  /// must be a Tensor or a list/tuple of Tensors.
+  ///
+  /// - Parameters:
+  ///   - in_tensors: The tensors to be batched.
+  ///   - captured_tensors: The tensors which are captured in the function, and don't need
+  ///     to be batched.
+  ///
+  /// - Attrs:
+  ///   - num_batch_threads: Number of scheduling threads for processing batches of work.
+  ///     Determines the number of batches processed in parallel.
+  ///   - max_batch_size: Batch sizes will never be bigger than this.
+  ///   - batch_timeout_micros: Maximum number of microseconds to wait before outputting
+  ///     an incomplete batch.
+  ///   - max_enqueued_batches: Maximum number of batches enqueued. Default: 10.
+  ///   - allowed_batch_sizes: Optional list of allowed batch sizes. If left empty, does
+  ///     nothing. Otherwise, supplies a list of batch sizes, causing the op to pad
+  ///     batches up to one of those sizes. The entries must increase monotonically, and
+  ///     the final entry must equal max_batch_size.
+  ///   - container: Controls the scope of sharing of this batch.
+  ///   - shared_name: Concurrently running instances of batch in the same device with the
+  ///     same container and shared_name will batch their elements together. If left
+  ///     empty, the op name will be used as the shared name.
+  ///   - Tin: the types of tensors to be batched.
+  ///   - Tcaptured: the types of the captured tensors.
+  ///   - Tout: the types of the output tensors.
+ /// + /// - Output out_tensors: The output tensors. + @inlinable @inline(__always) + public static func batchFunction< + FIn: TensorGroup, + FOut: TensorGroup, + Tin: TensorArrayProtocol, + Tcaptured: TensorArrayProtocol, + Tout: TensorGroup + >( + inTensors: Tin, + capturedTensors: Tcaptured, + f: (FIn) -> FOut, + numBatchThreads: Int64, + maxBatchSize: Int64, + batchTimeoutMicros: Int64, + maxEnqueuedBatches: Int64 = 10, + allowedBatchSizes: [Int32], + container: String, + sharedName: String, + batchingQueue: String + ) -> Tout { + _RawTFEager.batchFunction( + inTensors: inTensors, capturedTensors: capturedTensors, f: f, + numBatchThreads: numBatchThreads, maxBatchSize: maxBatchSize, + batchTimeoutMicros: batchTimeoutMicros, maxEnqueuedBatches: maxEnqueuedBatches, + allowedBatchSizes: allowedBatchSizes, container: container, sharedName: sharedName, + batchingQueue: batchingQueue) + } + + /// Multiplies slices of two tensors in batches. + /// + /// Multiplies all slices of `Tensor` `x` and `y` (each slice can be + /// viewed as an element of a batch), and arranges the individual results + /// in a single output tensor of the same batch size. Each of the + /// individual slices can optionally be adjointed (to adjoint a matrix + /// means to transpose and conjugate it) before multiplication by setting + /// the `adj_x` or `adj_y` flag to `True`, which are by default `False`. + /// + /// The input tensors `x` and `y` are 2-D or higher with shape `[..., r_x, c_x]` + /// and `[..., r_y, c_y]`. + /// + /// The output tensor is 2-D or higher with shape `[..., r_o, c_o]`, where: + /// + /// r_o = c_x if adj_x else r_x + /// c_o = r_y if adj_y else c_y + /// + /// It is computed as: + /// + /// output[..., :, :] = matrix(x[..., :, :]) * matrix(y[..., :, :]) + /// + /// - Parameters: + /// - x: 2-D or higher with shape `[..., r_x, c_x]`. + /// - y: 2-D or higher with shape `[..., r_y, c_y]`. + /// + /// - Attrs: + /// - adj_x: If `True`, adjoint the slices of `x`. Defaults to `False`. + /// - adj_y: If `True`, adjoint the slices of `y`. Defaults to `False`. + /// + /// - Output output: 3-D or higher with shape `[..., r_o, c_o]` + @inlinable @inline(__always) + public static func batchMatMul( + _ x: Tensor, + _ y: Tensor, + adjX: Bool = false, + adjY: Bool = false + ) -> Tensor { + switch commonBackend(x.handle.backend, y.handle.backend) { + case .XLA: + let output_device = y.device + let x = Tensor(copying: x, to: .defaultTFEager) + let y = Tensor(copying: y, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.batchMatMul(x, y, adjX: adjX, adjY: adjY), to: output_device) + case .TF_EAGER: + return _RawTFEager.batchMatMul(x, y, adjX: adjX, adjY: adjY) + } + + } + + /// Multiplies slices of two tensors in batches. + /// + /// Multiplies all slices of `Tensor` `x` and `y` (each slice can be + /// viewed as an element of a batch), and arranges the individual results + /// in a single output tensor of the same batch size. Each of the + /// individual slices can optionally be adjointed (to adjoint a matrix + /// means to transpose and conjugate it) before multiplication by setting + /// the `adj_x` or `adj_y` flag to `True`, which are by default `False`. + /// + /// The input tensors `x` and `y` are 2-D or higher with shape `[..., r_x, c_x]` + /// and `[..., r_y, c_y]`. 
+ /// + /// The output tensor is 2-D or higher with shape `[..., r_o, c_o]`, where: + /// + /// r_o = c_x if adj_x else r_x + /// c_o = r_y if adj_y else c_y + /// + /// It is computed as: + /// + /// output[..., :, :] = matrix(x[..., :, :]) * matrix(y[..., :, :]) + /// + /// *NOTE*: `BatchMatMulV2` supports broadcasting in the batch dimensions. More + /// about broadcasting + /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html). + /// + /// + /// - Parameters: + /// - x: 2-D or higher with shape `[..., r_x, c_x]`. + /// - y: 2-D or higher with shape `[..., r_y, c_y]`. + /// + /// - Attrs: + /// - adj_x: If `True`, adjoint the slices of `x`. Defaults to `False`. + /// - adj_y: If `True`, adjoint the slices of `y`. Defaults to `False`. + /// + /// - Output output: 3-D or higher with shape `[..., r_o, c_o]` + @inlinable @inline(__always) + public static func batchMatMulV2( + _ x: Tensor, + _ y: Tensor, + adjX: Bool = false, + adjY: Bool = false + ) -> Tensor { + switch commonBackend(x.handle.backend, y.handle.backend) { + case .XLA: + return _RawXLA.batchMatMulV2(x, y, adjX: adjX, adjY: adjY) + case .TF_EAGER: + return _RawTFEager.batchMatMulV2(x, y, adjX: adjX, adjY: adjY) + } + + } + + @inlinable @inline(__always) + public static func batchMatrixBandPart( + _ input: Tensor, + numLower: Tensor, + numUpper: Tensor + ) -> Tensor { + switch commonBackend( + commonBackend(input.handle.backend, numLower.handle.backend), numUpper.handle.backend) + { + case .XLA: + let output_device = numUpper.device + let input = Tensor(copying: input, to: .defaultTFEager) + let numLower = Tensor(copying: numLower, to: .defaultTFEager) + let numUpper = Tensor(copying: numUpper, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.batchMatrixBandPart(input, numLower: numLower, numUpper: numUpper), + to: output_device) + case .TF_EAGER: + return _RawTFEager.batchMatrixBandPart(input, numLower: numLower, numUpper: numUpper) + } + + } + + @inlinable @inline(__always) + public static func batchMatrixDeterminant( + _ input: Tensor + ) -> Tensor { + switch input.handle.backend { + case .XLA: + let output_device = input.device + let input = Tensor(copying: input, to: .defaultTFEager) + return Tensor(copying: _RawTFEager.batchMatrixDeterminant(input), to: output_device) + case .TF_EAGER: + return _RawTFEager.batchMatrixDeterminant(input) + } + + } + + @inlinable @inline(__always) + public static func batchMatrixDiag( + diagonal: Tensor + ) -> Tensor { + switch diagonal.handle.backend { + case .XLA: + let output_device = diagonal.device + let diagonal = Tensor(copying: diagonal, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.batchMatrixDiag(diagonal: diagonal), to: output_device) + case .TF_EAGER: + return _RawTFEager.batchMatrixDiag(diagonal: diagonal) + } + + } + + @inlinable @inline(__always) + public static func batchMatrixDiagPart( + _ input: Tensor + ) -> Tensor { + switch input.handle.backend { + case .XLA: + let output_device = input.device + let input = Tensor(copying: input, to: .defaultTFEager) + return Tensor(copying: _RawTFEager.batchMatrixDiagPart(input), to: output_device) + case .TF_EAGER: + return _RawTFEager.batchMatrixDiagPart(input) + } + + } + + @inlinable @inline(__always) + public static func batchMatrixInverse( + _ input: Tensor, + adjoint: Bool = false + ) -> Tensor { + switch input.handle.backend { + case .XLA: + let output_device = input.device + let input = Tensor(copying: input, to: .defaultTFEager) + return Tensor( + copying: 
_RawTFEager.batchMatrixInverse(input, adjoint: adjoint), to: output_device) + case .TF_EAGER: + return _RawTFEager.batchMatrixInverse(input, adjoint: adjoint) + } + + } + + @inlinable @inline(__always) + public static func batchMatrixSetDiag( + _ input: Tensor, + diagonal: Tensor + ) -> Tensor { + switch commonBackend(input.handle.backend, diagonal.handle.backend) { + case .XLA: + let output_device = diagonal.device + let input = Tensor(copying: input, to: .defaultTFEager) + let diagonal = Tensor(copying: diagonal, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.batchMatrixSetDiag(input, diagonal: diagonal), to: output_device) + case .TF_EAGER: + return _RawTFEager.batchMatrixSetDiag(input, diagonal: diagonal) + } + + } + + @inlinable @inline(__always) + public static func batchMatrixSolve( + matrix: Tensor, + rhs: Tensor, + adjoint: Bool = false + ) -> Tensor { + switch commonBackend(matrix.handle.backend, rhs.handle.backend) { + case .XLA: + let output_device = rhs.device + let matrix = Tensor(copying: matrix, to: .defaultTFEager) + let rhs = Tensor(copying: rhs, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.batchMatrixSolve(matrix: matrix, rhs: rhs, adjoint: adjoint), + to: output_device) + case .TF_EAGER: + return _RawTFEager.batchMatrixSolve(matrix: matrix, rhs: rhs, adjoint: adjoint) + } + + } + + @inlinable @inline(__always) + public static func batchMatrixSolveLs( + matrix: Tensor, + rhs: Tensor, + l2Regularizer: Tensor, + fast: Bool = true + ) -> Tensor { + switch commonBackend( + commonBackend(matrix.handle.backend, rhs.handle.backend), l2Regularizer.handle.backend) + { + case .XLA: + let output_device = l2Regularizer.device + let matrix = Tensor(copying: matrix, to: .defaultTFEager) + let rhs = Tensor(copying: rhs, to: .defaultTFEager) + let l2Regularizer = Tensor(copying: l2Regularizer, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.batchMatrixSolveLs( + matrix: matrix, rhs: rhs, l2Regularizer: l2Regularizer, fast: fast), to: output_device) + case .TF_EAGER: + return _RawTFEager.batchMatrixSolveLs( + matrix: matrix, rhs: rhs, l2Regularizer: l2Regularizer, fast: fast) + } + + } + + @inlinable @inline(__always) + public static func batchMatrixTriangularSolve( + matrix: Tensor, + rhs: Tensor, + lower: Bool = true, + adjoint: Bool = false + ) -> Tensor { + switch commonBackend(matrix.handle.backend, rhs.handle.backend) { + case .XLA: + let output_device = rhs.device + let matrix = Tensor(copying: matrix, to: .defaultTFEager) + let rhs = Tensor(copying: rhs, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.batchMatrixTriangularSolve( + matrix: matrix, rhs: rhs, lower: lower, adjoint: adjoint), to: output_device) + case .TF_EAGER: + return _RawTFEager.batchMatrixTriangularSolve( + matrix: matrix, rhs: rhs, lower: lower, adjoint: adjoint) + } + + } + + /// Batch normalization. + /// + /// This op is deprecated. Prefer `tf.nn.batch_normalization`. + /// + /// - Parameters: + /// - t: A 4D input Tensor. + /// - m: A 1D mean Tensor with size matching the last dimension of t. + /// This is the first output from tf.nn.moments, + /// or a saved moving average thereof. + /// - v: A 1D variance Tensor with size matching the last dimension of t. + /// This is the second output from tf.nn.moments, + /// or a saved moving average thereof. + /// - beta: A 1D beta Tensor with size matching the last dimension of t. + /// An offset to be added to the normalized tensor. + /// - gamma: A 1D gamma Tensor with size matching the last dimension of t. 
+ /// If "scale_after_normalization" is true, this tensor will be multiplied + /// with the normalized tensor. + /// + /// - Attrs: + /// - variance_epsilon: A small float number to avoid dividing by 0. + /// - scale_after_normalization: A bool indicating whether the resulted tensor + /// needs to be multiplied with gamma. + @inlinable @inline(__always) + public static func batchNormWithGlobalNormalization( + t: Tensor, + m: Tensor, + v: Tensor, + beta: Tensor, + gamma: Tensor, + varianceEpsilon: Double, + scaleAfterNormalization: Bool + ) -> Tensor { + switch commonBackend( + commonBackend( + commonBackend(commonBackend(t.handle.backend, m.handle.backend), v.handle.backend), + beta.handle.backend), gamma.handle.backend) + { + case .XLA: + let output_device = gamma.device + let t = Tensor(copying: t, to: .defaultTFEager) + let m = Tensor(copying: m, to: .defaultTFEager) + let v = Tensor(copying: v, to: .defaultTFEager) + let beta = Tensor(copying: beta, to: .defaultTFEager) + let gamma = Tensor(copying: gamma, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.batchNormWithGlobalNormalization( + t: t, m: m, v: v, beta: beta, gamma: gamma, varianceEpsilon: varianceEpsilon, + scaleAfterNormalization: scaleAfterNormalization), to: output_device) + case .TF_EAGER: + return _RawTFEager.batchNormWithGlobalNormalization( + t: t, m: m, v: v, beta: beta, gamma: gamma, varianceEpsilon: varianceEpsilon, + scaleAfterNormalization: scaleAfterNormalization) + } + + } + + /// Gradients for batch normalization. + /// + /// This op is deprecated. See `tf.nn.batch_normalization`. + /// + /// - Parameters: + /// - t: A 4D input Tensor. + /// - m: A 1D mean Tensor with size matching the last dimension of t. + /// This is the first output from tf.nn.moments, + /// or a saved moving average thereof. + /// - v: A 1D variance Tensor with size matching the last dimension of t. + /// This is the second output from tf.nn.moments, + /// or a saved moving average thereof. + /// - gamma: A 1D gamma Tensor with size matching the last dimension of t. + /// If "scale_after_normalization" is true, this Tensor will be multiplied + /// with the normalized Tensor. + /// - backprop: 4D backprop Tensor. + /// + /// - Attrs: + /// - variance_epsilon: A small float number to avoid dividing by 0. + /// - scale_after_normalization: A bool indicating whether the resulted tensor + /// needs to be multiplied with gamma. + /// + /// - Outputs: + /// - dx: 4D backprop tensor for input. + /// - dm: 1D backprop tensor for mean. + /// - dv: 1D backprop tensor for variance. + /// - db: 1D backprop tensor for beta. + /// - dg: 1D backprop tensor for gamma. 
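For reference, the deprecated batch-norm computation documented above reduces to a handful of tensor ops. The sketch below is an illustrative restatement of that formula, not the kernel itself; the function name is hypothetical.

```swift
import TensorFlow

// Illustrative restatement of the deprecated batch-norm formula: normalize
// `t` by mean `m` and variance `v`, shift by `beta`, and multiply by `gamma`
// only when scale_after_normalization is set.
func batchNormWithGlobalNormalizationSketch(
  t: Tensor<Float>, m: Tensor<Float>, v: Tensor<Float>,
  beta: Tensor<Float>, gamma: Tensor<Float>,
  varianceEpsilon: Float, scaleAfterNormalization: Bool
) -> Tensor<Float> {
  let normalized = (t - m) / sqrt(v + varianceEpsilon)
  return scaleAfterNormalization ? normalized * gamma + beta : normalized + beta
}
```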
+ @inlinable @inline(__always) + public static func batchNormWithGlobalNormalizationGrad( + t: Tensor, + m: Tensor, + v: Tensor, + gamma: Tensor, + backprop: Tensor, + varianceEpsilon: Double, + scaleAfterNormalization: Bool + ) -> (dx: Tensor, dm: Tensor, dv: Tensor, db: Tensor, dg: Tensor) { + _RawTFEager.batchNormWithGlobalNormalizationGrad( + t: t, m: m, v: v, gamma: gamma, backprop: backprop, varianceEpsilon: varianceEpsilon, + scaleAfterNormalization: scaleAfterNormalization) + } + + @inlinable @inline(__always) + public static func batchSelfAdjointEig( + _ input: Tensor + ) -> Tensor { + switch input.handle.backend { + case .XLA: + let output_device = input.device + let input = Tensor(copying: input, to: .defaultTFEager) + return Tensor(copying: _RawTFEager.batchSelfAdjointEig(input), to: output_device) + case .TF_EAGER: + return _RawTFEager.batchSelfAdjointEig(input) + } + + } + + @inlinable @inline(__always) + public static func batchSelfAdjointEigV2( + _ input: Tensor, + computeV: Bool = true + ) -> (e: Tensor, v: Tensor) { + _RawTFEager.batchSelfAdjointEigV2(input, computeV: computeV) + } + + @inlinable @inline(__always) + public static func batchSvd( + _ input: Tensor, + computeUv: Bool = true, + fullMatrices: Bool = false + ) -> (s: Tensor, u: Tensor, v: Tensor) { + _RawTFEager.batchSvd(input, computeUv: computeUv, fullMatrices: fullMatrices) + } + + /// BatchToSpace for 4-D tensors of type T. + /// + /// This is a legacy version of the more general BatchToSpaceND. + /// + /// Rearranges (permutes) data from batch into blocks of spatial data, followed by + /// cropping. This is the reverse transformation of SpaceToBatch. More specifically, + /// this op outputs a copy of the input tensor where values from the `batch` + /// dimension are moved in spatial blocks to the `height` and `width` dimensions, + /// followed by cropping along the `height` and `width` dimensions. + /// + /// - Parameters: + /// - input: 4-D tensor with shape + /// `[batch*block_size*block_size, height_pad/block_size, width_pad/block_size, + /// depth]`. Note that the batch size of the input tensor must be divisible by + /// `block_size * block_size`. + /// - crops: 2-D tensor of non-negative integers with shape `[2, 2]`. It specifies + /// how many elements to crop from the intermediate result across the spatial + /// dimensions as follows: + /// + /// crops = [[crop_top, crop_bottom], [crop_left, crop_right]] + /// + /// - Output output: 4-D with shape `[batch, height, width, depth]`, where: + /// + /// height = height_pad - crop_top - crop_bottom + /// width = width_pad - crop_left - crop_right + /// + /// The attr `block_size` must be greater than one. It indicates the block size. 
+  ///
+  /// Some examples:
+  ///
+  /// (1) For the following input of shape `[4, 1, 1, 1]` and block_size of 2:
+  ///
+  /// ```
+  /// [[[[1]]], [[[2]]], [[[3]]], [[[4]]]]
+  /// ```
+  ///
+  /// The output tensor has shape `[1, 2, 2, 1]` and value:
+  ///
+  /// ```
+  /// x = [[[[1], [2]], [[3], [4]]]]
+  /// ```
+  ///
+  /// (2) For the following input of shape `[4, 1, 1, 3]` and block_size of 2:
+  ///
+  /// ```
+  /// [[[[1, 2, 3]]], [[[4, 5, 6]]], [[[7, 8, 9]]], [[[10, 11, 12]]]]
+  /// ```
+  ///
+  /// The output tensor has shape `[1, 2, 2, 3]` and value:
+  ///
+  /// ```
+  /// x = [[[[1, 2, 3], [4, 5, 6]],
+  ///       [[7, 8, 9], [10, 11, 12]]]]
+  /// ```
+  ///
+  /// (3) For the following input of shape `[4, 2, 2, 1]` and block_size of 2:
+  ///
+  /// ```
+  /// x = [[[[1], [3]], [[9], [11]]],
+  ///      [[[2], [4]], [[10], [12]]],
+  ///      [[[5], [7]], [[13], [15]]],
+  ///      [[[6], [8]], [[14], [16]]]]
+  /// ```
+  ///
+  /// The output tensor has shape `[1, 4, 4, 1]` and value:
+  ///
+  /// ```
+  /// x = [[[[1], [2], [3], [4]],
+  ///       [[5], [6], [7], [8]],
+  ///       [[9], [10], [11], [12]],
+  ///       [[13], [14], [15], [16]]]]
+  /// ```
+  ///
+  /// (4) For the following input of shape `[8, 1, 2, 1]` and block_size of 2:
+  ///
+  /// ```
+  /// x = [[[[1], [3]]], [[[9], [11]]], [[[2], [4]]], [[[10], [12]]],
+  ///      [[[5], [7]]], [[[13], [15]]], [[[6], [8]]], [[[14], [16]]]]
+  /// ```
+  ///
+  /// The output tensor has shape `[2, 2, 4, 1]` and value:
+  ///
+  /// ```
+  /// x = [[[[1], [2], [3], [4]],
+  ///       [[5], [6], [7], [8]]],
+  ///      [[[9], [10], [11], [12]],
+  ///       [[13], [14], [15], [16]]]]
+  /// ```
+  @inlinable @inline(__always)
+  public static func batchToSpace<
+    T: TensorFlowScalar,
+    Tidx: TensorFlowIndex
+  >(
+    _ input: Tensor<T>,
+    crops: Tensor<Tidx>,
+    blockSize: Int64
+  ) -> Tensor<T> {
+    switch commonBackend(input.handle.backend, crops.handle.backend) {
+    case .XLA:
+      let output_device = crops.device
+      let input = Tensor(copying: input, to: .defaultTFEager)
+      let crops = Tensor(copying: crops, to: .defaultTFEager)
+      return Tensor(
+        copying: _RawTFEager.batchToSpace(input, crops: crops, blockSize: blockSize),
+        to: output_device)
+    case .TF_EAGER:
+      return _RawTFEager.batchToSpace(input, crops: crops, blockSize: blockSize)
+    }
+
+  }
+
+  /// BatchToSpace for N-D tensors of type T.
+  ///
+  /// This operation reshapes the "batch" dimension 0 into `M + 1` dimensions of shape
+  /// `block_shape + [batch]`, interleaves these blocks back into the grid defined by
+  /// the spatial dimensions `[1, ..., M]`, to obtain a result with the same rank as
+  /// the input. The spatial dimensions of this intermediate result are then
+  /// optionally cropped according to `crops` to produce the output. This is the
+  /// reverse of SpaceToBatch. See below for a precise description.
+  ///
+  /// - Parameters:
+  ///   - input: N-D with shape `input_shape = [batch] + spatial_shape + remaining_shape`,
+  ///     where spatial_shape has M dimensions.
+  ///   - block_shape: 1-D with shape `[M]`, all values must be >= 1.
+  ///   - crops: 2-D with shape `[M, 2]`, all values must be >= 0.
+  ///     `crops[i] = [crop_start, crop_end]` specifies the amount to crop from input
+  ///     dimension `i + 1`, which corresponds to spatial dimension `i`. It is
+  ///     required that
+  ///     `crop_start[i] + crop_end[i] <= block_shape[i] * input_shape[i + 1]`.
+  ///
+  /// This operation is equivalent to the following steps:
+  ///
+  /// 1.
Reshape `input` to `reshaped` of shape: + /// [block_shape[0], ..., block_shape[M-1], + /// batch / prod(block_shape), + /// input_shape[1], ..., input_shape[N-1]] + /// + /// 2. Permute dimensions of `reshaped` to produce `permuted` of shape + /// [batch / prod(block_shape), + /// + /// input_shape[1], block_shape[0], + /// ..., + /// input_shape[M], block_shape[M-1], + /// + /// input_shape[M+1], ..., input_shape[N-1]] + /// + /// 3. Reshape `permuted` to produce `reshaped_permuted` of shape + /// [batch / prod(block_shape), + /// + /// input_shape[1] * block_shape[0], + /// ..., + /// input_shape[M] * block_shape[M-1], + /// + /// input_shape[M+1], + /// ..., + /// input_shape[N-1]] + /// + /// 4. Crop the start and end of dimensions `[1, ..., M]` of + /// `reshaped_permuted` according to `crops` to produce the output of shape: + /// [batch / prod(block_shape), + /// + /// input_shape[1] * block_shape[0] - crops[0,0] - crops[0,1], + /// ..., + /// input_shape[M] * block_shape[M-1] - crops[M-1,0] - crops[M-1,1], + /// + /// input_shape[M+1], ..., input_shape[N-1]] + /// + /// Some examples: + /// + /// (1) For the following input of shape `[4, 1, 1, 1]`, `block_shape = [2, 2]`, and + /// `crops = [[0, 0], [0, 0]]`: + /// + /// ``` + /// [[[[1]]], [[[2]]], [[[3]]], [[[4]]]] + /// ``` + /// + /// The output tensor has shape `[1, 2, 2, 1]` and value: + /// + /// ``` + /// x = [[[[1], [2]], [[3], [4]]]] + /// ``` + /// + /// (2) For the following input of shape `[4, 1, 1, 3]`, `block_shape = [2, 2]`, and + /// `crops = [[0, 0], [0, 0]]`: + /// + /// ``` + /// [[[[1, 2, 3]]], [[[4, 5, 6]]], [[[7, 8, 9]]], [[[10, 11, 12]]]] + /// ``` + /// + /// The output tensor has shape `[1, 2, 2, 3]` and value: + /// + /// ``` + /// x = [[[[1, 2, 3], [4, 5, 6]], + /// [[7, 8, 9], [10, 11, 12]]]] + /// ``` + /// + /// (3) For the following input of shape `[4, 2, 2, 1]`, `block_shape = [2, 2]`, and + /// `crops = [[0, 0], [0, 0]]`: + /// + /// ``` + /// x = [[[[1], [3]], [[9], [11]]], + /// [[[2], [4]], [[10], [12]]], + /// [[[5], [7]], [[13], [15]]], + /// [[[6], [8]], [[14], [16]]]] + /// ``` + /// + /// The output tensor has shape `[1, 4, 4, 1]` and value: + /// + /// ``` + /// x = [[[[1], [2], [3], [4]], + /// [[5], [6], [7], [8]], + /// [[9], [10], [11], [12]], + /// [[13], [14], [15], [16]]]] + /// ``` + /// + /// (4) For the following input of shape `[8, 1, 3, 1]`, `block_shape = [2, 2]`, and + /// `crops = [[0, 0], [2, 0]]`: + /// + /// ``` + /// x = [[[[0], [1], [3]]], [[[0], [9], [11]]], + /// [[[0], [2], [4]]], [[[0], [10], [12]]], + /// [[[0], [5], [7]]], [[[0], [13], [15]]], + /// [[[0], [6], [8]]], [[[0], [14], [16]]]] + /// ``` + /// + /// The output tensor has shape `[2, 2, 4, 1]` and value: + /// + /// ``` + /// x = [[[[1], [2], [3], [4]], + /// [[5], [6], [7], [8]]], + /// [[[9], [10], [11], [12]], + /// [[13], [14], [15], [16]]]] + /// ``` + @inlinable @inline(__always) + public static func batchToSpaceND< + T: TensorFlowScalar, + TblockShape: TensorFlowIndex, + Tcrops: TensorFlowIndex + >( + _ input: Tensor, + blockShape: Tensor, + crops: Tensor + ) -> Tensor { + switch commonBackend( + commonBackend(input.handle.backend, blockShape.handle.backend), crops.handle.backend) + { + case .XLA: + let output_device = crops.device + let input = Tensor(copying: input, to: .defaultTFEager) + let blockShape = Tensor(copying: blockShape, to: .defaultTFEager) + let crops = Tensor(copying: crops, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.batchToSpaceND(input, blockShape: blockShape, 
crops: crops),
+        to: output_device)
+    case .TF_EAGER:
+      return _RawTFEager.batchToSpaceND(input, blockShape: blockShape, crops: crops)
+    }
+
+  }
+
+  /// Computes the Bessel i0e function of `x` element-wise.
+  ///
+  /// Exponentially scaled modified Bessel function of order 0 defined as
+  /// `bessel_i0e(x) = exp(-abs(x)) bessel_i0(x)`.
+  ///
+  /// This function is faster and more numerically stable than `bessel_i0(x)`.
+  @inlinable @inline(__always)
+  public static func besselI0e<T: FloatingPoint & TensorFlowScalar>(
+    _ x: Tensor<T>
+  ) -> Tensor<T> {
+    switch x.handle.backend {
+    case .XLA:
+      let output_device = x.device
+      let x = Tensor(copying: x, to: .defaultTFEager)
+      return Tensor(copying: _RawTFEager.besselI0e(x), to: output_device)
+    case .TF_EAGER:
+      return _RawTFEager.besselI0e(x)
+    }
+
+  }
+
+  /// Computes the Bessel i1e function of `x` element-wise.
+  ///
+  /// Exponentially scaled modified Bessel function of order 1 defined as
+  /// `bessel_i1e(x) = exp(-abs(x)) bessel_i1(x)`.
+  ///
+  /// This function is faster and more numerically stable than `bessel_i1(x)`.
+  @inlinable @inline(__always)
+  public static func besselI1e<T: FloatingPoint & TensorFlowScalar>(
+    _ x: Tensor<T>
+  ) -> Tensor<T> {
+    switch x.handle.backend {
+    case .XLA:
+      let output_device = x.device
+      let x = Tensor(copying: x, to: .defaultTFEager)
+      return Tensor(copying: _RawTFEager.besselI1e(x), to: output_device)
+    case .TF_EAGER:
+      return _RawTFEager.besselI1e(x)
+    }
+
+  }
+
+  /// Compute the regularized incomplete beta integral \\(I_x(a, b)\\).
+  ///
+  /// The regularized incomplete beta integral is defined as:
+  ///
+  /// \\(I_x(a, b) = \frac{B(x; a, b)}{B(a, b)}\\)
+  ///
+  /// where
+  ///
+  /// \\(B(x; a, b) = \int_0^x t^{a-1} (1 - t)^{b-1} dt\\)
+  ///
+  /// is the incomplete beta function and \\(B(a, b)\\) is the *complete*
+  /// beta function.
+  @inlinable @inline(__always)
+  public static func betainc<T: FloatingPoint & TensorFlowScalar>(
+    _ a: Tensor<T>,
+    _ b: Tensor<T>,
+    _ x: Tensor<T>
+  ) -> Tensor<T> {
+    switch commonBackend(commonBackend(a.handle.backend, b.handle.backend), x.handle.backend) {
+    case .XLA:
+      let output_device = x.device
+      let a = Tensor(copying: a, to: .defaultTFEager)
+      let b = Tensor(copying: b, to: .defaultTFEager)
+      let x = Tensor(copying: x, to: .defaultTFEager)
+      return Tensor(copying: _RawTFEager.betainc(a, b, x), to: output_device)
+    case .TF_EAGER:
+      return _RawTFEager.betainc(a, b, x)
+    }
+
+  }
+
+  /// Adds `bias` to `value`.
+  ///
+  /// This is a special case of `tf.add` where `bias` is restricted to be 1-D.
+  /// Broadcasting is supported, so `value` may have any number of dimensions.
+  ///
+  /// - Parameters:
+  ///     - value: Any number of dimensions.
+  ///     - bias: 1-D with size the last dimension of `value`.
+  ///
+  /// - Attr data_format: Specify the data format of the input and output data. With the
+  ///     default format "NHWC", the bias tensor will be added to the last dimension
+  ///     of the value tensor.
+  ///     Alternatively, the format could be "NCHW", the data storage order of:
+  ///         [batch, in_channels, in_height, in_width].
+  ///     The tensor will be added to "in_channels", the third-to-the-last
+  ///         dimension.
+  ///
+  /// - Output output: Broadcasted sum of `value` and `bias`.
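+  ///
+  /// A minimal usage sketch (values are illustrative; with the default "NHWC"
+  /// format, `bias` is added along the last dimension):
+  ///
+  /// ```swift
+  /// let value = Tensor<Float>(ones: [2, 3])
+  /// let bias = Tensor<Float>([0.5, 1.0, 1.5])
+  /// let result = _Raw.biasAdd(value: value, bias: bias)
+  /// // `result` has shape [2, 3]; every row is [1.5, 2.0, 2.5].
+  /// ```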
+ @inlinable @inline(__always) + public static func biasAdd( + value: Tensor, + bias: Tensor, + dataFormat: DataFormat = .nhwc + ) -> Tensor { + switch commonBackend(value.handle.backend, bias.handle.backend) { + case .XLA: + let output_device = bias.device + let value = Tensor(copying: value, to: .defaultTFEager) + let bias = Tensor(copying: bias, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.biasAdd(value: value, bias: bias, dataFormat: dataFormat), + to: output_device) + case .TF_EAGER: + return _RawTFEager.biasAdd(value: value, bias: bias, dataFormat: dataFormat) + } + + } + + /// The backward operation for "BiasAdd" on the "bias" tensor. + /// + /// It accumulates all the values from out_backprop into the feature dimension. + /// For NHWC data format, the feature dimension is the last. For NCHW data format, + /// the feature dimension is the third-to-last. + /// + /// - Parameter out_backprop: Any number of dimensions. + /// + /// - Attr data_format: Specify the data format of the input and output data. With the + /// default format "NHWC", the bias tensor will be added to the last dimension + /// of the value tensor. + /// Alternatively, the format could be "NCHW", the data storage order of: + /// [batch, in_channels, in_height, in_width]. + /// The tensor will be added to "in_channels", the third-to-the-last + /// dimension. + /// + /// - Output output: 1-D with size the feature dimension of `out_backprop`. + @inlinable @inline(__always) + public static func biasAddGrad( + outBackprop: Tensor, + dataFormat: DataFormat = .nhwc + ) -> Tensor { + switch outBackprop.handle.backend { + case .XLA: + let output_device = outBackprop.device + let outBackprop = Tensor(copying: outBackprop, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.biasAddGrad(outBackprop: outBackprop, dataFormat: dataFormat), + to: output_device) + case .TF_EAGER: + return _RawTFEager.biasAddGrad(outBackprop: outBackprop, dataFormat: dataFormat) + } + + } + + /// Adds `bias` to `value`. + /// + /// This is a deprecated version of BiasAdd and will be soon removed. + /// + /// This is a special case of `tf.add` where `bias` is restricted to be 1-D. + /// Broadcasting is supported, so `value` may have any number of dimensions. + /// + /// - Parameters: + /// - value: Any number of dimensions. + /// - bias: 1-D with size the last dimension of `value`. + /// + /// - Output output: Broadcasted sum of `value` and `bias`. + @inlinable @inline(__always) + public static func biasAddV1( + value: Tensor, + bias: Tensor + ) -> Tensor { + switch commonBackend(value.handle.backend, bias.handle.backend) { + case .XLA: + let output_device = bias.device + let value = Tensor(copying: value, to: .defaultTFEager) + let bias = Tensor(copying: bias, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.biasAddV1(value: value, bias: bias), to: output_device) + case .TF_EAGER: + return _RawTFEager.biasAddV1(value: value, bias: bias) + } + + } + + @inlinable @inline(__always) + public static func binary( + _ a: Tensor, + _ b: Tensor + ) -> Tensor { + switch commonBackend(a.handle.backend, b.handle.backend) { + case .XLA: + let output_device = b.device + let a = Tensor(copying: a, to: .defaultTFEager) + let b = Tensor(copying: b, to: .defaultTFEager) + return Tensor(copying: _RawTFEager.binary(a, b), to: output_device) + case .TF_EAGER: + return _RawTFEager.binary(a, b) + } + + } + + /// Counts the number of occurrences of each value in an integer array. 
+ /// + /// Outputs a vector with length `size` and the same dtype as `weights`. If + /// `weights` are empty, then index `i` stores the number of times the value `i` is + /// counted in `arr`. If `weights` are non-empty, then index `i` stores the sum of + /// the value in `weights` at each index where the corresponding value in `arr` is + /// `i`. + /// + /// Values in `arr` outside of the range [0, size) are ignored. + /// + /// - Parameters: + /// - arr: int32 `Tensor`. + /// - size: non-negative int32 scalar `Tensor`. + /// - weights: is an int32, int64, float32, or float64 `Tensor` with the same + /// shape as `arr`, or a length-0 `Tensor`, in which case it acts as all weights + /// equal to 1. + /// + /// - Output bins: 1D `Tensor` with length equal to `size`. The counts or summed weights for + /// each value in the range [0, size). + @inlinable @inline(__always) + public static func bincount( + arr: Tensor, + size: Tensor, + weights: Tensor + ) -> Tensor { + switch commonBackend( + commonBackend(arr.handle.backend, size.handle.backend), weights.handle.backend) + { + case .XLA: + let output_device = weights.device + let arr = Tensor(copying: arr, to: .defaultTFEager) + let size = Tensor(copying: size, to: .defaultTFEager) + let weights = Tensor(copying: weights, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.bincount(arr: arr, size: size, weights: weights), to: output_device) + case .TF_EAGER: + return _RawTFEager.bincount(arr: arr, size: size, weights: weights) + } + + } + + /// Bitcasts a tensor from one type to another without copying data. + /// + /// Given a tensor `input`, this operation returns a tensor that has the same buffer + /// data as `input` with datatype `type`. + /// + /// If the input datatype `T` is larger than the output datatype `type` then the + /// shape changes from [...] to [..., sizeof(`T`)/sizeof(`type`)]. + /// + /// If `T` is smaller than `type`, the operator requires that the rightmost + /// dimension be equal to sizeof(`type`)/sizeof(`T`). The shape then goes from + /// [..., sizeof(`type`)/sizeof(`T`)] to [...]. + /// + /// tf.bitcast() and tf.cast() work differently when real dtype is casted as a complex dtype + /// (e.g. tf.complex64 or tf.complex128) as tf.cast() make imaginary part 0 while tf.bitcast() + /// gives module error. + /// For example, + /// + /// Example 1: + /// + /// >>> a = [1., 2., 3.] + /// >>> equality_bitcast = tf.bitcast(a, tf.complex128) + /// Traceback (most recent call last): + /// ... + /// InvalidArgumentError: Cannot bitcast from 1 to 18 [Op:Bitcast] + /// >>> equality_cast = tf.cast(a, tf.complex128) + /// >>> print(equality_cast) + /// tf.Tensor([1.+0.j 2.+0.j 3.+0.j], shape=(3,), dtype=complex128) + /// + /// Example 2: + /// + /// >>> tf.bitcast(tf.constant(0xffffffff, dtype=tf.uint32), tf.uint8) + /// + /// + /// Example 3: + /// + /// >>> x = [1., 2., 3.] + /// >>> y = [0., 2., 3.] + /// >>> equality= tf.equal(x,y) + /// >>> equality_cast = tf.cast(equality,tf.float32) + /// >>> equality_bitcast = tf.bitcast(equality_cast,tf.uint8) + /// >>> print(equality) + /// tf.Tensor([False True True], shape=(3,), dtype=bool) + /// >>> print(equality_cast) + /// tf.Tensor([0. 1. 1.], shape=(3,), dtype=float32) + /// >>> print(equality_bitcast) + /// tf.Tensor( + /// [[ 0 0 0 0] + /// [ 0 0 128 63] + /// [ 0 0 128 63]], shape=(3, 4), dtype=uint8) + /// + /// *NOTE*: Bitcast is implemented as a low-level cast, so machines with different + /// endian orderings will give different results. 
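+  ///
+  /// A minimal sketch of the shape rule above (illustrative; reinterpreting each
+  /// 4-byte `Float` scalar as four `UInt8` values appends a dimension of size 4):
+  ///
+  /// ```swift
+  /// let x = Tensor<Float>([1.0, 2.0, 3.0])
+  /// let bytes: Tensor<UInt8> = _Raw.bitcast(x)
+  /// // `bytes` has shape [3, 4]; the byte values depend on platform endianness.
+  /// ```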
+ @inlinable @inline(__always) + public static func bitcast< + T: TensorFlowNumeric, + Type: TensorFlowNumeric + >( + _ input: Tensor + ) -> Tensor { + switch input.handle.backend { + case .XLA: + let output_device = input.device + let input = Tensor(copying: input, to: .defaultTFEager) + return Tensor(copying: _RawTFEager.bitcast(input), to: output_device) + case .TF_EAGER: + return _RawTFEager.bitcast(input) + } + + } + + /// Elementwise computes the bitwise AND of `x` and `y`. + /// + /// The result will have those bits set, that are set in both `x` and `y`. The + /// computation is performed on the underlying representations of `x` and `y`. + /// + /// For example: + /// + /// ```python + /// import tensorflow as tf + /// from tensorflow.python.ops import bitwise_ops + /// dtype_list = [tf.int8, tf.int16, tf.int32, tf.int64, + /// tf.uint8, tf.uint16, tf.uint32, tf.uint64] + /// + /// for dtype in dtype_list: + /// lhs = tf.constant([0, 5, 3, 14], dtype=dtype) + /// rhs = tf.constant([5, 0, 7, 11], dtype=dtype) + /// exp = tf.constant([0, 0, 3, 10], dtype=tf.float32) + /// + /// res = bitwise_ops.bitwise_and(lhs, rhs) + /// tf.assert_equal(tf.cast(res, tf.float32), exp) # TRUE + /// ``` + /// + @inlinable @inline(__always) + public static func bitwiseAnd( + _ x: Tensor, + _ y: Tensor + ) -> Tensor { + switch commonBackend(x.handle.backend, y.handle.backend) { + case .XLA: + let output_device = y.device + let x = Tensor(copying: x, to: .defaultTFEager) + let y = Tensor(copying: y, to: .defaultTFEager) + return Tensor(copying: _RawTFEager.bitwiseAnd(x, y), to: output_device) + case .TF_EAGER: + return _RawTFEager.bitwiseAnd(x, y) + } + + } + + /// Elementwise computes the bitwise OR of `x` and `y`. + /// + /// The result will have those bits set, that are set in `x`, `y` or both. The + /// computation is performed on the underlying representations of `x` and `y`. + /// + /// For example: + /// + /// ```python + /// import tensorflow as tf + /// from tensorflow.python.ops import bitwise_ops + /// dtype_list = [tf.int8, tf.int16, tf.int32, tf.int64, + /// tf.uint8, tf.uint16, tf.uint32, tf.uint64] + /// + /// for dtype in dtype_list: + /// lhs = tf.constant([0, 5, 3, 14], dtype=dtype) + /// rhs = tf.constant([5, 0, 7, 11], dtype=dtype) + /// exp = tf.constant([5, 5, 7, 15], dtype=tf.float32) + /// + /// res = bitwise_ops.bitwise_or(lhs, rhs) + /// tf.assert_equal(tf.cast(res, tf.float32), exp) # TRUE + /// ``` + /// + @inlinable @inline(__always) + public static func bitwiseOr( + _ x: Tensor, + _ y: Tensor + ) -> Tensor { + switch commonBackend(x.handle.backend, y.handle.backend) { + case .XLA: + let output_device = y.device + let x = Tensor(copying: x, to: .defaultTFEager) + let y = Tensor(copying: y, to: .defaultTFEager) + return Tensor(copying: _RawTFEager.bitwiseOr(x, y), to: output_device) + case .TF_EAGER: + return _RawTFEager.bitwiseOr(x, y) + } + + } + + /// Elementwise computes the bitwise XOR of `x` and `y`. + /// + /// The result will have those bits set, that are different in `x` and `y`. The + /// computation is performed on the underlying representations of `x` and `y`. 
+ /// + /// For example: + /// + /// ```python + /// import tensorflow as tf + /// from tensorflow.python.ops import bitwise_ops + /// dtype_list = [tf.int8, tf.int16, tf.int32, tf.int64, + /// tf.uint8, tf.uint16, tf.uint32, tf.uint64] + /// + /// for dtype in dtype_list: + /// lhs = tf.constant([0, 5, 3, 14], dtype=dtype) + /// rhs = tf.constant([5, 0, 7, 11], dtype=dtype) + /// exp = tf.constant([5, 5, 4, 5], dtype=tf.float32) + /// + /// res = bitwise_ops.bitwise_xor(lhs, rhs) + /// tf.assert_equal(tf.cast(res, tf.float32), exp) # TRUE + /// ``` + /// + @inlinable @inline(__always) + public static func bitwiseXor( + _ x: Tensor, + _ y: Tensor + ) -> Tensor { + switch commonBackend(x.handle.backend, y.handle.backend) { + case .XLA: + let output_device = y.device + let x = Tensor(copying: x, to: .defaultTFEager) + let y = Tensor(copying: y, to: .defaultTFEager) + return Tensor(copying: _RawTFEager.bitwiseXor(x, y), to: output_device) + case .TF_EAGER: + return _RawTFEager.bitwiseXor(x, y) + } + + } + + /// Computes the LSTM cell forward propagation for all the time steps. + /// + /// This is equivalent to applying LSTMBlockCell in a loop, like so: + /// + /// ```python + /// for x1 in unpack(x): + /// i1, cs1, f1, o1, ci1, co1, h1 = LSTMBlock( + /// x1, cs_prev, h_prev, w, wci, wcf, wco, b) + /// cs_prev = cs1 + /// h_prev = h1 + /// i.append(i1) + /// cs.append(cs1) + /// f.append(f1) + /// o.append(o1) + /// ci.append(ci1) + /// co.append(co1) + /// h.append(h1) + /// return pack(i), pack(cs), pack(f), pack(o), pack(ci), pack(ch), pack(h) + /// ``` + /// + /// - Parameters: + /// - seq_len_max: Maximum time length actually used by this input. Outputs are padded + /// with zeros beyond this length. + /// - x: The sequence input to the LSTM, shape (timelen, batch_size, num_inputs). + /// - cs_prev: Value of the initial cell state. + /// - h_prev: Initial output of cell (to be used for peephole). + /// - w: The weight matrix. + /// - wci: The weight matrix for input gate peephole connection. + /// - wcf: The weight matrix for forget gate peephole connection. + /// - wco: The weight matrix for output gate peephole connection. + /// - b: The bias vector. + /// + /// - Attrs: + /// - forget_bias: The forget gate bias. + /// - cell_clip: Value to clip the 'cs' value to. + /// - use_peephole: Whether to use peephole weights. + /// + /// - Outputs: + /// - i: The input gate over the whole time sequence. + /// - cs: The cell state before the tanh over the whole time sequence. + /// - f: The forget gate over the whole time sequence. + /// - o: The output gate over the whole time sequence. + /// - ci: The cell input over the whole time sequence. + /// - co: The cell after the tanh over the whole time sequence. + /// - h: The output h vector over the whole time sequence. + @inlinable @inline(__always) + public static func blockLSTM( + seqLenMax: Tensor, + _ x: Tensor, + csPrev: Tensor, + hPrev: Tensor, + w: Tensor, + wci: Tensor, + wcf: Tensor, + wco: Tensor, + _ b: Tensor, + forgetBias: Double = 1, + cellClip: Double = 3, + usePeephole: Bool = false + ) -> ( + i: Tensor, cs: Tensor, f: Tensor, o: Tensor, ci: Tensor, co: Tensor, + h: Tensor + ) { + _RawTFEager.blockLSTM( + seqLenMax: seqLenMax, x, csPrev: csPrev, hPrev: hPrev, w: w, wci: wci, wcf: wcf, wco: wco, + b, forgetBias: forgetBias, cellClip: cellClip, usePeephole: usePeephole) + } + + /// Computes the LSTM cell backward propagation for the entire time sequence. + /// + /// This implementation is to be used in conjunction of LSTMBlock. 
+ /// + /// - Parameters: + /// - seq_len_max: Maximum time length actually used by this input. Outputs are padded + /// with zeros beyond this length. + /// - x: The sequence input to the LSTM, shape (timelen, batch_size, num_inputs). + /// - cs_prev: Value of the initial cell state. + /// - h_prev: Initial output of cell (to be used for peephole). + /// - w: The weight matrix. + /// - wci: The weight matrix for input gate peephole connection. + /// - wcf: The weight matrix for forget gate peephole connection. + /// - wco: The weight matrix for output gate peephole connection. + /// - b: The bias vector. + /// - i: The input gate over the whole time sequence. + /// - cs: The cell state before the tanh over the whole time sequence. + /// - f: The forget gate over the whole time sequence. + /// - o: The output gate over the whole time sequence. + /// - ci: The cell input over the whole time sequence. + /// - co: The cell after the tanh over the whole time sequence. + /// - h: The output h vector over the whole time sequence. + /// - cs_grad: The current gradient of cs. + /// - h_grad: The gradient of h vector. + /// + /// - Attr use_peephole: Whether to use peephole weights. + /// + /// - Outputs: + /// - x_grad: The gradient of x to be back-propped. + /// - cs_prev_grad: The gradient of cs_prev to be back-propped. + /// - h_prev_grad: The gradient of h_prev to be back-propped. + /// - w_grad: The gradient for w to be back-propped. + /// - wci_grad: The gradient for wci to be back-propped. + /// - wcf_grad: The gradient for wcf to be back-propped. + /// - wco_grad: The gradient for wco to be back-propped. + /// - b_grad: The gradient for w to be back-propped. + @inlinable @inline(__always) + public static func blockLSTMGrad( + seqLenMax: Tensor, + _ x: Tensor, + csPrev: Tensor, + hPrev: Tensor, + w: Tensor, + wci: Tensor, + wcf: Tensor, + wco: Tensor, + _ b: Tensor, + i: Tensor, + cs: Tensor, + f: Tensor, + o: Tensor, + ci: Tensor, + co: Tensor, + h: Tensor, + csGrad: Tensor, + hGrad: Tensor, + usePeephole: Bool + ) -> ( + xGrad: Tensor, csPrevGrad: Tensor, hPrevGrad: Tensor, wGrad: Tensor, + wciGrad: Tensor, wcfGrad: Tensor, wcoGrad: Tensor, bGrad: Tensor + ) { + _RawTFEager.blockLSTMGrad( + seqLenMax: seqLenMax, x, csPrev: csPrev, hPrev: hPrev, w: w, wci: wci, wcf: wcf, wco: wco, + b, i: i, cs: cs, f: f, o: o, ci: ci, co: co, h: h, csGrad: csGrad, hGrad: hGrad, + usePeephole: usePeephole) + } + + /// Computes the LSTM cell backward propagation for the entire time sequence. + /// + /// This implementation is to be used in conjunction of BlockLSTMV2. + /// + /// - Parameters: + /// - seq_len_max: Maximum time length actually used by this input. Outputs are padded + /// with zeros beyond this length. + /// - x: The sequence input to the LSTM, shape (timelen, batch_size, num_inputs). + /// - cs_prev: Value of the initial cell state. + /// - h_prev: Initial output of cell (to be used for peephole). + /// - w: The weight matrix. + /// - wci: The weight matrix for input gate peephole connection. + /// - wcf: The weight matrix for forget gate peephole connection. + /// - wco: The weight matrix for output gate peephole connection. + /// - b: The bias vector. + /// - i: The input gate over the whole time sequence. + /// - cs: The cell state before the tanh over the whole time sequence. + /// - f: The forget gate over the whole time sequence. + /// - o: The output gate over the whole time sequence. + /// - ci: The cell input over the whole time sequence. 
+  ///     - co: The cell after the tanh over the whole time sequence.
+  ///     - h: The output h vector over the whole time sequence.
+  ///     - cs_grad: The current gradient of cs.
+  ///     - h_grad: The gradient of h vector.
+  ///
+  /// - Attr use_peephole: Whether to use peephole weights.
+  ///
+  /// - Outputs:
+  ///     - x_grad: The gradient of x to be back-propped.
+  ///     - cs_prev_grad: The gradient of cs_prev to be back-propped.
+  ///     - h_prev_grad: The gradient of h_prev to be back-propped.
+  ///     - w_grad: The gradient for w to be back-propped.
+  ///     - wci_grad: The gradient for wci to be back-propped.
+  ///     - wcf_grad: The gradient for wcf to be back-propped.
+  ///     - wco_grad: The gradient for wco to be back-propped.
+  ///     - b_grad: The gradient for b to be back-propped.
+  @inlinable @inline(__always)
+  public static func blockLSTMGradV2<T: FloatingPoint & TensorFlowScalar>(
+    seqLenMax: Tensor<Int64>,
+    _ x: Tensor<T>,
+    csPrev: Tensor<T>,
+    hPrev: Tensor<T>,
+    w: Tensor<T>,
+    wci: Tensor<T>,
+    wcf: Tensor<T>,
+    wco: Tensor<T>,
+    _ b: Tensor<T>,
+    i: Tensor<T>,
+    cs: Tensor<T>,
+    f: Tensor<T>,
+    o: Tensor<T>,
+    ci: Tensor<T>,
+    co: Tensor<T>,
+    h: Tensor<T>,
+    csGrad: Tensor<T>,
+    hGrad: Tensor<T>,
+    usePeephole: Bool
+  ) -> (
+    xGrad: Tensor<T>, csPrevGrad: Tensor<T>, hPrevGrad: Tensor<T>, wGrad: Tensor<T>,
+    wciGrad: Tensor<T>, wcfGrad: Tensor<T>, wcoGrad: Tensor<T>, bGrad: Tensor<T>
+  ) {
+    _RawTFEager.blockLSTMGradV2(
+      seqLenMax: seqLenMax, x, csPrev: csPrev, hPrev: hPrev, w: w, wci: wci, wcf: wcf, wco: wco,
+      b, i: i, cs: cs, f: f, o: o, ci: ci, co: co, h: h, csGrad: csGrad, hGrad: hGrad,
+      usePeephole: usePeephole)
+  }
+
+  /// Computes the LSTM cell forward propagation for all the time steps.
+  ///
+  /// This is equivalent to applying LSTMBlockCell in a loop, like so:
+  ///
+  /// ```python
+  /// for x1 in unpack(x):
+  ///   i1, cs1, f1, o1, ci1, co1, h1 = LSTMBlock(
+  ///     x1, cs_prev, h_prev, w, wci, wcf, wco, b)
+  ///   cs_prev = cs1
+  ///   h_prev = h1
+  ///   i.append(i1)
+  ///   cs.append(cs1)
+  ///   f.append(f1)
+  ///   o.append(o1)
+  ///   ci.append(ci1)
+  ///   co.append(co1)
+  ///   h.append(h1)
+  /// return pack(i), pack(cs), pack(f), pack(o), pack(ci), pack(co), pack(h)
+  /// ```
+  ///
+  /// Note that unlike LSTMBlockCell (and BlockLSTM), which use the ICFO gate layout,
+  /// this op uses IFCO. So in order for the snippet above to be equivalent, all
+  /// gate-related outputs should be reordered.
+  ///
+  /// - Parameters:
+  ///     - seq_len_max: Maximum time length actually used by this input. Outputs are padded
+  ///         with zeros beyond this length.
+  ///     - x: The sequence input to the LSTM, shape (timelen, batch_size, num_inputs).
+  ///     - cs_prev: Value of the initial cell state.
+  ///     - h_prev: Initial output of cell (to be used for peephole).
+  ///     - w: The weight matrix.
+  ///     - wci: The weight matrix for input gate peephole connection.
+  ///     - wcf: The weight matrix for forget gate peephole connection.
+  ///     - wco: The weight matrix for output gate peephole connection.
+  ///     - b: The bias vector.
+  ///
+  /// - Attrs:
+  ///     - cell_clip: Value to clip the 'cs' value to.
+  ///     - use_peephole: Whether to use peephole weights.
+  ///
+  /// - Outputs:
+  ///     - i: The input gate over the whole time sequence.
+  ///     - cs: The cell state before the tanh over the whole time sequence.
+  ///     - f: The forget gate over the whole time sequence.
+  ///     - o: The output gate over the whole time sequence.
+  ///     - ci: The cell input over the whole time sequence.
+  ///     - co: The cell after the tanh over the whole time sequence.
+  ///     - h: The output h vector over the whole time sequence.
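+  ///
+  /// A minimal shape sketch (sizes are illustrative: 4 time steps, batch 2,
+  /// 3 inputs, 5 cell units; the peephole weights are required arguments even
+  /// when `usePeephole` is false):
+  ///
+  /// ```swift
+  /// let x = Tensor<Float>(zeros: [4, 2, 3])
+  /// let csPrev = Tensor<Float>(zeros: [2, 5])
+  /// let hPrev = Tensor<Float>(zeros: [2, 5])
+  /// let w = Tensor<Float>(zeros: [3 + 5, 4 * 5])
+  /// let peephole = Tensor<Float>(zeros: [5])
+  /// let b = Tensor<Float>(zeros: [4 * 5])
+  /// let out = _Raw.blockLSTMV2(
+  ///   seqLenMax: Tensor<Int64>(4), x, csPrev: csPrev, hPrev: hPrev,
+  ///   w: w, wci: peephole, wcf: peephole, wco: peephole, b)
+  /// // `out.h` has shape [4, 2, 5]: one output vector per time step.
+  /// ```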
+ @inlinable @inline(__always) + public static func blockLSTMV2( + seqLenMax: Tensor, + _ x: Tensor, + csPrev: Tensor, + hPrev: Tensor, + w: Tensor, + wci: Tensor, + wcf: Tensor, + wco: Tensor, + _ b: Tensor, + cellClip: Double = 0, + usePeephole: Bool = false + ) -> ( + i: Tensor, cs: Tensor, f: Tensor, o: Tensor, ci: Tensor, co: Tensor, + h: Tensor + ) { + _RawTFEager.blockLSTMV2( + seqLenMax: seqLenMax, x, csPrev: csPrev, hPrev: hPrev, w: w, wci: wci, wcf: wcf, wco: wco, + b, cellClip: cellClip, usePeephole: usePeephole) + } + + /// Aggregates the summary of accumulated stats for the batch. + /// + /// The summary stats contains gradients and hessians accumulated for each node, feature dimension id and bucket. + /// + /// - Parameters: + /// - node_ids: int32; Rank 1 Tensor containing node ids for each example, shape [batch_size]. + /// - gradients: float32; Rank 2 Tensor (shape=[batch_size, logits_dimension]) with gradients for each example. + /// - hessians: float32; Rank 2 Tensor (shape=[batch_size, hessian_dimension]) with hessians for each example. + /// - feature: int32; Rank 2 feature Tensors (shape=[batch_size, feature_dimension]). + /// + /// - Attrs: + /// - max_splits: int; the maximum number of splits possible in the whole tree. + /// - num_buckets: int; equals to the maximum possible value of bucketized feature. + /// + /// - Output stats_summary: output Rank 4 Tensor (shape=[splits, feature_dimension, buckets, logits_dimension + hessian_dimension]) + /// containing accumulated stats for each node, feature dimension and bucket. + @inlinable @inline(__always) + public static func boostedTreesAggregateStats( + nodeIds: Tensor, + gradients: Tensor, + hessians: Tensor, + feature: Tensor, + maxSplits: Int64, + numBuckets: Int64 + ) -> Tensor { + switch commonBackend( + commonBackend( + commonBackend(nodeIds.handle.backend, gradients.handle.backend), hessians.handle.backend), + feature.handle.backend) + { + case .XLA: + let output_device = feature.device + let nodeIds = Tensor(copying: nodeIds, to: .defaultTFEager) + let gradients = Tensor(copying: gradients, to: .defaultTFEager) + let hessians = Tensor(copying: hessians, to: .defaultTFEager) + let feature = Tensor(copying: feature, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.boostedTreesAggregateStats( + nodeIds: nodeIds, gradients: gradients, hessians: hessians, feature: feature, + maxSplits: maxSplits, numBuckets: numBuckets), to: output_device) + case .TF_EAGER: + return _RawTFEager.boostedTreesAggregateStats( + nodeIds: nodeIds, gradients: gradients, hessians: hessians, feature: feature, + maxSplits: maxSplits, numBuckets: numBuckets) + } + + } + + /// Bucketize each feature based on bucket boundaries. + /// + /// An op that returns a list of float tensors, where each tensor represents the + /// bucketized values for a single feature. + /// + /// - Parameters: + /// - float_values: float; List of Rank 1 Tensor each containing float values for a single feature. + /// - bucket_boundaries: float; List of Rank 1 Tensors each containing the bucket boundaries for a single + /// feature. + /// + /// - Attr num_features: inferred int; number of features. + /// + /// - Output buckets: int; List of Rank 1 Tensors each containing the bucketized values for a single feature. 
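+  ///
+  /// A minimal usage sketch (illustrative values; one feature with boundaries
+  /// at 1.0 and 2.0):
+  ///
+  /// ```swift
+  /// let buckets = _Raw.boostedTreesBucketize(
+  ///   floatValues: [Tensor<Float>([0.5, 1.5, 2.5])],
+  ///   bucketBoundaries: [Tensor<Float>([1.0, 2.0])])
+  /// // `buckets[0]` holds the bucket ids [0, 1, 2] as an Int32 tensor.
+  /// ```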
+ @inlinable @inline(__always) + public static func boostedTreesBucketize( + floatValues: [Tensor], + bucketBoundaries: [Tensor] + ) -> [Tensor] { + _RawTFEager.boostedTreesBucketize( + floatValues: floatValues, bucketBoundaries: bucketBoundaries) + } + + /// Calculates gains for each feature and returns the best possible split information for the feature. + /// + /// The split information is the best threshold (bucket id), gains and left/right node contributions per node for each feature. + /// + /// It is possible that not all nodes can be split on each feature. Hence, the list of possible nodes can differ between the features. Therefore, we return `node_ids_list` for each feature, containing the list of nodes that this feature can be used to split. + /// + /// In this manner, the output is the best split per features and per node, so that it needs to be combined later to produce the best split for each node (among all possible features). + /// + /// The output shapes are compatible in a way that the first dimension of all tensors are the same and equal to the number of possible split nodes for each feature. + /// + /// - Parameters: + /// - node_id_range: A Rank 1 tensor (shape=[2]) to specify the range [first, last) of node ids to process within `stats_summary_list`. The nodes are iterated between the two nodes specified by the tensor, as like `for node_id in range(node_id_range[0], node_id_range[1])` (Note that the last index node_id_range[1] is exclusive). + /// - stats_summary: A Rank 4 tensor (#shape=[max_splits, feature_dims, bucket, stats_dims]) for accumulated stats summary (gradient/hessian) per node, per dimension, per buckets for each feature. + /// The first dimension of the tensor is the maximum number of splits, and thus not all elements of it will be used, but only the indexes specified by node_ids will be used. + /// - l1: l1 regularization factor on leaf weights, per instance based. + /// - l2: l2 regularization factor on leaf weights, per instance based. + /// - tree_complexity: adjustment to the gain, per leaf based. + /// - min_node_weight: minimum avg of hessians in a node before required for the node to be considered for splitting. + /// + /// - Attrs: + /// - logits_dimension: The dimension of logit, i.e., number of classes. + /// - split_type: A string indicating if this Op should perform inequality split or equality split. + /// + /// - Outputs: + /// - node_ids: A Rank 1 tensors indicating possible split node ids for each feature. The length of the list is num_features, but each tensor has different size as each feature provides different possible nodes. See above for details like shapes and sizes. + /// - gains: A Rank 1 tensors indicating the best gains for each feature to split for certain nodes. See above for details like shapes and sizes. + /// - feature_dimensions: A Rank 1 tensors indicating the best feature dimension for each feature to split for certain nodes if the feature is multi-dimension. See above for details like shapes and sizes. + /// - thresholds: A Rank 1 tensors indicating the bucket id to compare with (as a threshold) for split in each node. See above for details like shapes and sizes. + /// - left_node_contribs: A Rank 2 tensors indicating the contribution of the left nodes when branching from parent nodes (given by the tensor element in the output node_ids_list) to the left direction by the given threshold for each feature. This value will be used to make the left node value by adding to the parent node value. 
Second dimension size is 1 for 1-dimensional logits, but would be larger for multi-class problems. See above for details like shapes and sizes. + /// - right_node_contribs: A Rank 2 tensors, with the same shape/conditions as left_node_contribs_list, but just that the value is for the right node. + /// - split_with_default_directions: A Rank 1 tensors indicating the which direction to go if data is missing. See above for details like shapes and sizes. + /// Inequality with default left returns 0, inequality with default right returns 1, equality with default right returns 2. + @inlinable @inline(__always) + public static func boostedTreesCalculateBestFeatureSplit( + nodeIdRange: Tensor, + statsSummary: Tensor, + l1: Tensor, + l2: Tensor, + treeComplexity: Tensor, + minNodeWeight: Tensor, + logitsDimension: Int64, + splitType: SplitType = .inequality + ) -> ( + nodeIds: Tensor, gains: Tensor, featureDimensions: Tensor, + thresholds: Tensor, leftNodeContribs: Tensor, rightNodeContribs: Tensor, + splitWithDefaultDirections: StringTensor + ) { + _RawTFEager.boostedTreesCalculateBestFeatureSplit( + nodeIdRange: nodeIdRange, statsSummary: statsSummary, l1: l1, l2: l2, + treeComplexity: treeComplexity, minNodeWeight: minNodeWeight, + logitsDimension: logitsDimension, splitType: splitType) + } + + /// Calculates gains for each feature and returns the best possible split information for the feature. + /// + /// The split information is the best threshold (bucket id), gains and left/right node contributions per node for each feature. + /// + /// It is possible that not all nodes can be split on each feature. Hence, the list of possible nodes can differ between the features. Therefore, we return `node_ids_list` for each feature, containing the list of nodes that this feature can be used to split. + /// + /// In this manner, the output is the best split per features and per node, so that it needs to be combined later to produce the best split for each node (among all possible features). + /// + /// The length of output lists are all of the same length, `num_features`. + /// The output shapes are compatible in a way that the first dimension of all tensors of all lists are the same and equal to the number of possible split nodes for each feature. + /// + /// - Parameters: + /// - node_id_range: A Rank 1 tensor (shape=[2]) to specify the range [first, last) of node ids to process within `stats_summary_list`. The nodes are iterated between the two nodes specified by the tensor, as like `for node_id in range(node_id_range[0], node_id_range[1])` (Note that the last index node_id_range[1] is exclusive). + /// - stats_summary_list: A list of Rank 3 tensor (#shape=[max_splits, bucket, 2]) for accumulated stats summary (gradient/hessian) per node per buckets for each feature. The first dimension of the tensor is the maximum number of splits, and thus not all elements of it will be used, but only the indexes specified by node_ids will be used. + /// - l1: l1 regularization factor on leaf weights, per instance based. + /// - l2: l2 regularization factor on leaf weights, per instance based. + /// - tree_complexity: adjustment to the gain, per leaf based. + /// - min_node_weight: minimum avg of hessians in a node before required for the node to be considered for splitting. + /// + /// - Attrs: + /// - max_splits: the number of nodes that can be split in the whole tree. Used as a dimension of output tensors. + /// - num_features: inferred from the size of `stats_summary_list`; the number of total features. 
+  ///
+  /// - Outputs:
+  ///     - node_ids_list: An output list of Rank 1 tensors indicating possible split node ids for each feature. The length of the list is num_features, but each tensor has a different size, as each feature provides different possible nodes. See above for details like shapes and sizes.
+  ///     - gains_list: An output list of Rank 1 tensors indicating the best gains for each feature to split for certain nodes. See above for details like shapes and sizes.
+  ///     - thresholds_list: An output list of Rank 1 tensors indicating the bucket id to compare with (as a threshold) for split in each node. See above for details like shapes and sizes.
+  ///     - left_node_contribs_list: A list of Rank 2 tensors indicating the contribution of the left nodes when branching from parent nodes (given by the tensor element in the output node_ids_list) to the left direction by the given threshold for each feature. This value will be used to make the left node value by adding to the parent node value. Second dimension size is 1 for 1-dimensional logits, but would be larger for multi-class problems. See above for details like shapes and sizes.
+  ///     - right_node_contribs_list: A list of Rank 2 tensors, with the same shape/conditions as left_node_contribs_list, but just that the value is for the right node.
+  @inlinable @inline(__always)
+  public static func boostedTreesCalculateBestGainsPerFeature(
+    nodeIdRange: Tensor<Int32>,
+    statsSummaryList: [Tensor<Float>],
+    l1: Tensor<Float>,
+    l2: Tensor<Float>,
+    treeComplexity: Tensor<Float>,
+    minNodeWeight: Tensor<Float>,
+    maxSplits: Int64
+  ) -> (
+    nodeIdsList: [Tensor<Int32>], gainsList: [Tensor<Float>], thresholdsList: [Tensor<Int32>],
+    leftNodeContribsList: [Tensor<Float>], rightNodeContribsList: [Tensor<Float>]
+  ) {
+    _RawTFEager.boostedTreesCalculateBestGainsPerFeature(
+      nodeIdRange: nodeIdRange, statsSummaryList: statsSummaryList, l1: l1, l2: l2,
+      treeComplexity: treeComplexity, minNodeWeight: minNodeWeight, maxSplits: maxSplits)
+  }
+
+  /// Calculates the prior from the training data (the bias) and fills in the first node with the logits' prior. Returns a boolean indicating whether to continue centering.
+  ///
+  /// - Parameters:
+  ///     - tree_ensemble_handle: Handle to the tree ensemble.
+  ///     - mean_gradients: A tensor with shape=[logits_dimension] with the mean of gradients for a first node.
+  ///     - mean_hessians: A tensor with shape=[logits_dimension] with the mean of hessians for a first node.
+  ///     - l1: l1 regularization factor on leaf weights, per instance based.
+  ///     - l2: l2 regularization factor on leaf weights, per instance based.
+  ///
+  /// - Output continue_centering: Bool, whether to continue bias centering.
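+  ///
+  /// A minimal call sketch (illustrative values; assumes a freshly created,
+  /// empty ensemble resource):
+  ///
+  /// ```swift
+  /// let handle = _Raw.boostedTreesEnsembleResourceHandleOp(
+  ///   container: "", sharedName: "ensemble")
+  /// _Raw.boostedTreesCreateEnsemble(
+  ///   treeEnsembleHandle: handle, stampToken: Tensor<Int64>(0),
+  ///   treeEnsembleSerialized: StringTensor(""))
+  /// let keepCentering = _Raw.boostedTreesCenterBias(
+  ///   treeEnsembleHandle: handle,
+  ///   meanGradients: Tensor<Float>([0.1]), meanHessians: Tensor<Float>([0.2]),
+  ///   l1: Tensor<Float>(0), l2: Tensor<Float>(0))
+  /// // `keepCentering` is a scalar Bool tensor.
+  /// ```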
+ @inlinable @inline(__always) + public static func boostedTreesCenterBias( + treeEnsembleHandle: ResourceHandle, + meanGradients: Tensor, + meanHessians: Tensor, + l1: Tensor, + l2: Tensor + ) -> Tensor { + switch commonBackend( + commonBackend( + commonBackend(meanGradients.handle.backend, meanHessians.handle.backend), + l1.handle.backend), l2.handle.backend) + { + case .XLA: + let output_device = l2.device + let meanGradients = Tensor(copying: meanGradients, to: .defaultTFEager) + let meanHessians = Tensor(copying: meanHessians, to: .defaultTFEager) + let l1 = Tensor(copying: l1, to: .defaultTFEager) + let l2 = Tensor(copying: l2, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.boostedTreesCenterBias( + treeEnsembleHandle: treeEnsembleHandle, meanGradients: meanGradients, + meanHessians: meanHessians, l1: l1, l2: l2), to: output_device) + case .TF_EAGER: + return _RawTFEager.boostedTreesCenterBias( + treeEnsembleHandle: treeEnsembleHandle, meanGradients: meanGradients, + meanHessians: meanHessians, l1: l1, l2: l2) + } + + } + + /// Creates a tree ensemble model and returns a handle to it. + /// + /// - Parameters: + /// - tree_ensemble_handle: Handle to the tree ensemble resource to be created. + /// - stamp_token: Token to use as the initial value of the resource stamp. + /// - tree_ensemble_serialized: Serialized proto of the tree ensemble. + @inlinable @inline(__always) + public static func boostedTreesCreateEnsemble( + treeEnsembleHandle: ResourceHandle, + stampToken: Tensor, + treeEnsembleSerialized: StringTensor + ) { + _RawTFEager.boostedTreesCreateEnsemble( + treeEnsembleHandle: treeEnsembleHandle, stampToken: stampToken, + treeEnsembleSerialized: treeEnsembleSerialized) + } + + /// Create the Resource for Quantile Streams. + /// + /// - Parameters: + /// - quantile_stream_resource_handle: resource; Handle to quantile stream resource. + /// - epsilon: float; The required approximation error of the stream resource. + /// - num_streams: int; The number of streams managed by the resource that shares the same epsilon. + /// + /// - Attr max_elements: int; The maximum number of data points that can be fed to the stream. + @inlinable @inline(__always) + public static func boostedTreesCreateQuantileStreamResource( + quantileStreamResourceHandle: ResourceHandle, + epsilon: Tensor, + numStreams: Tensor, + maxElements: Int64 = 1_099_511_627_776 + ) { + _RawTFEager.boostedTreesCreateQuantileStreamResource( + quantileStreamResourceHandle: quantileStreamResourceHandle, epsilon: epsilon, + numStreams: numStreams, maxElements: maxElements) + } + + /// Deserializes a serialized tree ensemble config and replaces current tree + /// + /// ensemble. + /// + /// - Parameters: + /// - tree_ensemble_handle: Handle to the tree ensemble. + /// - stamp_token: Token to use as the new value of the resource stamp. + /// - tree_ensemble_serialized: Serialized proto of the ensemble. 
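+  ///
+  /// A minimal round-trip sketch (illustrative; assumes the handle refers to an
+  /// already initialized ensemble resource):
+  ///
+  /// ```swift
+  /// let handle = _Raw.boostedTreesEnsembleResourceHandleOp(
+  ///   container: "", sharedName: "ensemble")
+  /// let (stamp, serialized) = _Raw.boostedTreesSerializeEnsemble(
+  ///   treeEnsembleHandle: handle)
+  /// _Raw.boostedTreesDeserializeEnsemble(
+  ///   treeEnsembleHandle: handle, stampToken: stamp,
+  ///   treeEnsembleSerialized: serialized)
+  /// ```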
+ @inlinable @inline(__always) + public static func boostedTreesDeserializeEnsemble( + treeEnsembleHandle: ResourceHandle, + stampToken: Tensor, + treeEnsembleSerialized: StringTensor + ) { + _RawTFEager.boostedTreesDeserializeEnsemble( + treeEnsembleHandle: treeEnsembleHandle, stampToken: stampToken, + treeEnsembleSerialized: treeEnsembleSerialized) + } + + /// Creates a handle to a BoostedTreesEnsembleResource + @inlinable @inline(__always) + public static func boostedTreesEnsembleResourceHandleOp( + container: String, + sharedName: String + ) -> ResourceHandle { + _RawTFEager.boostedTreesEnsembleResourceHandleOp(container: container, sharedName: sharedName) + } + + /// Debugging/model interpretability outputs for each example. + /// + /// It traverses all the trees and computes debug metrics for individual examples, + /// such as getting split feature ids and logits after each split along the decision + /// path used to compute directional feature contributions. + /// + /// - Parameter bucketized_features: A list of rank 1 Tensors containing bucket id for each + /// feature. + /// + /// - Attrs: + /// - num_bucketized_features: Inferred. + /// - logits_dimension: scalar, dimension of the logits, to be used for constructing the protos in + /// examples_debug_outputs_serialized. + /// + /// - Output examples_debug_outputs_serialized: Output rank 1 Tensor containing a proto serialized as a string for each example. + @inlinable @inline(__always) + public static func boostedTreesExampleDebugOutputs( + treeEnsembleHandle: ResourceHandle, + bucketizedFeatures: [Tensor], + logitsDimension: Int64 + ) -> StringTensor { + _RawTFEager.boostedTreesExampleDebugOutputs( + treeEnsembleHandle: treeEnsembleHandle, bucketizedFeatures: bucketizedFeatures, + logitsDimension: logitsDimension) + } + + /// Flush the quantile summaries from each quantile stream resource. + /// + /// An op that outputs a list of quantile summaries of a quantile stream resource. + /// Each summary Tensor is rank 2, containing summaries (value, weight, min_rank, + /// max_rank) for a single feature. + /// + /// - Parameter quantile_stream_resource_handle: resource handle referring to a QuantileStreamResource. + @inlinable @inline(__always) + public static func boostedTreesFlushQuantileSummaries( + quantileStreamResourceHandle: ResourceHandle, + numFeatures: Int64 + ) -> [Tensor] { + _RawTFEager.boostedTreesFlushQuantileSummaries( + quantileStreamResourceHandle: quantileStreamResourceHandle, numFeatures: numFeatures) + } + + /// Retrieves the tree ensemble resource stamp token, number of trees and growing statistics. + /// + /// - Parameter tree_ensemble_handle: Handle to the tree ensemble. + /// + /// - Outputs: + /// - stamp_token: Stamp token of the tree ensemble resource. + /// - num_trees: The number of trees in the tree ensemble resource. + /// - num_finalized_trees: The number of trees that were finished successfully. + /// - num_attempted_layers: The number of layers we attempted to build (but not necessarily succeeded). + /// - last_layer_nodes_range: Rank size 2 tensor that contains start and end ids of the nodes in the latest + /// layer. 
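+  ///
+  /// A minimal usage sketch (illustrative; reads the bookkeeping counters of an
+  /// existing ensemble resource):
+  ///
+  /// ```swift
+  /// let handle = _Raw.boostedTreesEnsembleResourceHandleOp(
+  ///   container: "", sharedName: "ensemble")
+  /// let states = _Raw.boostedTreesGetEnsembleStates(treeEnsembleHandle: handle)
+  /// print(states.numTrees, states.numFinalizedTrees, states.numAttemptedLayers)
+  /// ```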
+ @inlinable @inline(__always) + public static func boostedTreesGetEnsembleStates( + treeEnsembleHandle: ResourceHandle + ) -> ( + stampToken: Tensor, numTrees: Tensor, numFinalizedTrees: Tensor, + numAttemptedLayers: Tensor, lastLayerNodesRange: Tensor + ) { + _RawTFEager.boostedTreesGetEnsembleStates(treeEnsembleHandle: treeEnsembleHandle) + } + + /// Makes the summary of quantiles for the batch. + /// + /// An op that takes a list of tensors (one tensor per feature) and outputs the + /// quantile summaries for each tensor. + /// + /// - Parameters: + /// - float_values: float; List of Rank 1 Tensors each containing values for a single feature. + /// - example_weights: float; Rank 1 Tensor with weights per instance. + /// - epsilon: float; The required maximum approximation error. + /// + /// - Attr num_features: int; Inferred from the size of float_values. + /// The number of float features. + /// + /// - Output summaries: float; List of Rank 2 Tensors each containing the quantile summary + /// (value, weight, min_rank, max_rank) of a single feature. + @inlinable @inline(__always) + public static func boostedTreesMakeQuantileSummaries( + floatValues: [Tensor], + exampleWeights: Tensor, + epsilon: Tensor + ) -> [Tensor] { + switch commonBackend( + commonBackend(commonBackend(floatValues), exampleWeights.handle.backend), + epsilon.handle.backend) + { + case .XLA: + let output_device = epsilon.device + let floatValues = [Tensor](copying: floatValues, to: .defaultTFEager) + let exampleWeights = Tensor(copying: exampleWeights, to: .defaultTFEager) + let epsilon = Tensor(copying: epsilon, to: .defaultTFEager) + return [Tensor]( + copying: _RawTFEager.boostedTreesMakeQuantileSummaries( + floatValues: floatValues, exampleWeights: exampleWeights, epsilon: epsilon), + to: output_device) + case .TF_EAGER: + return _RawTFEager.boostedTreesMakeQuantileSummaries( + floatValues: floatValues, exampleWeights: exampleWeights, epsilon: epsilon) + } + + } + + /// Makes the summary of accumulated stats for the batch. + /// + /// The summary stats contains gradients and hessians accumulated into the corresponding node and bucket for each example. + /// + /// - Parameters: + /// - node_ids: int32 Rank 1 Tensor containing node ids, which each example falls into for the requested layer. + /// - gradients: float32; Rank 2 Tensor (shape=[#examples, 1]) for gradients. + /// - hessians: float32; Rank 2 Tensor (shape=[#examples, 1]) for hessians. + /// - bucketized_features_list: int32 list of Rank 1 Tensors, each containing the bucketized feature (for each feature column). + /// + /// - Attrs: + /// - max_splits: int; the maximum number of splits possible in the whole tree. + /// - num_buckets: int; equals to the maximum possible value of bucketized feature. + /// - num_features: int; inferred from the size of bucketized_features_list; the number of features. + /// + /// - Output stats_summary: output Rank 4 Tensor (shape=[#features, #splits, #buckets, 2]) containing accumulated stats put into the corresponding node and bucket. The first index of 4th dimension refers to gradients, and the second to hessians. 
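+  ///
+  /// A minimal shape sketch (illustrative values: 4 examples, one feature
+  /// column, `maxSplits = 3`, `numBuckets = 2`):
+  ///
+  /// ```swift
+  /// let summary = _Raw.boostedTreesMakeStatsSummary(
+  ///   nodeIds: Tensor<Int32>([0, 0, 1, 2]),
+  ///   gradients: Tensor<Float>(shape: [4, 1], scalars: [0.1, 0.2, 0.3, 0.4]),
+  ///   hessians: Tensor<Float>(shape: [4, 1], scalars: [1, 1, 1, 1]),
+  ///   bucketizedFeaturesList: [Tensor<Int32>([0, 1, 0, 1])],
+  ///   maxSplits: 3, numBuckets: 2)
+  /// // `summary` has shape [1, 3, 2, 2]: [#features, #splits, #buckets, grad/hess].
+  /// ```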
+ @inlinable @inline(__always) + public static func boostedTreesMakeStatsSummary( + nodeIds: Tensor, + gradients: Tensor, + hessians: Tensor, + bucketizedFeaturesList: [Tensor], + maxSplits: Int64, + numBuckets: Int64 + ) -> Tensor { + switch commonBackend( + commonBackend( + commonBackend(nodeIds.handle.backend, gradients.handle.backend), hessians.handle.backend), + commonBackend(bucketizedFeaturesList)) + { + case .XLA: + let output_device = hessians.device + let nodeIds = Tensor(copying: nodeIds, to: .defaultTFEager) + let gradients = Tensor(copying: gradients, to: .defaultTFEager) + let hessians = Tensor(copying: hessians, to: .defaultTFEager) + let bucketizedFeaturesList = [Tensor]( + copying: bucketizedFeaturesList, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.boostedTreesMakeStatsSummary( + nodeIds: nodeIds, gradients: gradients, hessians: hessians, + bucketizedFeaturesList: bucketizedFeaturesList, maxSplits: maxSplits, + numBuckets: numBuckets), to: output_device) + case .TF_EAGER: + return _RawTFEager.boostedTreesMakeStatsSummary( + nodeIds: nodeIds, gradients: gradients, hessians: hessians, + bucketizedFeaturesList: bucketizedFeaturesList, maxSplits: maxSplits, + numBuckets: numBuckets) + } + + } + + /// Runs multiple additive regression ensemble predictors on input instances and + /// + /// computes the logits. It is designed to be used during prediction. + /// It traverses all the trees and calculates the final score for each instance. + /// + /// - Parameter bucketized_features: A list of rank 1 Tensors containing bucket id for each + /// feature. + /// + /// - Attrs: + /// - num_bucketized_features: Inferred. + /// - logits_dimension: scalar, dimension of the logits, to be used for partial logits + /// shape. + /// + /// - Output logits: Output rank 2 Tensor containing logits for each example. + @inlinable @inline(__always) + public static func boostedTreesPredict( + treeEnsembleHandle: ResourceHandle, + bucketizedFeatures: [Tensor], + logitsDimension: Int64 + ) -> Tensor { + _RawTFEager.boostedTreesPredict( + treeEnsembleHandle: treeEnsembleHandle, bucketizedFeatures: bucketizedFeatures, + logitsDimension: logitsDimension) + } + + /// Add the quantile summaries to each quantile stream resource. + /// + /// An op that adds a list of quantile summaries to a quantile stream resource. Each + /// summary Tensor is rank 2, containing summaries (value, weight, min_rank, max_rank) + /// for a single feature. + /// + /// - Parameters: + /// - quantile_stream_resource_handle: resource handle referring to a QuantileStreamResource. + /// - summaries: string; List of Rank 2 Tensor each containing the summaries for a single feature. + @inlinable @inline(__always) + public static func boostedTreesQuantileStreamResourceAddSummaries( + quantileStreamResourceHandle: ResourceHandle, + summaries: [Tensor] + ) { + _RawTFEager.boostedTreesQuantileStreamResourceAddSummaries( + quantileStreamResourceHandle: quantileStreamResourceHandle, summaries: summaries) + } + + /// Deserialize bucket boundaries and ready flag into current QuantileAccumulator. + /// + /// An op that deserializes bucket boundaries and are boundaries ready flag into current QuantileAccumulator. + /// + /// - Parameters: + /// - quantile_stream_resource_handle: resource handle referring to a QuantileStreamResource. + /// - bucket_boundaries: float; List of Rank 1 Tensors each containing the bucket boundaries for a feature. 
+ /// + /// - Attr num_streams: inferred int; number of features to get bucket boundaries for. + @inlinable @inline(__always) + public static func boostedTreesQuantileStreamResourceDeserialize( + quantileStreamResourceHandle: ResourceHandle, + bucketBoundaries: [Tensor] + ) { + _RawTFEager.boostedTreesQuantileStreamResourceDeserialize( + quantileStreamResourceHandle: quantileStreamResourceHandle, + bucketBoundaries: bucketBoundaries) + } + + /// Flush the summaries for a quantile stream resource. + /// + /// An op that flushes the summaries for a quantile stream resource. + /// + /// - Parameters: + /// - quantile_stream_resource_handle: resource handle referring to a QuantileStreamResource. + /// - num_buckets: int; approximate number of buckets unless using generate_quantiles. + /// + /// - Attr generate_quantiles: bool; If True, the output will be the num_quantiles for each stream where the ith + /// entry is the ith quantile of the input with an approximation error of epsilon. + /// Duplicate values may be present. + /// If False, the output will be the points in the histogram that we got which roughly + /// translates to 1/epsilon boundaries and without any duplicates. + /// Default to False. + @inlinable @inline(__always) + public static func boostedTreesQuantileStreamResourceFlush( + quantileStreamResourceHandle: ResourceHandle, + numBuckets: Tensor, + generateQuantiles: Bool = false + ) { + _RawTFEager.boostedTreesQuantileStreamResourceFlush( + quantileStreamResourceHandle: quantileStreamResourceHandle, numBuckets: numBuckets, + generateQuantiles: generateQuantiles) + } + + /// Generate the bucket boundaries for each feature based on accumulated summaries. + /// + /// An op that returns a list of float tensors for a quantile stream resource. Each + /// tensor is Rank 1 containing bucket boundaries for a single feature. + /// + /// - Parameter quantile_stream_resource_handle: resource handle referring to a QuantileStreamResource. + /// + /// - Attr num_features: inferred int; number of features to get bucket boundaries for. + /// + /// - Output bucket_boundaries: float; List of Rank 1 Tensors each containing the bucket boundaries for a feature. + @inlinable @inline(__always) + public static func boostedTreesQuantileStreamResourceGetBucketBoundaries( + quantileStreamResourceHandle: ResourceHandle, + numFeatures: Int64 + ) -> [Tensor] { + _RawTFEager.boostedTreesQuantileStreamResourceGetBucketBoundaries( + quantileStreamResourceHandle: quantileStreamResourceHandle, numFeatures: numFeatures) + } + + /// Creates a handle to a BoostedTreesQuantileStreamResource. + @inlinable @inline(__always) + public static func boostedTreesQuantileStreamResourceHandleOp( + container: String, + sharedName: String + ) -> ResourceHandle { + _RawTFEager.boostedTreesQuantileStreamResourceHandleOp( + container: container, sharedName: sharedName) + } + + /// Serializes the tree ensemble to a proto. + /// + /// - Parameter tree_ensemble_handle: Handle to the tree ensemble. + /// + /// - Outputs: + /// - stamp_token: Stamp token of the tree ensemble resource. + /// - tree_ensemble_serialized: Serialized proto of the ensemble. + @inlinable @inline(__always) + public static func boostedTreesSerializeEnsemble( + treeEnsembleHandle: ResourceHandle + ) -> (stampToken: Tensor, treeEnsembleSerialized: StringTensor) { + _RawTFEager.boostedTreesSerializeEnsemble(treeEnsembleHandle: treeEnsembleHandle) + } + + /// Aggregates the summary of accumulated stats for the batch. 
+ /// + /// The summary stats contains gradients and hessians accumulated for each node, bucket and dimension id. + /// + /// - Parameters: + /// - node_ids: int32; Rank 1 Tensor containing node ids for each example, shape [batch_size]. + /// - gradients: float32; Rank 2 Tensor (shape=[batch_size, logits_dimension]) with gradients for each example. + /// - hessians: float32; Rank 2 Tensor (shape=[batch_size, hessian_dimension]) with hessians for each example. + /// - feature_indices: int32; Rank 2 indices of feature sparse Tensors (shape=[number of sparse entries, 2]). + /// Number of sparse entries across all instances from the batch. The first value is + /// the index of the instance, the second is dimension of the feature. The second axis + /// can only have 2 values, i.e., the input dense version of Tensor can only be matrix. + /// - feature_values: int32; Rank 1 values of feature sparse Tensors (shape=[number of sparse entries]). + /// Number of sparse entries across all instances from the batch. The first value is + /// the index of the instance, the second is dimension of the feature. + /// - feature_shape: int32; Rank 1 dense shape of feature sparse Tensors (shape=[2]). + /// The first axis can only have 2 values, [batch_size, feature_dimension]. + /// + /// - Attrs: + /// - max_splits: int; the maximum number of splits possible in the whole tree. + /// - num_buckets: int; equals to the maximum possible value of bucketized feature + 1. + /// + /// - Outputs: + /// - stats_summary_indices: int32; Rank 2 indices of summary sparse Tensors (shape=[number of non zero statistics, 4]) + /// The second axis can only be 4 including node id, feature dimension, bucket id, and statistics_dimension. + /// statistics_dimension = logits_dimension + hessian_dimension. + /// - stats_summary_values: output Rank 1 Tensor (shape=[number of non zero statistics]) + /// - stats_summary_shape: output Rank 1 Tensor (shape=[4]) + /// The tensor has following 4 values: [max_splits, feature_dimension, num_buckets, statistics_dimension], + /// where statistics_dimension = gradient_dimension + hessian_dimension. gradient_dimension + /// is the same as label_dimension, i.e., the output space. hessian_dimension can be the same + /// as logits dimension when diagonal hessian is used, or label_dimension^2 when full + /// hessian is used. + @inlinable @inline(__always) + public static func boostedTreesSparseAggregateStats( + nodeIds: Tensor, + gradients: Tensor, + hessians: Tensor, + featureIndices: Tensor, + featureValues: Tensor, + featureShape: Tensor, + maxSplits: Int64, + numBuckets: Int64 + ) -> ( + statsSummaryIndices: Tensor, statsSummaryValues: Tensor, + statsSummaryShape: Tensor + ) { + _RawTFEager.boostedTreesSparseAggregateStats( + nodeIds: nodeIds, gradients: gradients, hessians: hessians, featureIndices: featureIndices, + featureValues: featureValues, featureShape: featureShape, maxSplits: maxSplits, + numBuckets: numBuckets) + } + + /// Calculates gains for each feature and returns the best possible split information for the feature. + /// + /// The split information is the best threshold (bucket id), gains and left/right node contributions per node for each feature. + /// + /// It is possible that not all nodes can be split on each feature. Hence, the list of possible nodes can differ between the features. Therefore, we return `node_ids_list` for each feature, containing the list of nodes that this feature can be used to split. 
+  ///
+  /// In this manner, the output is the best split per feature and per node, which then
+  /// needs to be combined later to produce the best split for each node (among all
+  /// possible features).
+  ///
+  /// The output shapes are compatible in a way that the first dimension of all tensors is
+  /// the same and equal to the number of possible split nodes for each feature.
+  ///
+  /// - Parameters:
+  ///     - node_id_range: A Rank 1 tensor (shape=[2]) to specify the range [first, last) of node ids to process within `stats_summary_list`. The nodes are iterated between the two nodes specified by the tensor, as in `for node_id in range(node_id_range[0], node_id_range[1])` (note that the last index node_id_range[1] is exclusive).
+  ///     - stats_summary_indices: A Rank 2 int64 tensor of dense shape [N, 4] (N specifies the number of non-zero values) for the accumulated stats summary (gradient/hessian) per node per bucket for each feature. The second dimension contains node id, feature dimension, bucket id, and stats dim.
+  ///         stats dim is the sum of the logits dimension and the hessian dimension; the hessian dimension can either be the logits dimension if a diagonal hessian is used, or the logits dimension squared if a full hessian is used.
+  ///     - stats_summary_values: A Rank 1 float tensor of dense shape [N] (N specifies the number of non-zero values), which supplies the values for each element in summary_indices.
+  ///     - stats_summary_shape: A Rank 1 tensor of dense shape [4], which specifies the dense shape of the sparse tensor, which is [num tree nodes, feature dimensions, num buckets, stats dim].
+  ///     - l1: l1 regularization factor on leaf weights, per instance based.
+  ///     - l2: l2 regularization factor on leaf weights, per instance based.
+  ///     - tree_complexity: adjustment to the gain, per leaf based.
+  ///     - min_node_weight: minimum average of hessians required in a node before the node is considered for splitting.
+  ///
+  /// - Attrs:
+  ///     - logits_dimension: The dimension of logit, i.e., number of classes.
+  ///     - split_type: A string indicating if this Op should perform inequality split or equality split.
+  ///
+  /// - Outputs:
+  ///     - node_ids: A Rank 1 tensor indicating possible node ids that can be split.
+  ///     - gains: A Rank 1 tensor indicating the best gains to split each node.
+  ///     - feature_dimensions: A Rank 1 tensor indicating the best feature dimension for each feature to split for each node.
+  ///     - thresholds: A Rank 1 tensor indicating the bucket id to compare with (as a threshold) for split in each node.
+  ///     - left_node_contribs: A Rank 2 tensor indicating the contribution of the left nodes when branching from parent nodes to the left direction by the given threshold for each feature.
+  ///         This value will be used to make the left node value by adding to the parent node value. Second dimension size is the logits dimension.
+  ///     - right_node_contribs: A Rank 2 tensor, with the same shape/conditions as left_node_contribs_list, but just that the value is for the right node.
+  ///     - split_with_default_directions: A Rank 1 tensor indicating which direction to go if data is missing.
+  ///         Inequality with default left returns 0, inequality with default right returns 1, equality with default right returns 2.
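+  ///
+  /// A minimal call sketch (only the shapes are meaningful here; a real
+  /// summary would come from `boostedTreesSparseAggregateStats`):
+  ///
+  /// ```swift
+  /// let result = _Raw.boostedTreesSparseCalculateBestFeatureSplit(
+  ///   nodeIdRange: Tensor<Int32>([0, 1]),
+  ///   statsSummaryIndices: Tensor<Int32>(shape: [1, 4], scalars: [0, 0, 0, 0]),
+  ///   statsSummaryValues: Tensor<Float>([1.0]),
+  ///   statsSummaryShape: Tensor<Int32>([1, 1, 2, 2]),
+  ///   l1: Tensor<Float>(0), l2: Tensor<Float>(0),
+  ///   treeComplexity: Tensor<Float>(0), minNodeWeight: Tensor<Float>(0),
+  ///   logitsDimension: 1)
+  /// // `result.nodeIds` lists the splittable nodes; the remaining fields are
+  /// // aligned with it element-wise.
+  /// ```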
+  @inlinable @inline(__always)
+  public static func boostedTreesSparseCalculateBestFeatureSplit(
+    nodeIdRange: Tensor<Int32>,
+    statsSummaryIndices: Tensor<Int32>,
+    statsSummaryValues: Tensor<Float>,
+    statsSummaryShape: Tensor<Int32>,
+    l1: Tensor<Float>,
+    l2: Tensor<Float>,
+    treeComplexity: Tensor<Float>,
+    minNodeWeight: Tensor<Float>,
+    logitsDimension: Int64,
+    splitType: SplitType1 = .inequality
+  ) -> (
+    nodeIds: Tensor<Int32>, gains: Tensor<Float>, featureDimensions: Tensor<Int32>,
+    thresholds: Tensor<Int32>, leftNodeContribs: Tensor<Float>, rightNodeContribs: Tensor<Float>,
+    splitWithDefaultDirections: StringTensor
+  ) {
+    _RawTFEager.boostedTreesSparseCalculateBestFeatureSplit(
+      nodeIdRange: nodeIdRange, statsSummaryIndices: statsSummaryIndices,
+      statsSummaryValues: statsSummaryValues, statsSummaryShape: statsSummaryShape, l1: l1,
+      l2: l2, treeComplexity: treeComplexity, minNodeWeight: minNodeWeight,
+      logitsDimension: logitsDimension, splitType: splitType)
+  }
+
+  /// Runs multiple additive regression ensemble predictors on input instances and
+  ///
+  /// computes the update to cached logits. It is designed to be used during training.
+  /// It traverses the trees starting from cached tree id and cached node id and
+  /// calculates the updates to be pushed to the cache.
+  ///
+  /// - Parameters:
+  ///   - cached_tree_ids: Rank 1 Tensor containing cached tree ids which is the starting
+  ///     tree of prediction.
+  ///   - cached_node_ids: Rank 1 Tensor containing cached node id which is the starting
+  ///     node of prediction.
+  ///   - bucketized_features: A list of rank 1 Tensors containing the bucket id for each
+  ///     feature.
+  ///
+  /// - Attrs:
+  ///   - num_bucketized_features: Inferred.
+  ///   - logits_dimension: scalar, dimension of the logits, to be used for partial logits
+  ///     shape.
+  ///
+  /// - Outputs:
+  ///   - partial_logits: Rank 2 Tensor containing logits update (with respect to cached
+  ///     values stored) for each example.
+  ///   - tree_ids: Rank 1 Tensor containing new tree ids for each example.
+  ///   - node_ids: Rank 1 Tensor containing new node ids in the new tree_ids.
+  @inlinable @inline(__always)
+  public static func boostedTreesTrainingPredict(
+    treeEnsembleHandle: ResourceHandle,
+    cachedTreeIds: Tensor<Int32>,
+    cachedNodeIds: Tensor<Int32>,
+    bucketizedFeatures: [Tensor<Int32>],
+    logitsDimension: Int64
+  ) -> (partialLogits: Tensor<Float>, treeIds: Tensor<Int32>, nodeIds: Tensor<Int32>) {
+    _RawTFEager.boostedTreesTrainingPredict(
+      treeEnsembleHandle: treeEnsembleHandle, cachedTreeIds: cachedTreeIds,
+      cachedNodeIds: cachedNodeIds, bucketizedFeatures: bucketizedFeatures,
+      logitsDimension: logitsDimension)
+  }
+
+  /// Updates the tree ensemble by either adding a layer to the last tree being grown
+  ///
+  /// or by starting a new tree.
+  ///
+  /// - Parameters:
+  ///   - tree_ensemble_handle: Handle to the ensemble variable.
+  ///   - feature_ids: Rank 1 tensor with ids for each feature. This is the real id of
+  ///     the feature that will be used in the split.
+  ///   - node_ids: List of rank 1 tensors representing the nodes for which this feature
+  ///     has a split.
+  ///   - gains: List of rank 1 tensors representing the gains for each of the feature's
+  ///     splits.
+  ///   - thresholds: List of rank 1 tensors representing the thresholds for each of the
+  ///     feature's splits.
+  ///   - left_node_contribs: List of rank 2 tensors with left leaf contribs for each of
+  ///     the feature's splits. Will be added to the previous node values to constitute
+  ///     the values of the left nodes.
+  ///   - right_node_contribs: List of rank 2 tensors with right leaf contribs for each
+  ///     of the feature's splits. Will be added to the previous node values to constitute
+  ///     the values of the right nodes.
+  ///   - max_depth: Max depth of the tree to build.
+  ///   - learning_rate: shrinkage constant for each new tree.
+  ///
+  /// - Attrs:
+  ///   - pruning_mode: 0-No pruning, 1-Pre-pruning, 2-Post-pruning.
+  ///   - num_features: Number of features that have best splits returned. INFERRED.
+  @inlinable @inline(__always)
+  public static func boostedTreesUpdateEnsemble(
+    treeEnsembleHandle: ResourceHandle,
+    featureIds: Tensor<Int32>,
+    nodeIds: [Tensor<Int32>],
+    gains: [Tensor<Float>],
+    thresholds: [Tensor<Int32>],
+    leftNodeContribs: [Tensor<Float>],
+    rightNodeContribs: [Tensor<Float>],
+    maxDepth: Tensor<Int32>,
+    learningRate: Tensor<Float>,
+    pruningMode: Int64
+  ) {
+    _RawTFEager.boostedTreesUpdateEnsemble(
+      treeEnsembleHandle: treeEnsembleHandle, featureIds: featureIds, nodeIds: nodeIds,
+      gains: gains, thresholds: thresholds, leftNodeContribs: leftNodeContribs,
+      rightNodeContribs: rightNodeContribs, maxDepth: maxDepth, learningRate: learningRate,
+      pruningMode: pruningMode)
+  }
+
+  /// Updates the tree ensemble by adding a layer to the last tree being grown
+  /// or by starting a new tree.
+  ///
+  /// - Parameters:
+  ///   - tree_ensemble_handle: Handle to the ensemble variable.
+  ///   - feature_ids: Rank 1 tensor with ids for each feature. This is the real id of
+  ///     the feature that will be used in the split.
+  ///   - dimension_ids: List of rank 1 tensors representing the dimension in each feature.
+  ///   - node_ids: List of rank 1 tensors representing the nodes for which this feature
+  ///     has a split.
+  ///   - gains: List of rank 1 tensors representing the gains for each of the feature's
+  ///     splits.
+  ///   - thresholds: List of rank 1 tensors representing the thresholds for each of the
+  ///     feature's splits.
+  ///   - left_node_contribs: List of rank 2 tensors with left leaf contribs for each of
+  ///     the feature's splits. Will be added to the previous node values to constitute
+  ///     the values of the left nodes.
+  ///   - right_node_contribs: List of rank 2 tensors with right leaf contribs for each
+  ///     of the feature's splits. Will be added to the previous node values to constitute
+  ///     the values of the right nodes.
+  ///   - split_types: List of rank 1 tensors representing the split type for each feature.
+  ///   - max_depth: Max depth of the tree to build.
+  ///   - learning_rate: shrinkage constant for each new tree.
+  ///   - pruning_mode: 0-No pruning, 1-Pre-pruning, 2-Post-pruning.
+  ///
+  /// - Attrs:
+  ///   - num_features: Number of features that have best splits returned. INFERRED.
+  ///   - logits_dimension: scalar, dimension of the logits
+  @inlinable @inline(__always)
+  public static func boostedTreesUpdateEnsembleV2(
+    treeEnsembleHandle: ResourceHandle,
+    featureIds: Tensor<Int32>,
+    dimensionIds: [Tensor<Int32>],
+    nodeIds: [Tensor<Int32>],
+    gains: [Tensor<Float>],
+    thresholds: [Tensor<Int32>],
+    leftNodeContribs: [Tensor<Float>],
+    rightNodeContribs: [Tensor<Float>],
+    splitTypes: [StringTensor],
+    maxDepth: Tensor<Int32>,
+    learningRate: Tensor<Float>,
+    pruningMode: Tensor<Int32>,
+    logitsDimension: Int64 = 1
+  ) {
+    _RawTFEager.boostedTreesUpdateEnsembleV2(
+      treeEnsembleHandle: treeEnsembleHandle, featureIds: featureIds, dimensionIds: dimensionIds,
+      nodeIds: nodeIds, gains: gains, thresholds: thresholds, leftNodeContribs: leftNodeContribs,
+      rightNodeContribs: rightNodeContribs, splitTypes: splitTypes, maxDepth: maxDepth,
+      learningRate: learningRate, pruningMode: pruningMode, logitsDimension: logitsDimension)
+  }
+
+  /// Return the shape of s0 op s1 with broadcast.
+  ///
+  /// Given `s0` and `s1`, tensors that represent shapes, compute `r0`, the
+  /// broadcasted shape. `s0`, `s1` and `r0` are all integer vectors.
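+  ///
+  /// For example (a sketch; the result follows standard broadcasting rules):
+  ///
+  /// ```swift
+  /// let s0 = Tensor<Int32>([2, 1, 3])
+  /// let s1 = Tensor<Int32>([4, 3])
+  /// let r0 = _Raw.broadcastArgs(s0: s0, s1: s1)  // [2, 4, 3]
+  /// ```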
+  @inlinable @inline(__always)
+  public static func broadcastArgs<T: TensorFlowIndex>(
+    s0: Tensor<T>,
+    s1: Tensor<T>
+  ) -> Tensor<T> {
+    switch commonBackend(s0.handle.backend, s1.handle.backend) {
+    case .XLA:
+      let output_device = s1.device
+      let s0 = Tensor(copying: s0, to: .defaultTFEager)
+      let s1 = Tensor(copying: s1, to: .defaultTFEager)
+      return Tensor(copying: _RawTFEager.broadcastArgs(s0: s0, s1: s1), to: output_device)
+    case .TF_EAGER:
+      return _RawTFEager.broadcastArgs(s0: s0, s1: s1)
+    }
+
+  }
+
+  /// Return the reduction indices for computing gradients of s0 op s1 with broadcast.
+  ///
+  /// This is typically used by gradient computations for a broadcasting operation.
+  @inlinable @inline(__always)
+  public static func broadcastGradientArgs<T: TensorFlowIndex>(
+    s0: Tensor<T>,
+    s1: Tensor<T>
+  ) -> (r0: Tensor<T>, r1: Tensor<T>) {
+    switch commonBackend(s0.handle.backend, s1.handle.backend) {
+    case .XLA:
+      return _RawXLA.broadcastGradientArgs(s0: s0, s1: s1)
+    case .TF_EAGER:
+      return _RawTFEager.broadcastGradientArgs(s0: s0, s1: s1)
+    }
+
+  }
+
+  /// Broadcast an array for a compatible shape.
+  ///
+  /// Broadcasting is the process of making arrays have compatible shapes
+  /// for arithmetic operations. Two shapes are compatible if for each
+  /// dimension pair they are either equal or one of them is one. When trying
+  /// to broadcast a Tensor to a shape, it starts with the trailing dimensions,
+  /// and works its way forward.
+  ///
+  /// For example,
+  ///
+  /// >>> x = tf.constant([1, 2, 3])
+  /// >>> y = tf.broadcast_to(x, [3, 3])
+  /// >>> print(y)
+  /// tf.Tensor(
+  ///     [[1 2 3]
+  ///      [1 2 3]
+  ///      [1 2 3]], shape=(3, 3), dtype=int32)
+  ///
+  /// In the above example, the input Tensor with the shape of `[1, 3]`
+  /// is broadcasted to the output Tensor with a shape of `[3, 3]`.
+  ///
+  /// - Parameters:
+  ///   - input: A Tensor to broadcast.
+  ///   - shape: An 1-D `int` Tensor. The shape of the desired output.
+  ///
+  /// - Output output: A Tensor.
+  @inlinable @inline(__always)
+  public static func broadcastTo<
+    T: TensorFlowScalar,
+    Tidx: TensorFlowIndex
+  >(
+    _ input: Tensor<T>,
+    shape: Tensor<Tidx>
+  ) -> Tensor<T> {
+    switch commonBackend(input.handle.backend, shape.handle.backend) {
+    case .XLA:
+      return _RawXLA.broadcastTo(input, shape: shape)
+    case .TF_EAGER:
+      return _RawTFEager.broadcastTo(input, shape: shape)
+    }
+
+  }
+
+  /// Bucketizes 'input' based on 'boundaries'.
+  ///
+  /// For example, if the inputs are
+  ///     boundaries = [0, 10, 100]
+  ///     input = [[-5, 10000]
+  ///              [150, 10]
+  ///              [5, 100]]
+  ///
+  /// then the output will be
+  ///     output = [[0, 3]
+  ///               [3, 2]
+  ///               [1, 3]]
+  ///
+  /// - Parameter input: A Tensor of any shape containing int or float values.
+  ///
+  /// - Attr boundaries: A sorted list of floats giving the boundaries of the buckets.
+  ///
+  /// - Output output: Same shape as 'input', with each value of the input replaced with its bucket index.
+  ///
+  /// @compatibility(numpy)
+  /// Equivalent to np.digitize.
+  /// @end_compatibility
+  @inlinable @inline(__always)
+  public static func bucketize<T: TensorFlowNumeric>(
+    _ input: Tensor<T>,
+    boundaries: [Double]
+  ) -> Tensor<Int32> {
+    switch input.handle.backend {
+    case .XLA:
+      let output_device = input.device
+      let input = Tensor(copying: input, to: .defaultTFEager)
+      return Tensor(
+        copying: _RawTFEager.bucketize(input, boundaries: boundaries), to: output_device)
+    case .TF_EAGER:
+      return _RawTFEager.bucketize(input, boundaries: boundaries)
+    }
+
+  }
+
+  /// Records the bytes size of each element of `input_dataset` in a StatsAggregator.
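+  ///
+  /// A minimal usage sketch (hypothetical `dataset` handle; the tag and the
+  /// output metadata must describe the elements of the upstream dataset):
+  ///
+  /// ```swift
+  /// let withStats = _Raw.bytesProducedStatsDataset(
+  ///   inputDataset: dataset,  // an existing VariantHandle
+  ///   tag: StringTensor("bytes_produced"),
+  ///   outputTypes: [Float.tensorFlowDataType],
+  ///   outputShapes: [TensorShape([2])])
+  /// ```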
+  @inlinable @inline(__always)
+  public static func bytesProducedStatsDataset(
+    inputDataset: VariantHandle,
+    tag: StringTensor,
+    outputTypes: [TensorDataType],
+    outputShapes: [TensorShape?]
+  ) -> VariantHandle {
+    _RawTFEager.bytesProducedStatsDataset(
+      inputDataset: inputDataset, tag: tag, outputTypes: outputTypes, outputShapes: outputShapes)
+  }
+
+  /// Reads out the CSR components at batch `index`.
+  ///
+  /// This op is meant only for debugging / testing, and its interface is not expected
+  /// to be stable.
+  ///
+  /// - Parameters:
+  ///   - csr_sparse_matrix: A batched CSRSparseMatrix.
+  ///   - index: The index in `csr_sparse_matrix`'s batch.
+  ///
+  /// - Outputs:
+  ///   - row_ptrs: An array containing CSR matrix row pointers.
+  ///   - col_inds: An array containing CSR matrix column indices.
+  ///   - values: An array containing CSR matrix nonzero values.
+  @inlinable @inline(__always)
+  public static func cSRSparseMatrixComponents<Type: TensorFlowScalar>(
+    csrSparseMatrix: VariantHandle,
+    index: Tensor<Int32>
+  ) -> (rowPtrs: Tensor<Int32>, colInds: Tensor<Int32>, values: Tensor<Type>) {
+    _RawTFEager.cSRSparseMatrixComponents(csrSparseMatrix: csrSparseMatrix, index: index)
+  }
+
+  /// Convert a (possibly batched) CSRSparseMatrix to dense.
+  ///
+  /// - Parameter sparse_input: A batched CSRSparseMatrix.
+  ///
+  /// - Output dense_output: A dense tensor.
+  @inlinable @inline(__always)
+  public static func cSRSparseMatrixToDense<Type: TensorFlowScalar>(
+    sparseInput: VariantHandle
+  ) -> Tensor<Type> {
+    _RawTFEager.cSRSparseMatrixToDense(sparseInput: sparseInput)
+  }
+
+  /// Converts a (possibly batched) CSRSparseMatrix to a SparseTensor.
+  ///
+  /// - Parameter sparse_matrix: A (possibly batched) CSRSparseMatrix.
+  ///
+  /// - Outputs:
+  ///   - indices: SparseTensor indices.
+  ///   - values: SparseTensor values.
+  ///   - dense_shape: SparseTensor dense shape.
+  @inlinable @inline(__always)
+  public static func cSRSparseMatrixToSparseTensor<Type: TensorFlowScalar>(
+    sparseMatrix: VariantHandle
+  ) -> (indices: Tensor<Int64>, values: Tensor<Type>, denseShape: Tensor<Int64>) {
+    _RawTFEager.cSRSparseMatrixToSparseTensor(sparseMatrix: sparseMatrix)
+  }
+
+  @inlinable @inline(__always)
+  public static func cSVDataset<OutputTypes: TensorArrayProtocol>(
+    filenames: StringTensor,
+    compressionType: StringTensor,
+    bufferSize: Tensor<Int64>,
+    header: Tensor<Bool>,
+    fieldDelim: StringTensor,
+    useQuoteDelim: Tensor<Bool>,
+    naValue: StringTensor,
+    selectCols: Tensor<Int64>,
+    recordDefaults: OutputTypes,
+    outputShapes: [TensorShape?]
+  ) -> VariantHandle {
+    _RawTFEager.cSVDataset(
+      filenames: filenames, compressionType: compressionType, bufferSize: bufferSize,
+      header: header, fieldDelim: fieldDelim, useQuoteDelim: useQuoteDelim, naValue: naValue,
+      selectCols: selectCols, recordDefaults: recordDefaults, outputShapes: outputShapes)
+  }
+
+  /// Performs beam search decoding on the logits given in input.
+  ///
+  /// A note about the attribute merge_repeated: For the beam search decoder,
+  /// this means that if consecutive entries in a beam are the same, only
+  /// the first of these is emitted. That is, when the top path is "A B B B B",
+  /// "A B" is returned if merge_repeated = True but "A B B B B" is
+  /// returned if merge_repeated = False.
+  ///
+  /// - Parameters:
+  ///   - inputs: 3-D, shape: `(max_time x batch_size x num_classes)`, the logits.
+  ///   - sequence_length: A vector containing sequence lengths, size `(batch)`.
+  ///
+  /// - Attrs:
+  ///   - beam_width: A scalar >= 0 (beam search beam width).
+  ///   - top_paths: A scalar >= 0, <= beam_width (controls output size).
+  ///   - merge_repeated: If true, merge repeated classes in output.
+ /// + /// - Outputs: + /// - decoded_indices: A list (length: top_paths) of indices matrices. Matrix j, + /// size `(total_decoded_outputs[j] x 2)`, has indices of a + /// `SparseTensor`. The rows store: [batch, time]. + /// - decoded_values: A list (length: top_paths) of values vectors. Vector j, + /// size `(length total_decoded_outputs[j])`, has the values of a + /// `SparseTensor`. The vector stores the decoded classes for beam j. + /// - decoded_shape: A list (length: top_paths) of shape vector. Vector j, + /// size `(2)`, stores the shape of the decoded `SparseTensor[j]`. + /// Its values are: `[batch_size, max_decoded_length[j]]`. + /// - log_probability: A matrix, shaped: `(batch_size x top_paths)`. The + /// sequence log-probabilities. + @inlinable @inline(__always) + public static func cTCBeamSearchDecoder( + inputs: Tensor, + sequenceLength: Tensor, + beamWidth: Int64, + topPaths: Int64, + mergeRepeated: Bool = true + ) -> ( + decodedIndices: [Tensor], decodedValues: [Tensor], + decodedShape: [Tensor], logProbability: Tensor + ) { + _RawTFEager.cTCBeamSearchDecoder( + inputs: inputs, sequenceLength: sequenceLength, beamWidth: beamWidth, topPaths: topPaths, + mergeRepeated: mergeRepeated) + } + + /// Performs greedy decoding on the logits given in inputs. + /// + /// A note about the attribute merge_repeated: if enabled, when + /// consecutive logits' maximum indices are the same, only the first of + /// these is emitted. Labeling the blank '*', the sequence "A B B * B B" + /// becomes "A B B" if merge_repeated = True and "A B B B B" if + /// merge_repeated = False. + /// + /// Regardless of the value of merge_repeated, if the maximum index of a given + /// time and batch corresponds to the blank, index `(num_classes - 1)`, no new + /// element is emitted. + /// + /// - Parameters: + /// - inputs: 3-D, shape: `(max_time x batch_size x num_classes)`, the logits. + /// - sequence_length: A vector containing sequence lengths, size `(batch_size)`. + /// + /// - Attr merge_repeated: If True, merge repeated classes in output. + /// + /// - Outputs: + /// - decoded_indices: Indices matrix, size `(total_decoded_outputs x 2)`, + /// of a `SparseTensor`. The rows store: [batch, time]. + /// - decoded_values: Values vector, size: `(total_decoded_outputs)`, + /// of a `SparseTensor`. The vector stores the decoded classes. + /// - decoded_shape: Shape vector, size `(2)`, of the decoded SparseTensor. + /// Values are: `[batch_size, max_decoded_length]`. + /// - log_probability: Matrix, size `(batch_size x 1)`, containing sequence + /// log-probabilities. + @inlinable @inline(__always) + public static func cTCGreedyDecoder( + inputs: Tensor, + sequenceLength: Tensor, + mergeRepeated: Bool = false + ) -> ( + decodedIndices: Tensor, decodedValues: Tensor, decodedShape: Tensor, + logProbability: Tensor + ) { + _RawTFEager.cTCGreedyDecoder( + inputs: inputs, sequenceLength: sequenceLength, mergeRepeated: mergeRepeated) + } + + /// Calculates the CTC Loss (log probability) for each batch entry. Also calculates + /// + /// the gradient. This class performs the softmax operation for you, so inputs + /// should be e.g. linear projections of outputs by an LSTM. + /// + /// - Parameters: + /// - inputs: 3-D, shape: `(max_time x batch_size x num_classes)`, the logits. + /// - labels_indices: The indices of a `SparseTensor`. + /// `labels_indices(i, :) == [b, t]` means `labels_values(i)` stores the id for + /// `(batch b, time t)`. 
+  ///   - labels_values: The values (labels) associated with the given batch and time.
+  ///   - sequence_length: A vector containing sequence lengths (batch).
+  ///
+  /// - Attrs:
+  ///   - preprocess_collapse_repeated: Scalar, if true then repeated labels are
+  ///     collapsed prior to the CTC calculation.
+  ///   - ctc_merge_repeated: Scalar. If set to false, *during* CTC calculation
+  ///     repeated non-blank labels will not be merged and are interpreted as
+  ///     individual labels. This is a simplified version of CTC.
+  ///   - ignore_longer_outputs_than_inputs: Scalar. If set to true, during CTC
+  ///     calculation, items that have longer output sequences than input sequences
+  ///     are skipped: they don't contribute to the loss term and have zero-gradient.
+  ///
+  /// - Outputs:
+  ///   - loss: A vector (batch) containing log-probabilities.
+  ///   - gradient: The gradient of `loss`. 3-D, shape:
+  ///     `(max_time x batch_size x num_classes)`.
+  @inlinable @inline(__always)
+  public static func cTCLoss<T: FloatingPoint & TensorFlowScalar>(
+    inputs: Tensor<T>,
+    labelsIndices: Tensor<Int64>,
+    labelsValues: Tensor<Int32>,
+    sequenceLength: Tensor<Int32>,
+    preprocessCollapseRepeated: Bool = false,
+    ctcMergeRepeated: Bool = true,
+    ignoreLongerOutputsThanInputs: Bool = false
+  ) -> (loss: Tensor<T>, gradient: Tensor<T>) {
+    _RawTFEager.cTCLoss(
+      inputs: inputs, labelsIndices: labelsIndices, labelsValues: labelsValues,
+      sequenceLength: sequenceLength, preprocessCollapseRepeated: preprocessCollapseRepeated,
+      ctcMergeRepeated: ctcMergeRepeated,
+      ignoreLongerOutputsThanInputs: ignoreLongerOutputsThanInputs)
+  }
+
+  /// Creates a dataset that caches elements from `input_dataset`.
+  ///
+  /// A CacheDataset will iterate over the input_dataset, and store tensors. If the
+  /// cache already exists, the cache will be used. If the cache is inappropriate
+  /// (e.g. cannot be opened, contains tensors of the wrong shape / size), an error
+  /// will be returned when used.
+  ///
+  /// - Parameter filename: A path on the filesystem where we should cache the dataset. Note: this
+  ///   will be a directory.
+  @inlinable @inline(__always)
+  public static func cacheDataset(
+    inputDataset: VariantHandle,
+    filename: StringTensor,
+    outputTypes: [TensorDataType],
+    outputShapes: [TensorShape?]
+  ) -> VariantHandle {
+    _RawTFEager.cacheDataset(
+      inputDataset: inputDataset, filename: filename, outputTypes: outputTypes,
+      outputShapes: outputShapes)
+  }
+
+  @inlinable @inline(__always)
+  public static func cacheDatasetV2(
+    inputDataset: VariantHandle,
+    filename: StringTensor,
+    cache: ResourceHandle,
+    outputTypes: [TensorDataType],
+    outputShapes: [TensorShape?]
+  ) -> VariantHandle {
+    _RawTFEager.cacheDatasetV2(
+      inputDataset: inputDataset, filename: filename, cache: cache, outputTypes: outputTypes,
+      outputShapes: outputShapes)
+  }
+
+  /// Cast x of type SrcT to y of DstT.
+  @inlinable @inline(__always)
+  public static func cast<
+    Srct: TensorFlowScalar,
+    Dstt: TensorFlowScalar
+  >(
+    _ x: Tensor<Srct>,
+    truncate: Bool = false
+  ) -> Tensor<Dstt> {
+    switch x.handle.backend {
+    case .XLA:
+      return _RawXLA.cast(x, truncate: truncate)
+    case .TF_EAGER:
+      return _RawTFEager.cast(x, truncate: truncate)
+    }
+
+  }
+
+  /// Returns element-wise smallest integer not less than x.
+  @inlinable @inline(__always)
+  public static func ceil<T: FloatingPoint & TensorFlowScalar>(
+    _ x: Tensor<T>
+  ) -> Tensor<T> {
+    switch x.handle.backend {
+    case .XLA:
+      return _RawXLA.ceil(x)
+    case .TF_EAGER:
+      return _RawTFEager.ceil(x)
+    }
+
+  }
+
+  /// Checks a tensor for NaN and Inf values.
+ /// + /// When run, reports an `InvalidArgument` error if `tensor` has any values + /// that are not a number (NaN) or infinity (Inf). Otherwise, passes `tensor` as-is. + /// + /// - Attr message: Prefix of the error message. + @inlinable @inline(__always) + public static func checkNumerics( + _ tensor: Tensor, + message: String + ) -> Tensor { + switch tensor.handle.backend { + case .XLA: + let output_device = tensor.device + let tensor = Tensor(copying: tensor, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.checkNumerics(tensor, message: message), to: output_device) + case .TF_EAGER: + return _RawTFEager.checkNumerics(tensor, message: message) + } + + } + + /// Computes the Cholesky decomposition of one or more square matrices. + /// + /// The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions + /// form square matrices. + /// + /// The input has to be symmetric and positive definite. Only the lower-triangular + /// part of the input will be used for this operation. The upper-triangular part + /// will not be read. + /// + /// The output is a tensor of the same shape as the input + /// containing the Cholesky decompositions for all input submatrices `[..., :, :]`. + /// + /// **Note**: The gradient computation on GPU is faster for large matrices but + /// not for large batch dimensions when the submatrices are small. In this + /// case it might be faster to use the CPU. + /// + /// - Parameter input: Shape is `[..., M, M]`. + /// + /// - Output output: Shape is `[..., M, M]`. + @inlinable @inline(__always) + public static func cholesky( + _ input: Tensor + ) -> Tensor { + switch input.handle.backend { + case .XLA: + let output_device = input.device + let input = Tensor(copying: input, to: .defaultTFEager) + return Tensor(copying: _RawTFEager.cholesky(input), to: output_device) + case .TF_EAGER: + return _RawTFEager.cholesky(input) + } + + } + + /// Computes the reverse mode backpropagated gradient of the Cholesky algorithm. + /// + /// For an explanation see "Differentiation of the Cholesky algorithm" by + /// Iain Murray http://arxiv.org/abs/1602.07527. + /// + /// - Parameters: + /// - l: Output of batch Cholesky algorithm l = cholesky(A). Shape is `[..., M, M]`. + /// Algorithm depends only on lower triangular part of the innermost matrices of + /// this tensor. + /// - grad: df/dl where f is some scalar function. Shape is `[..., M, M]`. + /// Algorithm depends only on lower triangular part of the innermost matrices of + /// this tensor. + /// + /// - Output output: Symmetrized version of df/dA . Shape is `[..., M, M]` + @inlinable @inline(__always) + public static func choleskyGrad( + l: Tensor, + grad: Tensor + ) -> Tensor { + switch commonBackend(l.handle.backend, grad.handle.backend) { + case .XLA: + let output_device = grad.device + let l = Tensor(copying: l, to: .defaultTFEager) + let grad = Tensor(copying: grad, to: .defaultTFEager) + return Tensor(copying: _RawTFEager.choleskyGrad(l: l, grad: grad), to: output_device) + case .TF_EAGER: + return _RawTFEager.choleskyGrad(l: l, grad: grad) + } + + } + + @inlinable @inline(__always) + public static func chooseFastestDataset( + inputDatasets: [VariantHandle], + numExperiments: Int64, + outputTypes: [TensorDataType], + outputShapes: [TensorShape?] + ) -> VariantHandle { + _RawTFEager.chooseFastestDataset( + inputDatasets: inputDatasets, numExperiments: numExperiments, outputTypes: outputTypes, + outputShapes: outputShapes) + } + + /// Clips tensor values to a specified min and max. 
+ /// + /// Given a tensor `t`, this operation returns a tensor of the same type and + /// shape as `t` with its values clipped to `clip_value_min` and `clip_value_max`. + /// Any values less than `clip_value_min` are set to `clip_value_min`. Any values + /// greater than `clip_value_max` are set to `clip_value_max`. + /// + /// - Parameters: + /// - t: A `Tensor`. + /// - clip_value_min: A 0-D (scalar) `Tensor`, or a `Tensor` with the same shape + /// as `t`. The minimum value to clip by. + /// - clip_value_max: A 0-D (scalar) `Tensor`, or a `Tensor` with the same shape + /// as `t`. The maximum value to clip by. + /// + /// - Output output: A clipped `Tensor` with the same shape as input 't'. + @inlinable @inline(__always) + public static func clipByValue( + t: Tensor, + clipValueMin: Tensor, + clipValueMax: Tensor + ) -> Tensor { + switch commonBackend( + commonBackend(t.handle.backend, clipValueMin.handle.backend), clipValueMax.handle.backend) + { + case .XLA: + return _RawXLA.clipByValue(t: t, clipValueMin: clipValueMin, clipValueMax: clipValueMax) + case .TF_EAGER: + return _RawTFEager.clipByValue(t: t, clipValueMin: clipValueMin, clipValueMax: clipValueMax) + } + + } + + @inlinable @inline(__always) + public static func closeSummaryWriter( + writer: ResourceHandle + ) { + _RawTFEager.closeSummaryWriter(writer: writer) + } + + /// Receives a tensor value broadcast from another device. + @inlinable @inline(__always) + public static func collectiveBcastRecv( + groupSize: Int64, + groupKey: Int64, + instanceKey: Int64, + shape: TensorShape?, + communicationHint: String = "auto" + ) -> Tensor { + _RawTFEager.collectiveBcastRecv( + groupSize: groupSize, groupKey: groupKey, instanceKey: instanceKey, shape: shape, + communicationHint: communicationHint) + } + + /// Broadcasts a tensor value to one or more other devices. + @inlinable @inline(__always) + public static func collectiveBcastSend( + _ input: Tensor, + groupSize: Int64, + groupKey: Int64, + instanceKey: Int64, + shape: TensorShape?, + communicationHint: String = "auto" + ) -> Tensor { + switch input.handle.backend { + case .XLA: + let output_device = input.device + let input = Tensor(copying: input, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.collectiveBcastSend( + input, groupSize: groupSize, groupKey: groupKey, instanceKey: instanceKey, shape: shape, + communicationHint: communicationHint), to: output_device) + case .TF_EAGER: + return _RawTFEager.collectiveBcastSend( + input, groupSize: groupSize, groupKey: groupKey, instanceKey: instanceKey, shape: shape, + communicationHint: communicationHint) + } + + } + + /// Mutually accumulates multiple tensors of identical type and shape. + @inlinable @inline(__always) + public static func collectiveGather( + _ input: Tensor, + groupSize: Int64, + groupKey: Int64, + instanceKey: Int64, + shape: TensorShape?, + communicationHint: String = "auto" + ) -> Tensor { + switch input.handle.backend { + case .XLA: + let output_device = input.device + let input = Tensor(copying: input, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.collectiveGather( + input, groupSize: groupSize, groupKey: groupKey, instanceKey: instanceKey, shape: shape, + communicationHint: communicationHint), to: output_device) + case .TF_EAGER: + return _RawTFEager.collectiveGather( + input, groupSize: groupSize, groupKey: groupKey, instanceKey: instanceKey, shape: shape, + communicationHint: communicationHint) + } + + } + + /// An Op to permute tensors across replicated TPU instances. 
+ /// + /// Each instance supplies its own input. + /// + /// For example, suppose there are 4 TPU instances: `[A, B, C, D]`. Passing + /// source_target_pairs=`[[0,1],[1,2],[2,3],[3,0]]` gets the outputs: + /// `[D, A, B, C]`. + /// + /// - Parameters: + /// - input: The local input to be permuted. Currently only supports float and + /// bfloat16. + /// - source_target_pairs: A tensor with shape [num_pairs, 2]. + /// + /// - Attr T: The type of elements to be exchanged. + /// + /// - Output output: The permuted input. + @inlinable @inline(__always) + public static func collectivePermute( + _ input: Tensor, + sourceTargetPairs: Tensor + ) -> Tensor { + switch commonBackend(input.handle.backend, sourceTargetPairs.handle.backend) { + case .XLA: + let output_device = sourceTargetPairs.device + let input = Tensor(copying: input, to: .defaultTFEager) + let sourceTargetPairs = Tensor(copying: sourceTargetPairs, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.collectivePermute(input, sourceTargetPairs: sourceTargetPairs), + to: output_device) + case .TF_EAGER: + return _RawTFEager.collectivePermute(input, sourceTargetPairs: sourceTargetPairs) + } + + } + + /// Mutually reduces multiple tensors of identical type and shape. + @inlinable @inline(__always) + public static func collectiveReduce( + _ input: Tensor, + groupSize: Int64, + groupKey: Int64, + instanceKey: Int64, + mergeOp: MergeOp, + finalOp: FinalOp, + subdivOffsets: [Int32], + waitFor: [Int32], + communicationHint: String = "auto" + ) -> Tensor { + switch input.handle.backend { + case .XLA: + let output_device = input.device + let input = Tensor(copying: input, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.collectiveReduce( + input, groupSize: groupSize, groupKey: groupKey, instanceKey: instanceKey, + mergeOp: mergeOp, finalOp: finalOp, subdivOffsets: subdivOffsets, waitFor: waitFor, + communicationHint: communicationHint), to: output_device) + case .TF_EAGER: + return _RawTFEager.collectiveReduce( + input, groupSize: groupSize, groupKey: groupKey, instanceKey: instanceKey, + mergeOp: mergeOp, finalOp: finalOp, subdivOffsets: subdivOffsets, waitFor: waitFor, + communicationHint: communicationHint) + } + + } + + /// Greedily selects a subset of bounding boxes in descending order of score, + /// + /// This operation performs non_max_suppression on the inputs per batch, across + /// all classes. + /// Prunes away boxes that have high intersection-over-union (IOU) overlap + /// with previously selected boxes. Bounding boxes are supplied as + /// [y1, x1, y2, x2], where (y1, x1) and (y2, x2) are the coordinates of any + /// diagonal pair of box corners and the coordinates can be provided as normalized + /// (i.e., lying in the interval [0, 1]) or absolute. Note that this algorithm + /// is agnostic to where the origin is in the coordinate system. Also note that + /// this algorithm is invariant to orthogonal transformations and translations + /// of the coordinate system; thus translating or reflections of the coordinate + /// system result in the same boxes being selected by the algorithm. + /// The output of this operation is the final boxes, scores and classes tensor + /// returned after performing non_max_suppression. + /// + /// - Parameters: + /// - boxes: A 4-D float tensor of shape `[batch_size, num_boxes, q, 4]`. If `q` is 1 then + /// same boxes are used for all classes otherwise, if `q` is equal to number of + /// classes, class-specific boxes are used. 
+  ///   - scores: A 3-D float tensor of shape `[batch_size, num_boxes, num_classes]`
+  ///     representing a single score corresponding to each box (each row of boxes).
+  ///   - max_output_size_per_class: A scalar integer tensor representing the maximum number of
+  ///     boxes to be selected by non max suppression per class
+  ///   - max_total_size: A scalar representing the maximum number of boxes retained over all classes.
+  ///   - iou_threshold: A 0-D float tensor representing the threshold for deciding whether
+  ///     boxes overlap too much with respect to IOU.
+  ///   - score_threshold: A 0-D float tensor representing the threshold for deciding when to remove
+  ///     boxes based on score.
+  ///
+  /// - Attrs:
+  ///   - pad_per_class: If false, the output nmsed boxes, scores and classes
+  ///     are padded/clipped to `max_total_size`. If true, the
+  ///     output nmsed boxes, scores and classes are padded to be of length
+  ///     `max_size_per_class`*`num_classes`, unless it exceeds `max_total_size` in
+  ///     which case it is clipped to `max_total_size`. Defaults to false.
+  ///   - clip_boxes: If true, assume the box coordinates are between [0, 1] and clip the output boxes
+  ///     if they fall beyond [0, 1]. If false, do not do clipping and output the box
+  ///     coordinates as they are.
+  ///
+  /// - Outputs:
+  ///   - nmsed_boxes: A [batch_size, max_detections, 4] float32 tensor
+  ///     containing the non-max suppressed boxes.
+  ///   - nmsed_scores: A [batch_size, max_detections] float32 tensor
+  ///     containing the scores for the boxes.
+  ///   - nmsed_classes: A [batch_size, max_detections] float32 tensor
+  ///     containing the classes for the boxes.
+  ///   - valid_detections: A [batch_size] int32 tensor indicating the number of
+  ///     valid detections per batch item. Only the top num_detections[i] entries in
+  ///     nms_boxes[i], nms_scores[i] and nms_class[i] are valid. The rest of the
+  ///     entries are zero paddings.
+  @inlinable @inline(__always)
+  public static func combinedNonMaxSuppression(
+    boxes: Tensor<Float>,
+    scores: Tensor<Float>,
+    maxOutputSizePerClass: Tensor<Int32>,
+    maxTotalSize: Tensor<Int32>,
+    iouThreshold: Tensor<Float>,
+    scoreThreshold: Tensor<Float>,
+    padPerClass: Bool = false,
+    clipBoxes: Bool = true
+  ) -> (
+    nmsedBoxes: Tensor<Float>, nmsedScores: Tensor<Float>, nmsedClasses: Tensor<Float>,
+    validDetections: Tensor<Int32>
+  ) {
+    _RawTFEager.combinedNonMaxSuppression(
+      boxes: boxes, scores: scores, maxOutputSizePerClass: maxOutputSizePerClass,
+      maxTotalSize: maxTotalSize, iouThreshold: iouThreshold, scoreThreshold: scoreThreshold,
+      padPerClass: padPerClass, clipBoxes: clipBoxes)
+  }
+
+  /// Compare values of `input` to `threshold` and pack resulting bits into a `uint8`.
+  ///
+  /// Each comparison returns a boolean `true` (if `input_value > threshold`)
+  /// or `false` otherwise.
+  ///
+  /// This operation is useful for Locality-Sensitive-Hashing (LSH) and other
+  /// algorithms that use hashing approximations of cosine and `L2` distances;
+  /// codes can be generated from an input via:
+  ///
+  /// ```python
+  /// codebook_size = 50
+  /// codebook_bits = codebook_size * 32
+  /// codebook = tf.get_variable('codebook', [x.shape[-1].value, codebook_bits],
+  ///                            dtype=x.dtype,
+  ///                            initializer=tf.orthogonal_initializer())
+  /// codes = compare_and_threshold(tf.matmul(x, codebook), threshold=0.)
+  /// codes = tf.bitcast(codes, tf.int32)  # go from uint8 to int32
+  /// # now codes has shape x.shape[:-1] + [codebook_size]
+  /// ```
+  ///
+  /// **NOTE**: Currently, the innermost dimension of the tensor must be divisible
+  /// by 8.
+ /// + /// Given an `input` shaped `[s0, s1, ..., s_n]`, the output is + /// a `uint8` tensor shaped `[s0, s1, ..., s_n / 8]`. + /// + /// - Parameters: + /// - input: Values to compare against `threshold` and bitpack. + /// - threshold: Threshold to compare against. + /// + /// - Attr T: The type of the input and threshold. + /// + /// - Output output: The bitpacked comparisons. + @inlinable @inline(__always) + public static func compareAndBitpack( + _ input: Tensor, + threshold: Tensor + ) -> Tensor { + switch commonBackend(input.handle.backend, threshold.handle.backend) { + case .XLA: + let output_device = threshold.device + let input = Tensor(copying: input, to: .defaultTFEager) + let threshold = Tensor(copying: threshold, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.compareAndBitpack(input, threshold: threshold), to: output_device) + case .TF_EAGER: + return _RawTFEager.compareAndBitpack(input, threshold: threshold) + } + + } + + /// Converts two real numbers to a complex number. + /// + /// Given a tensor `real` representing the real part of a complex number, and a + /// tensor `imag` representing the imaginary part of a complex number, this + /// operation returns complex numbers elementwise of the form \\(a + bj\\), where + /// *a* represents the `real` part and *b* represents the `imag` part. + /// + /// The input tensors `real` and `imag` must have the same shape. + /// + /// For example: + /// + /// ``` + /// # tensor 'real' is [2.25, 3.25] + /// # tensor `imag` is [4.75, 5.75] + /// tf.complex(real, imag) ==> [[2.25 + 4.75j], [3.25 + 5.75j]] + /// ``` + @inlinable @inline(__always) + public static func complex< + T: FloatingPoint & TensorFlowScalar, + Tout: TensorFlowScalar + >( + real: Tensor, + imag: Tensor + ) -> Tensor { + switch commonBackend(real.handle.backend, imag.handle.backend) { + case .XLA: + let output_device = imag.device + let real = Tensor(copying: real, to: .defaultTFEager) + let imag = Tensor(copying: imag, to: .defaultTFEager) + return Tensor(copying: _RawTFEager.complex(real: real, imag: imag), to: output_device) + case .TF_EAGER: + return _RawTFEager.complex(real: real, imag: imag) + } + + } + + /// Computes the complex absolute value of a tensor. + /// + /// Given a tensor `x` of complex numbers, this operation returns a tensor of type + /// `float` or `double` that is the absolute value of each element in `x`. All + /// elements in `x` must be complex numbers of the form \\(a + bj\\). The absolute + /// value is computed as \\( \sqrt{a^2 + b^2}\\). + @inlinable @inline(__always) + public static func complexAbs< + T: TensorFlowScalar, + Tout: FloatingPoint & TensorFlowScalar + >( + _ x: Tensor + ) -> Tensor { + switch x.handle.backend { + case .XLA: + let output_device = x.device + let x = Tensor(copying: x, to: .defaultTFEager) + return Tensor(copying: _RawTFEager.complexAbs(x), to: output_device) + case .TF_EAGER: + return _RawTFEager.complexAbs(x) + } + + } + + @inlinable @inline(__always) + public static func complexStruct( + nA: Int64, + nB: Int64 + ) -> (a: [Tensor], b: [Tensor], c: TC) { + _RawTFEager.complexStruct(nA: nA, nB: nB) + } + + /// Computes the ids of the positions in sampled_candidates that match true_labels. + /// + /// When doing log-odds NCE, the result of this op should be passed through a + /// SparseToDense op, then added to the logits of the sampled candidates. This has + /// the effect of 'removing' the sampled labels that match the true labels by + /// making the classifier sure that they are sampled labels. 
+  ///
+  /// - Parameters:
+  ///   - true_classes: The true_classes output of UnpackSparseLabels.
+  ///   - sampled_candidates: The sampled_candidates output of CandidateSampler.
+  ///
+  /// - Attrs:
+  ///   - num_true: Number of true labels per context.
+  ///   - seed: If either seed or seed2 are set to be non-zero, the random number
+  ///     generator is seeded by the given seed. Otherwise, it is seeded by a
+  ///     random seed.
+  ///   - seed2: A second seed to avoid seed collision.
+  ///
+  /// - Outputs:
+  ///   - indices: A vector of indices corresponding to rows of true_candidates.
+  ///   - ids: A vector of IDs of positions in sampled_candidates that match a true_label
+  ///     for the row with the corresponding index in indices.
+  ///   - weights: A vector of the same length as indices and ids, in which each element
+  ///     is -FLOAT_MAX.
+  @inlinable @inline(__always)
+  public static func computeAccidentalHits(
+    trueClasses: Tensor<Int64>,
+    sampledCandidates: Tensor<Int64>,
+    numTrue: Int64,
+    seed: Int64 = 0,
+    seed2: Int64 = 0
+  ) -> (indices: Tensor<Int32>, ids: Tensor<Int64>, weights: Tensor<Float>) {
+    _RawTFEager.computeAccidentalHits(
+      trueClasses: trueClasses, sampledCandidates: sampledCandidates, numTrue: numTrue,
+      seed: seed, seed2: seed2)
+  }
+
+  /// Concatenates tensors along one dimension.
+  ///
+  /// - Parameters:
+  ///   - concat_dim: 0-D. The dimension along which to concatenate. Must be in the
+  ///     range [0, rank(values)).
+  ///   - values: The `N` Tensors to concatenate. Their ranks and types must match,
+  ///     and their sizes must match in all dimensions except `concat_dim`.
+  ///
+  /// - Output output: A `Tensor` with the concatenation of values stacked along the
+  ///   `concat_dim` dimension. This tensor's shape matches that of `values` except
+  ///   in `concat_dim` where it has the sum of the sizes.
+  @inlinable @inline(__always)
+  public static func concat<T: TensorFlowScalar>(
+    concatDim: Tensor<Int32>,
+    _ values: [Tensor<T>]
+  ) -> Tensor<T> {
+    switch commonBackend(concatDim.handle.backend, commonBackend(values)) {
+    case .XLA:
+      let output_device = concatDim.device
+      let concatDim = Tensor(copying: concatDim, to: .defaultTFEager)
+      let values = [Tensor<T>](copying: values, to: .defaultTFEager)
+      return Tensor(
+        copying: _RawTFEager.concat(concatDim: concatDim, values), to: output_device)
+    case .TF_EAGER:
+      return _RawTFEager.concat(concatDim: concatDim, values)
+    }
+
+  }
+
+  /// Computes offsets of concat inputs within its output.
+  ///
+  /// For example:
+  ///
+  /// ```
+  /// # 'x' is [2, 2, 7]
+  /// # 'y' is [2, 3, 7]
+  /// # 'z' is [2, 5, 7]
+  /// concat_offset(1, [x, y, z]) => [0, 0, 0], [0, 2, 0], [0, 5, 0]
+  /// ```
+  ///
+  /// This is typically used by gradient computations for a concat operation.
+  ///
+  /// - Parameters:
+  ///   - concat_dim: The dimension along which to concatenate.
+  ///   - shape: The `N` int32 vectors representing the shapes of the tensors being concatenated.
+  ///
+  /// - Output offset: The `N` int32 vectors representing the starting offset
+  ///   of input tensors within the concatenated output.
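+  ///
+  /// The same example expressed as a Swift call (a sketch; shape vectors as above):
+  ///
+  /// ```swift
+  /// let offsets = _Raw.concatOffset(
+  ///   concatDim: Tensor<Int32>(1),
+  ///   shape: [Tensor<Int32>([2, 2, 7]), Tensor<Int32>([2, 3, 7]), Tensor<Int32>([2, 5, 7])])
+  /// // offsets == [[0, 0, 0], [0, 2, 0], [0, 5, 0]]
+  /// ```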
+ @inlinable @inline(__always) + public static func concatOffset( + concatDim: Tensor, + shape: [Tensor] + ) -> [Tensor] { + switch commonBackend(concatDim.handle.backend, commonBackend(shape)) { + case .XLA: + let output_device = concatDim.device + let concatDim = Tensor(copying: concatDim, to: .defaultTFEager) + let shape = [Tensor](copying: shape, to: .defaultTFEager) + return [Tensor]( + copying: _RawTFEager.concatOffset(concatDim: concatDim, shape: shape), to: output_device) + case .TF_EAGER: + return _RawTFEager.concatOffset(concatDim: concatDim, shape: shape) + } + + } + + /// Concatenates tensors along one dimension. + /// + /// - Parameters: + /// - values: List of `N` Tensors to concatenate. Their ranks and types must match, + /// and their sizes must match in all dimensions except `concat_dim`. + /// - axis: 0-D. The dimension along which to concatenate. Must be in the + /// range [-rank(values), rank(values)). + /// + /// - Output output: A `Tensor` with the concatenation of values stacked along the + /// `concat_dim` dimension. This tensor's shape matches that of `values` except + /// in `concat_dim` where it has the sum of the sizes. + @inlinable @inline(__always) + public static func concatV2< + T: TensorFlowScalar, + Tidx: TensorFlowIndex + >( + _ values: [Tensor], + axis: Tensor + ) -> Tensor { + switch commonBackend(commonBackend(values), axis.handle.backend) { + case .XLA: + return _RawXLA.concatV2(values, axis: axis) + case .TF_EAGER: + return _RawTFEager.concatV2(values, axis: axis) + } + + } + + /// Creates a dataset that concatenates `input_dataset` with `another_dataset`. + @inlinable @inline(__always) + public static func concatenateDataset( + inputDataset: VariantHandle, + anotherDataset: VariantHandle, + outputTypes: [TensorDataType], + outputShapes: [TensorShape?] + ) -> VariantHandle { + _RawTFEager.concatenateDataset( + inputDataset: inputDataset, anotherDataset: anotherDataset, outputTypes: outputTypes, + outputShapes: outputShapes) + } + + /// Sets up the centralized structures for a distributed TPU system. + /// + /// - Attrs: + /// - embedding_config: Reserved. Do not use. + /// - tpu_embedding_config: Serialized tensorflow.tpu.TPUEmbeddingConfiguration that + /// describes the embedding lookups of the program. + /// - is_global_init: Reserved. Do not use. + /// + /// - Output topology: A serialized tensorflow.tpu.TopologyProto that describes the TPU + /// topology. + @inlinable @inline(__always) + public static func configureDistributedTPU( + embeddingConfig: String, + tpuEmbeddingConfig: String, + isGlobalInit: Bool = false, + enableWholeMeshCompilations: Bool = false, + compilationFailureClosesChips: Bool = true + ) -> StringTensor { + _RawTFEager.configureDistributedTPU( + embeddingConfig: embeddingConfig, tpuEmbeddingConfig: tpuEmbeddingConfig, + isGlobalInit: isGlobalInit, enableWholeMeshCompilations: enableWholeMeshCompilations, + compilationFailureClosesChips: compilationFailureClosesChips) + } + + /// Sets up TPUEmbedding in a distributed TPU system. + /// + /// - Attr config: Serialized tensorflow.tpu.TPUEmbeddingConfiguration that + /// describes the embedding lookups of the program. + @inlinable @inline(__always) + public static func configureTPUEmbedding( + config: String + ) { + _RawTFEager.configureTPUEmbedding(config: config) + } + + /// Returns the complex conjugate of a complex number. 
+  ///
+  /// Given a tensor `input` of complex numbers, this operation returns a tensor of
+  /// complex numbers that are the complex conjugate of each element in `input`. The
+  /// complex numbers in `input` must be of the form \\(a + bj\\), where *a* is the
+  /// real part and *b* is the imaginary part.
+  ///
+  /// The complex conjugate returned by this operation is of the form \\(a - bj\\).
+  ///
+  /// For example:
+  ///
+  /// ```
+  /// # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
+  /// tf.conj(input) ==> [-2.25 - 4.75j, 3.25 - 5.75j]
+  /// ```
+  @inlinable @inline(__always)
+  public static func conj<T: TensorFlowScalar>(
+    _ input: Tensor<T>
+  ) -> Tensor<T> {
+    switch input.handle.backend {
+    case .XLA:
+      let output_device = input.device
+      let input = Tensor(copying: input, to: .defaultTFEager)
+      return Tensor(copying: _RawTFEager.conj(input), to: output_device)
+    case .TF_EAGER:
+      return _RawTFEager.conj(input)
+    }
+
+  }
+
+  /// Shuffle dimensions of x according to a permutation and conjugate the result.
+  ///
+  /// The output `y` has the same rank as `x`. The shapes of `x` and `y` satisfy:
+  ///   `y.shape[i] == x.shape[perm[i]] for i in [0, 1, ..., rank(x) - 1]`
+  ///   `y[i,j,k,...,s,t,u] == conj(x[perm[i], perm[j], perm[k],...,perm[s], perm[t], perm[u]])`
+  @inlinable @inline(__always)
+  public static func conjugateTranspose<
+    T: TensorFlowScalar,
+    Tperm: TensorFlowIndex
+  >(
+    _ x: Tensor<T>,
+    perm: Tensor<Tperm>
+  ) -> Tensor<T> {
+    switch commonBackend(x.handle.backend, perm.handle.backend) {
+    case .XLA:
+      let output_device = perm.device
+      let x = Tensor(copying: x, to: .defaultTFEager)
+      let perm = Tensor(copying: perm, to: .defaultTFEager)
+      return Tensor(copying: _RawTFEager.conjugateTranspose(x, perm: perm), to: output_device)
+    case .TF_EAGER:
+      return _RawTFEager.conjugateTranspose(x, perm: perm)
+    }
+
+  }
+
+  @inlinable @inline(__always)
+  public static func constructionFails() {
+    _RawTFEager.constructionFails()
+  }
+
+  /// This op consumes a lock created by `MutexLock`.
+  ///
+  /// This op exists to consume a tensor created by `MutexLock` (other than
+  /// direct control dependencies). It should be the only op that consumes the tensor,
+  /// and will raise an error if it is not. Its only purpose is to keep the
+  /// mutex lock tensor alive until it is consumed by this op.
+  ///
+  /// **NOTE**: This operation must run on the same device as its input. This may
+  /// be enforced via the `colocate_with` mechanism.
+  ///
+  /// - Parameter mutex_lock: A tensor returned by `MutexLock`.
+  @inlinable @inline(__always)
+  public static func consumeMutexLock(
+    mutexLock: VariantHandle
+  ) {
+    _RawTFEager.consumeMutexLock(mutexLock: mutexLock)
+  }
+
+  /// Does nothing. Serves as a control trigger for scheduling.
+  ///
+  /// Only useful as a placeholder for control edges.
+  @inlinable @inline(__always)
+  public static func controlTrigger() {
+    _RawTFEager.controlTrigger()
+  }
+
+  /// Computes a 2-D convolution given 4-D `input` and `filter` tensors.
+  ///
+  /// Given an input tensor of shape `[batch, in_height, in_width, in_channels]`
+  /// and a filter / kernel tensor of shape
+  /// `[filter_height, filter_width, in_channels, out_channels]`, this op
+  /// performs the following:
+  ///
+  /// 1. Flattens the filter to a 2-D matrix with shape
+  ///    `[filter_height * filter_width * in_channels, output_channels]`.
+  /// 2. Extracts image patches from the input tensor to form a *virtual*
+  ///    tensor of shape `[batch, out_height, out_width,
+  ///    filter_height * filter_width * in_channels]`.
+  /// 3. For each patch, right-multiplies the filter matrix and the image patch
+  ///    vector.
+  ///
+  /// In detail, with the default NHWC format,
+  ///
+  ///     output[b, i, j, k] =
+  ///         sum_{di, dj, q} input[b, strides[1] * i + di, strides[2] * j + dj, q] *
+  ///                         filter[di, dj, q, k]
+  ///
+  /// Must have `strides[0] = strides[3] = 1`. For the most common case of the same
+  /// horizontal and vertical strides, `strides = [1, stride, stride, 1]`.
+  ///
+  /// - Parameters:
+  ///   - input: A 4-D tensor. The dimension order is interpreted according to the value
+  ///     of `data_format`, see below for details.
+  ///   - filter: A 4-D tensor of shape
+  ///     `[filter_height, filter_width, in_channels, out_channels]`
+  ///
+  /// - Attrs:
+  ///   - strides: 1-D tensor of length 4. The stride of the sliding window for each
+  ///     dimension of `input`. The dimension order is determined by the value of
+  ///     `data_format`, see below for details.
+  ///   - padding: The type of padding algorithm to use.
+  ///   - explicit_paddings: If `padding` is `"EXPLICIT"`, the list of explicit padding amounts. For the ith
+  ///     dimension, the amount of padding inserted before and after the dimension is
+  ///     `explicit_paddings[2 * i]` and `explicit_paddings[2 * i + 1]`, respectively. If
+  ///     `padding` is not `"EXPLICIT"`, `explicit_paddings` must be empty.
+  ///   - data_format: Specify the data format of the input and output data. With the
+  ///     default format "NHWC", the data is stored in the order of:
+  ///         [batch, height, width, channels].
+  ///     Alternatively, the format could be "NCHW", the data storage order of:
+  ///         [batch, channels, height, width].
+  ///   - dilations: 1-D tensor of length 4. The dilation factor for each dimension of
+  ///     `input`. If set to k > 1, there will be k-1 skipped cells between each
+  ///     filter element on that dimension. The dimension order is determined by the
+  ///     value of `data_format`, see above for details. Dilations in the batch and
+  ///     depth dimensions must be 1.
+  ///
+  /// - Output output: A 4-D tensor. The dimension order is determined by the value of
+  ///   `data_format`, see below for details.
+  @inlinable @inline(__always)
+  public static func conv2D<T: TensorFlowNumeric>(
+    _ input: Tensor<T>,
+    filter: Tensor<T>,
+    strides: [Int32],
+    useCudnnOnGpu: Bool = true,
+    padding: Padding1,
+    explicitPaddings: [Int32],
+    dataFormat: DataFormat = .nhwc,
+    dilations: [Int32] = [1, 1, 1, 1]
+  ) -> Tensor<T> {
+    switch commonBackend(input.handle.backend, filter.handle.backend) {
+    case .XLA:
+      return _RawXLA.conv2D(
+        input, filter: filter, strides: strides, useCudnnOnGpu: useCudnnOnGpu, padding: padding,
+        explicitPaddings: explicitPaddings, dataFormat: dataFormat, dilations: dilations)
+    case .TF_EAGER:
+      return _RawTFEager.conv2D(
+        input, filter: filter, strides: strides, useCudnnOnGpu: useCudnnOnGpu, padding: padding,
+        explicitPaddings: explicitPaddings, dataFormat: dataFormat, dilations: dilations)
+    }
+
+  }
+
+  /// Computes the gradients of convolution with respect to the filter.
+  ///
+  /// - Parameters:
+  ///   - input: 4-D with shape `[batch, in_height, in_width, in_channels]`.
+  ///   - filter_sizes: An integer vector representing the tensor shape of `filter`,
+  ///     where `filter` is a 4-D
+  ///     `[filter_height, filter_width, in_channels, out_channels]` tensor.
+  ///   - out_backprop: 4-D with shape `[batch, out_height, out_width, out_channels]`.
+  ///     Gradients w.r.t. the output of the convolution.
+ /// + /// - Attrs: + /// - strides: The stride of the sliding window for each dimension of the input + /// of the convolution. Must be in the same order as the dimension specified with + /// format. + /// - padding: The type of padding algorithm to use. + /// - explicit_paddings: If `padding` is `"EXPLICIT"`, the list of explicit padding amounts. For the ith + /// dimension, the amount of padding inserted before and after the dimension is + /// `explicit_paddings[2 * i]` and `explicit_paddings[2 * i + 1]`, respectively. If + /// `padding` is not `"EXPLICIT"`, `explicit_paddings` must be empty. + /// - data_format: Specify the data format of the input and output data. With the + /// default format "NHWC", the data is stored in the order of: + /// [batch, in_height, in_width, in_channels]. + /// Alternatively, the format could be "NCHW", the data storage order of: + /// [batch, in_channels, in_height, in_width]. + /// - dilations: 1-D tensor of length 4. The dilation factor for each dimension of + /// `input`. If set to k > 1, there will be k-1 skipped cells between each filter + /// element on that dimension. The dimension order is determined by the value of + /// `data_format`, see above for details. Dilations in the batch and depth + /// dimensions must be 1. + /// + /// - Output output: 4-D with shape + /// `[filter_height, filter_width, in_channels, out_channels]`. Gradient w.r.t. + /// the `filter` input of the convolution. + @inlinable @inline(__always) + public static func conv2DBackpropFilter( + _ input: Tensor, + filterSizes: Tensor, + outBackprop: Tensor, + strides: [Int32], + useCudnnOnGpu: Bool = true, + padding: Padding1, + explicitPaddings: [Int32], + dataFormat: DataFormat = .nhwc, + dilations: [Int32] = [1, 1, 1, 1] + ) -> Tensor { + switch commonBackend( + commonBackend(input.handle.backend, filterSizes.handle.backend), outBackprop.handle.backend) + { + case .XLA: + return _RawXLA.conv2DBackpropFilter( + input, filterSizes: filterSizes, outBackprop: outBackprop, strides: strides, + useCudnnOnGpu: useCudnnOnGpu, padding: padding, explicitPaddings: explicitPaddings, + dataFormat: dataFormat, dilations: dilations) + case .TF_EAGER: + return _RawTFEager.conv2DBackpropFilter( + input, filterSizes: filterSizes, outBackprop: outBackprop, strides: strides, + useCudnnOnGpu: useCudnnOnGpu, padding: padding, explicitPaddings: explicitPaddings, + dataFormat: dataFormat, dilations: dilations) + } + + } + + /// Computes the gradients of convolution with respect to the input. + /// + /// - Parameters: + /// - input_sizes: An integer vector representing the shape of `input`, + /// where `input` is a 4-D `[batch, height, width, channels]` tensor. + /// - filter: 4-D with shape + /// `[filter_height, filter_width, in_channels, out_channels]`. + /// - out_backprop: 4-D with shape `[batch, out_height, out_width, out_channels]`. + /// Gradients w.r.t. the output of the convolution. + /// + /// - Attrs: + /// - strides: The stride of the sliding window for each dimension of the input + /// of the convolution. Must be in the same order as the dimension specified with + /// format. + /// - padding: The type of padding algorithm to use. + /// - explicit_paddings: If `padding` is `"EXPLICIT"`, the list of explicit padding amounts. For the ith + /// dimension, the amount of padding inserted before and after the dimension is + /// `explicit_paddings[2 * i]` and `explicit_paddings[2 * i + 1]`, respectively. If + /// `padding` is not `"EXPLICIT"`, `explicit_paddings` must be empty. 
+ /// - data_format: Specify the data format of the input and output data. With the + /// default format "NHWC", the data is stored in the order of: + /// [batch, in_height, in_width, in_channels]. + /// Alternatively, the format could be "NCHW", the data storage order of: + /// [batch, in_channels, in_height, in_width]. + /// - dilations: 1-D tensor of length 4. The dilation factor for each dimension of + /// `input`. If set to k > 1, there will be k-1 skipped cells between each filter + /// element on that dimension. The dimension order is determined by the value of + /// `data_format`, see above for details. Dilations in the batch and depth + /// dimensions must be 1. + /// + /// - Output output: 4-D with shape `[batch, in_height, in_width, in_channels]`. Gradient + /// w.r.t. the input of the convolution. + @inlinable @inline(__always) + public static func conv2DBackpropInput( + inputSizes: Tensor, + filter: Tensor, + outBackprop: Tensor, + strides: [Int32], + useCudnnOnGpu: Bool = true, + padding: Padding1, + explicitPaddings: [Int32], + dataFormat: DataFormat = .nhwc, + dilations: [Int32] = [1, 1, 1, 1] + ) -> Tensor { + switch commonBackend( + commonBackend(inputSizes.handle.backend, filter.handle.backend), outBackprop.handle.backend) + { + case .XLA: + return _RawXLA.conv2DBackpropInput( + inputSizes: inputSizes, filter: filter, outBackprop: outBackprop, strides: strides, + useCudnnOnGpu: useCudnnOnGpu, padding: padding, explicitPaddings: explicitPaddings, + dataFormat: dataFormat, dilations: dilations) + case .TF_EAGER: + return _RawTFEager.conv2DBackpropInput( + inputSizes: inputSizes, filter: filter, outBackprop: outBackprop, strides: strides, + useCudnnOnGpu: useCudnnOnGpu, padding: padding, explicitPaddings: explicitPaddings, + dataFormat: dataFormat, dilations: dilations) + } + + } + + /// Computes a 3-D convolution given 5-D `input` and `filter` tensors. + /// + /// In signal processing, cross-correlation is a measure of similarity of + /// two waveforms as a function of a time-lag applied to one of them. This + /// is also known as a sliding dot product or sliding inner-product. + /// + /// Our Conv3D implements a form of cross-correlation. + /// + /// - Parameters: + /// - input: Shape `[batch, in_depth, in_height, in_width, in_channels]`. + /// - filter: Shape `[filter_depth, filter_height, filter_width, in_channels, + /// out_channels]`. `in_channels` must match between `input` and `filter`. + /// + /// - Attrs: + /// - strides: 1-D tensor of length 5. The stride of the sliding window for each + /// dimension of `input`. Must have `strides[0] = strides[4] = 1`. + /// - padding: The type of padding algorithm to use. + /// - data_format: The data format of the input and output data. With the + /// default format "NDHWC", the data is stored in the order of: + /// [batch, in_depth, in_height, in_width, in_channels]. + /// Alternatively, the format could be "NCDHW", the data storage order is: + /// [batch, in_channels, in_depth, in_height, in_width]. + /// - dilations: 1-D tensor of length 5. The dilation factor for each dimension of + /// `input`. If set to k > 1, there will be k-1 skipped cells between each + /// filter element on that dimension. The dimension order is determined by the + /// value of `data_format`, see above for details. Dilations in the batch and + /// depth dimensions must be 1. 
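+  ///
+  /// A minimal usage sketch (hypothetical shapes; NDHWC layout, unit strides,
+  /// and VALID padding):
+  ///
+  /// ```swift
+  /// let volume = Tensor<Float>(zeros: [1, 8, 8, 8, 3])  // [batch, depth, height, width, channels]
+  /// let kernel = Tensor<Float>(zeros: [2, 2, 2, 3, 4])  // [d, h, w, inChannels, outChannels]
+  /// let output = _Raw.conv3D(
+  ///   volume, filter: kernel, strides: [1, 1, 1, 1, 1], padding: .valid)
+  /// ```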
+ @inlinable @inline(__always) + public static func conv3D( + _ input: Tensor, + filter: Tensor, + strides: [Int32], + padding: Padding, + dataFormat: DataFormat1 = .ndhwc, + dilations: [Int32] = [1, 1, 1, 1, 1] + ) -> Tensor { + switch commonBackend(input.handle.backend, filter.handle.backend) { + case .XLA: + return _RawXLA.conv3D( + input, filter: filter, strides: strides, padding: padding, dataFormat: dataFormat, + dilations: dilations) + case .TF_EAGER: + return _RawTFEager.conv3D( + input, filter: filter, strides: strides, padding: padding, dataFormat: dataFormat, + dilations: dilations) + } + + } + + /// Computes the gradients of 3-D convolution with respect to the filter. + /// + /// - Parameters: + /// - input: Shape `[batch, depth, rows, cols, in_channels]`. + /// - filter: Shape `[depth, rows, cols, in_channels, out_channels]`. + /// `in_channels` must match between `input` and `filter`. + /// - out_backprop: Backprop signal of shape `[batch, out_depth, out_rows, out_cols, + /// out_channels]`. + /// + /// - Attrs: + /// - strides: 1-D tensor of length 5. The stride of the sliding window for each + /// dimension of `input`. Must have `strides[0] = strides[4] = 1`. + /// - padding: The type of padding algorithm to use. + @inlinable @inline(__always) + public static func conv3DBackpropFilter( + _ input: Tensor, + filter: Tensor, + outBackprop: Tensor, + strides: [Int32], + padding: Padding, + dilations: [Int32] = [1, 1, 1, 1, 1] + ) -> Tensor { + switch commonBackend( + commonBackend(input.handle.backend, filter.handle.backend), outBackprop.handle.backend) + { + case .XLA: + let output_device = outBackprop.device + let input = Tensor(copying: input, to: .defaultTFEager) + let filter = Tensor(copying: filter, to: .defaultTFEager) + let outBackprop = Tensor(copying: outBackprop, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.conv3DBackpropFilter( + input, filter: filter, outBackprop: outBackprop, strides: strides, padding: padding, + dilations: dilations), to: output_device) + case .TF_EAGER: + return _RawTFEager.conv3DBackpropFilter( + input, filter: filter, outBackprop: outBackprop, strides: strides, padding: padding, + dilations: dilations) + } + + } + + /// Computes the gradients of 3-D convolution with respect to the filter. + /// + /// - Parameters: + /// - input: Shape `[batch, depth, rows, cols, in_channels]`. + /// - filter_sizes: An integer vector representing the tensor shape of `filter`, + /// where `filter` is a 5-D + /// `[filter_depth, filter_height, filter_width, in_channels, out_channels]` + /// tensor. + /// - out_backprop: Backprop signal of shape `[batch, out_depth, out_rows, out_cols, + /// out_channels]`. + /// + /// - Attrs: + /// - strides: 1-D tensor of length 5. The stride of the sliding window for each + /// dimension of `input`. Must have `strides[0] = strides[4] = 1`. + /// - padding: The type of padding algorithm to use. + /// - data_format: The data format of the input and output data. With the + /// default format "NDHWC", the data is stored in the order of: + /// [batch, in_depth, in_height, in_width, in_channels]. + /// Alternatively, the format could be "NCDHW", the data storage order is: + /// [batch, in_channels, in_depth, in_height, in_width]. + /// - dilations: 1-D tensor of length 5. The dilation factor for each dimension of + /// `input`. If set to k > 1, there will be k-1 skipped cells between each + /// filter element on that dimension. 
The dimension order is determined by the + /// value of `data_format`, see above for details. Dilations in the batch and + /// depth dimensions must be 1. + @inlinable @inline(__always) + public static func conv3DBackpropFilterV2( + _ input: Tensor, + filterSizes: Tensor, + outBackprop: Tensor, + strides: [Int32], + padding: Padding, + dataFormat: DataFormat1 = .ndhwc, + dilations: [Int32] = [1, 1, 1, 1, 1] + ) -> Tensor { + switch commonBackend( + commonBackend(input.handle.backend, filterSizes.handle.backend), outBackprop.handle.backend) + { + case .XLA: + return _RawXLA.conv3DBackpropFilterV2( + input, filterSizes: filterSizes, outBackprop: outBackprop, strides: strides, + padding: padding, dataFormat: dataFormat, dilations: dilations) + case .TF_EAGER: + return _RawTFEager.conv3DBackpropFilterV2( + input, filterSizes: filterSizes, outBackprop: outBackprop, strides: strides, + padding: padding, dataFormat: dataFormat, dilations: dilations) + } + + } + + /// Computes the gradients of 3-D convolution with respect to the input. + /// + /// - Parameters: + /// - input: Shape `[batch, depth, rows, cols, in_channels]`. + /// - filter: Shape `[depth, rows, cols, in_channels, out_channels]`. + /// `in_channels` must match between `input` and `filter`. + /// - out_backprop: Backprop signal of shape `[batch, out_depth, out_rows, out_cols, + /// out_channels]`. + /// + /// - Attrs: + /// - strides: 1-D tensor of length 5. The stride of the sliding window for each + /// dimension of `input`. Must have `strides[0] = strides[4] = 1`. + /// - padding: The type of padding algorithm to use. + @inlinable @inline(__always) + public static func conv3DBackpropInput( + _ input: Tensor, + filter: Tensor, + outBackprop: Tensor, + strides: [Int32], + padding: Padding, + dilations: [Int32] = [1, 1, 1, 1, 1] + ) -> Tensor { + switch commonBackend( + commonBackend(input.handle.backend, filter.handle.backend), outBackprop.handle.backend) + { + case .XLA: + let output_device = outBackprop.device + let input = Tensor(copying: input, to: .defaultTFEager) + let filter = Tensor(copying: filter, to: .defaultTFEager) + let outBackprop = Tensor(copying: outBackprop, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.conv3DBackpropInput( + input, filter: filter, outBackprop: outBackprop, strides: strides, padding: padding, + dilations: dilations), to: output_device) + case .TF_EAGER: + return _RawTFEager.conv3DBackpropInput( + input, filter: filter, outBackprop: outBackprop, strides: strides, padding: padding, + dilations: dilations) + } + + } + + /// Computes the gradients of 3-D convolution with respect to the input. + /// + /// - Parameters: + /// - input_sizes: An integer vector representing the tensor shape of `input`, + /// where `input` is a 5-D + /// `[batch, depth, rows, cols, in_channels]` tensor. + /// - filter: Shape `[depth, rows, cols, in_channels, out_channels]`. + /// `in_channels` must match between `input` and `filter`. + /// - out_backprop: Backprop signal of shape `[batch, out_depth, out_rows, out_cols, + /// out_channels]`. + /// + /// - Attrs: + /// - strides: 1-D tensor of length 5. The stride of the sliding window for each + /// dimension of `input`. Must have `strides[0] = strides[4] = 1`. + /// - padding: The type of padding algorithm to use. + /// - data_format: The data format of the input and output data. With the + /// default format "NDHWC", the data is stored in the order of: + /// [batch, in_depth, in_height, in_width, in_channels]. 
+  ///      Alternatively, the format could be "NCDHW", the data storage order is:
+  ///        [batch, in_channels, in_depth, in_height, in_width].
+  ///    - dilations: 1-D tensor of length 5. The dilation factor for each dimension of
+  ///      `input`. If set to k > 1, there will be k-1 skipped cells between each
+  ///      filter element on that dimension. The dimension order is determined by the
+  ///      value of `data_format`, see above for details. Dilations in the batch and
+  ///      depth dimensions must be 1.
+  @inlinable @inline(__always)
+  public static func conv3DBackpropInputV2<
+    T: FloatingPoint & TensorFlowScalar,
+    Tshape: TensorFlowIndex
+  >(
+    inputSizes: Tensor,
+    filter: Tensor,
+    outBackprop: Tensor,
+    strides: [Int32],
+    padding: Padding,
+    dataFormat: DataFormat1 = .ndhwc,
+    dilations: [Int32] = [1, 1, 1, 1, 1]
+  ) -> Tensor {
+    switch commonBackend(
+      commonBackend(inputSizes.handle.backend, filter.handle.backend), outBackprop.handle.backend)
+    {
+    case .XLA:
+      return _RawXLA.conv3DBackpropInputV2(
+        inputSizes: inputSizes, filter: filter, outBackprop: outBackprop, strides: strides,
+        padding: padding, dataFormat: dataFormat, dilations: dilations)
+    case .TF_EAGER:
+      return _RawTFEager.conv3DBackpropInputV2(
+        inputSizes: inputSizes, filter: filter, outBackprop: outBackprop, strides: strides,
+        padding: padding, dataFormat: dataFormat, dilations: dilations)
+    }
+
+  }
+
+  /// Copy a tensor from CPU-to-CPU or GPU-to-GPU.
+  ///
+  /// Performs CPU-to-CPU or GPU-to-GPU deep-copying of tensor, depending on the
+  /// device on which the tensor is allocated.
+  /// N.B.: If all the downstream attached debug ops are disabled given the current
+  /// gRPC gating status, the output will simply forward the input tensor without
+  /// deep-copying. See the documentation of Debug* ops for more details.
+  ///
+  /// Unlike the CopyHost Op, this op does not have HostMemory constraint on its
+  /// input or output.
+  ///
+  /// - Parameter input: Input tensor.
+  ///
+  /// - Attrs:
+  ///   - tensor_name: The name of the input tensor.
+  ///   - debug_ops_spec: A list of debug op spec (op, url, gated_grpc) for attached debug
+  ///     ops. Each element of the list has the format
+  ///     `<op>;<url>;<gated_grpc>`, wherein gated_grpc is boolean represented
+  ///     as 0/1. E.g., "DebugIdentity;grpc://foo:3333;1",
+  ///     "DebugIdentity;file:///tmp/tfdbg_1;0".
+  @inlinable @inline(__always)
+  public static func copy(
+    _ input: Tensor,
+    tensorName: String,
+    debugOpsSpec: [String]
+  ) -> Tensor {
+    switch input.handle.backend {
+    case .XLA:
+      let output_device = input.device
+      let input = Tensor(copying: input, to: .defaultTFEager)
+      return Tensor(
+        copying: _RawTFEager.copy(input, tensorName: tensorName, debugOpsSpec: debugOpsSpec),
+        to: output_device)
+    case .TF_EAGER:
+      return _RawTFEager.copy(input, tensorName: tensorName, debugOpsSpec: debugOpsSpec)
+    }
+
+  }
+
+  /// Copy a tensor to host.
+  ///
+  /// Performs CPU-to-CPU deep-copying of tensor.
+  /// N.B.: If all the downstream attached debug ops are disabled given the current
+  /// gRPC gating status, the output will simply forward the input tensor without
+  /// deep-copying. See the documentation of Debug* ops for more details.
+  ///
+  /// Unlike the Copy Op, this op has HostMemory constraint on its input or output.
+  ///
+  /// - Parameter input: Input tensor.
+  ///
+  /// - Attrs:
+  ///   - tensor_name: The name of the input tensor.
+  ///   - debug_ops_spec: A list of debug op spec (op, url, gated_grpc) for attached debug
+  ///     ops.
+  ///     Each element of the list has the format
+  ///     `<op>;<url>;<gated_grpc>`, wherein gated_grpc is boolean represented
+  ///     as 0/1. E.g., "DebugIdentity;grpc://foo:3333;1",
+  ///     "DebugIdentity;file:///tmp/tfdbg_1;0".
+  @inlinable @inline(__always)
+  public static func copyHost(
+    _ input: Tensor,
+    tensorName: String,
+    debugOpsSpec: [String]
+  ) -> Tensor {
+    switch input.handle.backend {
+    case .XLA:
+      let output_device = input.device
+      let input = Tensor(copying: input, to: .defaultTFEager)
+      return Tensor(
+        copying: _RawTFEager.copyHost(input, tensorName: tensorName, debugOpsSpec: debugOpsSpec),
+        to: output_device)
+    case .TF_EAGER:
+      return _RawTFEager.copyHost(input, tensorName: tensorName, debugOpsSpec: debugOpsSpec)
+    }
+
+  }
+
+  @inlinable @inline(__always)
+  public static func copyOp(
+    _ a: Tensor
+  ) -> Tensor {
+    switch a.handle.backend {
+    case .XLA:
+      let output_device = a.device
+      let a = Tensor(copying: a, to: .defaultTFEager)
+      return Tensor(copying: _RawTFEager.copyOp(a), to: output_device)
+    case .TF_EAGER:
+      return _RawTFEager.copyOp(a)
+    }
+
+  }
+
+  /// Computes cos of x element-wise.
+  ///
+  /// Given an input tensor, this function computes cosine of every
+  /// element in the tensor. Input range is `(-inf, inf)` and
+  /// output range is `[-1,1]`. If input lies outside the boundary, `nan`
+  /// is returned.
+  ///
+  /// ```python
+  /// x = tf.constant([-float("inf"), -9, -0.5, 1, 1.2, 200, 10000, float("inf")])
+  /// tf.math.cos(x) ==> [nan -0.91113025 0.87758255 0.5403023 0.36235774 0.48718765 -0.95215535 nan]
+  /// ```
+  @inlinable @inline(__always)
+  public static func cos(
+    _ x: Tensor
+  ) -> Tensor {
+    switch x.handle.backend {
+    case .XLA:
+      return _RawXLA.cos(x)
+    case .TF_EAGER:
+      return _RawTFEager.cos(x)
+    }
+
+  }
+
+  /// Computes hyperbolic cosine of x element-wise.
+  ///
+  /// Given an input tensor, this function computes hyperbolic cosine of every
+  /// element in the tensor. Input range is `[-inf, inf]` and output range
+  /// is `[1, inf]`.
+  ///
+  /// ```python
+  /// x = tf.constant([-float("inf"), -9, -0.5, 1, 1.2, 2, 10, float("inf")])
+  /// tf.math.cosh(x) ==> [inf 4.0515420e+03 1.1276259e+00 1.5430807e+00 1.8106556e+00 3.7621956e+00 1.1013233e+04 inf]
+  /// ```
+  @inlinable @inline(__always)
+  public static func cosh(
+    _ x: Tensor
+  ) -> Tensor {
+    switch x.handle.backend {
+    case .XLA:
+      return _RawXLA.cosh(x)
+    case .TF_EAGER:
+      return _RawTFEager.cosh(x)
+    }
+
+  }
+
+  @inlinable @inline(__always)
+  public static func createSummaryDbWriter(
+    writer: ResourceHandle,
+    dbUri: StringTensor,
+    experimentName: StringTensor,
+    runName: StringTensor,
+    userName: StringTensor
+  ) {
+    _RawTFEager.createSummaryDbWriter(
+      writer: writer, dbUri: dbUri, experimentName: experimentName, runName: runName,
+      userName: userName)
+  }
+
+  @inlinable @inline(__always)
+  public static func createSummaryFileWriter(
+    writer: ResourceHandle,
+    logdir: StringTensor,
+    maxQueue: Tensor,
+    flushMillis: Tensor,
+    filenameSuffix: StringTensor
+  ) {
+    _RawTFEager.createSummaryFileWriter(
+      writer: writer, logdir: logdir, maxQueue: maxQueue, flushMillis: flushMillis,
+      filenameSuffix: filenameSuffix)
+  }
+
+  @inlinable @inline(__always)
+  public static func createTRTResourceHandle(
+    resourceName: String
+  ) -> ResourceHandle {
+    _RawTFEager.createTRTResourceHandle(resourceName: resourceName)
+  }
+
+  /// Extracts crops from the input image tensor and resizes them.
+  ///
+  /// Extracts crops from the input image tensor and resizes them using bilinear
+  /// sampling or nearest neighbor sampling (possibly with aspect ratio change) to a
+  /// common output size specified by `crop_size`. This is more general than the
+  /// `crop_to_bounding_box` op which extracts a fixed size slice from the input image
+  /// and does not allow resizing or aspect ratio change.
+  ///
+  /// Returns a tensor with `crops` from the input `image` at positions defined at the
+  /// bounding box locations in `boxes`. The cropped boxes are all resized (with
+  /// bilinear or nearest neighbor interpolation) to a fixed
+  /// `size = [crop_height, crop_width]`. The result is a 4-D tensor
+  /// `[num_boxes, crop_height, crop_width, depth]`. The resizing is corner aligned.
+  /// In particular, if `boxes = [[0, 0, 1, 1]]`, the method will give identical
+  /// results to using `tf.image.resize_bilinear()` or
+  /// `tf.image.resize_nearest_neighbor()` (depending on the `method` argument) with
+  /// `align_corners=True`.
+  ///
+  /// - Parameters:
+  ///   - image: A 4-D tensor of shape `[batch, image_height, image_width, depth]`.
+  ///     Both `image_height` and `image_width` need to be positive.
+  ///   - boxes: A 2-D tensor of shape `[num_boxes, 4]`. The `i`-th row of the tensor
+  ///     specifies the coordinates of a box in the `box_ind[i]` image and is specified
+  ///     in normalized coordinates `[y1, x1, y2, x2]`. A normalized coordinate value of
+  ///     `y` is mapped to the image coordinate at `y * (image_height - 1)`, so the
+  ///     `[0, 1]` interval of normalized image height is mapped to
+  ///     `[0, image_height - 1]` in image height coordinates. We do allow `y1` > `y2`, in
+  ///     which case the sampled crop is an up-down flipped version of the original
+  ///     image. The width dimension is treated similarly. Normalized coordinates
+  ///     outside the `[0, 1]` range are allowed, in which case we use
+  ///     `extrapolation_value` to extrapolate the input image values.
+  ///   - box_ind: A 1-D tensor of shape `[num_boxes]` with int32 values in `[0, batch)`.
+  ///     The value of `box_ind[i]` specifies the image that the `i`-th box refers to.
+  ///   - crop_size: A 1-D tensor of 2 elements, `size = [crop_height, crop_width]`. All
+  ///     cropped image patches are resized to this size. The aspect ratio of the image
+  ///     content is not preserved. Both `crop_height` and `crop_width` need to be
+  ///     positive.
+  ///
+  /// - Attrs:
+  ///   - method: A string specifying the sampling method for resizing. It can be either
+  ///     `"bilinear"` or `"nearest"` and defaults to `"bilinear"`. Currently two sampling
+  ///     methods are supported: Bilinear and Nearest Neighbor.
+  ///   - extrapolation_value: Value used for extrapolation, when applicable.
+  ///
+  /// - Output crops: A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`.
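+  ///
+  /// A small usage sketch (values illustrative): cropping the full image with
+  /// `boxes = [[0, 0, 1, 1]]` and resizing to 2x2 samples the corner-aligned
+  /// corner pixels.
+  ///
+  /// ```swift
+  /// // A 4x4 single-channel image with scalars 0...15, batch size 1.
+  /// let image = Tensor<Float>(rangeFrom: 0, to: 16, stride: 1)
+  ///   .reshaped(to: [1, 4, 4, 1])
+  /// let boxes = Tensor<Float>(shape: [1, 4], scalars: [0, 0, 1, 1])
+  /// let boxInd = Tensor<Int32>([0])      // box 0 crops from image 0
+  /// let cropSize = Tensor<Int32>([2, 2])
+  /// let crops = _Raw.cropAndResize(
+  ///   image: image, boxes: boxes, boxInd: boxInd, cropSize: cropSize)
+  /// // crops.shape == [1, 2, 2, 1]; values are the corner pixels 0, 3, 12, 15.
+  /// ```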
+  @inlinable @inline(__always)
+  public static func cropAndResize(
+    image: Tensor,
+    boxes: Tensor,
+    boxInd: Tensor,
+    cropSize: Tensor,
+    method: Method = .bilinear,
+    extrapolationValue: Double = 0
+  ) -> Tensor {
+    switch commonBackend(
+      commonBackend(
+        commonBackend(image.handle.backend, boxes.handle.backend), boxInd.handle.backend),
+      cropSize.handle.backend)
+    {
+    case .XLA:
+      let output_device = cropSize.device
+      let image = Tensor(copying: image, to: .defaultTFEager)
+      let boxes = Tensor(copying: boxes, to: .defaultTFEager)
+      let boxInd = Tensor(copying: boxInd, to: .defaultTFEager)
+      let cropSize = Tensor(copying: cropSize, to: .defaultTFEager)
+      return Tensor(
+        copying: _RawTFEager.cropAndResize(
+          image: image, boxes: boxes, boxInd: boxInd, cropSize: cropSize, method: method,
+          extrapolationValue: extrapolationValue), to: output_device)
+    case .TF_EAGER:
+      return _RawTFEager.cropAndResize(
+        image: image, boxes: boxes, boxInd: boxInd, cropSize: cropSize, method: method,
+        extrapolationValue: extrapolationValue)
+    }
+
+  }
+
+  /// Computes the gradient of the crop_and_resize op w.r.t. the input boxes tensor.
+  ///
+  /// - Parameters:
+  ///   - grads: A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`.
+  ///   - image: A 4-D tensor of shape `[batch, image_height, image_width, depth]`.
+  ///     Both `image_height` and `image_width` need to be positive.
+  ///   - boxes: A 2-D tensor of shape `[num_boxes, 4]`. The `i`-th row of the tensor
+  ///     specifies the coordinates of a box in the `box_ind[i]` image and is specified
+  ///     in normalized coordinates `[y1, x1, y2, x2]`. A normalized coordinate value of
+  ///     `y` is mapped to the image coordinate at `y * (image_height - 1)`, so the
+  ///     `[0, 1]` interval of normalized image height is mapped to
+  ///     `[0, image_height - 1]` in image height coordinates. We do allow `y1` > `y2`, in
+  ///     which case the sampled crop is an up-down flipped version of the original
+  ///     image. The width dimension is treated similarly. Normalized coordinates
+  ///     outside the `[0, 1]` range are allowed, in which case we use
+  ///     `extrapolation_value` to extrapolate the input image values.
+  ///   - box_ind: A 1-D tensor of shape `[num_boxes]` with int32 values in `[0, batch)`.
+  ///     The value of `box_ind[i]` specifies the image that the `i`-th box refers to.
+  ///
+  /// - Attr method: A string specifying the interpolation method. Only 'bilinear' is
+  ///   supported for now.
+  ///
+  /// - Output output: A 2-D tensor of shape `[num_boxes, 4]`.
+  @inlinable @inline(__always)
+  public static func cropAndResizeGradBoxes(
+    grads: Tensor,
+    image: Tensor,
+    boxes: Tensor,
+    boxInd: Tensor,
+    method: Method1 = .bilinear
+  ) -> Tensor {
+    switch commonBackend(
+      commonBackend(
+        commonBackend(grads.handle.backend, image.handle.backend), boxes.handle.backend),
+      boxInd.handle.backend)
+    {
+    case .XLA:
+      let output_device = boxInd.device
+      let grads = Tensor(copying: grads, to: .defaultTFEager)
+      let image = Tensor(copying: image, to: .defaultTFEager)
+      let boxes = Tensor(copying: boxes, to: .defaultTFEager)
+      let boxInd = Tensor(copying: boxInd, to: .defaultTFEager)
+      return Tensor(
+        copying: _RawTFEager.cropAndResizeGradBoxes(
+          grads: grads, image: image, boxes: boxes, boxInd: boxInd, method: method),
+        to: output_device)
+    case .TF_EAGER:
+      return _RawTFEager.cropAndResizeGradBoxes(
+        grads: grads, image: image, boxes: boxes, boxInd: boxInd, method: method)
+    }
+
+  }
+
+  /// Computes the gradient of the crop_and_resize op w.r.t. the input image tensor.
+  ///
+  /// - Parameters:
+  ///   - grads: A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`.
+  ///   - boxes: A 2-D tensor of shape `[num_boxes, 4]`. The `i`-th row of the tensor
+  ///     specifies the coordinates of a box in the `box_ind[i]` image and is specified
+  ///     in normalized coordinates `[y1, x1, y2, x2]`. A normalized coordinate value of
+  ///     `y` is mapped to the image coordinate at `y * (image_height - 1)`, so the
+  ///     `[0, 1]` interval of normalized image height is mapped to
+  ///     `[0, image_height - 1]` in image height coordinates. We do allow `y1` > `y2`, in
+  ///     which case the sampled crop is an up-down flipped version of the original
+  ///     image. The width dimension is treated similarly. Normalized coordinates
+  ///     outside the `[0, 1]` range are allowed, in which case we use
+  ///     `extrapolation_value` to extrapolate the input image values.
+  ///   - box_ind: A 1-D tensor of shape `[num_boxes]` with int32 values in `[0, batch)`.
+  ///     The value of `box_ind[i]` specifies the image that the `i`-th box refers to.
+  ///   - image_size: A 1-D tensor with value `[batch, image_height, image_width, depth]`
+  ///     containing the original image size. Both `image_height` and `image_width` need
+  ///     to be positive.
+  ///
+  /// - Attr method: A string specifying the interpolation method. Only 'bilinear' is
+  ///   supported for now.
+  ///
+  /// - Output output: A 4-D tensor of shape `[batch, image_height, image_width, depth]`.
+  @inlinable @inline(__always)
+  public static func cropAndResizeGradImage(
+    grads: Tensor,
+    boxes: Tensor,
+    boxInd: Tensor,
+    imageSize: Tensor,
+    method: Method = .bilinear
+  ) -> Tensor {
+    switch commonBackend(
+      commonBackend(
+        commonBackend(grads.handle.backend, boxes.handle.backend), boxInd.handle.backend),
+      imageSize.handle.backend)
+    {
+    case .XLA:
+      let output_device = imageSize.device
+      let grads = Tensor(copying: grads, to: .defaultTFEager)
+      let boxes = Tensor(copying: boxes, to: .defaultTFEager)
+      let boxInd = Tensor(copying: boxInd, to: .defaultTFEager)
+      let imageSize = Tensor(copying: imageSize, to: .defaultTFEager)
+      return Tensor(
+        copying: _RawTFEager.cropAndResizeGradImage(
+          grads: grads, boxes: boxes, boxInd: boxInd, imageSize: imageSize, method: method),
+        to: output_device)
+    case .TF_EAGER:
+      return _RawTFEager.cropAndResizeGradImage(
+        grads: grads, boxes: boxes, boxInd: boxInd, imageSize: imageSize, method: method)
+    }
+
+  }
+
+  /// Compute the pairwise cross product.
+  ///
+  /// `a` and `b` must be the same shape; they can either be simple 3-element vectors,
+  /// or any shape where the innermost dimension is 3. In the latter case, each pair
+  /// of corresponding 3-element vectors is cross-multiplied independently.
+  ///
+  /// - Parameters:
+  ///   - a: A tensor containing 3-element vectors.
+  ///   - b: Another tensor, of same type and shape as `a`.
+  ///
+  /// - Output product: Pairwise cross product of the vectors in `a` and `b`.
+  @inlinable @inline(__always)
+  public static func cross(
+    _ a: Tensor,
+    _ b: Tensor
+  ) -> Tensor {
+    switch commonBackend(a.handle.backend, b.handle.backend) {
+    case .XLA:
+      let output_device = b.device
+      let a = Tensor(copying: a, to: .defaultTFEager)
+      let b = Tensor(copying: b, to: .defaultTFEager)
+      return Tensor(copying: _RawTFEager.cross(a, b), to: output_device)
+    case .TF_EAGER:
+      return _RawTFEager.cross(a, b)
+    }
+
+  }
+
+  /// An Op to sum inputs across replicated TPU instances.
+  ///
+  /// Each instance supplies its own input.
+  ///
+  /// For example, suppose there are 8 TPU instances: `[A, B, C, D, E, F, G, H]`.
+  /// Passing group_assignment=`[[0,2,4,6],[1,3,5,7]]` sets `A, C, E, G` as group 0,
+  /// and `B, D, F, H` as group 1. Thus we get the outputs:
+  /// `[A+C+E+G, B+D+F+H, A+C+E+G, B+D+F+H, A+C+E+G, B+D+F+H, A+C+E+G, B+D+F+H]`.
+  ///
+  /// - Parameters:
+  ///   - input: The local input to the sum.
+  ///   - group_assignment: An int32 tensor with shape
+  ///     [num_groups, num_replicas_per_group]. `group_assignment[i]` represents the
+  ///     replica ids in the ith subgroup.
+  ///
+  /// - Attr T: The type of elements to be summed.
+  ///
+  /// - Output output: The sum of all the distributed inputs.
+  @inlinable @inline(__always)
+  public static func crossReplicaSum(
+    _ input: Tensor,
+    groupAssignment: Tensor
+  ) -> Tensor {
+    switch commonBackend(input.handle.backend, groupAssignment.handle.backend) {
+    case .XLA:
+      let output_device = groupAssignment.device
+      let input = Tensor(copying: input, to: .defaultTFEager)
+      let groupAssignment = Tensor(copying: groupAssignment, to: .defaultTFEager)
+      return Tensor(
+        copying: _RawTFEager.crossReplicaSum(input, groupAssignment: groupAssignment),
+        to: output_device)
+    case .TF_EAGER:
+      return _RawTFEager.crossReplicaSum(input, groupAssignment: groupAssignment)
+    }
+
+  }
+
+  /// An RNN backed by cuDNN.
+  ///
+  /// Computes the RNN from the input and initial states, with respect to the params
+  /// buffer.
+  ///
+  /// rnn_mode: Indicates the type of the RNN model.
+  /// input_mode: Indicates whether there is a linear projection between the input and
+  ///   the actual computation before the first layer. 'skip_input' is only allowed
+  ///   when input_size == num_units; 'auto_select' implies 'skip_input' when
+  ///   input_size == num_units; otherwise, it implies 'linear_input'.
+  /// direction: Indicates whether a bidirectional model will be used. Should be
+  ///   "unidirectional" or "bidirectional".
+  /// dropout: Dropout probability. When set to 0., dropout is disabled.
+  /// seed: The 1st part of a seed to initialize dropout.
+  /// seed2: The 2nd part of a seed to initialize dropout.
+  /// input: A 3-D tensor with the shape of [seq_length, batch_size, input_size].
+  /// input_h: A 3-D tensor with the shape of [num_layer * dir, batch_size,
+  ///   num_units].
+  /// input_c: For LSTM, a 3-D tensor with the shape of
+  ///   [num_layer * dir, batch, num_units]. For other models, it is ignored.
+  /// params: A 1-D tensor that contains the weights and biases in an opaque layout.
+  ///   The size must be created through CudnnRNNParamsSize, and initialized
+  ///   separately. Note that they might not be compatible across different
+  ///   generations. So it is a good idea to save and restore
+  /// output: A 3-D tensor with the shape of [seq_length, batch_size,
+  ///   dir * num_units].
+  /// output_h: The same shape as input_h.
+  /// output_c: The same shape as input_c for LSTM. An empty tensor for other models.
+  /// is_training: Indicates whether this operation is used for inference or
+  ///   training.
+  /// reserve_space: An opaque tensor that can be used in backprop calculation. It
+  ///   is only produced if is_training is true.
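+  ///
+  /// A shape sketch for a single-layer unidirectional LSTM (sizes illustrative;
+  /// assumes a CUDA-capable eager runtime and an opaque `params` buffer of the
+  /// size reported by `cudnnRNNParamsSize`):
+  ///
+  /// ```swift
+  /// // seq_length = 5, batch_size = 2, input_size = 3, num_units = 4, 1 layer.
+  /// let x = Tensor<Float>(zeros: [5, 2, 3])
+  /// let h = Tensor<Float>(zeros: [1, 2, 4])
+  /// let c = Tensor<Float>(zeros: [1, 2, 4])
+  /// let (output, outputH, outputC, _) = _Raw.cudnnRNN(
+  ///   x, inputH: h, inputC: c, params: params)
+  /// // output.shape == [5, 2, 4]  (dir == 1)
+  /// ```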
+  @inlinable @inline(__always)
+  public static func cudnnRNN(
+    _ input: Tensor,
+    inputH: Tensor,
+    inputC: Tensor,
+    params: Tensor,
+    rnnMode: RnnMode = .lstm,
+    inputMode: InputMode = .linearInput,
+    direction: Direction = .unidirectional,
+    dropout: Double = 0,
+    seed: Int64 = 0,
+    seed2: Int64 = 0,
+    isTraining: Bool = true
+  ) -> (output: Tensor, outputH: Tensor, outputC: Tensor, reserveSpace: Tensor) {
+    _RawTFEager.cudnnRNN(
+      input, inputH: inputH, inputC: inputC, params: params, rnnMode: rnnMode,
+      inputMode: inputMode, direction: direction, dropout: dropout, seed: seed, seed2: seed2,
+      isTraining: isTraining)
+  }
+
+  /// Backprop step of CudnnRNN.
+  ///
+  /// Compute the backprop of both data and weights in a RNN.
+  ///
+  /// rnn_mode: Indicates the type of the RNN model.
+  /// input_mode: Indicates whether there is a linear projection between the input and
+  ///   the actual computation before the first layer. 'skip_input' is only allowed
+  ///   when input_size == num_units; 'auto_select' implies 'skip_input' when
+  ///   input_size == num_units; otherwise, it implies 'linear_input'.
+  /// direction: Indicates whether a bidirectional model will be used. Should be
+  ///   "unidirectional" or "bidirectional".
+  /// dropout: Dropout probability. When set to 0., dropout is disabled.
+  /// seed: The 1st part of a seed to initialize dropout.
+  /// seed2: The 2nd part of a seed to initialize dropout.
+  /// input: A 3-D tensor with the shape of [seq_length, batch_size, input_size].
+  /// input_h: A 3-D tensor with the shape of [num_layer * dir, batch_size,
+  ///   num_units].
+  /// input_c: For LSTM, a 3-D tensor with the shape of
+  ///   [num_layer * dir, batch, num_units]. For other models, it is ignored.
+  /// params: A 1-D tensor that contains the weights and biases in an opaque layout.
+  ///   The size must be created through CudnnRNNParamsSize, and initialized
+  ///   separately. Note that they might not be compatible across different
+  ///   generations. So it is a good idea to save and restore
+  /// output: A 3-D tensor with the shape of [seq_length, batch_size,
+  ///   dir * num_units].
+  /// output_h: The same shape as input_h.
+  /// output_c: The same shape as input_c for LSTM. An empty tensor for other models.
+  /// output_backprop: A 3-D tensor with the same shape as output in the forward pass.
+  /// output_h_backprop: A 3-D tensor with the same shape as output_h in the forward
+  ///   pass.
+  /// output_c_backprop: A 3-D tensor with the same shape as output_c in the forward
+  ///   pass.
+  /// reserve_space: The same reserve_space produced in the forward operation.
+  /// input_backprop: The backprop to input in the forward pass. Has the same shape
+  ///   as input.
+  /// input_h_backprop: The backprop to input_h in the forward pass. Has the same
+  ///   shape as input_h.
+  /// input_c_backprop: The backprop to input_c in the forward pass. Has the same
+  ///   shape as input_c.
+  /// params_backprop: The backprop to the params buffer in the forward pass. Has the
+  ///   same shape as params.
+  @inlinable @inline(__always)
+  public static func cudnnRNNBackprop(
+    _ input: Tensor,
+    inputH: Tensor,
+    inputC: Tensor,
+    params: Tensor,
+    output: Tensor,
+    outputH: Tensor,
+    outputC: Tensor,
+    outputBackprop: Tensor,
+    outputHBackprop: Tensor,
+    outputCBackprop: Tensor,
+    reserveSpace: Tensor,
+    rnnMode: RnnMode = .lstm,
+    inputMode: InputMode = .linearInput,
+    direction: Direction = .unidirectional,
+    dropout: Double = 0,
+    seed: Int64 = 0,
+    seed2: Int64 = 0
+  ) -> (
+    inputBackprop: Tensor, inputHBackprop: Tensor, inputCBackprop: Tensor,
+    paramsBackprop: Tensor
+  ) {
+    _RawTFEager.cudnnRNNBackprop(
+      input, inputH: inputH, inputC: inputC, params: params, output: output, outputH: outputH,
+      outputC: outputC, outputBackprop: outputBackprop, outputHBackprop: outputHBackprop,
+      outputCBackprop: outputCBackprop, reserveSpace: reserveSpace, rnnMode: rnnMode,
+      inputMode: inputMode, direction: direction, dropout: dropout, seed: seed, seed2: seed2)
+  }
+
+  /// Backprop step of CudnnRNN.
+  ///
+  /// Compute the backprop of both data and weights in a RNN. Takes an extra
+  /// "host_reserved" input compared to CudnnRNNBackprop, which is used to determine RNN
+  /// cudnnRNNAlgo_t and cudnnMathType_t.
+  ///
+  /// rnn_mode: Indicates the type of the RNN model.
+  /// input_mode: Indicates whether there is a linear projection between the input and
+  ///   the actual computation before the first layer. 'skip_input' is only allowed
+  ///   when input_size == num_units; 'auto_select' implies 'skip_input' when
+  ///   input_size == num_units; otherwise, it implies 'linear_input'.
+  /// direction: Indicates whether a bidirectional model will be used. Should be
+  ///   "unidirectional" or "bidirectional".
+  /// dropout: Dropout probability. When set to 0., dropout is disabled.
+  /// seed: The 1st part of a seed to initialize dropout.
+  /// seed2: The 2nd part of a seed to initialize dropout.
+  /// input: A 3-D tensor with the shape of [seq_length, batch_size, input_size].
+  /// input_h: A 3-D tensor with the shape of [num_layer * dir, batch_size,
+  ///   num_units].
+  /// input_c: For LSTM, a 3-D tensor with the shape of
+  ///   [num_layer * dir, batch, num_units]. For other models, it is ignored.
+  /// params: A 1-D tensor that contains the weights and biases in an opaque layout.
+  ///   The size must be created through CudnnRNNParamsSize, and initialized
+  ///   separately. Note that they might not be compatible across different
+  ///   generations. So it is a good idea to save and restore
+  /// output: A 3-D tensor with the shape of [seq_length, batch_size,
+  ///   dir * num_units].
+  /// output_h: The same shape as input_h.
+  /// output_c: The same shape as input_c for LSTM. An empty tensor for other models.
+  /// output_backprop: A 3-D tensor with the same shape as output in the forward pass.
+  /// output_h_backprop: A 3-D tensor with the same shape as output_h in the forward
+  ///   pass.
+  /// output_c_backprop: A 3-D tensor with the same shape as output_c in the forward
+  ///   pass.
+  /// reserve_space: The same reserve_space produced in the forward operation.
+  /// host_reserved: The same host_reserved produced in the forward operation.
+  /// input_backprop: The backprop to input in the forward pass. Has the same shape
+  ///   as input.
+  /// input_h_backprop: The backprop to input_h in the forward pass. Has the same
+  ///   shape as input_h.
+  /// input_c_backprop: The backprop to input_c in the forward pass. Has the same
+  ///   shape as input_c.
+  /// params_backprop: The backprop to the params buffer in the forward pass. Has the
+  ///   same shape as params.
+  @inlinable @inline(__always)
+  public static func cudnnRNNBackpropV2(
+    _ input: Tensor,
+    inputH: Tensor,
+    inputC: Tensor,
+    params: Tensor,
+    output: Tensor,
+    outputH: Tensor,
+    outputC: Tensor,
+    outputBackprop: Tensor,
+    outputHBackprop: Tensor,
+    outputCBackprop: Tensor,
+    reserveSpace: Tensor,
+    hostReserved: Tensor,
+    rnnMode: RnnMode = .lstm,
+    inputMode: InputMode = .linearInput,
+    direction: Direction = .unidirectional,
+    dropout: Double = 0,
+    seed: Int64 = 0,
+    seed2: Int64 = 0
+  ) -> (
+    inputBackprop: Tensor, inputHBackprop: Tensor, inputCBackprop: Tensor,
+    paramsBackprop: Tensor
+  ) {
+    _RawTFEager.cudnnRNNBackpropV2(
+      input, inputH: inputH, inputC: inputC, params: params, output: output, outputH: outputH,
+      outputC: outputC, outputBackprop: outputBackprop, outputHBackprop: outputHBackprop,
+      outputCBackprop: outputCBackprop, reserveSpace: reserveSpace, hostReserved: hostReserved,
+      rnnMode: rnnMode, inputMode: inputMode, direction: direction, dropout: dropout, seed: seed,
+      seed2: seed2)
+  }
+
+  /// Backprop step of CudnnRNNV3.
+  ///
+  /// Compute the backprop of both data and weights in a RNN. Takes an extra
+  /// "sequence_lengths" input compared to CudnnRNNBackprop.
+  ///
+  /// rnn_mode: Indicates the type of the RNN model.
+  /// input_mode: Indicates whether there is a linear projection between the input and
+  ///   the actual computation before the first layer. 'skip_input' is only allowed
+  ///   when input_size == num_units; 'auto_select' implies 'skip_input' when
+  ///   input_size == num_units; otherwise, it implies 'linear_input'.
+  /// direction: Indicates whether a bidirectional model will be used. Should be
+  ///   "unidirectional" or "bidirectional".
+  /// dropout: Dropout probability. When set to 0., dropout is disabled.
+  /// seed: The 1st part of a seed to initialize dropout.
+  /// seed2: The 2nd part of a seed to initialize dropout.
+  /// input: If time_major is true, this is a 3-D tensor with the shape of
+  ///   [seq_length, batch_size, input_size]. If time_major is false, the shape is
+  ///   [batch_size, seq_length, input_size].
+  /// input_h: If time_major is true, this is a 3-D tensor with the shape of
+  ///   [num_layer * dir, batch_size, num_units]. If time_major is false, the shape
+  ///   is [batch_size, num_layer * dir, num_units].
+  /// input_c: For LSTM, a 3-D tensor with the shape of
+  ///   [num_layer * dir, batch, num_units]. For other models, it is ignored.
+  /// params: A 1-D tensor that contains the weights and biases in an opaque layout.
+  ///   The size must be created through CudnnRNNParamsSize, and initialized
+  ///   separately. Note that they might not be compatible across different
+  ///   generations. So it is a good idea to save and restore
+  /// sequence_lengths: a vector of lengths of each input sequence.
+  /// output: If time_major is true, this is a 3-D tensor with the shape of
+  ///   [seq_length, batch_size, dir * num_units]. If time_major is false, the
+  ///   shape is [batch_size, seq_length, dir * num_units].
+  /// output_h: The same shape as input_h.
+  /// output_c: The same shape as input_c for LSTM. An empty tensor for other models.
+  /// output_backprop: A 3-D tensor with the same shape as output in the forward pass.
+  /// output_h_backprop: A 3-D tensor with the same shape as output_h in the forward
+  ///   pass.
+  /// output_c_backprop: A 3-D tensor with the same shape as output_c in the forward
+  ///   pass.
+  /// time_major: Indicates whether the input/output format is time major or batch
+  ///   major.
+  /// reserve_space: The same reserve_space produced in the forward operation.
+  /// input_backprop: The backprop to input in the forward pass. Has the same shape
+  ///   as input.
+  /// input_h_backprop: The backprop to input_h in the forward pass. Has the same
+  ///   shape as input_h.
+  /// input_c_backprop: The backprop to input_c in the forward pass. Has the same
+  ///   shape as input_c.
+  /// params_backprop: The backprop to the params buffer in the forward pass. Has the
+  ///   same shape as params.
+  @inlinable @inline(__always)
+  public static func cudnnRNNBackpropV3(
+    _ input: Tensor,
+    inputH: Tensor,
+    inputC: Tensor,
+    params: Tensor,
+    sequenceLengths: Tensor,
+    output: Tensor,
+    outputH: Tensor,
+    outputC: Tensor,
+    outputBackprop: Tensor,
+    outputHBackprop: Tensor,
+    outputCBackprop: Tensor,
+    reserveSpace: Tensor,
+    hostReserved: Tensor,
+    rnnMode: RnnMode = .lstm,
+    inputMode: InputMode = .linearInput,
+    direction: Direction = .unidirectional,
+    dropout: Double = 0,
+    seed: Int64 = 0,
+    seed2: Int64 = 0,
+    numProj: Int64 = 0,
+    timeMajor: Bool = true
+  ) -> (
+    inputBackprop: Tensor, inputHBackprop: Tensor, inputCBackprop: Tensor,
+    paramsBackprop: Tensor
+  ) {
+    _RawTFEager.cudnnRNNBackpropV3(
+      input, inputH: inputH, inputC: inputC, params: params, sequenceLengths: sequenceLengths,
+      output: output, outputH: outputH, outputC: outputC, outputBackprop: outputBackprop,
+      outputHBackprop: outputHBackprop, outputCBackprop: outputCBackprop,
+      reserveSpace: reserveSpace, hostReserved: hostReserved, rnnMode: rnnMode,
+      inputMode: inputMode, direction: direction, dropout: dropout, seed: seed, seed2: seed2,
+      numProj: numProj, timeMajor: timeMajor)
+  }
+
+  /// Converts CudnnRNN params from canonical form to usable form.
+  ///
+  /// Writes a set of weights into the opaque params buffer so they can be used in
+  /// upcoming training or inference.
+  ///
+  /// Note that the params buffer may not be compatible across different GPUs. So any
+  /// save and restoration should be converted to and from the canonical weights and
+  /// biases.
+  ///
+  /// num_layers: Specifies the number of layers in the RNN model.
+  /// num_units: Specifies the size of the hidden state.
+  /// input_size: Specifies the size of the input state.
+  /// weights: the canonical form of weights that can be used for saving
+  ///   and restoration. They are more likely to be compatible across different
+  ///   generations.
+  /// biases: the canonical form of biases that can be used for saving
+  ///   and restoration. They are more likely to be compatible across different
+  ///   generations.
+  /// num_params: number of parameter sets for all layers.
+  ///   Each layer may contain multiple parameter sets, with each set consisting of
+  ///   a weight matrix and a bias vector.
+  /// rnn_mode: Indicates the type of the RNN model.
+  /// input_mode: Indicates whether there is a linear projection between the input and
+  ///   the actual computation before the first layer. 'skip_input' is only allowed
+  ///   when input_size == num_units; 'auto_select' implies 'skip_input' when
+  ///   input_size == num_units; otherwise, it implies 'linear_input'.
+  /// direction: Indicates whether a bidirectional model will be used.
+  ///   dir = (direction == bidirectional) ? 2 : 1
+  /// dropout: dropout probability. When set to 0., dropout is disabled.
+  /// seed: the 1st part of a seed to initialize dropout.
+  /// seed2: the 2nd part of a seed to initialize dropout.
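+  ///
+  /// A call sketch (illustrative): `weights` and `biases` here are assumed to be
+  /// the canonical tensors previously obtained from `cudnnRNNParamsToCanonical`.
+  ///
+  /// ```swift
+  /// let params: Tensor<Float> = _Raw.cudnnRNNCanonicalToParams(
+  ///   numLayers: Tensor<Int32>(1), numUnits: Tensor<Int32>(4),
+  ///   inputSize: Tensor<Int32>(3), weights: weights, biases: biases)
+  /// ```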
+  @inlinable @inline(__always)
+  public static func cudnnRNNCanonicalToParams(
+    numLayers: Tensor,
+    numUnits: Tensor,
+    inputSize: Tensor,
+    weights: [Tensor],
+    biases: [Tensor],
+    rnnMode: RnnMode = .lstm,
+    inputMode: InputMode = .linearInput,
+    direction: Direction = .unidirectional,
+    dropout: Double = 0,
+    seed: Int64 = 0,
+    seed2: Int64 = 0
+  ) -> Tensor {
+    switch commonBackend(
+      commonBackend(
+        commonBackend(
+          commonBackend(numLayers.handle.backend, numUnits.handle.backend),
+          inputSize.handle.backend), commonBackend(weights)), commonBackend(biases))
+    {
+    case .XLA:
+      let output_device = inputSize.device
+      let numLayers = Tensor(copying: numLayers, to: .defaultTFEager)
+      let numUnits = Tensor(copying: numUnits, to: .defaultTFEager)
+      let inputSize = Tensor(copying: inputSize, to: .defaultTFEager)
+      let weights = [Tensor](copying: weights, to: .defaultTFEager)
+      let biases = [Tensor](copying: biases, to: .defaultTFEager)
+      return Tensor(
+        copying: _RawTFEager.cudnnRNNCanonicalToParams(
+          numLayers: numLayers, numUnits: numUnits, inputSize: inputSize, weights: weights,
+          biases: biases, rnnMode: rnnMode, inputMode: inputMode, direction: direction,
+          dropout: dropout, seed: seed, seed2: seed2), to: output_device)
+    case .TF_EAGER:
+      return _RawTFEager.cudnnRNNCanonicalToParams(
+        numLayers: numLayers, numUnits: numUnits, inputSize: inputSize, weights: weights,
+        biases: biases, rnnMode: rnnMode, inputMode: inputMode, direction: direction,
+        dropout: dropout, seed: seed, seed2: seed2)
+    }
+
+  }
+
+  /// Converts CudnnRNN params from canonical form to usable form. It supports the projection in LSTM.
+  ///
+  /// Writes a set of weights into the opaque params buffer so they can be used in
+  /// upcoming training or inference.
+  ///
+  /// Note that the params buffer may not be compatible across different GPUs. So any
+  /// save and restoration should be converted to and from the canonical weights and
+  /// biases.
+  ///
+  /// num_layers: Specifies the number of layers in the RNN model.
+  /// num_units: Specifies the size of the hidden state.
+  /// input_size: Specifies the size of the input state.
+  /// weights: the canonical form of weights that can be used for saving
+  ///   and restoration. They are more likely to be compatible across different
+  ///   generations.
+  /// biases: the canonical form of biases that can be used for saving
+  ///   and restoration. They are more likely to be compatible across different
+  ///   generations.
+  /// num_params_weights: number of weight parameter matrices for all layers.
+  /// num_params_biases: number of bias parameter vectors for all layers.
+  /// rnn_mode: Indicates the type of the RNN model.
+  /// input_mode: Indicates whether there is a linear projection between the input and
+  ///   the actual computation before the first layer. 'skip_input' is only allowed
+  ///   when input_size == num_units; 'auto_select' implies 'skip_input' when
+  ///   input_size == num_units; otherwise, it implies 'linear_input'.
+  /// direction: Indicates whether a bidirectional model will be used.
+  ///   dir = (direction == bidirectional) ? 2 : 1
+  /// dropout: dropout probability. When set to 0., dropout is disabled.
+  /// seed: the 1st part of a seed to initialize dropout.
+  /// seed2: the 2nd part of a seed to initialize dropout.
+  /// num_proj: The output dimensionality for the projection matrices. If None or 0,
+  ///   no projection is performed.
+  @inlinable @inline(__always)
+  public static func cudnnRNNCanonicalToParamsV2(
+    numLayers: Tensor,
+    numUnits: Tensor,
+    inputSize: Tensor,
+    weights: [Tensor],
+    biases: [Tensor],
+    rnnMode: RnnMode = .lstm,
+    inputMode: InputMode = .linearInput,
+    direction: Direction = .unidirectional,
+    dropout: Double = 0,
+    seed: Int64 = 0,
+    seed2: Int64 = 0,
+    numProj: Int64 = 0
+  ) -> Tensor {
+    switch commonBackend(
+      commonBackend(
+        commonBackend(
+          commonBackend(numLayers.handle.backend, numUnits.handle.backend),
+          inputSize.handle.backend), commonBackend(weights)), commonBackend(biases))
+    {
+    case .XLA:
+      let output_device = inputSize.device
+      let numLayers = Tensor(copying: numLayers, to: .defaultTFEager)
+      let numUnits = Tensor(copying: numUnits, to: .defaultTFEager)
+      let inputSize = Tensor(copying: inputSize, to: .defaultTFEager)
+      let weights = [Tensor](copying: weights, to: .defaultTFEager)
+      let biases = [Tensor](copying: biases, to: .defaultTFEager)
+      return Tensor(
+        copying: _RawTFEager.cudnnRNNCanonicalToParamsV2(
+          numLayers: numLayers, numUnits: numUnits, inputSize: inputSize, weights: weights,
+          biases: biases, rnnMode: rnnMode, inputMode: inputMode, direction: direction,
+          dropout: dropout, seed: seed, seed2: seed2, numProj: numProj), to: output_device)
+    case .TF_EAGER:
+      return _RawTFEager.cudnnRNNCanonicalToParamsV2(
+        numLayers: numLayers, numUnits: numUnits, inputSize: inputSize, weights: weights,
+        biases: biases, rnnMode: rnnMode, inputMode: inputMode, direction: direction,
+        dropout: dropout, seed: seed, seed2: seed2, numProj: numProj)
+    }
+
+  }
+
+  /// Computes size of weights that can be used by a Cudnn RNN model.
+  ///
+  /// Returns the params size that can be used by the Cudnn RNN model. Subsequent
+  /// weight allocation and initialization should use this size.
+  ///
+  /// num_layers: Specifies the number of layers in the RNN model.
+  /// num_units: Specifies the size of the hidden state.
+  /// input_size: Specifies the size of the input state.
+  /// rnn_mode: Indicates the type of the RNN model.
+  /// input_mode: Indicates whether there is a linear projection between the input and
+  ///   the actual computation before the first layer. 'skip_input' is only allowed
+  ///   when input_size == num_units; 'auto_select' implies 'skip_input' when
+  ///   input_size == num_units; otherwise, it implies 'linear_input'.
+  /// direction: Indicates whether a bidirectional model will be used.
+  ///   dir = (direction == bidirectional) ? 2 : 1
+  /// dropout: dropout probability. When set to 0., dropout is disabled.
+  /// seed: the 1st part of a seed to initialize dropout.
+  /// seed2: the 2nd part of a seed to initialize dropout.
+  /// params_size: The size of the params buffer that should be allocated and
+  ///   initialized for this RNN model. Note that this params buffer may not be
+  ///   compatible across GPUs. Please use CudnnRNNParamsWeights and
+  ///   CudnnRNNParamsBiases to save and restore them in a way that is compatible
+  ///   across different runs.
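+  ///
+  /// A call sketch (sizes illustrative): query the buffer size for a
+  /// single-layer LSTM before allocating `params`.
+  ///
+  /// ```swift
+  /// let paramsSize: Tensor<Int32> = _Raw.cudnnRNNParamsSize(
+  ///   numLayers: Tensor<Int32>(1), numUnits: Tensor<Int32>(4),
+  ///   inputSize: Tensor<Int32>(3), t: Float.tensorFlowDataType)
+  /// ```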
+  @inlinable @inline(__always)
+  public static func cudnnRNNParamsSize(
+    numLayers: Tensor,
+    numUnits: Tensor,
+    inputSize: Tensor,
+    t: TensorDataType,
+    rnnMode: RnnMode = .lstm,
+    inputMode: InputMode = .linearInput,
+    direction: Direction = .unidirectional,
+    dropout: Double = 0,
+    seed: Int64 = 0,
+    seed2: Int64 = 0,
+    numProj: Int64 = 0
+  ) -> Tensor {
+    switch commonBackend(
+      commonBackend(numLayers.handle.backend, numUnits.handle.backend), inputSize.handle.backend)
+    {
+    case .XLA:
+      let output_device = inputSize.device
+      let numLayers = Tensor(copying: numLayers, to: .defaultTFEager)
+      let numUnits = Tensor(copying: numUnits, to: .defaultTFEager)
+      let inputSize = Tensor(copying: inputSize, to: .defaultTFEager)
+      return Tensor(
+        copying: _RawTFEager.cudnnRNNParamsSize(
+          numLayers: numLayers, numUnits: numUnits, inputSize: inputSize, t: t, rnnMode: rnnMode,
+          inputMode: inputMode, direction: direction, dropout: dropout, seed: seed, seed2: seed2,
+          numProj: numProj), to: output_device)
+    case .TF_EAGER:
+      return _RawTFEager.cudnnRNNParamsSize(
+        numLayers: numLayers, numUnits: numUnits, inputSize: inputSize, t: t, rnnMode: rnnMode,
+        inputMode: inputMode, direction: direction, dropout: dropout, seed: seed, seed2: seed2,
+        numProj: numProj)
+    }
+
+  }
+
+  /// Retrieves CudnnRNN params in canonical form.
+  ///
+  /// Retrieves a set of weights from the opaque params buffer that can be saved and
+  /// restored in a way compatible with future runs.
+  ///
+  /// Note that the params buffer may not be compatible across different GPUs. So any
+  /// save and restoration should be converted to and from the canonical weights and
+  /// biases.
+  ///
+  /// num_layers: Specifies the number of layers in the RNN model.
+  /// num_units: Specifies the size of the hidden state.
+  /// input_size: Specifies the size of the input state.
+  /// num_params: number of parameter sets for all layers.
+  ///   Each layer may contain multiple parameter sets, with each set consisting of
+  ///   a weight matrix and a bias vector.
+  /// weights: the canonical form of weights that can be used for saving
+  ///   and restoration. They are more likely to be compatible across different
+  ///   generations.
+  /// biases: the canonical form of biases that can be used for saving
+  ///   and restoration. They are more likely to be compatible across different
+  ///   generations.
+  /// rnn_mode: Indicates the type of the RNN model.
+  /// input_mode: Indicates whether there is a linear projection between the input and
+  ///   the actual computation before the first layer. 'skip_input' is only allowed
+  ///   when input_size == num_units; 'auto_select' implies 'skip_input' when
+  ///   input_size == num_units; otherwise, it implies 'linear_input'.
+  /// direction: Indicates whether a bidirectional model will be used.
+  ///   dir = (direction == bidirectional) ? 2 : 1
+  /// dropout: dropout probability. When set to 0., dropout is disabled.
+  /// seed: the 1st part of a seed to initialize dropout.
+  /// seed2: the 2nd part of a seed to initialize dropout.
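+  ///
+  /// A call sketch (illustrative): unpack an opaque `params` buffer (assumed
+  /// `Tensor<Float>`) into canonical weights and biases; a single-layer
+  /// unidirectional LSTM typically has 8 parameter sets.
+  ///
+  /// ```swift
+  /// let (weights, biases) = _Raw.cudnnRNNParamsToCanonical(
+  ///   numLayers: Tensor<Int32>(1), numUnits: Tensor<Int32>(4),
+  ///   inputSize: Tensor<Int32>(3), params: params, numParams: 8)
+  /// ```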
+  @inlinable @inline(__always)
+  public static func cudnnRNNParamsToCanonical(
+    numLayers: Tensor,
+    numUnits: Tensor,
+    inputSize: Tensor,
+    params: Tensor,
+    numParams: Int64,
+    rnnMode: RnnMode = .lstm,
+    inputMode: InputMode = .linearInput,
+    direction: Direction = .unidirectional,
+    dropout: Double = 0,
+    seed: Int64 = 0,
+    seed2: Int64 = 0
+  ) -> (weights: [Tensor], biases: [Tensor]) {
+    _RawTFEager.cudnnRNNParamsToCanonical(
+      numLayers: numLayers, numUnits: numUnits, inputSize: inputSize, params: params,
+      numParams: numParams, rnnMode: rnnMode, inputMode: inputMode, direction: direction,
+      dropout: dropout, seed: seed, seed2: seed2)
+  }
+
+  /// Retrieves CudnnRNN params in canonical form. It supports the projection in LSTM.
+  ///
+  /// Retrieves a set of weights from the opaque params buffer that can be saved and
+  /// restored in a way compatible with future runs.
+  ///
+  /// Note that the params buffer may not be compatible across different GPUs. So any
+  /// save and restoration should be converted to and from the canonical weights and
+  /// biases.
+  ///
+  /// num_layers: Specifies the number of layers in the RNN model.
+  /// num_units: Specifies the size of the hidden state.
+  /// input_size: Specifies the size of the input state.
+  /// num_params_weights: number of weight parameter matrices for all layers.
+  /// num_params_biases: number of bias parameter vectors for all layers.
+  /// weights: the canonical form of weights that can be used for saving
+  ///   and restoration. They are more likely to be compatible across different
+  ///   generations.
+  /// biases: the canonical form of biases that can be used for saving
+  ///   and restoration. They are more likely to be compatible across different
+  ///   generations.
+  /// rnn_mode: Indicates the type of the RNN model.
+  /// input_mode: Indicates whether there is a linear projection between the input and
+  ///   the actual computation before the first layer. 'skip_input' is only allowed
+  ///   when input_size == num_units; 'auto_select' implies 'skip_input' when
+  ///   input_size == num_units; otherwise, it implies 'linear_input'.
+  /// direction: Indicates whether a bidirectional model will be used.
+  ///   dir = (direction == bidirectional) ? 2 : 1
+  /// dropout: dropout probability. When set to 0., dropout is disabled.
+  /// seed: the 1st part of a seed to initialize dropout.
+  /// seed2: the 2nd part of a seed to initialize dropout.
+  /// num_proj: The output dimensionality for the projection matrices. If None or 0,
+  ///   no projection is performed.
+  @inlinable @inline(__always)
+  public static func cudnnRNNParamsToCanonicalV2(
+    numLayers: Tensor,
+    numUnits: Tensor,
+    inputSize: Tensor,
+    params: Tensor,
+    numParamsWeights: Int64,
+    numParamsBiases: Int64,
+    rnnMode: RnnMode = .lstm,
+    inputMode: InputMode = .linearInput,
+    direction: Direction = .unidirectional,
+    dropout: Double = 0,
+    seed: Int64 = 0,
+    seed2: Int64 = 0,
+    numProj: Int64 = 0
+  ) -> (weights: [Tensor], biases: [Tensor]) {
+    _RawTFEager.cudnnRNNParamsToCanonicalV2(
+      numLayers: numLayers, numUnits: numUnits, inputSize: inputSize, params: params,
+      numParamsWeights: numParamsWeights, numParamsBiases: numParamsBiases, rnnMode: rnnMode,
+      inputMode: inputMode, direction: direction, dropout: dropout, seed: seed, seed2: seed2,
+      numProj: numProj)
+  }
+
+  /// An RNN backed by cuDNN.
+  ///
+  /// Computes the RNN from the input and initial states, with respect to the params
+  /// buffer. Produces one extra "host_reserved" output compared to CudnnRNN.
+  ///
+  /// rnn_mode: Indicates the type of the RNN model.
+  /// input_mode: Indicates whether there is a linear projection between the input and
+  ///   the actual computation before the first layer. 'skip_input' is only allowed
+  ///   when input_size == num_units; 'auto_select' implies 'skip_input' when
+  ///   input_size == num_units; otherwise, it implies 'linear_input'.
+  /// direction: Indicates whether a bidirectional model will be used. Should be
+  ///   "unidirectional" or "bidirectional".
+  /// dropout: Dropout probability. When set to 0., dropout is disabled.
+  /// seed: The 1st part of a seed to initialize dropout.
+  /// seed2: The 2nd part of a seed to initialize dropout.
+  /// input: A 3-D tensor with the shape of [seq_length, batch_size, input_size].
+  /// input_h: A 3-D tensor with the shape of [num_layer * dir, batch_size,
+  ///   num_units].
+  /// input_c: For LSTM, a 3-D tensor with the shape of
+  ///   [num_layer * dir, batch, num_units]. For other models, it is ignored.
+  /// params: A 1-D tensor that contains the weights and biases in an opaque layout.
+  ///   The size must be created through CudnnRNNParamsSize, and initialized
+  ///   separately. Note that they might not be compatible across different
+  ///   generations. So it is a good idea to save and restore
+  /// output: A 3-D tensor with the shape of [seq_length, batch_size,
+  ///   dir * num_units].
+  /// output_h: The same shape as input_h.
+  /// output_c: The same shape as input_c for LSTM. An empty tensor for other models.
+  /// is_training: Indicates whether this operation is used for inference or
+  ///   training.
+  /// reserve_space: An opaque tensor that can be used in backprop calculation. It
+  ///   is only produced if is_training is true.
+  /// host_reserved: An opaque tensor that can be used in backprop calculation. It is
+  ///   only produced if is_training is true. It is output on host memory rather than
+  ///   device memory.
+  @inlinable @inline(__always)
+  public static func cudnnRNNV2(
+    _ input: Tensor,
+    inputH: Tensor,
+    inputC: Tensor,
+    params: Tensor,
+    rnnMode: RnnMode = .lstm,
+    inputMode: InputMode = .linearInput,
+    direction: Direction = .unidirectional,
+    dropout: Double = 0,
+    seed: Int64 = 0,
+    seed2: Int64 = 0,
+    isTraining: Bool = true
+  ) -> (
+    output: Tensor, outputH: Tensor, outputC: Tensor, reserveSpace: Tensor,
+    hostReserved: Tensor
+  ) {
+    _RawTFEager.cudnnRNNV2(
+      input, inputH: inputH, inputC: inputC, params: params, rnnMode: rnnMode,
+      inputMode: inputMode, direction: direction, dropout: dropout, seed: seed, seed2: seed2,
+      isTraining: isTraining)
+  }
+
+  /// An RNN backed by cuDNN.
+  ///
+  /// Computes the RNN from the input and initial states, with respect to the params
+  /// buffer. Accepts one extra "sequence_lengths" input compared to CudnnRNN.
+  ///
+  /// rnn_mode: Indicates the type of the RNN model.
+  /// input_mode: Indicates whether there is a linear projection between the input and
+  ///   the actual computation before the first layer. 'skip_input' is only allowed
+  ///   when input_size == num_units; 'auto_select' implies 'skip_input' when
+  ///   input_size == num_units; otherwise, it implies 'linear_input'.
+  /// direction: Indicates whether a bidirectional model will be used. Should be
+  ///   "unidirectional" or "bidirectional".
+  /// dropout: Dropout probability. When set to 0., dropout is disabled.
+  /// seed: The 1st part of a seed to initialize dropout.
+  /// seed2: The 2nd part of a seed to initialize dropout.
+  /// input: If time_major is true, this is a 3-D tensor with the shape of
+  ///     [seq_length, batch_size, input_size]. If time_major is false, the shape is
+  ///     [batch_size, seq_length, input_size].
+  /// input_h: If time_major is true, this is a 3-D tensor with the shape of
+  ///     [num_layer * dir, batch_size, num_units]. If time_major is false, the shape
+  ///     is [batch_size, num_layer * dir, num_units].
+  /// input_c: For LSTM, a 3-D tensor with the shape of
+  ///     [num_layer * dir, batch, num_units]. For other models, it is ignored.
+  /// params: A 1-D tensor that contains the weights and biases in an opaque layout.
+  ///     The size must be created through CudnnRNNParamsSize, and initialized
+  ///     separately. Note that they might not be compatible across different
+  ///     generations. So it is a good idea to save and restore them via the
+  ///     canonical weights and biases.
+  /// sequence_lengths: a vector of lengths of each input sequence.
+  /// output: If time_major is true, this is a 3-D tensor with the shape of
+  ///     [seq_length, batch_size, dir * num_units]. If time_major is false, the
+  ///     shape is [batch_size, seq_length, dir * num_units].
+  /// output_h: The same shape as input_h.
+  /// output_c: The same shape as input_c for LSTM. An empty tensor for other models.
+  /// is_training: Indicates whether this operation is used for inference or
+  ///     training.
+  /// time_major: Indicates whether the input/output format is time major or batch
+  ///     major.
+  /// reserve_space: An opaque tensor that can be used in backprop calculation. It
+  ///     is only produced if is_training is true.
+  @inlinable @inline(__always)
+  public static func cudnnRNNV3<T: FloatingPoint & TensorFlowScalar>(
+    _ input: Tensor<T>,
+    inputH: Tensor<T>,
+    inputC: Tensor<T>,
+    params: Tensor<T>,
+    sequenceLengths: Tensor<Int32>,
+    rnnMode: RnnMode = .lstm,
+    inputMode: InputMode = .linearInput,
+    direction: Direction = .unidirectional,
+    dropout: Double = 0,
+    seed: Int64 = 0,
+    seed2: Int64 = 0,
+    numProj: Int64 = 0,
+    isTraining: Bool = true,
+    timeMajor: Bool = true
+  ) -> (
+    output: Tensor<T>, outputH: Tensor<T>, outputC: Tensor<T>, reserveSpace: Tensor<T>,
+    hostReserved: Tensor<Int8>
+  ) {
+    _RawTFEager.cudnnRNNV3(
+      input, inputH: inputH, inputC: inputC, params: params, sequenceLengths: sequenceLengths,
+      rnnMode: rnnMode, inputMode: inputMode, direction: direction, dropout: dropout, seed: seed,
+      seed2: seed2, numProj: numProj, isTraining: isTraining, timeMajor: timeMajor)
+  }
+
+  /// Compute the cumulative product of the tensor `x` along `axis`.
+  ///
+  /// By default, this op performs an inclusive cumprod, which means that the first
+  /// element of the input is identical to the first element of the output:
+  ///
+  /// ```python
+  /// tf.cumprod([a, b, c])  # => [a, a * b, a * b * c]
+  /// ```
+  ///
+  /// By setting the `exclusive` kwarg to `True`, an exclusive cumprod is
+  /// performed instead:
+  ///
+  /// ```python
+  /// tf.cumprod([a, b, c], exclusive=True)  # => [1, a, a * b]
+  /// ```
+  ///
+  /// By setting the `reverse` kwarg to `True`, the cumprod is performed in the
+  /// opposite direction:
+  ///
+  /// ```python
+  /// tf.cumprod([a, b, c], reverse=True)  # => [a * b * c, b * c, c]
+  /// ```
+  ///
+  /// This is more efficient than using separate `tf.reverse` ops.
+  ///
+  /// The `reverse` and `exclusive` kwargs can also be combined:
+  ///
+  /// ```python
+  /// tf.cumprod([a, b, c], exclusive=True, reverse=True)  # => [b * c, c, 1]
+  /// ```
+  ///
+  /// - Parameters:
+  ///   - x: A `Tensor`. Must be one of the following types: `float32`, `float64`,
+  ///     `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`,
+  ///     `complex128`, `qint8`, `quint8`, `qint32`, `half`.
+  ///   - axis: A `Tensor` of type `int32` (default: 0). Must be in the range
+  ///     `[-rank(x), rank(x))`.
+  ///
+  /// - Attrs:
+  ///   - exclusive: If `True`, perform exclusive cumprod.
+  ///   - reverse: A `bool` (default: False).
+  @inlinable @inline(__always)
+  public static func cumprod<
+    T: TensorFlowNumeric,
+    Tidx: TensorFlowIndex
+  >(
+    _ x: Tensor<T>,
+    axis: Tensor<Tidx>,
+    exclusive: Bool = false,
+    reverse: Bool = false
+  ) -> Tensor<T> {
+    switch commonBackend(x.handle.backend, axis.handle.backend) {
+    case .XLA:
+      return _RawXLA.cumprod(x, axis: axis, exclusive: exclusive, reverse: reverse)
+    case .TF_EAGER:
+      return _RawTFEager.cumprod(x, axis: axis, exclusive: exclusive, reverse: reverse)
+    }
+
+  }
+
+  /// Compute the cumulative sum of the tensor `x` along `axis`.
+  ///
+  /// By default, this op performs an inclusive cumsum, which means that the first
+  /// element of the input is identical to the first element of the output:
+  ///
+  /// ```python
+  /// tf.cumsum([a, b, c])  # => [a, a + b, a + b + c]
+  /// ```
+  ///
+  /// By setting the `exclusive` kwarg to `True`, an exclusive cumsum is
+  /// performed instead:
+  ///
+  /// ```python
+  /// tf.cumsum([a, b, c], exclusive=True)  # => [0, a, a + b]
+  /// ```
+  ///
+  /// By setting the `reverse` kwarg to `True`, the cumsum is performed in the
+  /// opposite direction:
+  ///
+  /// ```python
+  /// tf.cumsum([a, b, c], reverse=True)  # => [a + b + c, b + c, c]
+  /// ```
+  ///
+  /// This is more efficient than using separate `tf.reverse` ops.
+  ///
+  /// The `reverse` and `exclusive` kwargs can also be combined:
+  ///
+  /// ```python
+  /// tf.cumsum([a, b, c], exclusive=True, reverse=True)  # => [b + c, c, 0]
+  /// ```
+  ///
+  /// - Parameters:
+  ///   - x: A `Tensor`. Must be one of the following types: `float32`, `float64`,
+  ///     `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`,
+  ///     `complex128`, `qint8`, `quint8`, `qint32`, `half`.
+  ///   - axis: A `Tensor` of type `int32` (default: 0). Must be in the range
+  ///     `[-rank(x), rank(x))`.
+  ///
+  /// - Attrs:
+  ///   - exclusive: If `True`, perform exclusive cumsum.
+  ///   - reverse: A `bool` (default: False).
+  @inlinable @inline(__always)
+  public static func cumsum<
+    T: TensorFlowNumeric,
+    Tidx: TensorFlowIndex
+  >(
+    _ x: Tensor<T>,
+    axis: Tensor<Tidx>,
+    exclusive: Bool = false,
+    reverse: Bool = false
+  ) -> Tensor<T> {
+    switch commonBackend(x.handle.backend, axis.handle.backend) {
+    case .XLA:
+      return _RawXLA.cumsum(x, axis: axis, exclusive: exclusive, reverse: reverse)
+    case .TF_EAGER:
+      return _RawTFEager.cumsum(x, axis: axis, exclusive: exclusive, reverse: reverse)
+    }
+
+  }
+
+  /// Compute the cumulative log-sum-exp of the tensor `x` along `axis`.
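+  ///
+  /// A minimal Swift sketch of the same computation (values chosen only for
+  /// illustration):
+  ///
+  /// ```swift
+  /// let x: Tensor<Float> = [1, 2, 3]
+  /// let y = _Raw.cumulativeLogsumexp(x, axis: Tensor<Int32>(0))
+  /// // y[i] == log(exp(x[0]) + ... + exp(x[i]))
+  /// ```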
+  ///
+  /// By default, this op performs an inclusive cumulative log-sum-exp, which means
+  /// that the first element of the input is identical to the first element of the
+  /// output:
+  /// ```python
+  /// tf.math.cumulative_logsumexp([a, b, c])  # => [a, log(exp(a) + exp(b)), log(exp(a) + exp(b) + exp(c))]
+  /// ```
+  ///
+  /// By setting the `exclusive` kwarg to `True`, an exclusive cumulative log-sum-exp is
+  /// performed instead:
+  /// ```python
+  /// tf.math.cumulative_logsumexp([a, b, c], exclusive=True)  # => [-inf, a, log(exp(a) + exp(b))]
+  /// ```
+  /// Note that the neutral element of the log-sum-exp operation is `-inf`;
+  /// however, for performance reasons, the minimum value representable by the
+  /// floating-point type is used instead.
+  ///
+  /// By setting the `reverse` kwarg to `True`, the cumulative log-sum-exp is performed in the
+  /// opposite direction.
+  ///
+  /// - Parameters:
+  ///   - x: A `Tensor`. Must be one of the following types: `float16`, `float32`, `float64`.
+  ///   - axis: A `Tensor` of type `int32` (default: 0). Must be in the range
+  ///     `[-rank(x), rank(x))`.
+  ///
+  /// - Attrs:
+  ///   - exclusive: If `True`, perform exclusive cumulative log-sum-exp.
+  ///   - reverse: A `bool` (default: False).
+  @inlinable @inline(__always)
+  public static func cumulativeLogsumexp<
+    T: FloatingPoint & TensorFlowScalar,
+    Tidx: TensorFlowIndex
+  >(
+    _ x: Tensor<T>,
+    axis: Tensor<Tidx>,
+    exclusive: Bool = false,
+    reverse: Bool = false
+  ) -> Tensor<T> {
+    switch commonBackend(x.handle.backend, axis.handle.backend) {
+    case .XLA:
+      let output_device = axis.device
+      let x = Tensor(copying: x, to: .defaultTFEager)
+      let axis = Tensor(copying: axis, to: .defaultTFEager)
+      return Tensor(
+        copying: _RawTFEager.cumulativeLogsumexp(
+          x, axis: axis, exclusive: exclusive, reverse: reverse), to: output_device)
+    case .TF_EAGER:
+      return _RawTFEager.cumulativeLogsumexp(
+        x, axis: axis, exclusive: exclusive, reverse: reverse)
+    }
+
+  }
+
+  /// Returns the dimension index in the destination data format given the one in
+  /// the source data format.
+  ///
+  /// - Parameter x: A Tensor with each element as a dimension index in source data format.
+  ///   Must be in the range [-4, 4).
+  ///
+  /// - Attrs:
+  ///   - src_format: source data format.
+  ///   - dst_format: destination data format.
+  ///
+  /// - Output y: A Tensor with each element as a dimension index in destination data format.
+  @inlinable @inline(__always)
+  public static func dataFormatDimMap<T: TensorFlowIndex>(
+    _ x: Tensor<T>,
+    srcFormat: String = "NHWC",
+    dstFormat: String = "NCHW"
+  ) -> Tensor<T> {
+    switch x.handle.backend {
+    case .XLA:
+      let output_device = x.device
+      let x = Tensor(copying: x, to: .defaultTFEager)
+      return Tensor(
+        copying: _RawTFEager.dataFormatDimMap(x, srcFormat: srcFormat, dstFormat: dstFormat),
+        to: output_device)
+    case .TF_EAGER:
+      return _RawTFEager.dataFormatDimMap(x, srcFormat: srcFormat, dstFormat: dstFormat)
+    }
+
+  }
+
+  /// Returns the permuted vector/tensor in the destination data format given the
+  /// one in the source data format.
+  ///
+  /// - Parameter x: Vector of size 4 or Tensor of shape (4, 2) in source data format.
+  ///
+  /// - Attrs:
+  ///   - src_format: source data format.
+  ///   - dst_format: destination data format.
+  ///
+  /// - Output y: Vector of size 4 or Tensor of shape (4, 2) in destination data format.
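+  ///
+  /// A minimal sketch (default NHWC -> NCHW permutation; sizes illustrative):
+  ///
+  /// ```swift
+  /// let shape: Tensor<Int32> = [7, 14, 14, 64]       // N, H, W, C
+  /// let permuted = _Raw.dataFormatVecPermute(shape)  // [7, 64, 14, 14]
+  /// ```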
+ @inlinable @inline(__always) + public static func dataFormatVecPermute( + _ x: Tensor, + srcFormat: String = "NHWC", + dstFormat: String = "NCHW" + ) -> Tensor { + switch x.handle.backend { + case .XLA: + let output_device = x.device + let x = Tensor(copying: x, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.dataFormatVecPermute(x, srcFormat: srcFormat, dstFormat: dstFormat), + to: output_device) + case .TF_EAGER: + return _RawTFEager.dataFormatVecPermute(x, srcFormat: srcFormat, dstFormat: dstFormat) + } + + } + + /// Returns the cardinality of `input_dataset`. + /// + /// Returns the cardinality of `input_dataset`. + /// + /// - Parameter input_dataset: A variant tensor representing the dataset to return cardinality for. + /// + /// - Output cardinality: The cardinality of `input_dataset`. Named constants are used to represent + /// infinite and unknown cardinality. + @inlinable @inline(__always) + public static func datasetCardinality( + inputDataset: VariantHandle + ) -> Tensor { + _RawTFEager.datasetCardinality(inputDataset: inputDataset) + } + + /// Creates a dataset from the given `graph_def`. + /// + /// Creates a dataset from the provided `graph_def`. + /// + /// - Parameter graph_def: The graph representation of the dataset (as serialized GraphDef). + /// + /// - Output handle: A variant tensor representing the dataset. + @inlinable @inline(__always) + public static func datasetFromGraph( + graphDef: StringTensor + ) -> VariantHandle { + _RawTFEager.datasetFromGraph(graphDef: graphDef) + } + + /// Returns a serialized GraphDef representing `input_dataset`. + /// + /// Returns a graph representation for `input_dataset`. + /// + /// - Parameter input_dataset: A variant tensor representing the dataset to return the graph representation for. + /// + /// - Output graph: The graph representation of the dataset (as serialized GraphDef). + @inlinable @inline(__always) + public static func datasetToGraph( + inputDataset: VariantHandle, + statefulWhitelist: [String], + allowStateful: Bool = false, + stripDeviceAssignment: Bool = false + ) -> StringTensor { + _RawTFEager.datasetToGraph( + inputDataset: inputDataset, statefulWhitelist: statefulWhitelist, + allowStateful: allowStateful, stripDeviceAssignment: stripDeviceAssignment) + } + + /// Returns a serialized GraphDef representing `input_dataset`. + /// + /// Returns a graph representation for `input_dataset`. + /// + /// - Parameter input_dataset: A variant tensor representing the dataset to return the graph representation for. + /// + /// - Output graph: The graph representation of the dataset (as serialized GraphDef). + @inlinable @inline(__always) + public static func datasetToGraphV2( + inputDataset: VariantHandle, + externalStatePolicy: Int64 = 0, + stripDeviceAssignment: Bool = false + ) -> StringTensor { + _RawTFEager.datasetToGraphV2( + inputDataset: inputDataset, externalStatePolicy: externalStatePolicy, + stripDeviceAssignment: stripDeviceAssignment) + } + + /// Outputs the single element from the given dataset. + /// + /// - Parameter dataset: A handle to a dataset that contains a single element. + /// + /// - Output components: The components of the single element of `input`. + @inlinable @inline(__always) + public static func datasetToSingleElement( + dataset: VariantHandle, + outputShapes: [TensorShape?] + ) -> OutputTypes { + _RawTFEager.datasetToSingleElement(dataset: dataset, outputShapes: outputShapes) + } + + /// Writes the given dataset to the given file using the TFRecord format. 
+ /// + /// - Parameters: + /// - input_dataset: A variant tensor representing the dataset to write. + /// - filename: A scalar string tensor representing the filename to use. + /// - compression_type: A scalar string tensor containing either (i) the empty string (no + /// compression), (ii) "ZLIB", or (iii) "GZIP". + @inlinable @inline(__always) + public static func datasetToTFRecord( + inputDataset: VariantHandle, + filename: StringTensor, + compressionType: StringTensor + ) { + _RawTFEager.datasetToTFRecord( + inputDataset: inputDataset, filename: filename, compressionType: compressionType) + } + + /// Identity op for gradient debugging. + /// + /// This op is hidden from public in Python. It is used by TensorFlow Debugger to + /// register gradient tensors for gradient debugging. + /// This op operates on non-reference-type tensors. + @inlinable @inline(__always) + public static func debugGradientIdentity( + _ input: Tensor + ) -> Tensor { + switch input.handle.backend { + case .XLA: + let output_device = input.device + let input = Tensor(copying: input, to: .defaultTFEager) + return Tensor(copying: _RawTFEager.debugGradientIdentity(input), to: output_device) + case .TF_EAGER: + return _RawTFEager.debugGradientIdentity(input) + } + + } + + /// Provides an identity mapping of the non-Ref type input tensor for debugging. + /// + /// Provides an identity mapping of the non-Ref type input tensor for debugging. + /// + /// - Parameter input: Input tensor, non-Reference type + /// + /// - Attrs: + /// - device_name: Name of the device on which the tensor resides. + /// - tensor_name: Name of the input tensor. + /// - debug_urls: List of URLs to debug targets, e.g., + /// file:///foo/tfdbg_dump, grpc:://localhost:11011 + /// - gated_grpc: Whether this op will be gated. If any of the debug_urls of this + /// debug node is of the grpc:// scheme, when the value of this attribute is set + /// to True, the data will not actually be sent via the grpc stream unless this + /// debug op has been enabled at the debug_url. If all of the debug_urls of this + /// debug node are of the grpc:// scheme and the debug op is enabled at none of + /// them, the output will be an empty Tensor. + @inlinable @inline(__always) + public static func debugIdentity( + _ input: Tensor, + deviceName: String, + tensorName: String, + debugUrls: [String], + gatedGrpc: Bool = false + ) -> Tensor { + switch input.handle.backend { + case .XLA: + let output_device = input.device + let input = Tensor(copying: input, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.debugIdentity( + input, deviceName: deviceName, tensorName: tensorName, debugUrls: debugUrls, + gatedGrpc: gatedGrpc), to: output_device) + case .TF_EAGER: + return _RawTFEager.debugIdentity( + input, deviceName: deviceName, tensorName: tensorName, debugUrls: debugUrls, + gatedGrpc: gatedGrpc) + } + + } + + /// Debug Identity V2 Op. + /// + /// Provides an identity mapping from input to output, while writing the content of + /// the input tensor by calling DebugEventsWriter. + /// + /// The semantics of the input tensor depends on tensor_debug_mode. In typical + /// usage, the input tensor comes directly from the user computation only when + /// graph_debug_mode is FULL_TENSOR (see protobuf/debug_event.proto for a + /// list of all the possible values of graph_debug_mode). For the other debug modes, + /// the input tensor should be produced by an additional op or subgraph that + /// computes summary information about one or more tensors. 
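+  ///
+  /// A minimal call sketch (hypothetical identifiers; the op returns an identity
+  /// mapping of `input`):
+  ///
+  /// ```swift
+  /// let t: Tensor<Float> = [1, 2, 3]
+  /// let same = _Raw.debugIdentityV2(
+  ///   t, tfdbgContextId: "ctx0", opName: "t", debugUrls: [])
+  /// ```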
+  ///
+  /// - Parameter input: Input tensor, non-Reference type
+  ///
+  /// - Attrs:
+  ///   - tfdbg_context_id: A tfdbg-generated ID for the context that the op belongs to,
+  ///     e.g., a concrete compiled tf.function.
+  ///   - op_name: Optional. Name of the op that the debug op is concerned with.
+  ///     Used only for single-tensor trace.
+  ///   - output_slot: Optional. Output slot index of the tensor that the debug op
+  ///     is concerned with. Used only for single-tensor trace.
+  ///   - tensor_debug_mode: TensorDebugMode enum value. See debug_event.proto for details.
+  ///   - debug_urls: List of URLs to debug targets, e.g., file:///foo/tfdbg_dump.
+  @inlinable @inline(__always)
+  public static func debugIdentityV2<T: TensorFlowScalar>(
+    _ input: Tensor<T>,
+    tfdbgContextId: String,
+    opName: String,
+    outputSlot: Int64 = -1,
+    tensorDebugMode: Int64 = -1,
+    debugUrls: [String]
+  ) -> Tensor<T> {
+    switch input.handle.backend {
+    case .XLA:
+      let output_device = input.device
+      let input = Tensor(copying: input, to: .defaultTFEager)
+      return Tensor(
+        copying: _RawTFEager.debugIdentityV2(
+          input, tfdbgContextId: tfdbgContextId, opName: opName, outputSlot: outputSlot,
+          tensorDebugMode: tensorDebugMode, debugUrls: debugUrls), to: output_device)
+    case .TF_EAGER:
+      return _RawTFEager.debugIdentityV2(
+        input, tfdbgContextId: tfdbgContextId, opName: opName, outputSlot: outputSlot,
+        tensorDebugMode: tensorDebugMode, debugUrls: debugUrls)
+    }
+
+  }
+
+  /// Debug NaN Value Counter Op.
+  ///
+  /// Counts the number of NaNs in the input tensor, for debugging.
+  ///
+  /// - Parameter input: Input tensor, non-Reference type.
+  ///
+  /// - Attrs:
+  ///   - tensor_name: Name of the input tensor.
+  ///   - debug_urls: List of URLs to debug targets, e.g.,
+  ///     file:///foo/tfdbg_dump, grpc://localhost:11011.
+  ///   - gated_grpc: Whether this op will be gated. If any of the debug_urls of this
+  ///     debug node is of the grpc:// scheme, when the value of this attribute is set
+  ///     to True, the data will not actually be sent via the grpc stream unless this
+  ///     debug op has been enabled at the debug_url. If all of the debug_urls of this
+  ///     debug node are of the grpc:// scheme and the debug op is enabled at none of
+  ///     them, the output will be an empty Tensor.
+  @inlinable @inline(__always)
+  public static func debugNanCount<T: TensorFlowScalar>(
+    _ input: Tensor<T>,
+    deviceName: String,
+    tensorName: String,
+    debugUrls: [String],
+    gatedGrpc: Bool = false
+  ) -> Tensor<Int64> {
+    switch input.handle.backend {
+    case .XLA:
+      let output_device = input.device
+      let input = Tensor(copying: input, to: .defaultTFEager)
+      return Tensor(
+        copying: _RawTFEager.debugNanCount(
+          input, deviceName: deviceName, tensorName: tensorName, debugUrls: debugUrls,
+          gatedGrpc: gatedGrpc), to: output_device)
+    case .TF_EAGER:
+      return _RawTFEager.debugNanCount(
+        input, deviceName: deviceName, tensorName: tensorName, debugUrls: debugUrls,
+        gatedGrpc: gatedGrpc)
+    }
+
+  }
+
+  /// Debug Numeric Summary Op.
+  ///
+  /// Provides a basic summary of numeric value types, range and distribution.
+  ///
+  /// output: A double tensor of shape [14 + nDimensions], where nDimensions is
+  ///   the number of dimensions of the tensor's shape. The elements of output are:
+  ///   [0]: is initialized (1.0) or not (0.0).
+  ///   [1]: total number of elements
+  ///   [2]: NaN element count
+  ///   [3]: generalized -inf count: elements <= lower_bound. lower_bound is -inf by
+  ///     default.
+  ///   [4]: negative element count (excluding -inf), if lower_bound is the default
+  ///     -inf. Otherwise, this is the count of elements > lower_bound and < 0.
+  ///   [5]: zero element count
+  ///   [6]: positive element count (excluding +inf), if upper_bound is the default
+  ///     +inf. Otherwise, this is the count of elements < upper_bound and > 0.
+  ///   [7]: generalized +inf count, elements >= upper_bound. upper_bound is +inf by
+  ///     default.
+  /// Output elements [1:8] are all zero if the tensor is uninitialized.
+  ///   [8]: minimum of all non-inf and non-NaN elements.
+  ///     If uninitialized or no such element exists: +inf.
+  ///   [9]: maximum of all non-inf and non-NaN elements.
+  ///     If uninitialized or no such element exists: -inf.
+  ///   [10]: mean of all non-inf and non-NaN elements.
+  ///     If uninitialized or no such element exists: NaN.
+  ///   [11]: variance of all non-inf and non-NaN elements.
+  ///     If uninitialized or no such element exists: NaN.
+  ///   [12]: Data type of the tensor encoded as an enum integer. See the DataType
+  ///     proto for more details.
+  ///   [13]: Number of dimensions of the tensor (ndims).
+  ///   [14+]: Sizes of the dimensions.
+  ///
+  /// - Parameter input: Input tensor, non-Reference type.
+  ///
+  /// - Attrs:
+  ///   - tensor_name: Name of the input tensor.
+  ///   - debug_urls: List of URLs to debug targets, e.g.,
+  ///     file:///foo/tfdbg_dump, grpc://localhost:11011.
+  ///   - lower_bound: (float) The lower bound <= which values will be included in the
+  ///     generalized -inf count. Default: -inf.
+  ///   - upper_bound: (float) The upper bound >= which values will be included in the
+  ///     generalized +inf count. Default: +inf.
+  ///   - mute_if_healthy: (bool) Do not send data to the debug URLs unless at least one
+  ///     of elements [2], [3] and [7] (i.e., the nan count and the generalized -inf and
+  ///     inf counts) is non-zero.
+  ///   - gated_grpc: Whether this op will be gated. If any of the debug_urls of this
+  ///     debug node is of the grpc:// scheme, when the value of this attribute is set
+  ///     to True, the data will not actually be sent via the grpc stream unless this
+  ///     debug op has been enabled at the debug_url. If all of the debug_urls of this
+  ///     debug node are of the grpc:// scheme and the debug op is enabled at none of
+  ///     them, the output will be an empty Tensor.
+  @inlinable @inline(__always)
+  public static func debugNumericSummary<T: TensorFlowScalar>(
+    _ input: Tensor<T>,
+    deviceName: String,
+    tensorName: String,
+    debugUrls: [String],
+    lowerBound: Double = -Double.infinity,
+    upperBound: Double = Double.infinity,
+    muteIfHealthy: Bool = false,
+    gatedGrpc: Bool = false
+  ) -> Tensor<Double> {
+    switch input.handle.backend {
+    case .XLA:
+      let output_device = input.device
+      let input = Tensor(copying: input, to: .defaultTFEager)
+      return Tensor(
+        copying: _RawTFEager.debugNumericSummary(
+          input, deviceName: deviceName, tensorName: tensorName, debugUrls: debugUrls,
+          lowerBound: lowerBound, upperBound: upperBound, muteIfHealthy: muteIfHealthy,
+          gatedGrpc: gatedGrpc), to: output_device)
+    case .TF_EAGER:
+      return _RawTFEager.debugNumericSummary(
+        input, deviceName: deviceName, tensorName: tensorName, debugUrls: debugUrls,
+        lowerBound: lowerBound, upperBound: upperBound, muteIfHealthy: muteIfHealthy,
+        gatedGrpc: gatedGrpc)
+    }
+
+  }
+
+  /// Debug Numeric Summary V2 Op.
+  ///
+  /// - Parameter input: Input tensor, to be summarized by the op.
+  ///
+  /// - Attrs:
+  ///   - tensor_debug_mode: Tensor debug mode: the mode in which the input tensor is summarized
+  ///     by the op. See the TensorDebugMode enum in
+  ///     tensorflow/core/protobuf/debug_event.proto for details.
+ /// + /// Supported values: + /// 2 (CURT_HEALTH): Output a float32/64 tensor of shape [2]. The 1st + /// element is the tensor_id, if provided, and -1 otherwise. The 2nd + /// element is a bit which is set to 1 if the input tensor has an + /// infinity or nan value, or zero otherwise. + /// + /// 3 (CONCISE_HEALTH): Output a float32/64 tensor of shape [5]. The 1st + /// element is the tensor_id, if provided, and -1 otherwise. The + /// remaining four slots are the total number of elements, -infs, + /// +infs, and nans in the input tensor respectively. + /// + /// 4 (FULL_HEALTH): Output a float32/64 tensor of shape [11]. The 1st + /// element is the tensor_id, if provided, and -1 otherwise. The 2nd + /// element is the device_id, if provided, and -1 otherwise. The 3rd + /// element holds the datatype value of the input tensor as according + /// to the enumerated type in tensorflow/core/framework/types.proto. + /// The remaining elements hold the total number of elements, -infs, + /// +infs, nans, negative finite numbers, zeros, and positive finite + /// numbers in the input tensor respectively. + /// + /// 5 (SHAPE): Output a float32/64 tensor of shape [10]. The 1st + /// element is the tensor_id, if provided, and -1 otherwise. The 2nd + /// element holds the datatype value of the input tensor as according + /// to the enumerated type in tensorflow/core/framework/types.proto. + /// The 3rd element holds the rank of the tensor. The 4th element holds + /// the number of elements within the tensor. Finally the remaining 6 + /// elements hold the shape of the tensor. If the rank of the tensor + /// is lower than 6, the shape is right padded with zeros. If the rank + /// is greater than 6, the head of the shape is truncated. + /// + /// 6 (FULL_NUMERICS): Output a float32/64 tensor of shape [22]. The 1st + /// element is the tensor_id, if provided, and -1 otherwise. The 2nd + /// element is the device_id, if provided, and -1 otherwise. The 3rd + /// element holds the datatype value of the input tensor as according + /// to the enumerated type in tensorflow/core/framework/types.proto. + /// The 4th element holds the rank of the tensor. The 5th to 11th + /// elements hold the shape of the tensor. If the rank of the tensor + /// is lower than 6, the shape is right padded with zeros. If the rank + /// is greater than 6, the head of the shape is truncated. The 12th to + /// 18th elements hold the number of elements, -infs, +infs, nans, + /// denormal floats, negative finite numbers, zeros, and positive + /// finite numbers in the input tensor respectively. The final four + /// elements hold the min value, max value, mean, and variance of the + /// input tensor. + /// + /// 8 (REDUCE_INF_NAN_THREE_SLOTS): Output a float32/64 tensor of shape + /// [3]. The 1st element is -inf if any elements of the input tensor + /// is -inf, or zero otherwise. The 2nd element is +inf if any elements + /// of the input tensor is +inf, or zero otherwise. The 3rd element is + /// nan if any element of the input tensor is nan, or zero otherwise. + /// - tensor_id: Optional. An integer identifier for the tensor being summarized by this op. 
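+  ///
+  /// For example, a minimal sketch using mode 2 (CURT_HEALTH); the `Float`
+  /// output type annotation is an assumption for illustration:
+  ///
+  /// ```swift
+  /// let t: Tensor<Float> = [1, .infinity, 3]
+  /// let summary: Tensor<Float> = _Raw.debugNumericSummaryV2(
+  ///   t, tensorDebugMode: 2, tensorId: 7)
+  /// // summary == [7, 1]: the tensor_id, then 1 because an inf is present.
+  /// ```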
+ @inlinable @inline(__always) + public static func debugNumericSummaryV2( + _ input: Tensor, + tensorDebugMode: Int64 = -1, + tensorId: Int64 = -1 + ) -> Tensor { + switch input.handle.backend { + case .XLA: + let output_device = input.device + let input = Tensor(copying: input, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.debugNumericSummaryV2( + input, tensorDebugMode: tensorDebugMode, tensorId: tensorId), to: output_device) + case .TF_EAGER: + return _RawTFEager.debugNumericSummaryV2( + input, tensorDebugMode: tensorDebugMode, tensorId: tensorId) + } + + } + + /// Decode and Crop a JPEG-encoded image to a uint8 tensor. + /// + /// The attr `channels` indicates the desired number of color channels for the + /// decoded image. + /// + /// Accepted values are: + /// + /// * 0: Use the number of channels in the JPEG-encoded image. + /// * 1: output a grayscale image. + /// * 3: output an RGB image. + /// + /// If needed, the JPEG-encoded image is transformed to match the requested number + /// of color channels. + /// + /// The attr `ratio` allows downscaling the image by an integer factor during + /// decoding. Allowed values are: 1, 2, 4, and 8. This is much faster than + /// downscaling the image later. + /// + /// + /// It is equivalent to a combination of decode and crop, but much faster by only + /// decoding partial jpeg image. + /// + /// - Parameters: + /// - contents: 0-D. The JPEG-encoded image. + /// - crop_window: 1-D. The crop window: [crop_y, crop_x, crop_height, crop_width]. + /// + /// - Attrs: + /// - channels: Number of color channels for the decoded image. + /// - ratio: Downscaling ratio. + /// - fancy_upscaling: If true use a slower but nicer upscaling of the + /// chroma planes (yuv420/422 only). + /// - try_recover_truncated: If true try to recover an image from truncated input. + /// - acceptable_fraction: The minimum required fraction of lines before a truncated + /// input is accepted. + /// - dct_method: string specifying a hint about the algorithm used for + /// decompression. Defaults to "" which maps to a system-specific + /// default. Currently valid values are ["INTEGER_FAST", + /// "INTEGER_ACCURATE"]. The hint may be ignored (e.g., the internal + /// jpeg library changes to a version that does not have that specific + /// option.) + /// + /// - Output image: 3-D with shape `[height, width, channels]`.. + @inlinable @inline(__always) + public static func decodeAndCropJpeg( + contents: StringTensor, + cropWindow: Tensor, + channels: Int64 = 0, + ratio: Int64 = 1, + fancyUpscaling: Bool = true, + tryRecoverTruncated: Bool = false, + acceptableFraction: Double = 1, + dctMethod: String + ) -> Tensor { + switch cropWindow.handle.backend { + case .XLA: + let output_device = cropWindow.device + let cropWindow = Tensor(copying: cropWindow, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.decodeAndCropJpeg( + contents: contents, cropWindow: cropWindow, channels: channels, ratio: ratio, + fancyUpscaling: fancyUpscaling, tryRecoverTruncated: tryRecoverTruncated, + acceptableFraction: acceptableFraction, dctMethod: dctMethod), to: output_device) + case .TF_EAGER: + return _RawTFEager.decodeAndCropJpeg( + contents: contents, cropWindow: cropWindow, channels: channels, ratio: ratio, + fancyUpscaling: fancyUpscaling, tryRecoverTruncated: tryRecoverTruncated, + acceptableFraction: acceptableFraction, dctMethod: dctMethod) + } + + } + + /// Decode web-safe base64-encoded strings. + /// + /// Input may or may not have padding at the end. 
See EncodeBase64 for padding. + /// Web-safe means that input must use - and _ instead of + and /. + /// + /// - Parameter input: Base64 strings to decode. + /// + /// - Output output: Decoded strings. + @inlinable @inline(__always) + public static func decodeBase64( + _ input: StringTensor + ) -> StringTensor { + _RawTFEager.decodeBase64(input) + } + + /// Decode the first frame of a BMP-encoded image to a uint8 tensor. + /// + /// The attr `channels` indicates the desired number of color channels for the + /// decoded image. + /// + /// Accepted values are: + /// + /// * 0: Use the number of channels in the BMP-encoded image. + /// * 3: output an RGB image. + /// * 4: output an RGBA image. + /// + /// - Parameter contents: 0-D. The BMP-encoded image. + /// + /// - Output image: 3-D with shape `[height, width, channels]`. RGB order + @inlinable @inline(__always) + public static func decodeBmp( + contents: StringTensor, + channels: Int64 = 0 + ) -> Tensor { + _RawTFEager.decodeBmp(contents: contents, channels: channels) + } + + /// Convert CSV records to tensors. Each column maps to one tensor. + /// + /// RFC 4180 format is expected for the CSV records. + /// (https://tools.ietf.org/html/rfc4180) + /// Note that we allow leading and trailing spaces with int or float field. + /// + /// - Parameters: + /// - records: Each string is a record/row in the csv and all records should have + /// the same format. + /// - record_defaults: One tensor per column of the input record, with either a + /// scalar default value for that column or an empty vector if the column is + /// required. + /// + /// - Attrs: + /// - field_delim: char delimiter to separate fields in a record. + /// - use_quote_delim: If false, treats double quotation marks as regular + /// characters inside of the string fields (ignoring RFC 4180, Section 2, + /// Bullet 5). + /// - na_value: Additional string to recognize as NA/NaN. + /// + /// - Output output: Each tensor will have the same shape as records. + @inlinable @inline(__always) + public static func decodeCSV( + records: StringTensor, + recordDefaults: OutType, + fieldDelim: String = ",", + useQuoteDelim: Bool = true, + naValue: String, + selectCols: [Int32] + ) -> OutType { + _RawTFEager.decodeCSV( + records: records, recordDefaults: recordDefaults, fieldDelim: fieldDelim, + useQuoteDelim: useQuoteDelim, naValue: naValue, selectCols: selectCols) + } + + /// Decompress strings. + /// + /// This op decompresses each element of the `bytes` input `Tensor`, which + /// is assumed to be compressed using the given `compression_type`. + /// + /// The `output` is a string `Tensor` of the same shape as `bytes`, + /// each element containing the decompressed data from the corresponding + /// element in `bytes`. + /// + /// - Parameter bytes: A Tensor of string which is compressed. + /// + /// - Attr compression_type: A scalar containing either (i) the empty string (no + /// compression), (ii) "ZLIB", or (iii) "GZIP". + /// + /// - Output output: A Tensor with the same shape as input `bytes`, uncompressed + /// from bytes. + @inlinable @inline(__always) + public static func decodeCompressed( + bytes: StringTensor, + compressionType: String + ) -> StringTensor { + _RawTFEager.decodeCompressed(bytes: bytes, compressionType: compressionType) + } + + /// Decode the frame(s) of a GIF-encoded image to a uint8 tensor. + /// + /// GIF images with frame or transparency compression are not supported. 
+ /// On Linux and MacOS systems, convert animated GIFs from compressed to + /// uncompressed by running: + /// + /// convert $src.gif -coalesce $dst.gif + /// + /// This op also supports decoding JPEGs and PNGs, though it is cleaner to use + /// `tf.image.decode_image`. + /// + /// - Parameter contents: 0-D. The GIF-encoded image. + /// + /// - Output image: 4-D with shape `[num_frames, height, width, 3]`. RGB channel order. + @inlinable @inline(__always) + public static func decodeGif( + contents: StringTensor + ) -> Tensor { + _RawTFEager.decodeGif(contents: contents) + } + + /// Convert JSON-encoded Example records to binary protocol buffer strings. + /// + /// This op translates a tensor containing Example records, encoded using + /// the [standard JSON + /// mapping](https://developers.google.com/protocol-buffers/docs/proto3#json), + /// into a tensor containing the same records encoded as binary protocol + /// buffers. The resulting tensor can then be fed to any of the other + /// Example-parsing ops. + /// + /// - Parameter json_examples: Each string is a JSON object serialized according to the JSON + /// mapping of the Example proto. + /// + /// - Output binary_examples: Each string is a binary Example protocol buffer corresponding + /// to the respective element of `json_examples`. + @inlinable @inline(__always) + public static func decodeJSONExample( + jsonExamples: StringTensor + ) -> StringTensor { + _RawTFEager.decodeJSONExample(jsonExamples: jsonExamples) + } + + /// Decode a JPEG-encoded image to a uint8 tensor. + /// + /// The attr `channels` indicates the desired number of color channels for the + /// decoded image. + /// + /// Accepted values are: + /// + /// * 0: Use the number of channels in the JPEG-encoded image. + /// * 1: output a grayscale image. + /// * 3: output an RGB image. + /// + /// If needed, the JPEG-encoded image is transformed to match the requested number + /// of color channels. + /// + /// The attr `ratio` allows downscaling the image by an integer factor during + /// decoding. Allowed values are: 1, 2, 4, and 8. This is much faster than + /// downscaling the image later. + /// + /// + /// This op also supports decoding PNGs and non-animated GIFs since the interface is + /// the same, though it is cleaner to use `tf.image.decode_image`. + /// + /// - Parameter contents: 0-D. The JPEG-encoded image. + /// + /// - Attrs: + /// - channels: Number of color channels for the decoded image. + /// - ratio: Downscaling ratio. + /// - fancy_upscaling: If true use a slower but nicer upscaling of the + /// chroma planes (yuv420/422 only). + /// - try_recover_truncated: If true try to recover an image from truncated input. + /// - acceptable_fraction: The minimum required fraction of lines before a truncated + /// input is accepted. + /// - dct_method: string specifying a hint about the algorithm used for + /// decompression. Defaults to "" which maps to a system-specific + /// default. Currently valid values are ["INTEGER_FAST", + /// "INTEGER_ACCURATE"]. The hint may be ignored (e.g., the internal + /// jpeg library changes to a version that does not have that specific + /// option.) + /// + /// - Output image: 3-D with shape `[height, width, channels]`.. 
+ @inlinable @inline(__always) + public static func decodeJpeg( + contents: StringTensor, + channels: Int64 = 0, + ratio: Int64 = 1, + fancyUpscaling: Bool = true, + tryRecoverTruncated: Bool = false, + acceptableFraction: Double = 1, + dctMethod: String + ) -> Tensor { + _RawTFEager.decodeJpeg( + contents: contents, channels: channels, ratio: ratio, fancyUpscaling: fancyUpscaling, + tryRecoverTruncated: tryRecoverTruncated, acceptableFraction: acceptableFraction, + dctMethod: dctMethod) + } + + /// Reinterpret the bytes of a string as a vector of numbers. + /// + /// - Parameters: + /// - input_bytes: Tensor of string to be decoded. + /// - fixed_length: Length in bytes for each element of the decoded output. Must be a multiple + /// of the size of the output type. + /// + /// - Attr little_endian: Whether the input `input_bytes` is in little-endian order. Ignored for + /// `out_type` values that are stored in a single byte, like `uint8` + /// + /// - Output output: A Tensor with one more dimension than the input `bytes`. The added dimension + /// will have size equal to the length of the elements of `bytes` divided by the + /// number of bytes to represent `out_type`. + @inlinable @inline(__always) + public static func decodePaddedRaw( + inputBytes: StringTensor, + fixedLength: Tensor, + littleEndian: Bool = true + ) -> Tensor { + switch fixedLength.handle.backend { + case .XLA: + let output_device = fixedLength.device + let fixedLength = Tensor(copying: fixedLength, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.decodePaddedRaw( + inputBytes: inputBytes, fixedLength: fixedLength, littleEndian: littleEndian), + to: output_device) + case .TF_EAGER: + return _RawTFEager.decodePaddedRaw( + inputBytes: inputBytes, fixedLength: fixedLength, littleEndian: littleEndian) + } + + } + + /// Decode a PNG-encoded image to a uint8 or uint16 tensor. + /// + /// The attr `channels` indicates the desired number of color channels for the + /// decoded image. + /// + /// Accepted values are: + /// + /// * 0: Use the number of channels in the PNG-encoded image. + /// * 1: output a grayscale image. + /// * 3: output an RGB image. + /// * 4: output an RGBA image. + /// + /// If needed, the PNG-encoded image is transformed to match the requested number + /// of color channels. + /// + /// This op also supports decoding JPEGs and non-animated GIFs since the interface + /// is the same, though it is cleaner to use `tf.image.decode_image`. + /// + /// - Parameter contents: 0-D. The PNG-encoded image. + /// + /// - Attr channels: Number of color channels for the decoded image. + /// + /// - Output image: 3-D with shape `[height, width, channels]`. + @inlinable @inline(__always) + public static func decodePng( + contents: StringTensor, + channels: Int64 = 0 + ) -> Tensor { + _RawTFEager.decodePng(contents: contents, channels: channels) + } + + /// The op extracts fields from a serialized protocol buffers message into tensors. + /// + /// The `decode_proto` op extracts fields from a serialized protocol buffers + /// message into tensors. The fields in `field_names` are decoded and converted + /// to the corresponding `output_types` if possible. + /// + /// A `message_type` name must be provided to give context for the field names. + /// The actual message descriptor can be looked up either in the linked-in + /// descriptor pool or a filename provided by the caller using the + /// `descriptor_source` attribute. + /// + /// Each output tensor is a dense tensor. 
This means that it is padded to hold + /// the largest number of repeated elements seen in the input minibatch. (The + /// shape is also padded by one to prevent zero-sized dimensions). The actual + /// repeat counts for each example in the minibatch can be found in the `sizes` + /// output. In many cases the output of `decode_proto` is fed immediately into + /// tf.squeeze if missing values are not a concern. When using tf.squeeze, always + /// pass the squeeze dimension explicitly to avoid surprises. + /// + /// For the most part, the mapping between Proto field types and TensorFlow dtypes + /// is straightforward. However, there are a few special cases: + /// + /// - A proto field that contains a submessage or group can only be converted + /// to `DT_STRING` (the serialized submessage). This is to reduce the complexity + /// of the API. The resulting string can be used as input to another instance of + /// the decode_proto op. + /// + /// - TensorFlow lacks support for unsigned integers. The ops represent uint64 + /// types as a `DT_INT64` with the same twos-complement bit pattern (the obvious + /// way). Unsigned int32 values can be represented exactly by specifying type + /// `DT_INT64`, or using twos-complement if the caller specifies `DT_INT32` in + /// the `output_types` attribute. + /// + /// Both binary and text proto serializations are supported, and can be + /// chosen using the `format` attribute. + /// + /// The `descriptor_source` attribute selects the source of protocol + /// descriptors to consult when looking up `message_type`. This may be: + /// + /// - An empty string or "local://", in which case protocol descriptors are + /// created for C++ (not Python) proto definitions linked to the binary. + /// + /// - A file, in which case protocol descriptors are created from the file, + /// which is expected to contain a `FileDescriptorSet` serialized as a string. + /// NOTE: You can build a `descriptor_source` file using the `--descriptor_set_out` + /// and `--include_imports` options to the protocol compiler `protoc`. + /// + /// - A "bytes://", in which protocol descriptors are created from ``, + /// which is expected to be a `FileDescriptorSet` serialized as a string. + /// + /// - Parameter bytes: Tensor of serialized protos with shape `batch_shape`. + /// + /// - Attrs: + /// - message_type: Name of the proto message type to decode. + /// - field_names: List of strings containing proto field names. An extension field can be decoded + /// by using its full name, e.g. EXT_PACKAGE.EXT_FIELD_NAME. + /// - output_types: List of TF types to use for the respective field in field_names. + /// - descriptor_source: Either the special value `local://` or a path to a file containing + /// a serialized `FileDescriptorSet`. + /// - message_format: Either `binary` or `text`. + /// - sanitize: Whether to sanitize the result or not. + /// + /// - Outputs: + /// - sizes: Tensor of int32 with shape `[batch_shape, len(field_names)]`. + /// Each entry is the number of values found for the corresponding field. + /// Optional fields may have 0 or 1 values. + /// - values: List of tensors containing values for the corresponding field. + /// `values[i]` has datatype `output_types[i]` + /// and shape `[batch_shape, max(sizes[...,i])]`. 
+ @inlinable @inline(__always) + public static func decodeProtoV2( + bytes: StringTensor, + messageType: String, + fieldNames: [String], + descriptorSource: String = "local://", + messageFormat: String = "binary", + sanitize: Bool = false + ) -> (sizes: Tensor, values: OutputTypes) { + _RawTFEager.decodeProtoV2( + bytes: bytes, messageType: messageType, fieldNames: fieldNames, + descriptorSource: descriptorSource, messageFormat: messageFormat, sanitize: sanitize) + } + + /// Reinterpret the bytes of a string as a vector of numbers. + /// + /// - Parameter bytes: All the elements must have the same length. + /// + /// - Attr little_endian: Whether the input `bytes` are in little-endian order. + /// Ignored for `out_type` values that are stored in a single byte like + /// `uint8`. + /// + /// - Output output: A Tensor with one more dimension than the input `bytes`. The + /// added dimension will have size equal to the length of the elements + /// of `bytes` divided by the number of bytes to represent `out_type`. + @inlinable @inline(__always) + public static func decodeRaw( + bytes: StringTensor, + littleEndian: Bool = true + ) -> Tensor { + _RawTFEager.decodeRaw(bytes: bytes, littleEndian: littleEndian) + } + + /// Decode a 16-bit PCM WAV file to a float tensor. + /// + /// The -32768 to 32767 signed 16-bit values will be scaled to -1.0 to 1.0 in float. + /// + /// When desired_channels is set, if the input contains fewer channels than this + /// then the last channel will be duplicated to give the requested number, else if + /// the input has more channels than requested then the additional channels will be + /// ignored. + /// + /// If desired_samples is set, then the audio will be cropped or padded with zeroes + /// to the requested length. + /// + /// The first output contains a Tensor with the content of the audio samples. The + /// lowest dimension will be the number of channels, and the second will be the + /// number of samples. For example, a ten-sample-long stereo WAV file should give an + /// output shape of [10, 2]. + /// + /// - Parameter contents: The WAV-encoded audio, usually from a file. + /// + /// - Attrs: + /// - desired_channels: Number of sample channels wanted. + /// - desired_samples: Length of audio requested. + /// + /// - Outputs: + /// - audio: 2-D with shape `[length, channels]`. + /// - sample_rate: Scalar holding the sample rate found in the WAV header. + @inlinable @inline(__always) + public static func decodeWav( + contents: StringTensor, + desiredChannels: Int64 = -1, + desiredSamples: Int64 = -1 + ) -> (audio: Tensor, sampleRate: Tensor) { + _RawTFEager.decodeWav( + contents: contents, desiredChannels: desiredChannels, desiredSamples: desiredSamples) + } + + /// Makes a copy of `x`. + /// + /// - Parameter x: The source tensor of type `T`. + /// + /// - Output y: y: A `Tensor` of type `T`. A copy of `x`. Guaranteed that `y` + /// is not an alias of `x`. + @inlinable @inline(__always) + public static func deepCopy( + _ x: Tensor + ) -> Tensor { + switch x.handle.backend { + case .XLA: + let output_device = x.device + let x = Tensor(copying: x, to: .defaultTFEager) + return Tensor(copying: _RawTFEager.deepCopy(x), to: output_device) + case .TF_EAGER: + return _RawTFEager.deepCopy(x) + } + + } + + /// A container for an iterator resource. + /// + /// - Parameters: + /// - handle: A handle to the iterator to delete. + /// - deleter: A variant deleter. 
+ @inlinable @inline(__always) + public static func deleteIterator( + handle: ResourceHandle, + deleter: VariantHandle + ) { + _RawTFEager.deleteIterator(handle: handle, deleter: deleter) + } + + @inlinable @inline(__always) + public static func deleteMemoryCache( + handle: ResourceHandle, + deleter: VariantHandle + ) { + _RawTFEager.deleteMemoryCache(handle: handle, deleter: deleter) + } + + /// A container for an iterator resource. + /// + /// - Parameters: + /// - multi_device_iterator: A handle to the multi device iterator to delete. + /// - iterators: A list of iterator handles (unused). This is added so that automatic control dependencies get added during function tracing that ensure this op runs after all the dependent iterators are deleted. + /// - deleter: A variant deleter. + @inlinable @inline(__always) + public static func deleteMultiDeviceIterator( + multiDeviceIterator: ResourceHandle, + iterators: [ResourceHandle], + deleter: VariantHandle + ) { + _RawTFEager.deleteMultiDeviceIterator( + multiDeviceIterator: multiDeviceIterator, iterators: iterators, deleter: deleter) + } + + @inlinable @inline(__always) + public static func deleteRandomSeedGenerator( + handle: ResourceHandle, + deleter: VariantHandle + ) { + _RawTFEager.deleteRandomSeedGenerator(handle: handle, deleter: deleter) + } + + /// Delete the tensor specified by its handle in the session. + /// + /// - Parameter handle: The handle for a tensor stored in the session state. + @inlinable @inline(__always) + public static func deleteSessionTensor( + handle: StringTensor + ) { + _RawTFEager.deleteSessionTensor(handle: handle) + } + + /// Converts a dense tensor to a (possibly batched) CSRSparseMatrix. + /// + /// - Parameters: + /// - dense_input: A Dense tensor. + /// - indices: Indices of nonzero elements. + /// + /// - Output sparse_output: A (possibly batched) CSRSparseMatrix. + @inlinable @inline(__always) + public static func denseToCSRSparseMatrix( + denseInput: Tensor, + indices: Tensor + ) -> VariantHandle { + _RawTFEager.denseToCSRSparseMatrix(denseInput: denseInput, indices: indices) + } + + /// Applies set operation along last dimension of 2 `Tensor` inputs. + /// + /// See SetOperationOp::SetOperationFromContext for values of `set_operation`. + /// + /// Output `result` is a `SparseTensor` represented by `result_indices`, + /// `result_values`, and `result_shape`. For `set1` and `set2` ranked `n`, this + /// has rank `n` and the same 1st `n-1` dimensions as `set1` and `set2`. The `nth` + /// dimension contains the result of `set_operation` applied to the corresponding + /// `[0...n-1]` dimension of `set`. + /// + /// - Parameters: + /// - set1: `Tensor` with rank `n`. 1st `n-1` dimensions must be the same as `set2`. + /// Dimension `n` contains values in a set, duplicates are allowed but ignored. + /// - set2: `Tensor` with rank `n`. 1st `n-1` dimensions must be the same as `set1`. + /// Dimension `n` contains values in a set, duplicates are allowed but ignored. + /// + /// - Outputs: + /// - result_indices: 2D indices of a `SparseTensor`. + /// - result_values: 1D values of a `SparseTensor`. + /// - result_shape: 1D `Tensor` shape of a `SparseTensor`. `result_shape[0...n-1]` is + /// the same as the 1st `n-1` dimensions of `set1` and `set2`, `result_shape[n]` + /// is the max result set size across all `0...n-1` dimensions. 
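+  ///
+  /// A minimal sketch (illustrative values; "intersection" is one of the
+  /// supported set operations, alongside "union", "a-b", and "b-a"):
+  ///
+  /// ```swift
+  /// let a: Tensor<Int32> = [[1, 2, 3], [4, 5, 6]]
+  /// let b: Tensor<Int32> = [[2, 3, 9], [5, 7, 8]]
+  /// let (indices, values, shape) = _Raw.denseToDenseSetOperation(
+  ///   set1: a, set2: b, setOperation: "intersection")
+  /// // values == [2, 3, 5]; shape == [2, 2]
+  /// ```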
+ @inlinable @inline(__always) + public static func denseToDenseSetOperation( + set1: Tensor, + set2: Tensor, + setOperation: String, + validateIndices: Bool = true + ) -> (resultIndices: Tensor, resultValues: Tensor, resultShape: Tensor) { + _RawTFEager.denseToDenseSetOperation( + set1: set1, set2: set2, setOperation: setOperation, validateIndices: validateIndices) + } + + /// Applies set operation along last dimension of 2 `Tensor` inputs. + /// + /// See SetOperationOp::SetOperationFromContext for values of `set_operation`. + /// + /// Output `result` is a `SparseTensor` represented by `result_indices`, + /// `result_values`, and `result_shape`. For `set1` and `set2` ranked `n`, this + /// has rank `n` and the same 1st `n-1` dimensions as `set1` and `set2`. The `nth` + /// dimension contains the result of `set_operation` applied to the corresponding + /// `[0...n-1]` dimension of `set`. + /// + /// - Parameters: + /// - set1: `Tensor` with rank `n`. 1st `n-1` dimensions must be the same as `set2`. + /// Dimension `n` contains values in a set, duplicates are allowed but ignored. + /// - set2: `Tensor` with rank `n`. 1st `n-1` dimensions must be the same as `set1`. + /// Dimension `n` contains values in a set, duplicates are allowed but ignored. + /// + /// - Outputs: + /// - result_indices: 2D indices of a `SparseTensor`. + /// - result_values: 1D values of a `SparseTensor`. + /// - result_shape: 1D `Tensor` shape of a `SparseTensor`. `result_shape[0...n-1]` is + /// the same as the 1st `n-1` dimensions of `set1` and `set2`, `result_shape[n]` + /// is the max result set size across all `0...n-1` dimensions. + @inlinable @inline(__always) + public static func denseToDenseSetOperation( + set1: StringTensor, + set2: StringTensor, + setOperation: String, + validateIndices: Bool = true + ) -> (resultIndices: Tensor, resultValues: StringTensor, resultShape: Tensor) { + _RawTFEager.denseToDenseSetOperation( + set1: set1, set2: set2, setOperation: setOperation, validateIndices: validateIndices) + } + + /// Creates a dataset that batches input elements into a SparseTensor. + /// + /// - Parameters: + /// - input_dataset: A handle to an input dataset. Must have a single component. + /// - batch_size: A scalar representing the number of elements to accumulate in a + /// batch. + /// - row_shape: A vector representing the dense shape of each row in the produced + /// SparseTensor. The shape may be partially specified, using `-1` to indicate + /// that a particular dimension should use the maximum size of all batch elements. + @inlinable @inline(__always) + public static func denseToSparseBatchDataset( + inputDataset: VariantHandle, + batchSize: Tensor, + rowShape: Tensor, + outputTypes: [TensorDataType], + outputShapes: [TensorShape?] + ) -> VariantHandle { + _RawTFEager.denseToSparseBatchDataset( + inputDataset: inputDataset, batchSize: batchSize, rowShape: rowShape, + outputTypes: outputTypes, outputShapes: outputShapes) + } + + /// Applies set operation along last dimension of `Tensor` and `SparseTensor`. + /// + /// See SetOperationOp::SetOperationFromContext for values of `set_operation`. + /// + /// Input `set2` is a `SparseTensor` represented by `set2_indices`, `set2_values`, + /// and `set2_shape`. For `set2` ranked `n`, 1st `n-1` dimensions must be the same + /// as `set1`. Dimension `n` contains values in a set, duplicates are allowed but + /// ignored. + /// + /// If `validate_indices` is `True`, this op validates the order and range of `set2` + /// indices. 
+ /// + /// Output `result` is a `SparseTensor` represented by `result_indices`, + /// `result_values`, and `result_shape`. For `set1` and `set2` ranked `n`, this + /// has rank `n` and the same 1st `n-1` dimensions as `set1` and `set2`. The `nth` + /// dimension contains the result of `set_operation` applied to the corresponding + /// `[0...n-1]` dimension of `set`. + /// + /// - Parameters: + /// - set1: `Tensor` with rank `n`. 1st `n-1` dimensions must be the same as `set2`. + /// Dimension `n` contains values in a set, duplicates are allowed but ignored. + /// - set2_indices: 2D `Tensor`, indices of a `SparseTensor`. Must be in row-major + /// order. + /// - set2_values: 1D `Tensor`, values of a `SparseTensor`. Must be in row-major + /// order. + /// - set2_shape: 1D `Tensor`, shape of a `SparseTensor`. `set2_shape[0...n-1]` must + /// be the same as the 1st `n-1` dimensions of `set1`, `result_shape[n]` is the + /// max set size across `n-1` dimensions. + /// + /// - Outputs: + /// - result_indices: 2D indices of a `SparseTensor`. + /// - result_values: 1D values of a `SparseTensor`. + /// - result_shape: 1D `Tensor` shape of a `SparseTensor`. `result_shape[0...n-1]` is + /// the same as the 1st `n-1` dimensions of `set1` and `set2`, `result_shape[n]` + /// is the max result set size across all `0...n-1` dimensions. + @inlinable @inline(__always) + public static func denseToSparseSetOperation( + set1: Tensor, + set2Indices: Tensor, + set2Values: Tensor, + set2Shape: Tensor, + setOperation: String, + validateIndices: Bool = true + ) -> (resultIndices: Tensor, resultValues: Tensor, resultShape: Tensor) { + _RawTFEager.denseToSparseSetOperation( + set1: set1, set2Indices: set2Indices, set2Values: set2Values, set2Shape: set2Shape, + setOperation: setOperation, validateIndices: validateIndices) + } + + /// Applies set operation along last dimension of `Tensor` and `SparseTensor`. + /// + /// See SetOperationOp::SetOperationFromContext for values of `set_operation`. + /// + /// Input `set2` is a `SparseTensor` represented by `set2_indices`, `set2_values`, + /// and `set2_shape`. For `set2` ranked `n`, 1st `n-1` dimensions must be the same + /// as `set1`. Dimension `n` contains values in a set, duplicates are allowed but + /// ignored. + /// + /// If `validate_indices` is `True`, this op validates the order and range of `set2` + /// indices. + /// + /// Output `result` is a `SparseTensor` represented by `result_indices`, + /// `result_values`, and `result_shape`. For `set1` and `set2` ranked `n`, this + /// has rank `n` and the same 1st `n-1` dimensions as `set1` and `set2`. The `nth` + /// dimension contains the result of `set_operation` applied to the corresponding + /// `[0...n-1]` dimension of `set`. + /// + /// - Parameters: + /// - set1: `Tensor` with rank `n`. 1st `n-1` dimensions must be the same as `set2`. + /// Dimension `n` contains values in a set, duplicates are allowed but ignored. + /// - set2_indices: 2D `Tensor`, indices of a `SparseTensor`. Must be in row-major + /// order. + /// - set2_values: 1D `Tensor`, values of a `SparseTensor`. Must be in row-major + /// order. + /// - set2_shape: 1D `Tensor`, shape of a `SparseTensor`. `set2_shape[0...n-1]` must + /// be the same as the 1st `n-1` dimensions of `set1`, `result_shape[n]` is the + /// max set size across `n-1` dimensions. + /// + /// - Outputs: + /// - result_indices: 2D indices of a `SparseTensor`. + /// - result_values: 1D values of a `SparseTensor`. + /// - result_shape: 1D `Tensor` shape of a `SparseTensor`. 
`result_shape[0...n-1]` is
+ /// the same as the 1st `n-1` dimensions of `set1` and `set2`, `result_shape[n]`
+ /// is the max result set size across all `0...n-1` dimensions.
+ @inlinable @inline(__always)
+ public static func denseToSparseSetOperation(
+ set1: StringTensor,
+ set2Indices: Tensor,
+ set2Values: StringTensor,
+ set2Shape: Tensor,
+ setOperation: String,
+ validateIndices: Bool = true
+ ) -> (resultIndices: Tensor, resultValues: StringTensor, resultShape: Tensor) {
+ _RawTFEager.denseToSparseSetOperation(
+ set1: set1, set2Indices: set2Indices, set2Values: set2Values, set2Shape: set2Shape,
+ setOperation: setOperation, validateIndices: validateIndices)
+ }
+
+ /// DepthToSpace for tensors of type T.
+ ///
+ /// Rearranges data from depth into blocks of spatial data.
+ /// This is the reverse transformation of SpaceToDepth. More specifically,
+ /// this op outputs a copy of the input tensor where values from the `depth`
+ /// dimension are moved in spatial blocks to the `height` and `width` dimensions.
+ /// The attr `block_size` indicates the input block size and how the data is moved.
+ ///
+ /// * Chunks of data of size `block_size * block_size` from depth are rearranged
+ /// into non-overlapping blocks of size `block_size x block_size`.
+ /// * The width of the output tensor is `input_width * block_size`, whereas the
+ /// height is `input_height * block_size`.
+ /// * The Y, X coordinates within each block of the output image are determined
+ /// by the high order component of the input channel index.
+ /// * The depth of the input tensor must be divisible by
+ /// `block_size * block_size`.
+ ///
+ /// The `data_format` attr specifies the layout of the input and output tensors
+ /// with the following options:
+ /// "NHWC": `[ batch, height, width, channels ]`
+ /// "NCHW": `[ batch, channels, height, width ]`
+ /// "NCHW_VECT_C":
+ /// `qint8 [ batch, channels / 4, height, width, 4 ]`
+ ///
+ /// It is useful to consider the operation as transforming a 6-D Tensor.
+ /// e.g. for data_format = NHWC,
+ /// Each element in the input tensor can be specified via 6 coordinates,
+ /// ordered by decreasing memory layout significance as:
+ /// n,iY,iX,bY,bX,oC (where n=batch index, iX, iY means X or Y coordinates
+ /// within the input image, bX, bY means coordinates
+ /// within the output block, oC means output channels).
+ /// The output would be the input transposed to the following layout:
+ /// n,iY,bY,iX,bX,oC
+ ///
+ /// This operation is useful for resizing the activations between convolutions
+ /// (but keeping all data), e.g. instead of pooling. It is also useful for training
+ /// purely convolutional models.
+ ///
+ /// For example, given an input of shape `[1, 1, 1, 4]`, data_format = "NHWC" and
+ /// block_size = 2:
+ ///
+ /// ```
+ /// x = [[[[1, 2, 3, 4]]]]
+ /// ```
+ ///
+ /// This operation will output a tensor of shape `[1, 2, 2, 1]`:
+ ///
+ /// ```
+ /// [[[[1], [2]],
+ /// [[3], [4]]]]
+ /// ```
+ ///
+ /// Here, the input has a batch of 1 and each batch element has shape `[1, 1, 4]`,
+ /// the corresponding output will have 2x2 elements and will have a depth of
+ /// 1 channel (1 = `4 / (block_size * block_size)`).
+ /// The output element shape is `[2, 2, 1]`.
+ ///
+ /// For an input tensor with larger depth, here of shape `[1, 1, 1, 12]`, e.g.
+ ///
+ /// ```
+ /// x = [[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]]
+ /// ```
+ ///
+ /// This operation, for a block size of 2, will return the following tensor of shape
+ /// `[1, 2, 2, 3]`:
+ ///
+ /// ```
+ /// [[[[1, 2, 3], [4, 5, 6]],
+ /// [[7, 8, 9], [10, 11, 12]]]]
+ /// ```
+ ///
+ /// Similarly, for the following input of shape `[1, 2, 2, 4]`, and a block size of 2:
+ ///
+ /// ```
+ /// x = [[[[1, 2, 3, 4],
+ /// [5, 6, 7, 8]],
+ /// [[9, 10, 11, 12],
+ /// [13, 14, 15, 16]]]]
+ /// ```
+ ///
+ /// the operator will return the following tensor of shape `[1, 4, 4, 1]`:
+ ///
+ /// ```
+ /// x = [[[ [1], [2], [5], [6]],
+ /// [ [3], [4], [7], [8]],
+ /// [ [9], [10], [13], [14]],
+ /// [ [11], [12], [15], [16]]]]
+ /// ```
+ ///
+ /// - Attr block_size: The size of the spatial block, same as in Space2Depth.
+ @inlinable @inline(__always)
+ public static func depthToSpace(
+ _ input: Tensor,
+ blockSize: Int64,
+ dataFormat: DataFormat2 = .nhwc
+ ) -> Tensor {
+ switch input.handle.backend {
+ case .XLA:
+ let output_device = input.device
+ let input = Tensor(copying: input, to: .defaultTFEager)
+ return Tensor(
+ copying: _RawTFEager.depthToSpace(input, blockSize: blockSize, dataFormat: dataFormat),
+ to: output_device)
+ case .TF_EAGER:
+ return _RawTFEager.depthToSpace(input, blockSize: blockSize, dataFormat: dataFormat)
+ }
+
+ }
+
+ /// Computes a 2-D depthwise convolution given 4-D `input` and `filter` tensors.
+ ///
+ /// Given an input tensor of shape `[batch, in_height, in_width, in_channels]`
+ /// and a filter / kernel tensor of shape
+ /// `[filter_height, filter_width, in_channels, channel_multiplier]`, containing
+ /// `in_channels` convolutional filters of depth 1, `depthwise_conv2d` applies
+ /// a different filter to each input channel (expanding from 1 channel to
+ /// `channel_multiplier` channels for each), then concatenates the results
+ /// together. Thus, the output has `in_channels * channel_multiplier` channels.
+ ///
+ /// ```
+ /// for k in 0..in_channels-1
+ /// for q in 0..channel_multiplier-1
+ /// output[b, i, j, k * channel_multiplier + q] =
+ /// sum_{di, dj} input[b, strides[1] * i + di, strides[2] * j + dj, k] *
+ /// filter[di, dj, k, q]
+ /// ```
+ ///
+ /// Must have `strides[0] = strides[3] = 1`. For the most common case of the same
+ /// horizontal and vertical strides, `strides = [1, stride, stride, 1]`.
+ ///
+ /// - Attrs:
+ /// - strides: 1-D of length 4. The stride of the sliding window for each dimension
+ /// of `input`.
+ /// - padding: The type of padding algorithm to use.
+ /// - data_format: Specify the data format of the input and output data. With the
+ /// default format "NHWC", the data is stored in the order of:
+ /// [batch, height, width, channels].
+ /// Alternatively, the format could be "NCHW", the data storage order of:
+ /// [batch, channels, height, width].
+ /// - dilations: 1-D tensor of length 4. The dilation factor for each dimension of
+ /// `input`. If set to k > 1, there will be k-1 skipped cells between each filter
+ /// element on that dimension. The dimension order is determined by the value of
+ /// `data_format`, see above for details. Dilations in the batch and depth
+ /// dimensions must be 1.
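+ ///
+ /// For example, a minimal call through this wrapper might look as follows
+ /// (the shapes, values, and use of `.same` padding are illustrative
+ /// assumptions, not part of this change):
+ ///
+ /// ```swift
+ /// // 1 image of size 4x4 with 2 input channels; 2x2 filter, channel multiplier 3.
+ /// let input = Tensor<Float>(randomNormal: [1, 4, 4, 2])
+ /// let filter = Tensor<Float>(randomNormal: [2, 2, 2, 3])
+ /// let output = _Raw.depthwiseConv2dNative(
+ ///   input, filter: filter, strides: [1, 1, 1, 1], padding: .same)
+ /// // With "SAME" padding: output.shape == [1, 4, 4, 6],
+ /// // i.e. in_channels (2) * channel_multiplier (3) = 6 output channels.
+ /// ```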
+ @inlinable @inline(__always) + public static func depthwiseConv2dNative( + _ input: Tensor, + filter: Tensor, + strides: [Int32], + padding: Padding, + dataFormat: DataFormat = .nhwc, + dilations: [Int32] = [1, 1, 1, 1] + ) -> Tensor { + switch commonBackend(input.handle.backend, filter.handle.backend) { + case .XLA: + return _RawXLA.depthwiseConv2dNative( + input, filter: filter, strides: strides, padding: padding, dataFormat: dataFormat, + dilations: dilations) + case .TF_EAGER: + return _RawTFEager.depthwiseConv2dNative( + input, filter: filter, strides: strides, padding: padding, dataFormat: dataFormat, + dilations: dilations) + } + + } + + /// Computes the gradients of depthwise convolution with respect to the filter. + /// + /// - Parameters: + /// - input: 4-D with shape based on `data_format`. For example, if + /// `data_format` is 'NHWC' then `input` is a 4-D `[batch, in_height, + /// in_width, in_channels]` tensor. + /// - filter_sizes: An integer vector representing the tensor shape of `filter`, + /// where `filter` is a 4-D + /// `[filter_height, filter_width, in_channels, depthwise_multiplier]` tensor. + /// - out_backprop: 4-D with shape based on `data_format`. + /// For example, if `data_format` is 'NHWC' then + /// out_backprop shape is `[batch, out_height, out_width, out_channels]`. + /// Gradients w.r.t. the output of the convolution. + /// + /// - Attrs: + /// - strides: The stride of the sliding window for each dimension of the input + /// of the convolution. + /// - padding: The type of padding algorithm to use. + /// - data_format: Specify the data format of the input and output data. With the + /// default format "NHWC", the data is stored in the order of: + /// [batch, height, width, channels]. + /// Alternatively, the format could be "NCHW", the data storage order of: + /// [batch, channels, height, width]. + /// - dilations: 1-D tensor of length 4. The dilation factor for each dimension of + /// `input`. If set to k > 1, there will be k-1 skipped cells between each filter + /// element on that dimension. The dimension order is determined by the value of + /// `data_format`, see above for details. Dilations in the batch and depth + /// dimensions must be 1. + /// + /// - Output output: 4-D with shape + /// `[filter_height, filter_width, in_channels, out_channels]`. Gradient w.r.t. + /// the `filter` input of the convolution. + @inlinable @inline(__always) + public static func depthwiseConv2dNativeBackpropFilter( + _ input: Tensor, + filterSizes: Tensor, + outBackprop: Tensor, + strides: [Int32], + padding: Padding, + dataFormat: DataFormat = .nhwc, + dilations: [Int32] = [1, 1, 1, 1] + ) -> Tensor { + switch commonBackend( + commonBackend(input.handle.backend, filterSizes.handle.backend), outBackprop.handle.backend) + { + case .XLA: + return _RawXLA.depthwiseConv2dNativeBackpropFilter( + input, filterSizes: filterSizes, outBackprop: outBackprop, strides: strides, + padding: padding, dataFormat: dataFormat, dilations: dilations) + case .TF_EAGER: + return _RawTFEager.depthwiseConv2dNativeBackpropFilter( + input, filterSizes: filterSizes, outBackprop: outBackprop, strides: strides, + padding: padding, dataFormat: dataFormat, dilations: dilations) + } + + } + + /// Computes the gradients of depthwise convolution with respect to the input. + /// + /// - Parameters: + /// - input_sizes: An integer vector representing the shape of `input`, based + /// on `data_format`. 
For example, if `data_format` is 'NHWC' then + /// `input` is a 4-D `[batch, height, width, channels]` tensor. + /// - filter: 4-D with shape + /// `[filter_height, filter_width, in_channels, depthwise_multiplier]`. + /// - out_backprop: 4-D with shape based on `data_format`. + /// For example, if `data_format` is 'NHWC' then + /// out_backprop shape is `[batch, out_height, out_width, out_channels]`. + /// Gradients w.r.t. the output of the convolution. + /// + /// - Attrs: + /// - strides: The stride of the sliding window for each dimension of the input + /// of the convolution. + /// - padding: The type of padding algorithm to use. + /// - data_format: Specify the data format of the input and output data. With the + /// default format "NHWC", the data is stored in the order of: + /// [batch, height, width, channels]. + /// Alternatively, the format could be "NCHW", the data storage order of: + /// [batch, channels, height, width]. + /// - dilations: 1-D tensor of length 4. The dilation factor for each dimension of + /// `input`. If set to k > 1, there will be k-1 skipped cells between each filter + /// element on that dimension. The dimension order is determined by the value of + /// `data_format`, see above for details. Dilations in the batch and depth + /// dimensions must be 1. + /// + /// - Output output: 4-D with shape according to `data_format`. For example, if + /// `data_format` is 'NHWC', output shape is `[batch, in_height, + /// in_width, in_channels]`. Gradient w.r.t. the input of the + /// convolution. + @inlinable @inline(__always) + public static func depthwiseConv2dNativeBackpropInput( + inputSizes: Tensor, + filter: Tensor, + outBackprop: Tensor, + strides: [Int32], + padding: Padding, + dataFormat: DataFormat = .nhwc, + dilations: [Int32] = [1, 1, 1, 1] + ) -> Tensor { + switch commonBackend( + commonBackend(inputSizes.handle.backend, filter.handle.backend), outBackprop.handle.backend) + { + case .XLA: + return _RawXLA.depthwiseConv2dNativeBackpropInput( + inputSizes: inputSizes, filter: filter, outBackprop: outBackprop, strides: strides, + padding: padding, dataFormat: dataFormat, dilations: dilations) + case .TF_EAGER: + return _RawTFEager.depthwiseConv2dNativeBackpropInput( + inputSizes: inputSizes, filter: filter, outBackprop: outBackprop, strides: strides, + padding: padding, dataFormat: dataFormat, dilations: dilations) + } + + } + + /// + /// - Parameters: + /// - min_range: The minimum scalar value possibly produced for the input. + /// - max_range: The maximum scalar value possibly produced for the input. 
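+ ///
+ /// A minimal sketch of a call (the tensor values and ranges here are
+ /// illustrative assumptions):
+ ///
+ /// ```swift
+ /// let quantized = Tensor<UInt8>([0, 128, 255])
+ /// let floats = _Raw.dequantize(
+ ///   quantized, minRange: Tensor<Float>(-1), maxRange: Tensor<Float>(1))
+ /// // Each stored byte is mapped from [0, 255] back into roughly [-1, 1].
+ /// ```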
+ @inlinable @inline(__always) + public static func dequantize( + _ input: Tensor, + minRange: Tensor, + maxRange: Tensor, + mode: Mode = .minCombined, + narrowRange: Bool = false, + axis: Int64 = -1 + ) -> Tensor { + switch commonBackend( + commonBackend(input.handle.backend, minRange.handle.backend), maxRange.handle.backend) + { + case .XLA: + let output_device = maxRange.device + let input = Tensor(copying: input, to: .defaultTFEager) + let minRange = Tensor(copying: minRange, to: .defaultTFEager) + let maxRange = Tensor(copying: maxRange, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.dequantize( + input, minRange: minRange, maxRange: maxRange, mode: mode, narrowRange: narrowRange, + axis: axis), to: output_device) + case .TF_EAGER: + return _RawTFEager.dequantize( + input, minRange: minRange, maxRange: maxRange, mode: mode, narrowRange: narrowRange, + axis: axis) + } + + } + + /// Converts the given variant tensor to an iterator and stores it in the given resource. + /// + /// - Parameters: + /// - resource_handle: A handle to an iterator resource. + /// - serialized: A variant tensor storing the state of the iterator contained in the + /// resource. + @inlinable @inline(__always) + public static func deserializeIterator( + resourceHandle: ResourceHandle, + serialized: VariantHandle + ) { + _RawTFEager.deserializeIterator(resourceHandle: resourceHandle, serialized: serialized) + } + + /// Deserialize and concatenate `SparseTensors` from a serialized minibatch. + /// + /// The input `serialized_sparse` must be a string matrix of shape `[N x 3]` where + /// `N` is the minibatch size and the rows correspond to packed outputs of + /// `SerializeSparse`. The ranks of the original `SparseTensor` objects + /// must all match. When the final `SparseTensor` is created, it has rank one + /// higher than the ranks of the incoming `SparseTensor` objects + /// (they have been concatenated along a new row dimension). + /// + /// The output `SparseTensor` object's shape values for all dimensions but the + /// first are the max across the input `SparseTensor` objects' shape values + /// for the corresponding dimensions. Its first shape value is `N`, the minibatch + /// size. + /// + /// The input `SparseTensor` objects' indices are assumed ordered in + /// standard lexicographic order. If this is not the case, after this + /// step run `SparseReorder` to restore index ordering. + /// + /// For example, if the serialized input is a `[2 x 3]` matrix representing two + /// original `SparseTensor` objects: + /// + /// index = [ 0] + /// [10] + /// [20] + /// values = [1, 2, 3] + /// shape = [50] + /// + /// and + /// + /// index = [ 2] + /// [10] + /// values = [4, 5] + /// shape = [30] + /// + /// then the final deserialized `SparseTensor` will be: + /// + /// index = [0 0] + /// [0 10] + /// [0 20] + /// [1 2] + /// [1 10] + /// values = [1, 2, 3, 4, 5] + /// shape = [2 50] + /// + /// - Parameter serialized_sparse: 2-D, The `N` serialized `SparseTensor` objects. + /// Must have 3 columns. + /// + /// - Attr dtype: The `dtype` of the serialized `SparseTensor` objects. + @inlinable @inline(__always) + public static func deserializeManySparse( + serializedSparse: StringTensor + ) -> (sparseIndices: Tensor, sparseValues: Tensor, sparseShape: Tensor) { + _RawTFEager.deserializeManySparse(serializedSparse: serializedSparse) + } + + /// Deserialize `SparseTensor` objects. 
+ /// + /// The input `serialized_sparse` must have the shape `[?, ?, ..., ?, 3]` where + /// the last dimension stores serialized `SparseTensor` objects and the other N + /// dimensions (N >= 0) correspond to a batch. The ranks of the original + /// `SparseTensor` objects must all match. When the final `SparseTensor` is + /// created, its rank is the rank of the incoming `SparseTensor` objects plus N; + /// the sparse tensors have been concatenated along new dimensions, one for each + /// batch. + /// + /// The output `SparseTensor` object's shape values for the original dimensions + /// are the max across the input `SparseTensor` objects' shape values for the + /// corresponding dimensions. The new dimensions match the size of the batch. + /// + /// The input `SparseTensor` objects' indices are assumed ordered in + /// standard lexicographic order. If this is not the case, after this + /// step run `SparseReorder` to restore index ordering. + /// + /// For example, if the serialized input is a `[2 x 3]` matrix representing two + /// original `SparseTensor` objects: + /// + /// index = [ 0] + /// [10] + /// [20] + /// values = [1, 2, 3] + /// shape = [50] + /// + /// and + /// + /// index = [ 2] + /// [10] + /// values = [4, 5] + /// shape = [30] + /// + /// then the final deserialized `SparseTensor` will be: + /// + /// index = [0 0] + /// [0 10] + /// [0 20] + /// [1 2] + /// [1 10] + /// values = [1, 2, 3, 4, 5] + /// shape = [2 50] + /// + /// - Parameter serialized_sparse: The serialized `SparseTensor` objects. The last dimension + /// must have 3 columns. + /// + /// - Attr dtype: The `dtype` of the serialized `SparseTensor` objects. + @inlinable @inline(__always) + public static func deserializeSparse< + Dtype: TensorFlowScalar, + Tserialized: TensorFlowScalar + >( + serializedSparse: Tensor + ) -> (sparseIndices: Tensor, sparseValues: Tensor, sparseShape: Tensor) { + _RawTFEager.deserializeSparse(serializedSparse: serializedSparse) + } + + /// Deserialize `SparseTensor` objects. + /// + /// The input `serialized_sparse` must have the shape `[?, ?, ..., ?, 3]` where + /// the last dimension stores serialized `SparseTensor` objects and the other N + /// dimensions (N >= 0) correspond to a batch. The ranks of the original + /// `SparseTensor` objects must all match. When the final `SparseTensor` is + /// created, its rank is the rank of the incoming `SparseTensor` objects plus N; + /// the sparse tensors have been concatenated along new dimensions, one for each + /// batch. + /// + /// The output `SparseTensor` object's shape values for the original dimensions + /// are the max across the input `SparseTensor` objects' shape values for the + /// corresponding dimensions. The new dimensions match the size of the batch. + /// + /// The input `SparseTensor` objects' indices are assumed ordered in + /// standard lexicographic order. If this is not the case, after this + /// step run `SparseReorder` to restore index ordering. 
+ /// + /// For example, if the serialized input is a `[2 x 3]` matrix representing two + /// original `SparseTensor` objects: + /// + /// index = [ 0] + /// [10] + /// [20] + /// values = [1, 2, 3] + /// shape = [50] + /// + /// and + /// + /// index = [ 2] + /// [10] + /// values = [4, 5] + /// shape = [30] + /// + /// then the final deserialized `SparseTensor` will be: + /// + /// index = [0 0] + /// [0 10] + /// [0 20] + /// [1 2] + /// [1 10] + /// values = [1, 2, 3, 4, 5] + /// shape = [2 50] + /// + /// - Parameter serialized_sparse: The serialized `SparseTensor` objects. The last dimension + /// must have 3 columns. + /// + /// - Attr dtype: The `dtype` of the serialized `SparseTensor` objects. + @inlinable @inline(__always) + public static func deserializeSparse( + serializedSparse: StringTensor + ) -> (sparseIndices: Tensor, sparseValues: Tensor, sparseShape: Tensor) { + _RawTFEager.deserializeSparse(serializedSparse: serializedSparse) + } + + /// Deletes the resource specified by the handle. + /// + /// All subsequent operations using the resource will result in a NotFound + /// error status. + /// + /// - Parameter resource: handle to the resource to delete. + /// + /// - Attr ignore_lookup_error: whether to ignore the error when the resource + /// doesn't exist. + @inlinable @inline(__always) + public static func destroyResourceOp( + resource: ResourceHandle, + ignoreLookupError: Bool = true + ) { + _RawTFEager.destroyResourceOp(resource: resource, ignoreLookupError: ignoreLookupError) + } + + @inlinable @inline(__always) + public static func devicePlacementOp() -> StringTensor { + _RawTFEager.devicePlacementOp() + } + + /// Returns a diagonal tensor with a given diagonal values. + /// + /// Given a `diagonal`, this operation returns a tensor with the `diagonal` and + /// everything else padded with zeros. The diagonal is computed as follows: + /// + /// Assume `diagonal` has dimensions [D1,..., Dk], then the output is a tensor of + /// rank 2k with dimensions [D1,..., Dk, D1,..., Dk] where: + /// + /// `output[i1,..., ik, i1,..., ik] = diagonal[i1, ..., ik]` and 0 everywhere else. + /// + /// For example: + /// + /// ``` + /// # 'diagonal' is [1, 2, 3, 4] + /// tf.diag(diagonal) ==> [[1, 0, 0, 0] + /// [0, 2, 0, 0] + /// [0, 0, 3, 0] + /// [0, 0, 0, 4]] + /// ``` + /// + /// - Parameter diagonal: Rank k tensor where k is at most 1. + @inlinable @inline(__always) + public static func diag( + diagonal: Tensor + ) -> Tensor { + switch diagonal.handle.backend { + case .XLA: + let output_device = diagonal.device + let diagonal = Tensor(copying: diagonal, to: .defaultTFEager) + return Tensor(copying: _RawTFEager.diag(diagonal: diagonal), to: output_device) + case .TF_EAGER: + return _RawTFEager.diag(diagonal: diagonal) + } + + } + + /// Returns the diagonal part of the tensor. + /// + /// This operation returns a tensor with the `diagonal` part + /// of the `input`. The `diagonal` part is computed as follows: + /// + /// Assume `input` has dimensions `[D1,..., Dk, D1,..., Dk]`, then the output is a + /// tensor of rank `k` with dimensions `[D1,..., Dk]` where: + /// + /// `diagonal[i1,..., ik] = input[i1, ..., ik, i1,..., ik]`. + /// + /// For example: + /// + /// ``` + /// # 'input' is [[1, 0, 0, 0] + /// [0, 2, 0, 0] + /// [0, 0, 3, 0] + /// [0, 0, 0, 4]] + /// + /// tf.diag_part(input) ==> [1, 2, 3, 4] + /// ``` + /// + /// - Parameter input: Rank k tensor where k is even and not zero. + /// + /// - Output diagonal: The extracted diagonal. 
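+ ///
+ /// A small sketch of the `diag` / `diagPart` round trip (values assumed for
+ /// illustration):
+ ///
+ /// ```swift
+ /// let d = Tensor<Float>([1, 2, 3, 4])
+ /// let m = _Raw.diag(diagonal: d)  // 4x4 matrix with `d` on the diagonal.
+ /// let back = _Raw.diagPart(m)     // Recovers [1, 2, 3, 4].
+ /// ```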
+ @inlinable @inline(__always) + public static func diagPart( + _ input: Tensor + ) -> Tensor { + switch input.handle.backend { + case .XLA: + return _RawXLA.diagPart(input) + case .TF_EAGER: + return _RawTFEager.diagPart(input) + } + + } + + /// Computes Psi, the derivative of Lgamma (the log of the absolute value of + /// + /// `Gamma(x)`), element-wise. + @inlinable @inline(__always) + public static func digamma( + _ x: Tensor + ) -> Tensor { + switch x.handle.backend { + case .XLA: + let output_device = x.device + let x = Tensor(copying: x, to: .defaultTFEager) + return Tensor(copying: _RawTFEager.digamma(x), to: output_device) + case .TF_EAGER: + return _RawTFEager.digamma(x) + } + + } + + /// Computes the grayscale dilation of 4-D `input` and 3-D `filter` tensors. + /// + /// The `input` tensor has shape `[batch, in_height, in_width, depth]` and the + /// `filter` tensor has shape `[filter_height, filter_width, depth]`, i.e., each + /// input channel is processed independently of the others with its own structuring + /// function. The `output` tensor has shape + /// `[batch, out_height, out_width, depth]`. The spatial dimensions of the output + /// tensor depend on the `padding` algorithm. We currently only support the default + /// "NHWC" `data_format`. + /// + /// In detail, the grayscale morphological 2-D dilation is the max-sum correlation + /// (for consistency with `conv2d`, we use unmirrored filters): + /// + /// output[b, y, x, c] = + /// max_{dy, dx} input[b, + /// strides[1] * y + rates[1] * dy, + /// strides[2] * x + rates[2] * dx, + /// c] + + /// filter[dy, dx, c] + /// + /// Max-pooling is a special case when the filter has size equal to the pooling + /// kernel size and contains all zeros. + /// + /// Note on duality: The dilation of `input` by the `filter` is equal to the + /// negation of the erosion of `-input` by the reflected `filter`. + /// + /// - Parameters: + /// - input: 4-D with shape `[batch, in_height, in_width, depth]`. + /// - filter: 3-D with shape `[filter_height, filter_width, depth]`. + /// + /// - Attrs: + /// - strides: The stride of the sliding window for each dimension of the input + /// tensor. Must be: `[1, stride_height, stride_width, 1]`. + /// - rates: The input stride for atrous morphological dilation. Must be: + /// `[1, rate_height, rate_width, 1]`. + /// - padding: The type of padding algorithm to use. + /// + /// - Output output: 4-D with shape `[batch, out_height, out_width, depth]`. + @inlinable @inline(__always) + public static func dilation2D( + _ input: Tensor, + filter: Tensor, + strides: [Int32], + rates: [Int32], + padding: Padding + ) -> Tensor { + switch commonBackend(input.handle.backend, filter.handle.backend) { + case .XLA: + let output_device = filter.device + let input = Tensor(copying: input, to: .defaultTFEager) + let filter = Tensor(copying: filter, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.dilation2D( + input, filter: filter, strides: strides, rates: rates, padding: padding), + to: output_device) + case .TF_EAGER: + return _RawTFEager.dilation2D( + input, filter: filter, strides: strides, rates: rates, padding: padding) + } + + } + + /// Computes the gradient of morphological 2-D dilation with respect to the filter. + /// + /// - Parameters: + /// - input: 4-D with shape `[batch, in_height, in_width, depth]`. + /// - filter: 3-D with shape `[filter_height, filter_width, depth]`. + /// - out_backprop: 4-D with shape `[batch, out_height, out_width, depth]`. 
+ /// + /// - Attrs: + /// - strides: 1-D of length 4. The stride of the sliding window for each dimension of + /// the input tensor. Must be: `[1, stride_height, stride_width, 1]`. + /// - rates: 1-D of length 4. The input stride for atrous morphological dilation. + /// Must be: `[1, rate_height, rate_width, 1]`. + /// - padding: The type of padding algorithm to use. + /// + /// - Output filter_backprop: 3-D with shape `[filter_height, filter_width, depth]`. + @inlinable @inline(__always) + public static func dilation2DBackpropFilter( + _ input: Tensor, + filter: Tensor, + outBackprop: Tensor, + strides: [Int32], + rates: [Int32], + padding: Padding + ) -> Tensor { + switch commonBackend( + commonBackend(input.handle.backend, filter.handle.backend), outBackprop.handle.backend) + { + case .XLA: + let output_device = outBackprop.device + let input = Tensor(copying: input, to: .defaultTFEager) + let filter = Tensor(copying: filter, to: .defaultTFEager) + let outBackprop = Tensor(copying: outBackprop, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.dilation2DBackpropFilter( + input, filter: filter, outBackprop: outBackprop, strides: strides, rates: rates, + padding: padding), to: output_device) + case .TF_EAGER: + return _RawTFEager.dilation2DBackpropFilter( + input, filter: filter, outBackprop: outBackprop, strides: strides, rates: rates, + padding: padding) + } + + } + + /// Computes the gradient of morphological 2-D dilation with respect to the input. + /// + /// - Parameters: + /// - input: 4-D with shape `[batch, in_height, in_width, depth]`. + /// - filter: 3-D with shape `[filter_height, filter_width, depth]`. + /// - out_backprop: 4-D with shape `[batch, out_height, out_width, depth]`. + /// + /// - Attrs: + /// - strides: 1-D of length 4. The stride of the sliding window for each dimension of + /// the input tensor. Must be: `[1, stride_height, stride_width, 1]`. + /// - rates: 1-D of length 4. The input stride for atrous morphological dilation. + /// Must be: `[1, rate_height, rate_width, 1]`. + /// - padding: The type of padding algorithm to use. + /// + /// - Output in_backprop: 4-D with shape `[batch, in_height, in_width, depth]`. + @inlinable @inline(__always) + public static func dilation2DBackpropInput( + _ input: Tensor, + filter: Tensor, + outBackprop: Tensor, + strides: [Int32], + rates: [Int32], + padding: Padding + ) -> Tensor { + switch commonBackend( + commonBackend(input.handle.backend, filter.handle.backend), outBackprop.handle.backend) + { + case .XLA: + let output_device = outBackprop.device + let input = Tensor(copying: input, to: .defaultTFEager) + let filter = Tensor(copying: filter, to: .defaultTFEager) + let outBackprop = Tensor(copying: outBackprop, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.dilation2DBackpropInput( + input, filter: filter, outBackprop: outBackprop, strides: strides, rates: rates, + padding: padding), to: output_device) + case .TF_EAGER: + return _RawTFEager.dilation2DBackpropInput( + input, filter: filter, outBackprop: outBackprop, strides: strides, rates: rates, + padding: padding) + } + + } + + /// A substitute for `InterleaveDataset` on a fixed list of `N` datasets. + /// + /// - Parameters: + /// - selector_input_dataset: A dataset of scalar `DT_INT64` elements that determines which of the + /// `N` data inputs should produce the next output element. + /// - data_input_datasets: `N` datasets with the same type that will be interleaved according to + /// the values of `selector_input_dataset`. 
+ @inlinable @inline(__always)
+ public static func directedInterleaveDataset(
+ selectorInputDataset: VariantHandle,
+ dataInputDatasets: [VariantHandle],
+ outputTypes: [TensorDataType],
+ outputShapes: [TensorShape?]
+ ) -> VariantHandle {
+ _RawTFEager.directedInterleaveDataset(
+ selectorInputDataset: selectorInputDataset, dataInputDatasets: dataInputDatasets,
+ outputTypes: outputTypes, outputShapes: outputShapes)
+ }
+
+ /// Returns x / y element-wise.
+ ///
+ /// *NOTE*: `Div` supports broadcasting. More about broadcasting
+ /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
+ @inlinable @inline(__always)
+ public static func div(
+ _ x: Tensor,
+ _ y: Tensor
+ ) -> Tensor {
+ switch commonBackend(x.handle.backend, y.handle.backend) {
+ case .XLA:
+ return _RawXLA.div(x, y)
+ case .TF_EAGER:
+ return _RawTFEager.div(x, y)
+ }
+
+ }
+
+ /// Returns x / y element-wise, or 0 if the denominator is zero.
+ ///
+ /// *NOTE*: `DivNoNan` supports broadcasting. More about broadcasting
+ /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
+ @inlinable @inline(__always)
+ public static func divNoNan(
+ _ x: Tensor,
+ _ y: Tensor
+ ) -> Tensor {
+ switch commonBackend(x.handle.backend, y.handle.backend) {
+ case .XLA:
+ let output_device = y.device
+ let x = Tensor(copying: x, to: .defaultTFEager)
+ let y = Tensor(copying: y, to: .defaultTFEager)
+ return Tensor(copying: _RawTFEager.divNoNan(x, y), to: output_device)
+ case .TF_EAGER:
+ return _RawTFEager.divNoNan(x, y)
+ }
+
+ }
+
+ /// Draw bounding boxes on a batch of images.
+ ///
+ /// Outputs a copy of `images` but draws on top of the pixels zero or more bounding
+ /// boxes specified by the locations in `boxes`. The coordinates of each
+ /// bounding box in `boxes` are encoded as `[y_min, x_min, y_max, x_max]`. The
+ /// bounding box coordinates are floats in `[0.0, 1.0]` relative to the width and
+ /// height of the underlying image.
+ ///
+ /// For example, if an image is 100 x 200 pixels (height x width) and the bounding
+ /// box is `[0.1, 0.2, 0.5, 0.9]`, the upper-left and bottom-right coordinates of
+ /// the bounding box will be `(40, 10)` to `(180, 50)` (in (x,y) coordinates).
+ ///
+ /// Parts of the bounding box may fall outside the image.
+ ///
+ /// - Parameters:
+ /// - images: 4-D with shape `[batch, height, width, depth]`. A batch of images.
+ /// - boxes: 3-D with shape `[batch, num_bounding_boxes, 4]` containing bounding
+ /// boxes.
+ ///
+ /// - Output output: 4-D with the same shape as `images`. The batch of input images with
+ /// bounding boxes drawn on the images.
+ @inlinable @inline(__always)
+ public static func drawBoundingBoxes(
+ images: Tensor,
+ boxes: Tensor
+ ) -> Tensor {
+ switch commonBackend(images.handle.backend, boxes.handle.backend) {
+ case .XLA:
+ let output_device = boxes.device
+ let images = Tensor(copying: images, to: .defaultTFEager)
+ let boxes = Tensor(copying: boxes, to: .defaultTFEager)
+ return Tensor(
+ copying: _RawTFEager.drawBoundingBoxes(images: images, boxes: boxes), to: output_device)
+ case .TF_EAGER:
+ return _RawTFEager.drawBoundingBoxes(images: images, boxes: boxes)
+ }
+
+ }
+
+ /// Draw bounding boxes on a batch of images.
+ ///
+ /// Outputs a copy of `images` but draws on top of the pixels zero or more bounding
+ /// boxes specified by the locations in `boxes`. The coordinates of each
+ /// bounding box in `boxes` are encoded as `[y_min, x_min, y_max, x_max]`.
The
+ /// bounding box coordinates are floats in `[0.0, 1.0]` relative to the width and
+ /// height of the underlying image.
+ ///
+ /// For example, if an image is 100 x 200 pixels (height x width) and the bounding
+ /// box is `[0.1, 0.2, 0.5, 0.9]`, the upper-left and bottom-right coordinates of
+ /// the bounding box will be `(40, 10)` to `(180, 50)` (in (x,y) coordinates).
+ ///
+ /// Parts of the bounding box may fall outside the image.
+ ///
+ /// - Parameters:
+ /// - images: 4-D with shape `[batch, height, width, depth]`. A batch of images.
+ /// - boxes: 3-D with shape `[batch, num_bounding_boxes, 4]` containing bounding
+ /// boxes.
+ /// - colors: 2-D. A list of RGBA colors to cycle through for the boxes.
+ ///
+ /// - Output output: 4-D with the same shape as `images`. The batch of input images with
+ /// bounding boxes drawn on the images.
+ @inlinable @inline(__always)
+ public static func drawBoundingBoxesV2(
+ images: Tensor,
+ boxes: Tensor,
+ colors: Tensor
+ ) -> Tensor {
+ switch commonBackend(
+ commonBackend(images.handle.backend, boxes.handle.backend), colors.handle.backend)
+ {
+ case .XLA:
+ let output_device = colors.device
+ let images = Tensor(copying: images, to: .defaultTFEager)
+ let boxes = Tensor(copying: boxes, to: .defaultTFEager)
+ let colors = Tensor(copying: colors, to: .defaultTFEager)
+ return Tensor(
+ copying: _RawTFEager.drawBoundingBoxesV2(images: images, boxes: boxes, colors: colors),
+ to: output_device)
+ case .TF_EAGER:
+ return _RawTFEager.drawBoundingBoxesV2(images: images, boxes: boxes, colors: colors)
+ }
+
+ }
+
+ /// Partitions `data` into `num_partitions` tensors using indices from `partitions`.
+ ///
+ /// For each index tuple `js` of size `partitions.ndim`, the slice `data[js, ...]`
+ /// becomes part of `outputs[partitions[js]]`. The slices with `partitions[js] = i`
+ /// are placed in `outputs[i]` in lexicographic order of `js`, and the first
+ /// dimension of `outputs[i]` is the number of entries in `partitions` equal to `i`.
+ /// In detail,
+ ///
+ /// ```python
+ /// outputs[i].shape = [sum(partitions == i)] + data.shape[partitions.ndim:]
+ ///
+ /// outputs[i] = pack([data[js, ...] for js if partitions[js] == i])
+ /// ```
+ ///
+ /// `data.shape` must start with `partitions.shape`.
+ ///
+ /// For example:
+ ///
+ /// ```python
+ /// # Scalar partitions.
+ /// partitions = 1
+ /// num_partitions = 2
+ /// data = [10, 20]
+ /// outputs[0] = [] # Empty with shape [0, 2]
+ /// outputs[1] = [[10, 20]]
+ ///
+ /// # Vector partitions.
+ /// partitions = [0, 0, 1, 1, 0]
+ /// num_partitions = 2
+ /// data = [10, 20, 30, 40, 50]
+ /// outputs[0] = [10, 20, 50]
+ /// outputs[1] = [30, 40]
+ /// ```
+ ///
+ /// See `dynamic_stitch` for an example on how to merge partitions back.
+ ///
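+ /// The vector example above, expressed through this wrapper (a sketch; the
+ /// values are taken from the example):
+ ///
+ /// ```swift
+ /// let data = Tensor<Float>([10, 20, 30, 40, 50])
+ /// let partitions = Tensor<Int32>([0, 0, 1, 1, 0])
+ /// let outputs = _Raw.dynamicPartition(
+ ///   data: data, partitions: partitions, numPartitions: 2)
+ /// // outputs[0] == [10, 20, 50]; outputs[1] == [30, 40]
+ /// ```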
+ /// + /// - Parameter partitions: Any shape. Indices in the range `[0, num_partitions)`. + /// + /// - Attr num_partitions: The number of partitions to output. + @inlinable @inline(__always) + public static func dynamicPartition( + data: Tensor, + partitions: Tensor, + numPartitions: Int64 + ) -> [Tensor] { + switch commonBackend(data.handle.backend, partitions.handle.backend) { + case .XLA: + let output_device = partitions.device + let data = Tensor(copying: data, to: .defaultTFEager) + let partitions = Tensor(copying: partitions, to: .defaultTFEager) + return [Tensor]( + copying: _RawTFEager.dynamicPartition( + data: data, partitions: partitions, numPartitions: numPartitions), to: output_device) + case .TF_EAGER: + return _RawTFEager.dynamicPartition( + data: data, partitions: partitions, numPartitions: numPartitions) + } + + } + + /// Interleave the values from the `data` tensors into a single tensor. + /// + /// Builds a merged tensor such that + /// + /// ```python + /// merged[indices[m][i, ..., j], ...] = data[m][i, ..., j, ...] + /// ``` + /// + /// For example, if each `indices[m]` is scalar or vector, we have + /// + /// ```python + /// # Scalar indices: + /// merged[indices[m], ...] = data[m][...] + /// + /// # Vector indices: + /// merged[indices[m][i], ...] = data[m][i, ...] + /// ``` + /// + /// Each `data[i].shape` must start with the corresponding `indices[i].shape`, + /// and the rest of `data[i].shape` must be constant w.r.t. `i`. That is, we + /// must have `data[i].shape = indices[i].shape + constant`. In terms of this + /// `constant`, the output shape is + /// + /// merged.shape = [max(indices)] + constant + /// + /// Values are merged in order, so if an index appears in both `indices[m][i]` and + /// `indices[n][j]` for `(m,i) < (n,j)` the slice `data[n][j]` will appear in the + /// merged result. If you do not need this guarantee, ParallelDynamicStitch might + /// perform better on some devices. + /// + /// For example: + /// + /// ```python + /// indices[0] = 6 + /// indices[1] = [4, 1] + /// indices[2] = [[5, 2], [0, 3]] + /// data[0] = [61, 62] + /// data[1] = [[41, 42], [11, 12]] + /// data[2] = [[[51, 52], [21, 22]], [[1, 2], [31, 32]]] + /// merged = [[1, 2], [11, 12], [21, 22], [31, 32], [41, 42], + /// [51, 52], [61, 62]] + /// ``` + /// + /// This method can be used to merge partitions created by `dynamic_partition` + /// as illustrated on the following example: + /// + /// ```python + /// # Apply function (increments x_i) on elements for which a certain condition + /// # apply (x_i != -1 in this example). + /// x=tf.constant([0.1, -1., 5.2, 4.3, -1., 7.4]) + /// condition_mask=tf.not_equal(x,tf.constant(-1.)) + /// partitioned_data = tf.dynamic_partition( + /// x, tf.cast(condition_mask, tf.int32) , 2) + /// partitioned_data[1] = partitioned_data[1] + 1.0 + /// condition_indices = tf.dynamic_partition( + /// tf.range(tf.shape(x)[0]), tf.cast(condition_mask, tf.int32) , 2) + /// x = tf.dynamic_stitch(condition_indices, partitioned_data) + /// # Here x=[1.1, -1., 6.2, 5.3, -1, 8.4], the -1. values remain + /// # unchanged. + /// ``` + /// + ///
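+ /// A small sketch through this wrapper (values assumed for illustration):
+ ///
+ /// ```swift
+ /// let indices = [Tensor<Int32>([0, 2]), Tensor<Int32>([1])]
+ /// let data = [Tensor<Float>([10, 30]), Tensor<Float>([20])]
+ /// let merged = _Raw.dynamicStitch(indices: indices, data: data)
+ /// // merged == [10, 20, 30]
+ /// ```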
+ @inlinable @inline(__always) + public static func dynamicStitch( + indices: [Tensor], + data: [Tensor] + ) -> Tensor { + _RawTFEager.dynamicStitch(indices: indices, data: data) + } + + /// Eagerly executes a python function to compute func(input)->output. The + /// + /// semantics of the input, output, and attributes are the same as those for + /// PyFunc. + @inlinable @inline(__always) + public static func eagerPyFunc< + Tin: TensorArrayProtocol, + Tout: TensorGroup + >( + _ input: Tin, + token: String, + isAsync: Bool = false + ) -> Tout { + _RawTFEager.eagerPyFunc(input, token: token, isAsync: isAsync) + } + + /// Computes the (possibly normalized) Levenshtein Edit Distance. + /// + /// The inputs are variable-length sequences provided by SparseTensors + /// (hypothesis_indices, hypothesis_values, hypothesis_shape) + /// and + /// (truth_indices, truth_values, truth_shape). + /// + /// The inputs are: + /// + /// - Parameters: + /// - hypothesis_indices: The indices of the hypothesis list SparseTensor. + /// This is an N x R int64 matrix. + /// - hypothesis_values: The values of the hypothesis list SparseTensor. + /// This is an N-length vector. + /// - hypothesis_shape: The shape of the hypothesis list SparseTensor. + /// This is an R-length vector. + /// - truth_indices: The indices of the truth list SparseTensor. + /// This is an M x R int64 matrix. + /// - truth_values: The values of the truth list SparseTensor. + /// This is an M-length vector. + /// - truth_shape: truth indices, vector. + /// + /// - Attr normalize: boolean (if true, edit distances are normalized by length of truth). + /// + /// The output is: + /// + /// - Output output: A dense float tensor with rank R - 1. + /// + /// For the example input: + /// + /// // hypothesis represents a 2x1 matrix with variable-length values: + /// // (0,0) = ["a"] + /// // (1,0) = ["b"] + /// hypothesis_indices = [[0, 0, 0], + /// [1, 0, 0]] + /// hypothesis_values = ["a", "b"] + /// hypothesis_shape = [2, 1, 1] + /// + /// // truth represents a 2x2 matrix with variable-length values: + /// // (0,0) = [] + /// // (0,1) = ["a"] + /// // (1,0) = ["b", "c"] + /// // (1,1) = ["a"] + /// truth_indices = [[0, 1, 0], + /// [1, 0, 0], + /// [1, 0, 1], + /// [1, 1, 0]] + /// truth_values = ["a", "b", "c", "a"] + /// truth_shape = [2, 2, 2] + /// normalize = true + /// + /// The output will be: + /// + /// // output is a 2x2 matrix with edit distances normalized by truth lengths. 
+ /// output = [[inf, 1.0], // (0,0): no truth, (0,1): no hypothesis + /// [0.5, 1.0]] // (1,0): addition, (1,1): no hypothesis + @inlinable @inline(__always) + public static func editDistance( + hypothesisIndices: Tensor, + hypothesisValues: Tensor, + hypothesisShape: Tensor, + truthIndices: Tensor, + truthValues: Tensor, + truthShape: Tensor, + normalize: Bool = true + ) -> Tensor { + switch commonBackend( + commonBackend( + commonBackend( + commonBackend( + commonBackend(hypothesisIndices.handle.backend, hypothesisValues.handle.backend), + hypothesisShape.handle.backend), truthIndices.handle.backend), + truthValues.handle.backend), truthShape.handle.backend) + { + case .XLA: + let output_device = truthShape.device + let hypothesisIndices = Tensor(copying: hypothesisIndices, to: .defaultTFEager) + let hypothesisValues = Tensor(copying: hypothesisValues, to: .defaultTFEager) + let hypothesisShape = Tensor(copying: hypothesisShape, to: .defaultTFEager) + let truthIndices = Tensor(copying: truthIndices, to: .defaultTFEager) + let truthValues = Tensor(copying: truthValues, to: .defaultTFEager) + let truthShape = Tensor(copying: truthShape, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.editDistance( + hypothesisIndices: hypothesisIndices, hypothesisValues: hypothesisValues, + hypothesisShape: hypothesisShape, truthIndices: truthIndices, truthValues: truthValues, + truthShape: truthShape, normalize: normalize), to: output_device) + case .TF_EAGER: + return _RawTFEager.editDistance( + hypothesisIndices: hypothesisIndices, hypothesisValues: hypothesisValues, + hypothesisShape: hypothesisShape, truthIndices: truthIndices, truthValues: truthValues, + truthShape: truthShape, normalize: normalize) + } + + } + + /// Computes the eigen decomposition of one or more square matrices. + /// + /// Computes the eigenvalues and (optionally) right eigenvectors of each inner matrix in + /// `input` such that `input[..., :, :] = v[..., :, :] * diag(e[..., :])`. The eigenvalues + /// are sorted in non-decreasing order. + /// + /// ```python + /// # a is a tensor. + /// # e is a tensor of eigenvalues. + /// # v is a tensor of eigenvectors. + /// e, v = eig(a) + /// e = eig(a, compute_v=False) + /// ``` + /// + /// - Parameter input: `Tensor` input of shape `[N, N]`. + /// + /// - Attr compute_v: If `True` then eigenvectors will be computed and returned in `v`. + /// Otherwise, only the eigenvalues will be computed. + /// + /// - Outputs: + /// - e: Eigenvalues. Shape is `[N]`. + /// - v: Eigenvectors. Shape is `[N, N]`. + @inlinable @inline(__always) + public static func eig< + T: FloatingPoint & TensorFlowScalar, + Tout: TensorFlowScalar + >( + _ input: Tensor, + computeV: Bool = true + ) -> (e: Tensor, v: Tensor) { + _RawTFEager.eig(input, computeV: computeV) + } + + /// Tensor contraction according to Einstein summation convention. + /// + /// Implements generalized Tensor contraction and reduction. Each input Tensor must + /// have a corresponding input subscript appearing in the comma-separated left-hand + /// side of the equation. The right-hand side of the equation consists of the + /// output subscript. The input subscripts and the output subscript should consist + /// of zero or more named axis labels and at most one ellipsis (`...`). + /// + /// The named axis labels may be any single character other than those having + /// special meaning, namely `,.->`. 
The behavior of this Op is undefined if it + /// receives an ill-formatted equation; since the validation is done at + /// graph-building time, we omit format validation checks at runtime. + /// + /// Note: This Op is *not* intended to be called by the user; instead users should + /// call `tf.einsum` directly. It is a hidden Op used by `tf.einsum`. + /// + /// Operations are applied to the input(s) according to the following rules: + /// + /// (a) Generalized Diagonals: For input dimensions corresponding to axis labels + /// appearing more than once in the same input subscript, we take the + /// generalized (`k`-dimensional) diagonal. + /// For example, in the equation `iii->i` with input shape `[3, 3, 3]`, the + /// generalized diagonal would consist of `3` elements at indices `(0, 0, 0)`, + /// `(1, 1, 1)` and `(2, 2, 2)` to create a Tensor of shape `[3]`. + /// + /// (b) Reduction: Axes corresponding to labels appearing only in one input + /// subscript but not in the output subscript are summed over prior to Tensor + /// contraction. + /// For example, in the equation `ab,bc->b`, the axis labels `a` and `c` are + /// the reduction axis labels. + /// + /// (c) Batch Dimensions: Axes corresponding to labels appearing in each of the + /// input subscripts and also in the output subscript make up the batch + /// dimensions in Tensor contraction. Unnamed axis labels corresponding to + /// ellipsis (`...`) also correspond to batch dimensions. + /// For example, for the equation denoting batch matrix multiplication, + /// `bij,bjk->bik`, the axis label `b` corresponds to a batch dimension. + /// + /// (d) Contraction: In case of binary einsum, axes corresponding to labels + /// appearing in two different inputs (and not in the output) are contracted + /// against each other. + /// Considering the batch matrix multiplication equation again + /// (`bij,bjk->bik`), the contracted axis label is `j`. + /// + /// (e) Expand Diagonal: If the output subscripts contain repeated (explicit) axis + /// labels, the opposite operation of (a) is applied. For example, in the + /// equation `i->iii`, and input shape `[3]`, the output of shape `[3, 3, 3]` + /// are all zeros, except for the (generalized) diagonal which is populated + /// with values from the input. + /// Note: This operation is not supported by `np.einsum` or `tf.einsum`; it is + /// provided to enable computing the symbolic gradient of `tf.einsum`. + /// + /// The output subscripts must contain only labels appearing in at least one of the + /// input subscripts. Furthermore, all dimensions mapping to the same axis label + /// must be equal. + /// + /// Any of the input and output subscripts may contain at most a single ellipsis + /// (`...`). These ellipsis are mapped against dimensions not corresponding to any + /// named axis label. If two inputs contain ellipsis, then they are broadcasted + /// according to standard NumPy broadcasting + /// [rules](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html). + /// + /// The broadcasted dimensions are placed in the corresponding location of the + /// ellipsis in the output subscript. If the broadcasted dimensions are non-empty + /// and the output subscripts do not contain ellipsis, then an InvalidArgument error + /// is raised. + /// + /// @compatibility(numpy) + /// Similar to [`numpy.einsum`](https://docs.scipy.org/doc/numpy/reference/generated/numpy.einsum.html). + /// + /// Comparison with `numpy.einsum`: + /// + /// * This Op only supports unary and binary forms of `numpy.einsum`. 
+ /// * This Op does not support the implicit form (i.e. equations without `->`).
+ /// * This Op also supports repeated indices in the output subscript, which is not
+ /// supported by `numpy.einsum`.
+ /// @end_compatibility
+ ///
+ /// - Parameter inputs: List of 1 or 2 Tensors.
+ ///
+ /// - Attr equation: String describing the Einstein Summation operation; in the format of np.einsum.
+ ///
+ /// - Output output: Output Tensor with shape depending upon `equation`.
+ @inlinable @inline(__always)
+ public static func einsum(
+ inputs: [Tensor],
+ equation: String
+ ) -> Tensor {
+ _RawTFEager.einsum(inputs: inputs, equation: equation)
+ }
+
+ /// Computes exponential linear: `exp(features) - 1` if < 0, `features` otherwise.
+ ///
+ /// See [Fast and Accurate Deep Network Learning by Exponential Linear Units (ELUs)
+ /// ](http://arxiv.org/abs/1511.07289)
+ @inlinable @inline(__always)
+ public static func elu(
+ features: Tensor
+ ) -> Tensor {
+ switch features.handle.backend {
+ case .XLA:
+ return _RawXLA.elu(features: features)
+ case .TF_EAGER:
+ return _RawTFEager.elu(features: features)
+ }
+
+ }
+
+ /// Computes gradients for the exponential linear (Elu) operation.
+ ///
+ /// - Parameters:
+ /// - gradients: The backpropagated gradients to the corresponding Elu operation.
+ /// - outputs: The outputs of the corresponding Elu operation.
+ ///
+ /// - Output backprops: The gradients: `gradients * (outputs + 1)` if outputs < 0,
+ /// `gradients` otherwise.
+ @inlinable @inline(__always)
+ public static func eluGrad(
+ gradients: Tensor,
+ outputs: Tensor
+ ) -> Tensor {
+ switch commonBackend(gradients.handle.backend, outputs.handle.backend) {
+ case .XLA:
+ return _RawXLA.eluGrad(gradients: gradients, outputs: outputs)
+ case .TF_EAGER:
+ return _RawTFEager.eluGrad(gradients: gradients, outputs: outputs)
+ }
+
+ }
+
+ /// Creates a tensor with the given shape.
+ ///
+ /// This operation creates a tensor of `shape` and `dtype`.
+ ///
+ /// - Parameter shape: 1-D. Represents the shape of the output tensor.
+ ///
+ /// - Attr init: If True, initialize the returned tensor with the default value of dtype.
+ /// Otherwise, the implementation is free not to initialize the tensor's content.
+ ///
+ /// - Output output: A `Tensor` of type `T`.
+ @inlinable @inline(__always)
+ public static func empty(
+ shape: Tensor,
+ init_: Bool = false
+ ) -> Tensor {
+ switch shape.handle.backend {
+ case .XLA:
+ let output_device = shape.device
+ let shape = Tensor(copying: shape, to: .defaultTFEager)
+ return Tensor(
+ copying: _RawTFEager.empty(shape: shape, init_: init_), to: output_device)
+ case .TF_EAGER:
+ return _RawTFEager.empty(shape: shape, init_: init_)
+ }
+
+ }
+
+ /// Creates and returns an empty tensor list.
+ ///
+ /// All list elements must be tensors of dtype element_dtype and shape compatible
+ /// with element_shape.
+ ///
+ /// handle: an empty tensor list.
+ /// element_dtype: the type of elements in the list.
+ /// element_shape: a shape compatible with that of elements in the list.
+ @inlinable @inline(__always)
+ public static func emptyTensorList(
+ elementShape: Tensor,
+ maxNumElements: Tensor,
+ elementDtype: TensorDataType
+ ) -> VariantHandle {
+ _RawTFEager.emptyTensorList(
+ elementShape: elementShape, maxNumElements: maxNumElements, elementDtype: elementDtype)
+ }
+
+ /// Encode strings into web-safe base64 format.
+ ///
+ /// Refer to the following article for more information on base64 format:
+ /// en.wikipedia.org/wiki/Base64.
Base64 strings may have padding with '=' at the + /// end so that the encoded has length multiple of 4. See Padding section of the + /// link above. + /// + /// Web-safe means that the encoder uses - and _ instead of + and /. + /// + /// - Parameter input: Strings to be encoded. + /// + /// - Attr pad: Bool whether padding is applied at the ends. + /// + /// - Output output: Input strings encoded in base64. + @inlinable @inline(__always) + public static func encodeBase64( + _ input: StringTensor, + pad: Bool = false + ) -> StringTensor { + _RawTFEager.encodeBase64(input, pad: pad) + } + + /// JPEG-encode an image. + /// + /// `image` is a 3-D uint8 Tensor of shape `[height, width, channels]`. + /// + /// The attr `format` can be used to override the color format of the encoded + /// output. Values can be: + /// + /// * `''`: Use a default format based on the number of channels in the image. + /// * `grayscale`: Output a grayscale JPEG image. The `channels` dimension + /// of `image` must be 1. + /// * `rgb`: Output an RGB JPEG image. The `channels` dimension + /// of `image` must be 3. + /// + /// If `format` is not specified or is the empty string, a default format is picked + /// in function of the number of channels in `image`: + /// + /// * 1: Output a grayscale image. + /// * 3: Output an RGB image. + /// + /// - Parameter image: 3-D with shape `[height, width, channels]`. + /// + /// - Attrs: + /// - format: Per pixel image format. + /// - quality: Quality of the compression from 0 to 100 (higher is better and slower). + /// - progressive: If True, create a JPEG that loads progressively (coarse to fine). + /// - optimize_size: If True, spend CPU/RAM to reduce size with no quality change. + /// - chroma_downsampling: See http://en.wikipedia.org/wiki/Chroma_subsampling. + /// - density_unit: Unit used to specify `x_density` and `y_density`: + /// pixels per inch (`'in'`) or centimeter (`'cm'`). + /// - x_density: Horizontal pixels per density unit. + /// - y_density: Vertical pixels per density unit. + /// - xmp_metadata: If not empty, embed this XMP metadata in the image header. + /// + /// - Output contents: 0-D. JPEG-encoded image. + @inlinable @inline(__always) + public static func encodeJpeg( + image: Tensor, + format: Format, + quality: Int64 = 95, + progressive: Bool = false, + optimizeSize: Bool = false, + chromaDownsampling: Bool = true, + densityUnit: DensityUnit = .in_, + xDensity: Int64 = 300, + yDensity: Int64 = 300, + xmpMetadata: String + ) -> StringTensor { + _RawTFEager.encodeJpeg( + image: image, format: format, quality: quality, progressive: progressive, + optimizeSize: optimizeSize, chromaDownsampling: chromaDownsampling, + densityUnit: densityUnit, xDensity: xDensity, yDensity: yDensity, xmpMetadata: xmpMetadata) + } + + /// JPEG encode input image with provided compression quality. + /// + /// `image` is a 3-D uint8 Tensor of shape `[height, width, channels]`. + /// `quality` is an int32 jpeg compression quality value between 0 and 100. + /// + /// + /// - Parameters: + /// - images: Images to adjust. At least 3-D. + /// - quality: An int quality to encode to. + /// + /// - Output contents: 0-D. JPEG-encoded image. + @inlinable @inline(__always) + public static func encodeJpegVariableQuality( + images: Tensor, + quality: Tensor + ) -> StringTensor { + _RawTFEager.encodeJpegVariableQuality(images: images, quality: quality) + } + + /// PNG-encode an image. 
+ ///
+ /// `image` is a 3-D uint8 or uint16 Tensor of shape `[height, width, channels]`
+ /// where `channels` is:
+ ///
+ /// * 1: for grayscale.
+ /// * 2: for grayscale + alpha.
+ /// * 3: for RGB.
+ /// * 4: for RGBA.
+ ///
+ /// The ZLIB compression level, `compression`, can be -1 for the PNG-encoder
+ /// default or a value from 0 to 9. 9 is the highest compression level, generating
+ /// the smallest output, but is slower.
+ ///
+ /// - Parameter image: 3-D with shape `[height, width, channels]`.
+ ///
+ /// - Attr compression: Compression level.
+ ///
+ /// - Output contents: 0-D. PNG-encoded image.
+ @inlinable @inline(__always)
+ public static func encodePng(
+ image: Tensor,
+ compression: Int64 = -1
+ ) -> StringTensor {
+ _RawTFEager.encodePng(image: image, compression: compression)
+ }
+
+ /// The op serializes protobuf messages provided in the input tensors.
+ ///
+ /// The types of the tensors in `values` must match the schema for the fields
+ /// specified in `field_names`. All the tensors in `values` must have a common
+ /// shape prefix, *batch_shape*.
+ ///
+ /// The `sizes` tensor specifies repeat counts for each field. The repeat count
+ /// (last dimension) of each tensor in `values` must be greater than or equal
+ /// to the corresponding repeat count in `sizes`.
+ ///
+ /// A `message_type` name must be provided to give context for the field names.
+ /// The actual message descriptor can be looked up either in the linked-in
+ /// descriptor pool or a filename provided by the caller using the
+ /// `descriptor_source` attribute.
+ ///
+ /// For the most part, the mapping between Proto field types and TensorFlow dtypes
+ /// is straightforward. However, there are a few special cases:
+ ///
+ /// - A proto field that contains a submessage or group can only be converted
+ /// to `DT_STRING` (the serialized submessage). This is to reduce the complexity
+ /// of the API. The resulting string can be used as input to another instance of
+ /// the decode_proto op.
+ ///
+ /// - TensorFlow lacks support for unsigned integers. The ops represent uint64
+ /// types as a `DT_INT64` with the same twos-complement bit pattern (the obvious
+ /// way). Unsigned int32 values can be represented exactly by specifying type
+ /// `DT_INT64`, or using twos-complement if the caller specifies `DT_INT32` in
+ /// the `output_types` attribute.
+ ///
+ /// The `descriptor_source` attribute selects the source of protocol
+ /// descriptors to consult when looking up `message_type`. This may be:
+ ///
+ /// - An empty string or "local://", in which case protocol descriptors are
+ /// created for C++ (not Python) proto definitions linked to the binary.
+ ///
+ /// - A file, in which case protocol descriptors are created from the file,
+ /// which is expected to contain a `FileDescriptorSet` serialized as a string.
+ /// NOTE: You can build a `descriptor_source` file using the `--descriptor_set_out`
+ /// and `--include_imports` options to the protocol compiler `protoc`.
+ ///
+ /// - A "bytes://<bytes>" string, in which case protocol descriptors are created
+ /// from `<bytes>`, which is expected to be a `FileDescriptorSet` serialized as
+ /// a string.
+ ///
+ /// - Parameters:
+ /// - sizes: Tensor of int32 with shape `[batch_shape, len(field_names)]`.
+ /// - values: List of tensors containing values for the corresponding field.
+ ///
+ /// - Attrs:
+ /// - field_names: List of strings containing proto field names.
+ /// - message_type: Name of the proto message type to decode.
+ /// - Tinput_types: The input types.
+ /// + /// - Output bytes: Tensor of serialized protos with shape `batch_shape`. + @inlinable @inline(__always) + public static func encodeProto( + sizes: Tensor, + _ values: TinputTypes, + fieldNames: [String], + messageType: String, + descriptorSource: String = "local://" + ) -> StringTensor { + _RawTFEager.encodeProto( + sizes: sizes, values, fieldNames: fieldNames, messageType: messageType, + descriptorSource: descriptorSource) + } + + /// Encode audio data using the WAV file format. + /// + /// This operation will generate a string suitable to be saved out to create a .wav + /// audio file. It will be encoded in the 16-bit PCM format. It takes in float + /// values in the range -1.0f to 1.0f, and any outside that value will be clamped to + /// that range. + /// + /// `audio` is a 2-D float Tensor of shape `[length, channels]`. + /// `sample_rate` is a scalar Tensor holding the rate to use (e.g. 44100). + /// + /// - Parameters: + /// - audio: 2-D with shape `[length, channels]`. + /// - sample_rate: Scalar containing the sample frequency. + /// + /// - Output contents: 0-D. WAV-encoded file contents. + @inlinable @inline(__always) + public static func encodeWav( + audio: Tensor, + sampleRate: Tensor + ) -> StringTensor { + _RawTFEager.encodeWav(audio: audio, sampleRate: sampleRate) + } + + /// An op that enqueues a list of input batch tensors to TPUEmbedding. + /// + /// - Parameters: + /// - batch: A list of 1D tensors, one for each embedding table, containing the + /// indices into the tables. + /// - mode_override: A string input that overrides the mode specified in the + /// TPUEmbeddingConfiguration. Supported values are {'unspecified', 'inference', + /// 'training', 'backward_pass_only'}. When set to 'unspecified', the mode set + /// in TPUEmbeddingConfiguration is used, otherwise mode_override is used. + /// + /// - Attr device_ordinal: The TPU device to use. Should be >= 0 and less than the number + /// of TPU cores in the task on which the node is placed. + @inlinable @inline(__always) + public static func enqueueTPUEmbeddingIntegerBatch( + batch: [Tensor], + modeOverride: StringTensor, + deviceOrdinal: Int64 = -1 + ) { + _RawTFEager.enqueueTPUEmbeddingIntegerBatch( + batch: batch, modeOverride: modeOverride, deviceOrdinal: deviceOrdinal) + } + + /// An op that enqueues TPUEmbedding input indices from a SparseTensor. + /// + /// This Op eases the porting of code that uses embedding_lookup_sparse(), + /// although some Python preprocessing of the SparseTensor arguments to + /// embedding_lookup_sparse() is required to produce the arguments to this Op, + /// since only a single EnqueueTPUEmbeddingSparseBatch Op is allowed per training + /// step. + /// + /// The tensors at corresponding positions in the three input lists + /// must have the same shape, i.e. rank 1 with dim_size() equal to the total + /// number of lookups into the table described by the corresponding table_id. + /// + /// - Parameters: + /// - sample_indices: A list of rank 1 Tensors specifying the training example and + /// feature to which the corresponding embedding_indices and aggregation_weights + /// values belong. sample_indices[i] must equal b * nf + f, where nf is the + /// number of features from the corresponding table, f is in [0, nf), and + /// b is in [0, batch size). + /// - embedding_indices: A list of rank 1 Tensors, indices into the embedding tables. + /// - aggregation_weights: A list of rank 1 Tensors containing per sample -- i.e. 
per + /// (training example, feature) -- aggregation weights. + /// - mode_override: A string input that overrides the mode specified in the + /// TPUEmbeddingConfiguration. Supported values are {'unspecified', 'inference', + /// 'training', 'backward_pass_only'}. When set to 'unspecified', the mode set + /// in TPUEmbeddingConfiguration is used, otherwise mode_override is used. + /// + /// - Attrs: + /// - device_ordinal: The TPU device to use. Should be >= 0 and less than the number + /// of TPU cores in the task on which the node is placed. + /// - combiners: A list of string scalars, one for each embedding table that specify + /// how to normalize the embedding activations after weighted summation. + /// Supported combiners are 'mean', 'sum', or 'sqrtn'. It is invalid to have + /// the sum of the weights be 0 for 'mean' or the sum of the squared weights be + /// 0 for 'sqrtn'. If combiners isn't passed, the default is to use 'sum' for + /// all tables. + @inlinable @inline(__always) + public static func enqueueTPUEmbeddingSparseBatch< + T1: TensorFlowIndex, + T2: TensorFlowIndex, + T3: FloatingPoint & TensorFlowScalar + >( + sampleIndices: [Tensor], + embeddingIndices: [Tensor], + aggregationWeights: [Tensor], + modeOverride: StringTensor, + deviceOrdinal: Int64 = -1, + combiners: [String] + ) { + _RawTFEager.enqueueTPUEmbeddingSparseBatch( + sampleIndices: sampleIndices, embeddingIndices: embeddingIndices, + aggregationWeights: aggregationWeights, modeOverride: modeOverride, + deviceOrdinal: deviceOrdinal, combiners: combiners) + } + + /// Eases the porting of code that uses tf.nn.embedding_lookup_sparse(). + /// + /// sample_indices[i], embedding_indices[i] and aggregation_weights[i] correspond + /// to the ith feature. table_ids[i] indicates which embedding table to look up ith + /// feature. + /// + /// The tensors at corresponding positions in the three input lists (sample_indices, + /// embedding_indices and aggregation_weights) must have the same shape, i.e. rank 1 + /// with dim_size() equal to the total number of lookups into the table described by + /// the corresponding feature. + /// + /// - Parameters: + /// - sample_indices: A list of rank 1 Tensors specifying the training example to + /// which the corresponding embedding_indices and aggregation_weights values + /// belong. It corresponds to sp_ids.indices[:,0] in embedding_lookup_sparse(). + /// - embedding_indices: A list of rank 1 Tensors, indices into the embedding tables. + /// It corresponds to sp_ids.values in embedding_lookup_sparse(). + /// - aggregation_weights: A list of rank 1 Tensors containing per training example + /// aggregation weights. It corresponds to sp_weights.values in + /// embedding_lookup_sparse(). + /// - mode_override: A string input that overrides the mode specified in the + /// TPUEmbeddingConfiguration. Supported values are {'unspecified', 'inference', + /// 'training', 'backward_pass_only'}. When set to 'unspecified', the mode set + /// in TPUEmbeddingConfiguration is used, otherwise mode_override is used. + /// + /// - Attrs: + /// - device_ordinal: The TPU device to use. Should be >= 0 and less than the number + /// of TPU cores in the task on which the node is placed. + /// - combiners: A list of string scalars, one for each embedding table that specify + /// how to normalize the embedding activations after weighted summation. + /// Supported combiners are 'mean', 'sum', or 'sqrtn'. 
It is invalid to have + /// the sum of the weights be 0 for 'mean' or the sum of the squared weights be + /// 0 for 'sqrtn'. If combiners isn't passed, the default is to use 'sum' for + /// all tables. + /// - table_ids: A list of integers specifying the identifier of the embedding table + /// (offset of TableDescriptor in the TPUEmbeddingConfiguration) to lookup the + /// corresponding input. The ith input is looked up using table_ids[i]. The size + /// of the table_ids list must be equal to that of sample_indices, + /// embedding_indices and aggregation_weights. + @inlinable @inline(__always) + public static func enqueueTPUEmbeddingSparseTensorBatch< + T1: TensorFlowIndex, + T2: TensorFlowIndex, + T3: FloatingPoint & TensorFlowScalar + >( + sampleIndices: [Tensor], + embeddingIndices: [Tensor], + aggregationWeights: [Tensor], + modeOverride: StringTensor, + deviceOrdinal: Int64 = -1, + combiners: [String], + tableIds: [Int32], + maxSequenceLengths: [Int32] + ) { + _RawTFEager.enqueueTPUEmbeddingSparseTensorBatch( + sampleIndices: sampleIndices, embeddingIndices: embeddingIndices, + aggregationWeights: aggregationWeights, modeOverride: modeOverride, + deviceOrdinal: deviceOrdinal, combiners: combiners, tableIds: tableIds, + maxSequenceLengths: maxSequenceLengths) + } + + /// Ensures that the tensor's shape matches the expected shape. + /// + /// Raises an error if the input tensor's shape does not match the specified shape. + /// Returns the input tensor otherwise. + /// + /// - Parameter input: A tensor, whose shape is to be validated. + /// + /// - Attr shape: The expected (possibly partially specified) shape of the input tensor. + /// + /// - Output output: A tensor with the same shape and contents as the input tensor or value. + @inlinable @inline(__always) + public static func ensureShape( + _ input: Tensor, + shape: TensorShape? + ) -> Tensor { + switch input.handle.backend { + case .XLA: + let output_device = input.device + let input = Tensor(copying: input, to: .defaultTFEager) + return Tensor(copying: _RawTFEager.ensureShape(input, shape: shape), to: output_device) + case .TF_EAGER: + return _RawTFEager.ensureShape(input, shape: shape) + } + + } + + /// Creates or finds a child frame, and makes `data` available to the child frame. + /// + /// This op is used together with `Exit` to create loops in the graph. + /// The unique `frame_name` is used by the `Executor` to identify frames. If + /// `is_constant` is true, `output` is a constant in the child frame; otherwise + /// it may be changed in the child frame. At most `parallel_iterations` iterations + /// are run in parallel in the child frame. + /// + /// - Parameter data: The tensor to be made available to the child frame. + /// + /// - Attrs: + /// - frame_name: The name of the child frame. + /// - is_constant: If true, the output is constant within the child frame. + /// - parallel_iterations: The number of iterations allowed to run in parallel. + /// + /// - Output output: The same tensor as `data`. 
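+  ///
+  /// A minimal usage sketch (illustrative only; the frame name and values
+  /// below are made up):
+  ///
+  /// ```swift
+  /// let data = Tensor<Float>([1, 2, 3])
+  /// // Make `data` available inside a child frame named "loop".
+  /// let inFrame = _Raw.enter(data: data, frameName: "loop")
+  /// // ... loop-body ops would run here ...
+  /// let result = _Raw.exit(data: inFrame)  // hand the tensor back to the parent frame
+  /// ```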
+ @inlinable @inline(__always) + public static func enter( + data: Tensor, + frameName: String, + isConstant: Bool = false, + parallelIterations: Int64 = 10 + ) -> Tensor { + switch data.handle.backend { + case .XLA: + let output_device = data.device + let data = Tensor(copying: data, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.enter( + data: data, frameName: frameName, isConstant: isConstant, + parallelIterations: parallelIterations), to: output_device) + case .TF_EAGER: + return _RawTFEager.enter( + data: data, frameName: frameName, isConstant: isConstant, + parallelIterations: parallelIterations) + } + + } + + /// Returns the truth value of (x == y) element-wise. + /// + /// *NOTE*: `Equal` supports broadcasting. More about broadcasting + /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + /// + /// ```python + /// x = tf.constant([2, 4]) + /// y = tf.constant(2) + /// tf.math.equal(x, y) ==> array([True, False]) + /// + /// x = tf.constant([2, 4]) + /// y = tf.constant([2, 4]) + /// tf.math.equal(x, y) ==> array([True, True]) + /// ``` + @inlinable @inline(__always) + public static func equal( + _ x: Tensor, + _ y: Tensor, + incompatibleShapeError: Bool = true + ) -> Tensor { + switch commonBackend(x.handle.backend, y.handle.backend) { + case .XLA: + return _RawXLA.equal(x, y, incompatibleShapeError: incompatibleShapeError) + case .TF_EAGER: + return _RawTFEager.equal(x, y, incompatibleShapeError: incompatibleShapeError) + } + + } + + /// Returns the truth value of (x == y) element-wise. + /// + /// *NOTE*: `Equal` supports broadcasting. More about broadcasting + /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + /// + /// ```python + /// x = tf.constant([2, 4]) + /// y = tf.constant(2) + /// tf.math.equal(x, y) ==> array([True, False]) + /// + /// x = tf.constant([2, 4]) + /// y = tf.constant([2, 4]) + /// tf.math.equal(x, y) ==> array([True, True]) + /// ``` + @inlinable @inline(__always) + public static func equal( + _ x: StringTensor, + _ y: StringTensor, + incompatibleShapeError: Bool = true + ) -> Tensor { + _RawTFEager.equal(x, y, incompatibleShapeError: incompatibleShapeError) + } + + /// Computes the Gauss error function of `x` element-wise. + @inlinable @inline(__always) + public static func erf( + _ x: Tensor + ) -> Tensor { + switch x.handle.backend { + case .XLA: + let output_device = x.device + let x = Tensor(copying: x, to: .defaultTFEager) + return Tensor(copying: _RawTFEager.erf(x), to: output_device) + case .TF_EAGER: + return _RawTFEager.erf(x) + } + + } + + /// Computes the complementary error function of `x` element-wise. + @inlinable @inline(__always) + public static func erfc( + _ x: Tensor + ) -> Tensor { + switch x.handle.backend { + case .XLA: + let output_device = x.device + let x = Tensor(copying: x, to: .defaultTFEager) + return Tensor(copying: _RawTFEager.erfc(x), to: output_device) + case .TF_EAGER: + return _RawTFEager.erfc(x) + } + + } + + @inlinable @inline(__always) + public static func erfinv( + _ x: Tensor + ) -> Tensor { + switch x.handle.backend { + case .XLA: + let output_device = x.device + let x = Tensor(copying: x, to: .defaultTFEager) + return Tensor(copying: _RawTFEager.erfinv(x), to: output_device) + case .TF_EAGER: + return _RawTFEager.erfinv(x) + } + + } + + /// Computes the euclidean norm of elements across dimensions of a tensor. + /// + /// Reduces `input` along the dimensions given in `axis`. 
Unless + /// `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in + /// `axis`. If `keep_dims` is true, the reduced dimensions are + /// retained with length 1. + /// + /// - Parameters: + /// - input: The tensor to reduce. + /// - reduction_indices: The dimensions to reduce. Must be in the range + /// `[-rank(input), rank(input))`. + /// + /// - Attr keep_dims: If true, retain reduced dimensions with length 1. + /// + /// - Output output: The reduced tensor. + @inlinable @inline(__always) + public static func euclideanNorm< + T: TensorFlowNumeric, + Tidx: TensorFlowIndex + >( + _ input: Tensor, + reductionIndices: Tensor, + keepDims: Bool = false + ) -> Tensor { + switch commonBackend(input.handle.backend, reductionIndices.handle.backend) { + case .XLA: + let output_device = reductionIndices.device + let input = Tensor(copying: input, to: .defaultTFEager) + let reductionIndices = Tensor(copying: reductionIndices, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.euclideanNorm( + input, reductionIndices: reductionIndices, keepDims: keepDims), to: output_device) + case .TF_EAGER: + return _RawTFEager.euclideanNorm( + input, reductionIndices: reductionIndices, keepDims: keepDims) + } + + } + + /// Exits the current frame to its parent frame. + /// + /// Exit makes its input `data` available to the parent frame. + /// + /// - Parameter data: The tensor to be made available to the parent frame. + /// + /// - Output output: The same tensor as `data`. + @inlinable @inline(__always) + public static func exit( + data: Tensor + ) -> Tensor { + switch data.handle.backend { + case .XLA: + let output_device = data.device + let data = Tensor(copying: data, to: .defaultTFEager) + return Tensor(copying: _RawTFEager.exit(data: data), to: output_device) + case .TF_EAGER: + return _RawTFEager.exit(data: data) + } + + } + + /// Computes exponential of x element-wise. \\(y = e^x\\). + /// + /// This function computes the exponential of every element in the input tensor. + /// i.e. `exp(x)` or `e^(x)`, where `x` is the input tensor. + /// `e` denotes Euler's number and is approximately equal to 2.718281. + /// Output is positive for any real input. + /// + /// ```python + /// x = tf.constant(2.0) + /// tf.math.exp(x) ==> 7.389056 + /// + /// x = tf.constant([2.0, 8.0]) + /// tf.math.exp(x) ==> array([7.389056, 2980.958], dtype=float32) + /// ``` + /// + /// For complex numbers, the exponential value is calculated as follows: + /// + /// ``` + /// e^(x+iy) = e^x * e^iy = e^x * (cos y + i sin y) + /// ``` + /// + /// Let's consider complex number 1+1j as an example. + /// e^1 * (cos 1 + i sin 1) = 2.7182818284590 * (0.54030230586+0.8414709848j) + /// + /// ```python + /// x = tf.constant(1 + 1j) + /// tf.math.exp(x) ==> 1.4686939399158851+2.2873552871788423j + /// ``` + @inlinable @inline(__always) + public static func exp( + _ x: Tensor + ) -> Tensor { + switch x.handle.backend { + case .XLA: + return _RawXLA.exp(x) + case .TF_EAGER: + return _RawTFEager.exp(x) + } + + } + + /// Inserts a dimension of 1 into a tensor's shape. + /// + /// Given a tensor `input`, this operation inserts a dimension of 1 at the + /// dimension index `axis` of `input`'s shape. The dimension index `axis` starts at + /// zero; if you specify a negative number for `axis` it is counted backward from + /// the end. + /// + /// This operation is useful if you want to add a batch dimension to a single + /// element. 
For example, if you have a single image of shape `[height, width, + /// channels]`, you can make it a batch of 1 image with `expand_dims(image, 0)`, + /// which will make the shape `[1, height, width, channels]`. + /// + /// Other examples: + /// + /// ``` + /// # 't' is a tensor of shape [2] + /// shape(expand_dims(t, 0)) ==> [1, 2] + /// shape(expand_dims(t, 1)) ==> [2, 1] + /// shape(expand_dims(t, -1)) ==> [2, 1] + /// + /// # 't2' is a tensor of shape [2, 3, 5] + /// shape(expand_dims(t2, 0)) ==> [1, 2, 3, 5] + /// shape(expand_dims(t2, 2)) ==> [2, 3, 1, 5] + /// shape(expand_dims(t2, 3)) ==> [2, 3, 5, 1] + /// ``` + /// + /// This operation requires that: + /// + /// `-1-input.dims() <= dim <= input.dims()` + /// + /// This operation is related to `squeeze()`, which removes dimensions of + /// size 1. + /// + /// - Parameter dim: 0-D (scalar). Specifies the dimension index at which to + /// expand the shape of `input`. Must be in the range + /// `[-rank(input) - 1, rank(input)]`. + /// + /// - Output output: Contains the same data as `input`, but its shape has an additional + /// dimension of size 1 added. + @inlinable @inline(__always) + public static func expandDims< + T: TensorFlowScalar, + Tdim: TensorFlowIndex + >( + _ input: Tensor, + dim: Tensor + ) -> Tensor { + switch commonBackend(input.handle.backend, dim.handle.backend) { + case .XLA: + return _RawXLA.expandDims(input, dim: dim) + case .TF_EAGER: + return _RawTFEager.expandDims(input, dim: dim) + } + + } + + @inlinable @inline(__always) + public static func experimentalAssertNextDataset( + inputDataset: VariantHandle, + transformations: StringTensor, + outputTypes: [TensorDataType], + outputShapes: [TensorShape?] + ) -> VariantHandle { + _RawTFEager.experimentalAssertNextDataset( + inputDataset: inputDataset, transformations: transformations, outputTypes: outputTypes, + outputShapes: outputShapes) + } + + /// Creates a dataset that shards the input dataset. + /// + /// Creates a dataset that shards the input dataset by num_workers, returning a + /// sharded dataset for the index-th worker. This attempts to automatically shard + /// a dataset by examining the Dataset graph and inserting a shard op before the + /// inputs to a reader Dataset (e.g. CSVDataset, TFRecordDataset). + /// + /// This dataset will throw a NotFound error if we cannot shard the dataset + /// automatically. + /// + /// - Parameters: + /// - input_dataset: A variant tensor representing the input dataset. + /// - num_workers: A scalar representing the number of workers to distribute this dataset across. + /// - index: A scalar representing the index of the current worker out of num_workers. + @inlinable @inline(__always) + public static func experimentalAutoShardDataset( + inputDataset: VariantHandle, + numWorkers: Tensor, + index: Tensor, + autoShardPolicy: Int64 = 0, + outputTypes: [TensorDataType], + outputShapes: [TensorShape?] + ) -> VariantHandle { + _RawTFEager.experimentalAutoShardDataset( + inputDataset: inputDataset, numWorkers: numWorkers, index: index, + autoShardPolicy: autoShardPolicy, outputTypes: outputTypes, outputShapes: outputShapes) + } + + /// Records the bytes size of each element of `input_dataset` in a StatsAggregator. + @inlinable @inline(__always) + public static func experimentalBytesProducedStatsDataset( + inputDataset: VariantHandle, + tag: StringTensor, + outputTypes: [TensorDataType], + outputShapes: [TensorShape?] 
+ ) -> VariantHandle { + _RawTFEager.experimentalBytesProducedStatsDataset( + inputDataset: inputDataset, tag: tag, outputTypes: outputTypes, outputShapes: outputShapes) + } + + @inlinable @inline(__always) + public static func experimentalCSVDataset( + filenames: StringTensor, + compressionType: StringTensor, + bufferSize: Tensor, + header: Tensor, + fieldDelim: StringTensor, + useQuoteDelim: Tensor, + naValue: StringTensor, + selectCols: Tensor, + recordDefaults: OutputTypes, + outputShapes: [TensorShape?] + ) -> VariantHandle { + _RawTFEager.experimentalCSVDataset( + filenames: filenames, compressionType: compressionType, bufferSize: bufferSize, + header: header, fieldDelim: fieldDelim, useQuoteDelim: useQuoteDelim, naValue: naValue, + selectCols: selectCols, recordDefaults: recordDefaults, outputShapes: outputShapes) + } + + @inlinable @inline(__always) + public static func experimentalChooseFastestDataset( + inputDatasets: [VariantHandle], + numExperiments: Int64, + outputTypes: [TensorDataType], + outputShapes: [TensorShape?] + ) -> VariantHandle { + _RawTFEager.experimentalChooseFastestDataset( + inputDatasets: inputDatasets, numExperiments: numExperiments, outputTypes: outputTypes, + outputShapes: outputShapes) + } + + /// Returns the cardinality of `input_dataset`. + /// + /// Returns the cardinality of `input_dataset`. + /// + /// - Parameter input_dataset: A variant tensor representing the dataset to return cardinality for. + /// + /// - Output cardinality: The cardinality of `input_dataset`. Named constants are used to represent + /// infinite and unknown cardinality. + @inlinable @inline(__always) + public static func experimentalDatasetCardinality( + inputDataset: VariantHandle + ) -> Tensor { + _RawTFEager.experimentalDatasetCardinality(inputDataset: inputDataset) + } + + /// Writes the given dataset to the given file using the TFRecord format. + /// + /// - Parameters: + /// - input_dataset: A variant tensor representing the dataset to write. + /// - filename: A scalar string tensor representing the filename to use. + /// - compression_type: A scalar string tensor containing either (i) the empty string (no + /// compression), (ii) "ZLIB", or (iii) "GZIP". + @inlinable @inline(__always) + public static func experimentalDatasetToTFRecord( + inputDataset: VariantHandle, + filename: StringTensor, + compressionType: StringTensor + ) { + _RawTFEager.experimentalDatasetToTFRecord( + inputDataset: inputDataset, filename: filename, compressionType: compressionType) + } + + /// Creates a dataset that batches input elements into a SparseTensor. + /// + /// - Parameters: + /// - input_dataset: A handle to an input dataset. Must have a single component. + /// - batch_size: A scalar representing the number of elements to accumulate in a + /// batch. + /// - row_shape: A vector representing the dense shape of each row in the produced + /// SparseTensor. The shape may be partially specified, using `-1` to indicate + /// that a particular dimension should use the maximum size of all batch elements. + @inlinable @inline(__always) + public static func experimentalDenseToSparseBatchDataset( + inputDataset: VariantHandle, + batchSize: Tensor, + rowShape: Tensor, + outputTypes: [TensorDataType], + outputShapes: [TensorShape?] 
+ ) -> VariantHandle { + _RawTFEager.experimentalDenseToSparseBatchDataset( + inputDataset: inputDataset, batchSize: batchSize, rowShape: rowShape, + outputTypes: outputTypes, outputShapes: outputShapes) + } + + /// A substitute for `InterleaveDataset` on a fixed list of `N` datasets. + /// + /// - Parameters: + /// - selector_input_dataset: A dataset of scalar `DT_INT64` elements that determines which of the + /// `N` data inputs should produce the next output element. + /// - data_input_datasets: `N` datasets with the same type that will be interleaved according to + /// the values of `selector_input_dataset`. + @inlinable @inline(__always) + public static func experimentalDirectedInterleaveDataset( + selectorInputDataset: VariantHandle, + dataInputDatasets: [VariantHandle], + outputTypes: [TensorDataType], + outputShapes: [TensorShape?] + ) -> VariantHandle { + _RawTFEager.experimentalDirectedInterleaveDataset( + selectorInputDataset: selectorInputDataset, dataInputDatasets: dataInputDatasets, + outputTypes: outputTypes, outputShapes: outputShapes) + } + + /// Creates a dataset that computes a group-by on `input_dataset`. + /// + /// Creates a dataset that computes a group-by on `input_dataset`. + /// + /// - Parameters: + /// - input_dataset: A variant tensor representing the input dataset. + /// - key_func_other_arguments: A list of tensors, typically values that were captured when + /// building a closure for `key_func`. + /// - init_func_other_arguments: A list of tensors, typically values that were captured when + /// building a closure for `init_func`. + /// - reduce_func_other_arguments: A list of tensors, typically values that were captured when + /// building a closure for `reduce_func`. + /// - finalize_func_other_arguments: A list of tensors, typically values that were captured when + /// building a closure for `finalize_func`. + /// + /// - Attrs: + /// - key_func: A function mapping an element of `input_dataset`, concatenated + /// with `key_func_other_arguments` to a scalar value of type DT_INT64. + /// - init_func: A function mapping a key of type DT_INT64, concatenated with + /// `init_func_other_arguments` to the initial reducer state. + /// - reduce_func: A function mapping the current reducer state and an element of `input_dataset`, + /// concatenated with `reduce_func_other_arguments` to a new reducer state. + /// - finalize_func: A function mapping the final reducer state to an output element. + @inlinable @inline(__always) + public static func experimentalGroupByReducerDataset< + KeyfuncIn: TensorGroup, + KeyfuncOut: TensorGroup, + InitfuncIn: TensorGroup, + InitfuncOut: TensorGroup, + ReducefuncIn: TensorGroup, + ReducefuncOut: TensorGroup, + FinalizefuncIn: TensorGroup, + FinalizefuncOut: TensorGroup, + TkeyFuncOtherArguments: TensorArrayProtocol, + TinitFuncOtherArguments: TensorArrayProtocol, + TreduceFuncOtherArguments: TensorArrayProtocol, + TfinalizeFuncOtherArguments: TensorArrayProtocol + >( + inputDataset: VariantHandle, + keyFuncOtherArguments: TkeyFuncOtherArguments, + initFuncOtherArguments: TinitFuncOtherArguments, + reduceFuncOtherArguments: TreduceFuncOtherArguments, + finalizeFuncOtherArguments: TfinalizeFuncOtherArguments, + keyFunc: (KeyfuncIn) -> KeyfuncOut, + initFunc: (InitfuncIn) -> InitfuncOut, + reduceFunc: (ReducefuncIn) -> ReducefuncOut, + finalizeFunc: (FinalizefuncIn) -> FinalizefuncOut, + outputTypes: [TensorDataType], + outputShapes: [TensorShape?] 
+ ) -> VariantHandle { + _RawTFEager.experimentalGroupByReducerDataset( + inputDataset: inputDataset, keyFuncOtherArguments: keyFuncOtherArguments, + initFuncOtherArguments: initFuncOtherArguments, + reduceFuncOtherArguments: reduceFuncOtherArguments, + finalizeFuncOtherArguments: finalizeFuncOtherArguments, keyFunc: keyFunc, + initFunc: initFunc, reduceFunc: reduceFunc, finalizeFunc: finalizeFunc, + outputTypes: outputTypes, outputShapes: outputShapes) + } + + /// Creates a dataset that computes a windowed group-by on `input_dataset`. + /// + /// // TODO(mrry): Support non-int64 keys. + /// + /// - Attr key_func: A function mapping an element of `input_dataset`, concatenated + /// with `key_func_other_arguments` to a scalar value of type DT_INT64. + @inlinable @inline(__always) + public static func experimentalGroupByWindowDataset< + KeyfuncIn: TensorGroup, + KeyfuncOut: TensorGroup, + ReducefuncIn: TensorGroup, + ReducefuncOut: TensorGroup, + WindowsizefuncIn: TensorGroup, + WindowsizefuncOut: TensorGroup, + TkeyFuncOtherArguments: TensorArrayProtocol, + TreduceFuncOtherArguments: TensorArrayProtocol, + TwindowSizeFuncOtherArguments: TensorArrayProtocol + >( + inputDataset: VariantHandle, + keyFuncOtherArguments: TkeyFuncOtherArguments, + reduceFuncOtherArguments: TreduceFuncOtherArguments, + windowSizeFuncOtherArguments: TwindowSizeFuncOtherArguments, + keyFunc: (KeyfuncIn) -> KeyfuncOut, + reduceFunc: (ReducefuncIn) -> ReducefuncOut, + windowSizeFunc: (WindowsizefuncIn) -> WindowsizefuncOut, + outputTypes: [TensorDataType], + outputShapes: [TensorShape?] + ) -> VariantHandle { + _RawTFEager.experimentalGroupByWindowDataset( + inputDataset: inputDataset, keyFuncOtherArguments: keyFuncOtherArguments, + reduceFuncOtherArguments: reduceFuncOtherArguments, + windowSizeFuncOtherArguments: windowSizeFuncOtherArguments, keyFunc: keyFunc, + reduceFunc: reduceFunc, windowSizeFunc: windowSizeFunc, outputTypes: outputTypes, + outputShapes: outputShapes) + } + + /// Creates a dataset that contains the elements of `input_dataset` ignoring errors. + @inlinable @inline(__always) + public static func experimentalIgnoreErrorsDataset( + inputDataset: VariantHandle, + outputTypes: [TensorDataType], + outputShapes: [TensorShape?] + ) -> VariantHandle { + _RawTFEager.experimentalIgnoreErrorsDataset( + inputDataset: inputDataset, outputTypes: outputTypes, outputShapes: outputShapes) + } + + /// Returns the name of the device on which `resource` has been placed. + @inlinable @inline(__always) + public static func experimentalIteratorGetDevice( + resource: ResourceHandle + ) -> StringTensor { + _RawTFEager.experimentalIteratorGetDevice(resource: resource) + } + + @inlinable @inline(__always) + public static func experimentalLMDBDataset( + filenames: StringTensor, + outputTypes: [TensorDataType], + outputShapes: [TensorShape?] + ) -> VariantHandle { + _RawTFEager.experimentalLMDBDataset( + filenames: filenames, outputTypes: outputTypes, outputShapes: outputShapes) + } + + /// Records the latency of producing `input_dataset` elements in a StatsAggregator. + @inlinable @inline(__always) + public static func experimentalLatencyStatsDataset( + inputDataset: VariantHandle, + tag: StringTensor, + outputTypes: [TensorDataType], + outputShapes: [TensorShape?] + ) -> VariantHandle { + _RawTFEager.experimentalLatencyStatsDataset( + inputDataset: inputDataset, tag: tag, outputTypes: outputTypes, outputShapes: outputShapes) + } + + /// Creates a dataset that fuses mapping with batching. 
+ /// + /// Creates a dataset that applies `f` to the outputs of `input_dataset` and then + /// batches `batch_size` of them. + /// + /// Unlike a "MapDataset", which applies `f` sequentially, this dataset invokes up + /// to `batch_size * num_parallel_batches` copies of `f` in parallel. + /// + /// - Parameters: + /// - input_dataset: A variant tensor representing the input dataset. + /// - other_arguments: A list of tensors, typically values that were captured when building a closure + /// for `f`. + /// - batch_size: A scalar representing the number of elements to accumulate in a + /// batch. It determines the number of concurrent invocations of `f` that process + /// elements from `input_dataset` in parallel. + /// - num_parallel_calls: A scalar representing the maximum number of parallel invocations of the `map_fn` + /// function. Applying the `map_fn` on consecutive input elements in parallel has + /// the potential to improve input pipeline throughput. + /// - drop_remainder: A scalar representing whether the last batch should be dropped in case its size + /// is smaller than desired. + /// + /// - Attr f: A function to apply to the outputs of `input_dataset`. + @inlinable @inline(__always) + public static func experimentalMapAndBatchDataset< + FIn: TensorGroup, + FOut: TensorGroup, + Targuments: TensorArrayProtocol + >( + inputDataset: VariantHandle, + otherArguments: Targuments, + batchSize: Tensor, + numParallelCalls: Tensor, + dropRemainder: Tensor, + f: (FIn) -> FOut, + outputTypes: [TensorDataType], + outputShapes: [TensorShape?], + preserveCardinality: Bool = false + ) -> VariantHandle { + _RawTFEager.experimentalMapAndBatchDataset( + inputDataset: inputDataset, otherArguments: otherArguments, batchSize: batchSize, + numParallelCalls: numParallelCalls, dropRemainder: dropRemainder, f: f, + outputTypes: outputTypes, outputShapes: outputShapes, + preserveCardinality: preserveCardinality) + } + + /// Creates a dataset that applies `f` to the outputs of `input_dataset`. + @inlinable @inline(__always) + public static func experimentalMapDataset< + FIn: TensorGroup, + FOut: TensorGroup, + Targuments: TensorArrayProtocol + >( + inputDataset: VariantHandle, + otherArguments: Targuments, + f: (FIn) -> FOut, + outputTypes: [TensorDataType], + outputShapes: [TensorShape?], + useInterOpParallelism: Bool = true, + preserveCardinality: Bool = false + ) -> VariantHandle { + _RawTFEager.experimentalMapDataset( + inputDataset: inputDataset, otherArguments: otherArguments, f: f, outputTypes: outputTypes, + outputShapes: outputShapes, useInterOpParallelism: useInterOpParallelism, + preserveCardinality: preserveCardinality) + } + + @inlinable @inline(__always) + public static func experimentalMatchingFilesDataset( + patterns: StringTensor + ) -> VariantHandle { + _RawTFEager.experimentalMatchingFilesDataset(patterns: patterns) + } + + /// Creates a dataset that overrides the maximum intra-op parallelism. + /// + /// - Parameter max_intra_op_parallelism: Identifies the maximum intra-op parallelism to use. + @inlinable @inline(__always) + public static func experimentalMaxIntraOpParallelismDataset( + inputDataset: VariantHandle, + maxIntraOpParallelism: Tensor, + outputTypes: [TensorDataType], + outputShapes: [TensorShape?] 
+ ) -> VariantHandle { + _RawTFEager.experimentalMaxIntraOpParallelismDataset( + inputDataset: inputDataset, maxIntraOpParallelism: maxIntraOpParallelism, + outputTypes: outputTypes, outputShapes: outputShapes) + } + + @inlinable @inline(__always) + public static func experimentalNonSerializableDataset( + inputDataset: VariantHandle, + outputTypes: [TensorDataType], + outputShapes: [TensorShape?] + ) -> VariantHandle { + _RawTFEager.experimentalNonSerializableDataset( + inputDataset: inputDataset, outputTypes: outputTypes, outputShapes: outputShapes) + } + + /// Creates a dataset that applies `f` to the outputs of `input_dataset`. + /// + /// The resulting dataset is similar to the `InterleaveDataset`, with the exception + /// that if retrieving the next value from a dataset would cause the requester to + /// block, it will skip that input dataset. This dataset is especially useful + /// when loading data from a variable-latency datastores (e.g. HDFS, GCS), as it + /// allows the training step to proceed so long as some data is available. + /// + /// !! WARNING !! This dataset is not deterministic! + /// + /// - Attr f: A function mapping elements of `input_dataset`, concatenated with + /// `other_arguments`, to a Dataset variant that contains elements matching + /// `output_types` and `output_shapes`. + @inlinable @inline(__always) + public static func experimentalParallelInterleaveDataset< + FIn: TensorGroup, + FOut: TensorGroup, + Targuments: TensorArrayProtocol + >( + inputDataset: VariantHandle, + otherArguments: Targuments, + cycleLength: Tensor, + blockLength: Tensor, + sloppy: Tensor, + bufferOutputElements: Tensor, + prefetchInputElements: Tensor, + f: (FIn) -> FOut, + outputTypes: [TensorDataType], + outputShapes: [TensorShape?] + ) -> VariantHandle { + _RawTFEager.experimentalParallelInterleaveDataset( + inputDataset: inputDataset, otherArguments: otherArguments, cycleLength: cycleLength, + blockLength: blockLength, sloppy: sloppy, bufferOutputElements: bufferOutputElements, + prefetchInputElements: prefetchInputElements, f: f, outputTypes: outputTypes, + outputShapes: outputShapes) + } + + /// Transforms `input_dataset` containing `Example` protos as vectors of DT_STRING into a dataset of `Tensor` or `SparseTensor` objects representing the parsed features. + /// + /// - Parameter dense_defaults: A dict mapping string keys to `Tensor`s. + /// The keys of the dict must match the dense_keys of the feature. + /// + /// - Attrs: + /// - sparse_keys: A list of string keys in the examples features. + /// The results for these keys will be returned as `SparseTensor` objects. + /// - dense_keys: A list of Ndense string Tensors (scalars). + /// The keys expected in the Examples features associated with dense values. + /// - sparse_types: A list of `DTypes` of the same length as `sparse_keys`. + /// Only `tf.float32` (`FloatList`), `tf.int64` (`Int64List`), + /// and `tf.string` (`BytesList`) are supported. + /// - Tdense: A list of DTypes of the same length as `dense_keys`. + /// Only `tf.float32` (`FloatList`), `tf.int64` (`Int64List`), + /// and `tf.string` (`BytesList`) are supported. + /// + /// - dense_shapes: List of tuples with the same length as `dense_keys`. + /// The shape of the data for each dense feature referenced by `dense_keys`. + /// Required for any input tensors identified by `dense_keys`. Must be + /// either fully defined, or may contain an unknown first dimension. 
+  ///     An unknown first dimension means the feature is treated as having
+  ///     a variable number of blocks, and the output shape along this dimension
+  ///     is considered unknown at graph build time. Padding is applied for
+  ///     minibatch elements smaller than the maximum number of blocks for the
+  ///     given feature along this dimension.
+  ///   - output_types: The type list for the return values.
+  ///   - output_shapes: The list of shapes being produced.
+  @inlinable @inline(__always)
+  public static func experimentalParseExampleDataset(
+    inputDataset: VariantHandle,
+    numParallelCalls: Tensor,
+    denseDefaults: Tdense,
+    sparseKeys: [String],
+    denseKeys: [String],
+    sparseTypes: [TensorDataType],
+    denseShapes: [TensorShape?],
+    outputTypes: [TensorDataType],
+    outputShapes: [TensorShape?],
+    sloppy: Bool = false
+  ) -> VariantHandle {
+    _RawTFEager.experimentalParseExampleDataset(
+      inputDataset: inputDataset, numParallelCalls: numParallelCalls,
+      denseDefaults: denseDefaults, sparseKeys: sparseKeys, denseKeys: denseKeys,
+      sparseTypes: sparseTypes, denseShapes: denseShapes, outputTypes: outputTypes,
+      outputShapes: outputShapes, sloppy: sloppy)
+  }
+
+  /// Creates a dataset that uses a custom thread pool to compute `input_dataset`.
+  ///
+  /// - Parameter num_threads: Identifies the number of threads to use for the private threadpool.
+  @inlinable @inline(__always)
+  public static func experimentalPrivateThreadPoolDataset(
+    inputDataset: VariantHandle,
+    numThreads: Tensor,
+    outputTypes: [TensorDataType],
+    outputShapes: [TensorShape?]
+  ) -> VariantHandle {
+    _RawTFEager.experimentalPrivateThreadPoolDataset(
+      inputDataset: inputDataset, numThreads: numThreads, outputTypes: outputTypes,
+      outputShapes: outputShapes)
+  }
+
+  /// Creates a Dataset that returns pseudorandom numbers.
+  ///
+  /// - Parameters:
+  ///   - seed: A scalar seed for the random number generator. If either seed or
+  ///     seed2 is set to be non-zero, the random number generator is seeded
+  ///     by the given seed. Otherwise, a random seed is used.
+  ///   - seed2: A second scalar seed to avoid seed collision.
+  @inlinable @inline(__always)
+  public static func experimentalRandomDataset(
+    seed: Tensor,
+    seed2: Tensor,
+    outputTypes: [TensorDataType],
+    outputShapes: [TensorShape?]
+  ) -> VariantHandle {
+    _RawTFEager.experimentalRandomDataset(
+      seed: seed, seed2: seed2, outputTypes: outputTypes, outputShapes: outputShapes)
+  }
+
+  /// Creates a dataset that changes the batch size.
+  ///
+  /// Creates a dataset that changes the batch size of the dataset to current batch
+  /// size // num_replicas.
+  ///
+  /// - Parameters:
+  ///   - input_dataset: A variant tensor representing the input dataset.
+  ///   - num_replicas: A scalar representing the number of replicas to distribute this batch across. As
+  ///     a result of this transformation the current batch size would end up being
+  ///     divided by this parameter.
+  @inlinable @inline(__always)
+  public static func experimentalRebatchDataset(
+    inputDataset: VariantHandle,
+    numReplicas: Tensor,
+    outputTypes: [TensorDataType],
+    outputShapes: [TensorShape?],
+    useFallback: Bool = true
+  ) -> VariantHandle {
+    _RawTFEager.experimentalRebatchDataset(
+      inputDataset: inputDataset, numReplicas: numReplicas, outputTypes: outputTypes,
+      outputShapes: outputShapes, useFallback: useFallback)
+  }
+
+  /// Creates a dataset that successively reduces `f` over the elements of `input_dataset`.
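+  ///
+  /// The op threads a running state through `f` and emits each intermediate
+  /// state. A plain-Swift sketch of the semantics (illustrative only, not the
+  /// actual kernel):
+  ///
+  /// ```swift
+  /// var state = 0                      // initial_state
+  /// let outputs = [1, 2, 3].map { element -> Int in
+  ///   state += element                 // f maps (state, element) to a new state
+  ///   return state                     // the scan dataset would yield 1, 3, 6
+  /// }
+  /// ```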
+ @inlinable @inline(__always) + public static func experimentalScanDataset< + FIn: TensorGroup, + FOut: TensorGroup, + Tstate: TensorArrayProtocol, + Targuments: TensorArrayProtocol + >( + inputDataset: VariantHandle, + initialState: Tstate, + otherArguments: Targuments, + f: (FIn) -> FOut, + outputTypes: [TensorDataType], + outputShapes: [TensorShape?], + preserveCardinality: Bool = false + ) -> VariantHandle { + _RawTFEager.experimentalScanDataset( + inputDataset: inputDataset, initialState: initialState, otherArguments: otherArguments, + f: f, outputTypes: outputTypes, outputShapes: outputShapes, + preserveCardinality: preserveCardinality) + } + + @inlinable @inline(__always) + public static func experimentalSetStatsAggregatorDataset( + inputDataset: VariantHandle, + statsAggregator: ResourceHandle, + tag: StringTensor, + counterPrefix: StringTensor, + outputTypes: [TensorDataType], + outputShapes: [TensorShape?] + ) -> VariantHandle { + _RawTFEager.experimentalSetStatsAggregatorDataset( + inputDataset: inputDataset, statsAggregator: statsAggregator, tag: tag, + counterPrefix: counterPrefix, outputTypes: outputTypes, outputShapes: outputShapes) + } + + @inlinable @inline(__always) + public static func experimentalSleepDataset( + inputDataset: VariantHandle, + sleepMicroseconds: Tensor, + outputTypes: [TensorDataType], + outputShapes: [TensorShape?] + ) -> VariantHandle { + _RawTFEager.experimentalSleepDataset( + inputDataset: inputDataset, sleepMicroseconds: sleepMicroseconds, outputTypes: outputTypes, + outputShapes: outputShapes) + } + + /// Creates a dataset that passes a sliding window over `input_dataset`. + /// + /// - Parameters: + /// - window_size: A scalar representing the number of elements in the + /// sliding window. + /// - window_shift: A scalar representing the steps moving the sliding window + /// forward in one iteration. It must be positive. + /// - window_stride: A scalar representing the stride of the input elements of the sliding window. + /// It must be positive. + @inlinable @inline(__always) + public static func experimentalSlidingWindowDataset( + inputDataset: VariantHandle, + windowSize: Tensor, + windowShift: Tensor, + windowStride: Tensor, + outputTypes: [TensorDataType], + outputShapes: [TensorShape?] + ) -> VariantHandle { + _RawTFEager.experimentalSlidingWindowDataset( + inputDataset: inputDataset, windowSize: windowSize, windowShift: windowShift, + windowStride: windowStride, outputTypes: outputTypes, outputShapes: outputShapes) + } + + /// Creates a dataset that executes a SQL query and emits rows of the result set. + /// + /// - Parameters: + /// - driver_name: The database type. Currently, the only supported type is 'sqlite'. + /// - data_source_name: A connection string to connect to the database. + /// - query: A SQL query to execute. + @inlinable @inline(__always) + public static func experimentalSqlDataset( + driverName: StringTensor, + dataSourceName: StringTensor, + query: StringTensor, + outputTypes: [TensorDataType], + outputShapes: [TensorShape?] + ) -> VariantHandle { + _RawTFEager.experimentalSqlDataset( + driverName: driverName, dataSourceName: dataSourceName, query: query, + outputTypes: outputTypes, outputShapes: outputShapes) + } + + /// Creates a statistics manager resource. 
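+  ///
+  /// A hypothetical call (the empty container and the shared name below are
+  /// placeholder values):
+  ///
+  /// ```swift
+  /// let aggregator = _Raw.experimentalStatsAggregatorHandle(
+  ///   container: "", sharedName: "train_stats")
+  /// // Transformations such as `experimentalSetStatsAggregatorDataset` can then
+  /// // record statistics into this resource.
+  /// ```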
+  @inlinable @inline(__always)
+  public static func experimentalStatsAggregatorHandle(
+    container: String,
+    sharedName: String
+  ) -> ResourceHandle {
+    _RawTFEager.experimentalStatsAggregatorHandle(container: container, sharedName: sharedName)
+  }
+
+  /// Produces a summary of any statistics recorded by the given statistics manager.
+  @inlinable @inline(__always)
+  public static func experimentalStatsAggregatorSummary(
+    iterator: ResourceHandle
+  ) -> StringTensor {
+    _RawTFEager.experimentalStatsAggregatorSummary(iterator: iterator)
+  }
+
+  /// Creates a dataset that stops iteration when `predicate` is false.
+  ///
+  /// The `predicate` function must return a scalar boolean and accept the
+  /// following arguments:
+  ///
+  /// * One tensor for each component of an element of `input_dataset`.
+  /// * One tensor for each value in `other_arguments`.
+  ///
+  /// - Parameter other_arguments: A list of tensors, typically values that were captured when
+  ///   building a closure for `predicate`.
+  ///
+  /// - Attr predicate: A function returning a scalar boolean.
+  @inlinable @inline(__always)
+  public static func experimentalTakeWhileDataset<
+    PredicateIn: TensorGroup,
+    PredicateOut: TensorGroup,
+    Targuments: TensorArrayProtocol
+  >(
+    inputDataset: VariantHandle,
+    otherArguments: Targuments,
+    predicate: (PredicateIn) -> PredicateOut,
+    outputTypes: [TensorDataType],
+    outputShapes: [TensorShape?]
+  ) -> VariantHandle {
+    _RawTFEager.experimentalTakeWhileDataset(
+      inputDataset: inputDataset, otherArguments: otherArguments, predicate: predicate,
+      outputTypes: outputTypes, outputShapes: outputShapes)
+  }
+
+  /// Creates a dataset that uses a custom thread pool to compute `input_dataset`.
+  ///
+  /// - Parameter thread_pool: A resource produced by the ThreadPoolHandle op.
+  @inlinable @inline(__always)
+  public static func experimentalThreadPoolDataset(
+    inputDataset: VariantHandle,
+    threadPool: ResourceHandle,
+    outputTypes: [TensorDataType],
+    outputShapes: [TensorShape?]
+  ) -> VariantHandle {
+    _RawTFEager.experimentalThreadPoolDataset(
+      inputDataset: inputDataset, threadPool: threadPool, outputTypes: outputTypes,
+      outputShapes: outputShapes)
+  }
+
+  /// Creates a dataset that uses a custom thread pool to compute `input_dataset`.
+  ///
+  /// - Attrs:
+  ///   - num_threads: The number of threads in the thread pool.
+  ///   - max_intra_op_parallelism: The maximum degree of parallelism to use within operations that execute on this
+  ///     threadpool.
+  ///   - display_name: A human-readable name for the threads that may be visible in some
+  ///     visualizations.
+  ///
+  /// - Output handle: A resource that can be consumed by one or more ExperimentalThreadPoolDataset
+  ///   ops.
+  @inlinable @inline(__always)
+  public static func experimentalThreadPoolHandle(
+    numThreads: Int64,
+    maxIntraOpParallelism: Int64 = 1,
+    displayName: String,
+    container: String,
+    sharedName: String
+  ) -> ResourceHandle {
+    _RawTFEager.experimentalThreadPoolHandle(
+      numThreads: numThreads, maxIntraOpParallelism: maxIntraOpParallelism,
+      displayName: displayName, container: container, sharedName: sharedName)
+  }
+
+  /// A dataset that splits the elements of its input into multiple elements.
+  @inlinable @inline(__always)
+  public static func experimentalUnbatchDataset(
+    inputDataset: VariantHandle,
+    outputTypes: [TensorDataType],
+    outputShapes: [TensorShape?]
+  ) -> VariantHandle {
+    _RawTFEager.experimentalUnbatchDataset(
+      inputDataset: inputDataset, outputTypes: outputTypes, outputShapes: outputShapes)
+  }
+
+  /// Creates a dataset that contains the unique elements of `input_dataset`.
+  @inlinable @inline(__always)
+  public static func experimentalUniqueDataset(
+    inputDataset: VariantHandle,
+    outputTypes: [TensorDataType],
+    outputShapes: [TensorShape?]
+  ) -> VariantHandle {
+    _RawTFEager.experimentalUniqueDataset(
+      inputDataset: inputDataset, outputTypes: outputTypes, outputShapes: outputShapes)
+  }
+
+  /// Computes `exp(x) - 1` element-wise.
+  ///
+  /// i.e. `exp(x) - 1` or `e^(x) - 1`, where `x` is the input tensor.
+  /// `e` denotes Euler's number and is approximately equal to 2.718281.
+  ///
+  /// ```python
+  /// x = tf.constant(2.0)
+  /// tf.math.expm1(x) ==> 6.389056
+  ///
+  /// x = tf.constant([2.0, 8.0])
+  /// tf.math.expm1(x) ==> array([6.389056, 2979.958], dtype=float32)
+  ///
+  /// x = tf.constant(1 + 1j)
+  /// tf.math.expm1(x) ==> (0.46869393991588515+2.2873552871788423j)
+  /// ```
+  @inlinable @inline(__always)
+  public static func expm1(
+    _ x: Tensor
+  ) -> Tensor {
+    switch x.handle.backend {
+    case .XLA:
+      return _RawXLA.expm1(x)
+    case .TF_EAGER:
+      return _RawTFEager.expm1(x)
+    }
+
+  }
+
+  /// Extracts a glimpse from the input tensor.
+  ///
+  /// Returns a set of windows called glimpses extracted at location
+  /// `offsets` from the input tensor. If the windows only partially
+  /// overlap the inputs, the non-overlapping areas will be filled with
+  /// random noise.
+  ///
+  /// The result is a 4-D tensor of shape `[batch_size, glimpse_height,
+  /// glimpse_width, channels]`. The channels and batch dimensions are the
+  /// same as those of the input tensor. The height and width of the output
+  /// windows are specified in the `size` parameter.
+  ///
+  /// The arguments `normalized` and `centered` control how the windows are built:
+  ///
+  /// * If the coordinates are normalized but not centered, 0.0 and 1.0
+  ///   correspond to the minimum and maximum of each height and width
+  ///   dimension.
+  /// * If the coordinates are both normalized and centered, they range from
+  ///   -1.0 to 1.0. The coordinates (-1.0, -1.0) correspond to the upper
+  ///   left corner, the lower right corner is located at (1.0, 1.0) and the
+  ///   center is at (0, 0).
+  /// * If the coordinates are not normalized they are interpreted as
+  ///   numbers of pixels.
+  ///
+  /// - Parameters:
+  ///   - input: A 4-D float tensor of shape `[batch_size, height, width, channels]`.
+  ///   - size: A 1-D tensor of 2 elements containing the size of the glimpses
+  ///     to extract. The glimpse height must be specified first, followed
+  ///     by the glimpse width.
+  ///   - offsets: A 2-D integer tensor of shape `[batch_size, 2]` containing
+  ///     the y, x locations of the center of each window.
+  ///
+  /// - Attrs:
+  ///   - centered: indicates if the offset coordinates are centered relative to
+  ///     the image, in which case the (0, 0) offset is relative to the center
+  ///     of the input images. If false, the (0,0) offset corresponds to the
+  ///     upper left corner of the input images.
+  ///   - normalized: indicates if the offset coordinates are normalized.
+  ///   - uniform_noise: indicates if the noise should be generated using a
+  ///     uniform distribution or a Gaussian distribution.
+  ///   - noise: indicates if the noise should be `uniform`, `gaussian`, or
+  ///     `zero`. The default is `uniform`, which means the noise type
+  ///     will be decided by `uniform_noise`.
+ /// + /// - Output glimpse: A tensor representing the glimpses `[batch_size, + /// glimpse_height, glimpse_width, channels]`. + @inlinable @inline(__always) + public static func extractGlimpse( + _ input: Tensor, + size: Tensor, + offsets: Tensor, + centered: Bool = true, + normalized: Bool = true, + uniformNoise: Bool = true, + noise: String = "uniform" + ) -> Tensor { + switch commonBackend( + commonBackend(input.handle.backend, size.handle.backend), offsets.handle.backend) + { + case .XLA: + let output_device = offsets.device + let input = Tensor(copying: input, to: .defaultTFEager) + let size = Tensor(copying: size, to: .defaultTFEager) + let offsets = Tensor(copying: offsets, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.extractGlimpse( + input, size: size, offsets: offsets, centered: centered, normalized: normalized, + uniformNoise: uniformNoise, noise: noise), to: output_device) + case .TF_EAGER: + return _RawTFEager.extractGlimpse( + input, size: size, offsets: offsets, centered: centered, normalized: normalized, + uniformNoise: uniformNoise, noise: noise) + } + + } + + /// Extract `patches` from `images` and put them in the "depth" output dimension. + /// + /// - Parameter images: 4-D Tensor with shape `[batch, in_rows, in_cols, depth]`. + /// + /// - Attrs: + /// - ksizes: The size of the sliding window for each dimension of `images`. + /// - strides: How far the centers of two consecutive patches are in + /// the images. Must be: `[1, stride_rows, stride_cols, 1]`. + /// - rates: Must be: `[1, rate_rows, rate_cols, 1]`. This is the + /// input stride, specifying how far two consecutive patch samples are in the + /// input. Equivalent to extracting patches with + /// `patch_sizes_eff = patch_sizes + (patch_sizes - 1) * (rates - 1)`, followed by + /// subsampling them spatially by a factor of `rates`. This is equivalent to + /// `rate` in dilated (a.k.a. Atrous) convolutions. + /// - padding: The type of padding algorithm to use. + /// + /// - Output patches: 4-D Tensor with shape `[batch, out_rows, out_cols, ksize_rows * + /// ksize_cols * depth]` containing image patches with size + /// `ksize_rows x ksize_cols x depth` vectorized in the "depth" dimension. Note + /// `out_rows` and `out_cols` are the dimensions of the output patches. + @inlinable @inline(__always) + public static func extractImagePatches( + images: Tensor, + ksizes: [Int32], + strides: [Int32], + rates: [Int32], + padding: Padding + ) -> Tensor { + switch images.handle.backend { + case .XLA: + let output_device = images.device + let images = Tensor(copying: images, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.extractImagePatches( + images: images, ksizes: ksizes, strides: strides, rates: rates, padding: padding), + to: output_device) + case .TF_EAGER: + return _RawTFEager.extractImagePatches( + images: images, ksizes: ksizes, strides: strides, rates: rates, padding: padding) + } + + } + + /// Extract the shape information of a JPEG-encoded image. + /// + /// This op only parses the image header, so it is much faster than DecodeJpeg. + /// + /// - Parameter contents: 0-D. The JPEG-encoded image. + /// + /// - Attr output_type: (Optional) The output type of the operation (int32 or int64). + /// Defaults to int32. + /// + /// - Output image_shape: 1-D. The image shape with format [height, width, channels]. 
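+  ///
+  /// For example (a sketch; `jpegBytes` is assumed to already hold the encoded
+  /// file contents):
+  ///
+  /// ```swift
+  /// // jpegBytes: a 0-D `StringTensor` with the raw JPEG data.
+  /// let shape: Tensor<Int32> = _Raw.extractJpegShape(contents: jpegBytes)
+  /// // `shape` is 1-D: [height, width, channels].
+  /// ```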
+ @inlinable @inline(__always) + public static func extractJpegShape( + contents: StringTensor + ) -> Tensor { + _RawTFEager.extractJpegShape(contents: contents) + } + + /// Extract `patches` from `input` and put them in the "depth" output dimension. 3D extension of `extract_image_patches`. + /// + /// - Parameter input: 5-D Tensor with shape `[batch, in_planes, in_rows, in_cols, depth]`. + /// + /// - Attrs: + /// - ksizes: The size of the sliding window for each dimension of `input`. + /// - strides: 1-D of length 5. How far the centers of two consecutive patches are in + /// `input`. Must be: `[1, stride_planes, stride_rows, stride_cols, 1]`. + /// - padding: The type of padding algorithm to use. + /// + /// We specify the size-related attributes as: + /// + /// ```python + /// ksizes = [1, ksize_planes, ksize_rows, ksize_cols, 1] + /// strides = [1, stride_planes, strides_rows, strides_cols, 1] + /// ``` + /// + /// - Output patches: 5-D Tensor with shape `[batch, out_planes, out_rows, out_cols, + /// ksize_planes * ksize_rows * ksize_cols * depth]` containing patches + /// with size `ksize_planes x ksize_rows x ksize_cols x depth` vectorized + /// in the "depth" dimension. Note `out_planes`, `out_rows` and `out_cols` + /// are the dimensions of the output patches. + @inlinable @inline(__always) + public static func extractVolumePatches( + _ input: Tensor, + ksizes: [Int32], + strides: [Int32], + padding: Padding + ) -> Tensor { + switch input.handle.backend { + case .XLA: + let output_device = input.device + let input = Tensor(copying: input, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.extractVolumePatches( + input, ksizes: ksizes, strides: strides, padding: padding), to: output_device) + case .TF_EAGER: + return _RawTFEager.extractVolumePatches( + input, ksizes: ksizes, strides: strides, padding: padding) + } + + } + + /// Fast Fourier transform. + /// + /// Computes the 1-dimensional discrete Fourier transform over the inner-most + /// dimension of `input`. + /// + /// - Parameter input: A complex tensor. + /// + /// - Output output: A complex tensor of the same shape as `input`. The inner-most + /// dimension of `input` is replaced with its 1D Fourier transform. + /// + /// @compatibility(numpy) + /// Equivalent to np.fft.fft + /// @end_compatibility + @inlinable @inline(__always) + public static func fFT( + _ input: Tensor + ) -> Tensor { + switch input.handle.backend { + case .XLA: + let output_device = input.device + let input = Tensor(copying: input, to: .defaultTFEager) + return Tensor(copying: _RawTFEager.fFT(input), to: output_device) + case .TF_EAGER: + return _RawTFEager.fFT(input) + } + + } + + /// 2D fast Fourier transform. + /// + /// Computes the 2-dimensional discrete Fourier transform over the inner-most + /// 2 dimensions of `input`. + /// + /// - Parameter input: A complex tensor. + /// + /// - Output output: A complex tensor of the same shape as `input`. The inner-most 2 + /// dimensions of `input` are replaced with their 2D Fourier transform. + /// + /// @compatibility(numpy) + /// Equivalent to np.fft.fft2 + /// @end_compatibility + @inlinable @inline(__always) + public static func fFT2D( + _ input: Tensor + ) -> Tensor { + switch input.handle.backend { + case .XLA: + let output_device = input.device + let input = Tensor(copying: input, to: .defaultTFEager) + return Tensor(copying: _RawTFEager.fFT2D(input), to: output_device) + case .TF_EAGER: + return _RawTFEager.fFT2D(input) + } + + } + + /// 3D fast Fourier transform. 
+ /// + /// Computes the 3-dimensional discrete Fourier transform over the inner-most 3 + /// dimensions of `input`. + /// + /// - Parameter input: A complex tensor. + /// + /// - Output output: A complex tensor of the same shape as `input`. The inner-most 3 + /// dimensions of `input` are replaced with their 3D Fourier transform. + /// + /// @compatibility(numpy) + /// Equivalent to np.fft.fftn with 3 dimensions. + /// @end_compatibility + @inlinable @inline(__always) + public static func fFT3D( + _ input: Tensor + ) -> Tensor { + switch input.handle.backend { + case .XLA: + let output_device = input.device + let input = Tensor(copying: input, to: .defaultTFEager) + return Tensor(copying: _RawTFEager.fFT3D(input), to: output_device) + case .TF_EAGER: + return _RawTFEager.fFT3D(input) + } + + } + + /// A queue that produces elements in first-in first-out order. + /// + /// - Attrs: + /// - component_types: The type of each component in a value. + /// - shapes: The shape of each component in a value. The length of this attr must + /// be either 0 or the same as the length of component_types. If the length of + /// this attr is 0, the shapes of queue elements are not constrained, and + /// only one element may be dequeued at a time. + /// - capacity: The upper bound on the number of elements in this queue. + /// Negative numbers mean no limit. + /// - container: If non-empty, this queue is placed in the given container. + /// Otherwise, a default container is used. + /// - shared_name: If non-empty, this queue will be shared under the given name + /// across multiple sessions. + /// + /// - Output handle: The handle to the queue. + @inlinable @inline(__always) + public static func fIFOQueueV2( + componentTypes: [TensorDataType], + shapes: [TensorShape?], + capacity: Int64 = -1, + container: String, + sharedName: String + ) -> ResourceHandle { + _RawTFEager.fIFOQueueV2( + componentTypes: componentTypes, shapes: shapes, capacity: capacity, container: container, + sharedName: sharedName) + } + + /// Output a fact about factorials. + @inlinable @inline(__always) + public static func fact() -> StringTensor { + _RawTFEager.fact() + } + + /// This op is used as a placeholder in If branch functions. It doesn't provide a + /// valid output when run, so must either be removed (e.g. replaced with a + /// function input) or guaranteed not to be used (e.g. if mirroring an + /// intermediate output needed for the gradient computation of the other branch). + /// + /// - Attrs: + /// - dtype: The type of the output. + /// - shape: The purported shape of the output. This is only used for shape inference; + /// the output will not necessarily have this shape. Can be a partial shape. + /// + /// - Output output: \"Fake\" output value. This should not be consumed by another op. + @inlinable @inline(__always) + public static func fakeParam( + shape: TensorShape? + ) -> Tensor { + _RawTFEager.fakeParam(shape: shape) + } + + /// Fake-quantize the 'inputs' tensor, type float to 'outputs' tensor of same type. + /// + /// Attributes `[min; max]` define the clamping range for the `inputs` data. + /// `inputs` values are quantized into the quantization range (`[0; 2^num_bits - 1]` + /// when `narrow_range` is false and `[1; 2^num_bits - 1]` when it is true) and + /// then de-quantized and output as floats in `[min; max]` interval. + /// `num_bits` is the bitwidth of the quantization; between 2 and 16, inclusive. + /// + /// Before quantization, `min` and `max` values are adjusted with the following + /// logic. 
+ /// It is suggested to have `min <= 0 <= max`. If `0` is not in the range of values, + /// the behavior can be unexpected: + /// If `0 < min < max`: `min_adj = 0` and `max_adj = max - min`. + /// If `min < max < 0`: `min_adj = min - max` and `max_adj = 0`. + /// If `min <= 0 <= max`: `scale = (max - min) / (2^num_bits - 1) `, + /// `min_adj = scale * round(min / scale)` and `max_adj = max + min_adj - min`. + /// + /// Quantization is called fake since the output is still in floating point. + @inlinable @inline(__always) + public static func fakeQuantWithMinMaxArgs( + inputs: Tensor, + min: Double = -6, + max: Double = 6, + numBits: Int64 = 8, + narrowRange: Bool = false + ) -> Tensor { + switch inputs.handle.backend { + case .XLA: + let output_device = inputs.device + let inputs = Tensor(copying: inputs, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.fakeQuantWithMinMaxArgs( + inputs: inputs, min: min, max: max, numBits: numBits, narrowRange: narrowRange), + to: output_device) + case .TF_EAGER: + return _RawTFEager.fakeQuantWithMinMaxArgs( + inputs: inputs, min: min, max: max, numBits: numBits, narrowRange: narrowRange) + } + + } + + /// Compute gradients for a FakeQuantWithMinMaxArgs operation. + /// + /// - Parameters: + /// - gradients: Backpropagated gradients above the FakeQuantWithMinMaxArgs operation. + /// - inputs: Values passed as inputs to the FakeQuantWithMinMaxArgs operation. + /// + /// - Output backprops: Backpropagated gradients below the FakeQuantWithMinMaxArgs operation: + /// `gradients * (inputs >= min && inputs <= max)`. + @inlinable @inline(__always) + public static func fakeQuantWithMinMaxArgsGradient( + gradients: Tensor, + inputs: Tensor, + min: Double = -6, + max: Double = 6, + numBits: Int64 = 8, + narrowRange: Bool = false + ) -> Tensor { + switch commonBackend(gradients.handle.backend, inputs.handle.backend) { + case .XLA: + let output_device = inputs.device + let gradients = Tensor(copying: gradients, to: .defaultTFEager) + let inputs = Tensor(copying: inputs, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.fakeQuantWithMinMaxArgsGradient( + gradients: gradients, inputs: inputs, min: min, max: max, numBits: numBits, + narrowRange: narrowRange), to: output_device) + case .TF_EAGER: + return _RawTFEager.fakeQuantWithMinMaxArgsGradient( + gradients: gradients, inputs: inputs, min: min, max: max, numBits: numBits, + narrowRange: narrowRange) + } + + } + + /// Fake-quantize the 'inputs' tensor of type float via global float scalars `min` + /// + /// and `max` to 'outputs' tensor of same shape as `inputs`. + /// + /// `[min; max]` define the clamping range for the `inputs` data. + /// `inputs` values are quantized into the quantization range (`[0; 2^num_bits - 1]` + /// when `narrow_range` is false and `[1; 2^num_bits - 1]` when it is true) and + /// then de-quantized and output as floats in `[min; max]` interval. + /// `num_bits` is the bitwidth of the quantization; between 2 and 16, inclusive. + /// + /// Before quantization, `min` and `max` values are adjusted with the following + /// logic. + /// It is suggested to have `min <= 0 <= max`. If `0` is not in the range of values, + /// the behavior can be unexpected: + /// If `0 < min < max`: `min_adj = 0` and `max_adj = max - min`. + /// If `min < max < 0`: `min_adj = min - max` and `max_adj = 0`. + /// If `min <= 0 <= max`: `scale = (max - min) / (2^num_bits - 1) `, + /// `min_adj = scale * round(min / scale)` and `max_adj = max + min_adj - min`. 
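+ ///
+ /// As a worked example (editor's illustration): with `min = 0.1` and `max = 0.9`
+ /// the range satisfies `0 < min < max`, so values are clamped against the
+ /// adjusted interval `min_adj = 0` and `max_adj = max - min = 0.8`.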
+ /// + /// This operation has a gradient and thus allows for training `min` and `max` + /// values. + @inlinable @inline(__always) + public static func fakeQuantWithMinMaxVars( + inputs: Tensor, + min: Tensor, + max: Tensor, + numBits: Int64 = 8, + narrowRange: Bool = false + ) -> Tensor { + switch commonBackend( + commonBackend(inputs.handle.backend, min.handle.backend), max.handle.backend) + { + case .XLA: + let output_device = max.device + let inputs = Tensor(copying: inputs, to: .defaultTFEager) + let min = Tensor(copying: min, to: .defaultTFEager) + let max = Tensor(copying: max, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.fakeQuantWithMinMaxVars( + inputs: inputs, min: min, max: max, numBits: numBits, narrowRange: narrowRange), + to: output_device) + case .TF_EAGER: + return _RawTFEager.fakeQuantWithMinMaxVars( + inputs: inputs, min: min, max: max, numBits: numBits, narrowRange: narrowRange) + } + + } + + /// Compute gradients for a FakeQuantWithMinMaxVars operation. + /// + /// - Parameters: + /// - gradients: Backpropagated gradients above the FakeQuantWithMinMaxVars operation. + /// - inputs: Values passed as inputs to the FakeQuantWithMinMaxVars operation. + /// min, max: Quantization interval, scalar floats. + /// + /// - Attrs: + /// - num_bits: The bitwidth of the quantization; between 2 and 8, inclusive. + /// - narrow_range: Whether to quantize into 2^num_bits - 1 distinct values. + /// + /// - Outputs: + /// - backprops_wrt_input: Backpropagated gradients w.r.t. inputs: + /// `gradients * (inputs >= min && inputs <= max)`. + /// - backprop_wrt_min: Backpropagated gradients w.r.t. min parameter: + /// `sum(gradients * (inputs < min))`. + /// - backprop_wrt_max: Backpropagated gradients w.r.t. max parameter: + /// `sum(gradients * (inputs > max))`. + @inlinable @inline(__always) + public static func fakeQuantWithMinMaxVarsGradient( + gradients: Tensor, + inputs: Tensor, + min: Tensor, + max: Tensor, + numBits: Int64 = 8, + narrowRange: Bool = false + ) -> ( + backpropsWrtInput: Tensor, backpropWrtMin: Tensor, backpropWrtMax: Tensor + ) { + _RawTFEager.fakeQuantWithMinMaxVarsGradient( + gradients: gradients, inputs: inputs, min: min, max: max, numBits: numBits, + narrowRange: narrowRange) + } + + /// Fake-quantize the 'inputs' tensor of type float and one of the shapes: `[d]`, + /// + /// `[b, d]` `[b, h, w, d]` via per-channel floats `min` and `max` of shape `[d]` + /// to 'outputs' tensor of same shape as `inputs`. + /// + /// `[min; max]` define the clamping range for the `inputs` data. + /// `inputs` values are quantized into the quantization range (`[0; 2^num_bits - 1]` + /// when `narrow_range` is false and `[1; 2^num_bits - 1]` when it is true) and + /// then de-quantized and output as floats in `[min; max]` interval. + /// `num_bits` is the bitwidth of the quantization; between 2 and 16, inclusive. + /// + /// Before quantization, `min` and `max` values are adjusted with the following + /// logic. + /// It is suggested to have `min <= 0 <= max`. If `0` is not in the range of values, + /// the behavior can be unexpected: + /// If `0 < min < max`: `min_adj = 0` and `max_adj = max - min`. + /// If `min < max < 0`: `min_adj = min - max` and `max_adj = 0`. + /// If `min <= 0 <= max`: `scale = (max - min) / (2^num_bits - 1) `, + /// `min_adj = scale * round(min / scale)` and `max_adj = max + min_adj - min`. + /// + /// This operation has a gradient and thus allows for training `min` and `max` + /// values. 
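+ ///
+ /// A minimal usage sketch (editor's illustration; the shapes and clamping
+ /// values below are made up for the example):
+ ///
+ /// ```swift
+ /// let inputs = Tensor<Float>(randomNormal: [4, 3])  // shape [b, d] with d = 3
+ /// let minVals = Tensor<Float>([-6, -3, -1])         // per-channel minima, shape [d]
+ /// let maxVals = Tensor<Float>([6, 3, 1])            // per-channel maxima, shape [d]
+ /// let quantized = _Raw.fakeQuantWithMinMaxVarsPerChannel(
+ ///   inputs: inputs, min: minVals, max: maxVals, numBits: 8, narrowRange: false)
+ /// ```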
+ @inlinable @inline(__always) + public static func fakeQuantWithMinMaxVarsPerChannel( + inputs: Tensor, + min: Tensor, + max: Tensor, + numBits: Int64 = 8, + narrowRange: Bool = false + ) -> Tensor { + switch commonBackend( + commonBackend(inputs.handle.backend, min.handle.backend), max.handle.backend) + { + case .XLA: + let output_device = max.device + let inputs = Tensor(copying: inputs, to: .defaultTFEager) + let min = Tensor(copying: min, to: .defaultTFEager) + let max = Tensor(copying: max, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.fakeQuantWithMinMaxVarsPerChannel( + inputs: inputs, min: min, max: max, numBits: numBits, narrowRange: narrowRange), + to: output_device) + case .TF_EAGER: + return _RawTFEager.fakeQuantWithMinMaxVarsPerChannel( + inputs: inputs, min: min, max: max, numBits: numBits, narrowRange: narrowRange) + } + + } + + /// Compute gradients for a FakeQuantWithMinMaxVarsPerChannel operation. + /// + /// - Parameters: + /// - gradients: Backpropagated gradients above the FakeQuantWithMinMaxVars operation, + /// shape one of: `[d]`, `[b, d]`, `[b, h, w, d]`. + /// - inputs: Values passed as inputs to the FakeQuantWithMinMaxVars operation, shape + /// same as `gradients`. + /// min, max: Quantization interval, floats of shape `[d]`. + /// + /// - Attrs: + /// - num_bits: The bitwidth of the quantization; between 2 and 16, inclusive. + /// - narrow_range: Whether to quantize into 2^num_bits - 1 distinct values. + /// + /// - Outputs: + /// - backprops_wrt_input: Backpropagated gradients w.r.t. inputs, shape same as + /// `inputs`: + /// `gradients * (inputs >= min && inputs <= max)`. + /// - backprop_wrt_min: Backpropagated gradients w.r.t. min parameter, shape `[d]`: + /// `sum_per_d(gradients * (inputs < min))`. + /// - backprop_wrt_max: Backpropagated gradients w.r.t. max parameter, shape `[d]`: + /// `sum_per_d(gradients * (inputs > max))`. + @inlinable @inline(__always) + public static func fakeQuantWithMinMaxVarsPerChannelGradient( + gradients: Tensor, + inputs: Tensor, + min: Tensor, + max: Tensor, + numBits: Int64 = 8, + narrowRange: Bool = false + ) -> ( + backpropsWrtInput: Tensor, backpropWrtMin: Tensor, backpropWrtMax: Tensor + ) { + _RawTFEager.fakeQuantWithMinMaxVarsPerChannelGradient( + gradients: gradients, inputs: inputs, min: min, max: max, numBits: numBits, + narrowRange: narrowRange) + } + + /// Creates a tensor filled with a scalar value. + /// + /// This operation creates a tensor of shape `dims` and fills it with `value`. + /// + /// For example: + /// + /// ``` + /// # Output tensor has shape [2, 3]. + /// fill([2, 3], 9) ==> [[9, 9, 9] + /// [9, 9, 9]] + /// ``` + /// + /// `tf.fill` differs from `tf.constant` in a few ways: + /// + /// * `tf.fill` only supports scalar contents, whereas `tf.constant` supports + /// Tensor values. + /// * `tf.fill` creates an Op in the computation graph that constructs the actual + /// Tensor value at runtime. This is in contrast to `tf.constant` which embeds + /// the entire Tensor into the graph with a `Const` node. + /// * Because `tf.fill` evaluates at graph runtime, it supports dynamic shapes + /// based on other runtime Tensors, unlike `tf.constant`. + /// + /// - Parameters: + /// - dims: 1-D. Represents the shape of the output tensor. + /// - value: 0-D (scalar). Value to fill the returned tensor. 
+ ///
+ /// @compatibility(numpy)
+ /// Equivalent to np.full
+ /// @end_compatibility
+ @inlinable @inline(__always)
+ public static func fill<
+ T: TensorFlowScalar,
+ IndexType: TensorFlowIndex
+ >(
+ dims: Tensor,
+ value: Tensor
+ ) -> Tensor {
+ switch commonBackend(dims.handle.backend, value.handle.backend) {
+ case .XLA:
+ return _RawXLA.fill(dims: dims, value: value)
+ case .TF_EAGER:
+ return _RawTFEager.fill(dims: dims, value: value)
+ }
+
+ }
+
+ /// Creates a dataset containing elements of the first component of `input_dataset` having true in the last component.
+ @inlinable @inline(__always)
+ public static func filterByLastComponentDataset(
+ inputDataset: VariantHandle,
+ outputTypes: [TensorDataType],
+ outputShapes: [TensorShape?]
+ ) -> VariantHandle {
+ _RawTFEager.filterByLastComponentDataset(
+ inputDataset: inputDataset, outputTypes: outputTypes, outputShapes: outputShapes)
+ }
+
+ /// Creates a dataset containing elements of `input_dataset` matching `predicate`.
+ ///
+ /// The `predicate` function must return a scalar boolean and accept the
+ /// following arguments:
+ ///
+ /// * One tensor for each component of an element of `input_dataset`.
+ /// * One tensor for each value in `other_arguments`.
+ ///
+ /// - Parameter other_arguments: A list of tensors, typically values that were captured when
+ /// building a closure for `predicate`.
+ ///
+ /// - Attr predicate: A function returning a scalar boolean.
+ @inlinable @inline(__always)
+ public static func filterDataset<
+ PredicateIn: TensorGroup,
+ PredicateOut: TensorGroup,
+ Targuments: TensorArrayProtocol
+ >(
+ inputDataset: VariantHandle,
+ otherArguments: Targuments,
+ predicate: (PredicateIn) -> PredicateOut,
+ outputTypes: [TensorDataType],
+ outputShapes: [TensorShape?]
+ ) -> VariantHandle {
+ _RawTFEager.filterDataset(
+ inputDataset: inputDataset, otherArguments: otherArguments, predicate: predicate,
+ outputTypes: outputTypes, outputShapes: outputShapes)
+ }
+
+ /// Generates fingerprint values.
+ ///
+ /// Generates fingerprint values of `data`.
+ ///
+ /// Fingerprint op considers the first dimension of `data` as the batch dimension,
+ /// and `output[i]` contains the fingerprint value generated from contents in
+ /// `data[i, ...]` for all `i`.
+ ///
+ /// Fingerprint op writes fingerprint values as byte arrays. For example, the
+ /// default method `farmhash64` generates a 64-bit fingerprint value at a time.
+ /// This 8-byte value is written out as a `uint8` array of size 8, in little-endian
+ /// order.
+ ///
+ /// For example, suppose that `data` has data type `DT_INT32` and shape (2, 3, 4),
+ /// and that the fingerprint method is `farmhash64`. In this case, the output shape
+ /// is (2, 8), where 2 is the batch dimension size of `data`, and 8 is the size of
+ /// each fingerprint value in bytes. `output[0, :]` is generated from 12 integers in
+ /// `data[0, :, :]` and similarly `output[1, :]` is generated from the other 12 integers
+ /// in `data[1, :, :]`.
+ ///
+ /// Note that this op fingerprints the raw underlying buffer, and it does not
+ /// fingerprint Tensor's metadata such as data type and/or shape.
For example, the
+ /// fingerprint values are invariant under reshapes and bitcasts as long as the
+ /// batch dimension remains the same:
+ ///
+ /// ```
+ /// Fingerprint(data) == Fingerprint(Reshape(data, ...))
+ /// Fingerprint(data) == Fingerprint(Bitcast(data, ...))
+ /// ```
+ ///
+ /// For string data, one should expect `Fingerprint(data) !=
+ /// Fingerprint(ReduceJoin(data))` in general.
+ ///
+ /// - Parameters:
+ /// - data: Must have rank 1 or higher.
+ /// - method: Fingerprint method used by this op. The currently available method is
+ /// `farmhash::fingerprint64`.
+ ///
+ /// - Attr T: This can be a POD-type or string type.
+ ///
+ /// - Output fingerprint: A two-dimensional `Tensor` of type `tf.uint8`. The first dimension equals
+ /// `data`'s first dimension, and the second dimension size depends on the
+ /// fingerprint algorithm.
+ @inlinable @inline(__always)
+ public static func fingerprint(
+ data: Tensor,
+ method: StringTensor
+ ) -> Tensor {
+ switch data.handle.backend {
+ case .XLA:
+ let output_device = data.device
+ let data = Tensor(copying: data, to: .defaultTFEager)
+ return Tensor(
+ copying: _RawTFEager.fingerprint(data: data, method: method), to: output_device)
+ case .TF_EAGER:
+ return _RawTFEager.fingerprint(data: data, method: method)
+ }
+
+ }
+
+ @inlinable @inline(__always)
+ public static func fiveFloatOutputs() -> (
+ a: Tensor, b: Tensor, c: Tensor, d: Tensor, e: Tensor
+ ) {
+ _RawTFEager.fiveFloatOutputs()
+ }
+
+ /// Creates a dataset that emits the records from one or more binary files.
+ ///
+ /// - Parameters:
+ /// - filenames: A scalar or a vector containing the name(s) of the file(s) to be
+ /// read.
+ /// - header_bytes: A scalar representing the number of bytes to skip at the
+ /// beginning of a file.
+ /// - record_bytes: A scalar representing the number of bytes in each record.
+ /// - footer_bytes: A scalar representing the number of bytes to skip at the end
+ /// of a file.
+ /// - buffer_size: A scalar representing the number of bytes to buffer. Must be > 0.
+ @inlinable @inline(__always)
+ public static func fixedLengthRecordDataset(
+ filenames: StringTensor,
+ headerBytes: Tensor,
+ recordBytes: Tensor,
+ footerBytes: Tensor,
+ bufferSize: Tensor
+ ) -> VariantHandle {
+ _RawTFEager.fixedLengthRecordDataset(
+ filenames: filenames, headerBytes: headerBytes, recordBytes: recordBytes,
+ footerBytes: footerBytes, bufferSize: bufferSize)
+ }
+
+ @inlinable @inline(__always)
+ public static func fixedLengthRecordDatasetV2(
+ filenames: StringTensor,
+ headerBytes: Tensor,
+ recordBytes: Tensor,
+ footerBytes: Tensor,
+ bufferSize: Tensor,
+ compressionType: StringTensor
+ ) -> VariantHandle {
+ _RawTFEager.fixedLengthRecordDatasetV2(
+ filenames: filenames, headerBytes: headerBytes, recordBytes: recordBytes,
+ footerBytes: footerBytes, bufferSize: bufferSize, compressionType: compressionType)
+ }
+
+ /// A Reader that outputs fixed-length records from a file.
+ ///
+ /// - Attrs:
+ /// - header_bytes: Number of bytes in the header, defaults to 0.
+ /// - record_bytes: Number of bytes in the record.
+ /// - footer_bytes: Number of bytes in the footer, defaults to 0.
+ /// - hop_bytes: Number of bytes to hop before each read. Default of 0 means using
+ /// record_bytes.
+ /// - container: If non-empty, this reader is placed in the given container.
+ /// Otherwise, a default container is used.
+ /// - shared_name: If non-empty, this reader is named in the given bucket
+ /// with this shared_name.
Otherwise, the node name is used instead. + /// - encoding: The type of encoding for the file. Currently ZLIB and GZIP + /// are supported. Defaults to none. + /// + /// - Output reader_handle: The handle to reference the Reader. + @inlinable @inline(__always) + public static func fixedLengthRecordReaderV2( + headerBytes: Int64 = 0, + recordBytes: Int64, + footerBytes: Int64 = 0, + hopBytes: Int64 = 0, + container: String, + sharedName: String, + encoding: String + ) -> ResourceHandle { + _RawTFEager.fixedLengthRecordReaderV2( + headerBytes: headerBytes, recordBytes: recordBytes, footerBytes: footerBytes, + hopBytes: hopBytes, container: container, sharedName: sharedName, encoding: encoding) + } + + /// Generates labels for candidate sampling with a learned unigram distribution. + /// + /// A unigram sampler could use a fixed unigram distribution read from a + /// file or passed in as an in-memory array instead of building up the distribution + /// from data on the fly. There is also an option to skew the distribution by + /// applying a distortion power to the weights. + /// + /// The vocabulary file should be in CSV-like format, with the last field + /// being the weight associated with the word. + /// + /// For each batch, this op picks a single set of sampled candidate labels. + /// + /// The advantages of sampling candidates per-batch are simplicity and the + /// possibility of efficient dense matrix multiplication. The disadvantage is that + /// the sampled candidates must be chosen independently of the context and of the + /// true labels. + /// + /// - Parameter true_classes: A batch_size * num_true matrix, in which each row contains the + /// IDs of the num_true target_classes in the corresponding original label. + /// + /// - Attrs: + /// - num_true: Number of true labels per context. + /// - num_sampled: Number of candidates to randomly sample. + /// - unique: If unique is true, we sample with rejection, so that all sampled + /// candidates in a batch are unique. This requires some approximation to + /// estimate the post-rejection sampling probabilities. + /// - range_max: The sampler will sample integers from the interval [0, range_max). + /// - vocab_file: Each valid line in this file (which should have a CSV-like format) + /// corresponds to a valid word ID. IDs are in sequential order, starting from + /// num_reserved_ids. The last entry in each line is expected to be a value + /// corresponding to the count or relative probability. Exactly one of vocab_file + /// and unigrams needs to be passed to this op. + /// - distortion: The distortion is used to skew the unigram probability distribution. + /// Each weight is first raised to the distortion's power before adding to the + /// internal unigram distribution. As a result, distortion = 1.0 gives regular + /// unigram sampling (as defined by the vocab file), and distortion = 0.0 gives + /// a uniform distribution. + /// - num_reserved_ids: Optionally some reserved IDs can be added in the range [0, + /// ..., num_reserved_ids) by the users. One use case is that a special unknown + /// word token is used as ID 0. These IDs will have a sampling probability of 0. + /// - num_shards: A sampler can be used to sample from a subset of the original range + /// in order to speed up the whole computation through parallelism. This parameter + /// (together with 'shard') indicates the number of partitions that are being + /// used in the overall computation. 
+ /// - shard: A sampler can be used to sample from a subset of the original range
+ /// in order to speed up the whole computation through parallelism. This parameter
+ /// (together with 'num_shards') indicates the particular partition number of a
+ /// sampler op, when partitioning is being used.
+ /// - unigrams: A list of unigram counts or probabilities, one per ID in sequential
+ /// order. Exactly one of vocab_file and unigrams should be passed to this op.
+ /// - seed: If either seed or seed2 is set to be non-zero, the random number
+ /// generator is seeded by the given seed. Otherwise, it is seeded by a
+ /// random seed.
+ /// - seed2: A second seed to avoid seed collision.
+ ///
+ /// - Outputs:
+ /// - sampled_candidates: A vector of length num_sampled, in which each element is
+ /// the ID of a sampled candidate.
+ /// - true_expected_count: A batch_size * num_true matrix, representing
+ /// the number of times each candidate is expected to occur in a batch
+ /// of sampled candidates. If unique=true, then this is a probability.
+ /// - sampled_expected_count: A vector of length num_sampled, for each sampled
+ /// candidate representing the number of times the candidate is expected
+ /// to occur in a batch of sampled candidates. If unique=true, then this is a
+ /// probability.
+ @inlinable @inline(__always)
+ public static func fixedUnigramCandidateSampler(
+ trueClasses: Tensor,
+ numTrue: Int64,
+ numSampled: Int64,
+ unique: Bool,
+ rangeMax: Int64,
+ vocabFile: String,
+ distortion: Double = 1,
+ numReservedIds: Int64 = 0,
+ numShards: Int64 = 1,
+ shard: Int64 = 0,
+ unigrams: [Double],
+ seed: Int64 = 0,
+ seed2: Int64 = 0
+ ) -> (
+ sampledCandidates: Tensor, trueExpectedCount: Tensor,
+ sampledExpectedCount: Tensor
+ ) {
+ _RawTFEager.fixedUnigramCandidateSampler(
+ trueClasses: trueClasses, numTrue: numTrue, numSampled: numSampled, unique: unique,
+ rangeMax: rangeMax, vocabFile: vocabFile, distortion: distortion,
+ numReservedIds: numReservedIds, numShards: numShards, shard: shard, unigrams: unigrams,
+ seed: seed, seed2: seed2)
+ }
+
+ /// Creates a dataset that applies `f` to the outputs of `input_dataset`.
+ ///
+ /// Unlike MapDataset, the `f` in FlatMapDataset is expected to return a
+ /// Dataset variant, and FlatMapDataset will flatten successive results
+ /// into a single Dataset.
+ ///
+ /// - Attr f: A function mapping elements of `input_dataset`, concatenated with
+ /// `other_arguments`, to a Dataset variant that contains elements matching
+ /// `output_types` and `output_shapes`.
+ @inlinable @inline(__always)
+ public static func flatMapDataset<
+ FIn: TensorGroup,
+ FOut: TensorGroup,
+ Targuments: TensorArrayProtocol
+ >(
+ inputDataset: VariantHandle,
+ otherArguments: Targuments,
+ f: (FIn) -> FOut,
+ outputTypes: [TensorDataType],
+ outputShapes: [TensorShape?]
+ ) -> VariantHandle {
+ _RawTFEager.flatMapDataset(
+ inputDataset: inputDataset, otherArguments: otherArguments, f: f, outputTypes: outputTypes,
+ outputShapes: outputShapes)
+ }
+
+ @inlinable @inline(__always)
+ public static func floatInput(
+ _ a: Tensor
+ ) {
+ _RawTFEager.floatInput(a)
+ }
+
+ @inlinable @inline(__always)
+ public static func floatOutput() -> Tensor {
+ _RawTFEager.floatOutput()
+ }
+
+ @inlinable @inline(__always)
+ public static func floatOutputStringOutput() -> (a: Tensor, b: StringTensor) {
+ _RawTFEager.floatOutputStringOutput()
+ }
+
+ /// Returns element-wise largest integer not greater than x.
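+ ///
+ /// For example (editor's illustration):
+ ///
+ /// ```swift
+ /// let x = Tensor<Float>([-1.7, 0.2, 3.0])
+ /// _Raw.floor(x)  // [-2.0, 0.0, 3.0]
+ /// ```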
+ @inlinable @inline(__always) + public static func floor( + _ x: Tensor + ) -> Tensor { + switch x.handle.backend { + case .XLA: + return _RawXLA.floor(x) + case .TF_EAGER: + return _RawTFEager.floor(x) + } + + } + + /// Returns x // y element-wise. + /// + /// *NOTE*: `FloorDiv` supports broadcasting. More about broadcasting + /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + @inlinable @inline(__always) + public static func floorDiv( + _ x: Tensor, + _ y: Tensor + ) -> Tensor { + switch commonBackend(x.handle.backend, y.handle.backend) { + case .XLA: + let output_device = y.device + let x = Tensor(copying: x, to: .defaultTFEager) + let y = Tensor(copying: y, to: .defaultTFEager) + return Tensor(copying: _RawTFEager.floorDiv(x, y), to: output_device) + case .TF_EAGER: + return _RawTFEager.floorDiv(x, y) + } + + } + + /// Returns element-wise remainder of division. When `x < 0` xor `y < 0` is + /// + /// true, this follows Python semantics in that the result here is consistent + /// with a flooring divide. E.g. `floor(x / y) * y + mod(x, y) = x`. + /// + /// *NOTE*: `FloorMod` supports broadcasting. More about broadcasting + /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + @inlinable @inline(__always) + public static func floorMod( + _ x: Tensor, + _ y: Tensor + ) -> Tensor { + switch commonBackend(x.handle.backend, y.handle.backend) { + case .XLA: + let output_device = y.device + let x = Tensor(copying: x, to: .defaultTFEager) + let y = Tensor(copying: y, to: .defaultTFEager) + return Tensor(copying: _RawTFEager.floorMod(x, y), to: output_device) + case .TF_EAGER: + return _RawTFEager.floorMod(x, y) + } + + } + + @inlinable @inline(__always) + public static func flushSummaryWriter( + writer: ResourceHandle + ) { + _RawTFEager.flushSummaryWriter(writer: writer) + } + + @inlinable @inline(__always) + public static func foo1( + _ a: Tensor, + _ b: Tensor, + c: Tensor + ) -> (d: Tensor, e: Tensor) { + _RawTFEager.foo1(a, b, c: c) + } + + @inlinable @inline(__always) + public static func foo2( + _ a: Tensor, + _ b: StringTensor, + c: StringTensor + ) -> (d: Tensor, e: Tensor) { + _RawTFEager.foo2(a, b, c: c) + } + + @inlinable @inline(__always) + public static func foo3( + _ a: Tensor, + _ b: StringTensor, + c: Tensor + ) -> (d: Tensor, e: Tensor) { + _RawTFEager.foo3(a, b, c: c) + } + + /// ```python + /// output = input; + /// for i in range(start, limit, delta) + /// output = body(i, output); + /// ``` + /// + /// - Parameters: + /// - start: The lower bound. An int32 + /// - limit: The upper bound. An int32 + /// - delta: The increment. An int32 + /// - input: A list of input tensors whose types are T. + /// + /// - Attrs: + /// - T: A list of dtypes. + /// - body: A function that takes a list of tensors (int32, T) and returns another + /// list of tensors (T). + /// + /// - Output output: A list of output tensors whose types are T. + @inlinable @inline(__always) + public static func for_< + T: TensorArrayProtocol, + BodyIn: TensorGroup, + BodyOut: TensorGroup + >( + start: Tensor, + limit: Tensor, + delta: Tensor, + _ input: T, + body: (BodyIn) -> BodyOut + ) -> T { + _RawTFEager.for_(start: start, limit: limit, delta: delta, input, body: body) + } + + /// Performs fractional average pooling on the input. + /// + /// Fractional average pooling is similar to Fractional max pooling in the pooling + /// region generation step. 
The only difference is that after pooling regions are
+ /// generated, a mean operation is performed instead of a max operation in each
+ /// pooling region.
+ ///
+ /// - Parameter value: 4-D with shape `[batch, height, width, channels]`.
+ ///
+ /// - Attrs:
+ /// - pooling_ratio: Pooling ratio for each dimension of `value`, currently only
+ /// supports row and col dimensions and should be >= 1.0. For example, a valid
+ /// pooling ratio looks like [1.0, 1.44, 1.73, 1.0]. The first and last elements
+ /// must be 1.0 because we don't allow pooling on batch and channels
+ /// dimensions. 1.44 and 1.73 are pooling ratios on height and width dimensions
+ /// respectively.
+ /// - pseudo_random: When set to True, generates the pooling sequence in a
+ /// pseudorandom fashion, otherwise, in a random fashion. Check the paper [Benjamin
+ /// Graham, Fractional Max-Pooling](http://arxiv.org/abs/1412.6071) for the
+ /// difference between pseudorandom and random.
+ /// - overlapping: When set to True, it means when pooling, the values at the boundary
+ /// of adjacent pooling cells are used by both cells. For example:
+ ///
+ /// `index 0 1 2 3 4`
+ ///
+ /// `value 20 5 16 3 7`
+ ///
+ /// If the pooling sequence is [0, 2, 4], then 16, at index 2 will be used twice.
+ /// The result would be [41/3, 26/3] for fractional avg pooling.
+ /// - deterministic: When set to True, a fixed pooling region will be used when
+ /// iterating over a FractionalAvgPool node in the computation graph. Mainly used
+ /// in unit tests to make FractionalAvgPool deterministic.
+ /// - seed: If either seed or seed2 is set to be non-zero, the random number
+ /// generator is seeded by the given seed. Otherwise, it is seeded by a
+ /// random seed.
+ /// - seed2: A second seed to avoid seed collision.
+ ///
+ /// - Outputs:
+ /// - output: output tensor after fractional avg pooling.
+ /// - row_pooling_sequence: row pooling sequence, needed to calculate gradient.
+ /// - col_pooling_sequence: column pooling sequence, needed to calculate gradient.
+ @inlinable @inline(__always)
+ public static func fractionalAvgPool(
+ value: Tensor,
+ poolingRatio: [Double],
+ pseudoRandom: Bool = false,
+ overlapping: Bool = false,
+ deterministic: Bool = false,
+ seed: Int64 = 0,
+ seed2: Int64 = 0
+ ) -> (output: Tensor, rowPoolingSequence: Tensor, colPoolingSequence: Tensor) {
+ _RawTFEager.fractionalAvgPool(
+ value: value, poolingRatio: poolingRatio, pseudoRandom: pseudoRandom,
+ overlapping: overlapping, deterministic: deterministic, seed: seed, seed2: seed2)
+ }
+
+ /// Computes gradient of the FractionalAvgPool function.
+ ///
+ /// Unlike FractionalMaxPoolGrad, we don't need to find arg_max for
+ /// FractionalAvgPoolGrad, we just need to evenly back-propagate each element of
+ /// out_backprop to those indices that form the same pooling cell. Therefore, we
+ /// just need to know the shape of original input tensor, instead of the whole
+ /// tensor.
+ ///
+ /// - Parameters:
+ /// - orig_input_tensor_shape: Original input tensor shape for `fractional_avg_pool`
+ /// - out_backprop: 4-D with shape `[batch, height, width, channels]`. Gradients
+ /// w.r.t. the output of `fractional_avg_pool`.
+ /// - row_pooling_sequence: row pooling sequence, form pooling region with
+ /// col_pooling_sequence.
+ /// - col_pooling_sequence: column pooling sequence, form pooling region with
+ /// row_pooling sequence.
+ /// + /// - Attr overlapping: When set to True, it means when pooling, the values at the boundary + /// of adjacent pooling cells are used by both cells. For example: + /// + /// `index 0 1 2 3 4` + /// + /// `value 20 5 16 3 7` + /// + /// If the pooling sequence is [0, 2, 4], then 16, at index 2 will be used twice. + /// The result would be [41/3, 26/3] for fractional avg pooling. + /// + /// - Output output: 4-D. Gradients w.r.t. the input of `fractional_avg_pool`. + @inlinable @inline(__always) + public static func fractionalAvgPoolGrad( + origInputTensorShape: Tensor, + outBackprop: Tensor, + rowPoolingSequence: Tensor, + colPoolingSequence: Tensor, + overlapping: Bool = false + ) -> Tensor { + switch commonBackend( + commonBackend( + commonBackend(origInputTensorShape.handle.backend, outBackprop.handle.backend), + rowPoolingSequence.handle.backend), colPoolingSequence.handle.backend) + { + case .XLA: + let output_device = colPoolingSequence.device + let origInputTensorShape = Tensor(copying: origInputTensorShape, to: .defaultTFEager) + let outBackprop = Tensor(copying: outBackprop, to: .defaultTFEager) + let rowPoolingSequence = Tensor(copying: rowPoolingSequence, to: .defaultTFEager) + let colPoolingSequence = Tensor(copying: colPoolingSequence, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.fractionalAvgPoolGrad( + origInputTensorShape: origInputTensorShape, outBackprop: outBackprop, + rowPoolingSequence: rowPoolingSequence, colPoolingSequence: colPoolingSequence, + overlapping: overlapping), to: output_device) + case .TF_EAGER: + return _RawTFEager.fractionalAvgPoolGrad( + origInputTensorShape: origInputTensorShape, outBackprop: outBackprop, + rowPoolingSequence: rowPoolingSequence, colPoolingSequence: colPoolingSequence, + overlapping: overlapping) + } + + } + + /// Performs fractional max pooling on the input. + /// + /// Fractional max pooling is slightly different than regular max pooling. In + /// regular max pooling, you downsize an input set by taking the maximum value of + /// smaller N x N subsections of the set (often 2x2), and try to reduce the set by + /// a factor of N, where N is an integer. Fractional max pooling, as you might + /// expect from the word "fractional", means that the overall reduction ratio N + /// does not have to be an integer. + /// + /// The sizes of the pooling regions are generated randomly but are fairly uniform. + /// For example, let's look at the height dimension, and the constraints on the + /// list of rows that will be pool boundaries. + /// + /// First we define the following: + /// + /// 1. input_row_length : the number of rows from the input set + /// 2. output_row_length : which will be smaller than the input + /// 3. alpha = input_row_length / output_row_length : our reduction ratio + /// 4. K = floor(alpha) + /// 5. row_pooling_sequence : this is the result list of pool boundary rows + /// + /// Then, row_pooling_sequence should satisfy: + /// + /// 1. a[0] = 0 : the first value of the sequence is 0 + /// 2. a[end] = input_row_length : the last value of the sequence is the size + /// 3. K <= (a[i+1] - a[i]) <= K+1 : all intervals are K or K+1 size + /// 4. length(row_pooling_sequence) = output_row_length+1 + /// + /// For more details on fractional max pooling, see this paper: + /// [Benjamin Graham, Fractional Max-Pooling](http://arxiv.org/abs/1412.6071) + /// + /// - Parameter value: 4-D with shape `[batch, height, width, channels]`. 
+ ///
+ /// - Attrs:
+ /// - pooling_ratio: Pooling ratio for each dimension of `value`, currently only
+ /// supports row and col dimensions and should be >= 1.0. For example, a valid
+ /// pooling ratio looks like [1.0, 1.44, 1.73, 1.0]. The first and last elements
+ /// must be 1.0 because we don't allow pooling on batch and channels
+ /// dimensions. 1.44 and 1.73 are pooling ratios on height and width dimensions
+ /// respectively.
+ /// - pseudo_random: When set to True, generates the pooling sequence in a
+ /// pseudorandom fashion, otherwise, in a random fashion. Check the paper [Benjamin
+ /// Graham, Fractional Max-Pooling](http://arxiv.org/abs/1412.6071) for the
+ /// difference between pseudorandom and random.
+ /// - overlapping: When set to True, it means when pooling, the values at the boundary
+ /// of adjacent pooling cells are used by both cells. For example:
+ ///
+ /// `index 0 1 2 3 4`
+ ///
+ /// `value 20 5 16 3 7`
+ ///
+ /// If the pooling sequence is [0, 2, 4], then 16, at index 2 will be used twice.
+ /// The result would be [20, 16] for fractional max pooling.
+ /// - deterministic: When set to True, a fixed pooling region will be used when
+ /// iterating over a FractionalMaxPool node in the computation graph. Mainly used
+ /// in unit tests to make FractionalMaxPool deterministic.
+ /// - seed: If either seed or seed2 is set to be non-zero, the random number
+ /// generator is seeded by the given seed. Otherwise, it is seeded by a
+ /// random seed.
+ /// - seed2: A second seed to avoid seed collision.
+ ///
+ /// - Outputs:
+ /// - output: output tensor after fractional max pooling.
+ /// - row_pooling_sequence: row pooling sequence, needed to calculate gradient.
+ /// - col_pooling_sequence: column pooling sequence, needed to calculate gradient.
+ @inlinable @inline(__always)
+ public static func fractionalMaxPool(
+ value: Tensor,
+ poolingRatio: [Double],
+ pseudoRandom: Bool = false,
+ overlapping: Bool = false,
+ deterministic: Bool = false,
+ seed: Int64 = 0,
+ seed2: Int64 = 0
+ ) -> (output: Tensor, rowPoolingSequence: Tensor, colPoolingSequence: Tensor) {
+ _RawTFEager.fractionalMaxPool(
+ value: value, poolingRatio: poolingRatio, pseudoRandom: pseudoRandom,
+ overlapping: overlapping, deterministic: deterministic, seed: seed, seed2: seed2)
+ }
+
+ /// Computes gradient of the FractionalMaxPool function.
+ ///
+ /// - Parameters:
+ /// - orig_input: Original input for `fractional_max_pool`
+ /// - orig_output: Original output for `fractional_max_pool`
+ /// - out_backprop: 4-D with shape `[batch, height, width, channels]`. Gradients
+ /// w.r.t. the output of `fractional_max_pool`.
+ /// - row_pooling_sequence: row pooling sequence, form pooling region with
+ /// col_pooling_sequence.
+ /// - col_pooling_sequence: column pooling sequence, form pooling region with
+ /// row_pooling sequence.
+ ///
+ /// - Attr overlapping: When set to True, it means when pooling, the values at the boundary
+ /// of adjacent pooling cells are used by both cells. For example:
+ ///
+ /// `index 0 1 2 3 4`
+ ///
+ /// `value 20 5 16 3 7`
+ ///
+ /// If the pooling sequence is [0, 2, 4], then 16, at index 2 will be used twice.
+ /// The result would be [20, 16] for fractional max pooling.
+ ///
+ /// - Output output: 4-D. Gradients w.r.t. the input of `fractional_max_pool`.
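+ ///
+ /// A minimal pairing sketch (editor's illustration; the shapes are made up,
+ /// and `deterministic: true` keeps the pooling sequences reproducible):
+ ///
+ /// ```swift
+ /// let value = Tensor<Float>(randomNormal: [1, 8, 8, 3])
+ /// let (output, rows, cols) = _Raw.fractionalMaxPool(
+ ///   value: value, poolingRatio: [1.0, 1.44, 1.73, 1.0], deterministic: true)
+ /// let inputGradient = _Raw.fractionalMaxPoolGrad(
+ ///   origInput: value, origOutput: output,
+ ///   outBackprop: Tensor<Float>(ones: output.shape),  // stand-in upstream gradient
+ ///   rowPoolingSequence: rows, colPoolingSequence: cols)
+ /// ```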
+ @inlinable @inline(__always)
+ public static func fractionalMaxPoolGrad(
+ origInput: Tensor,
+ origOutput: Tensor,
+ outBackprop: Tensor,
+ rowPoolingSequence: Tensor,
+ colPoolingSequence: Tensor,
+ overlapping: Bool = false
+ ) -> Tensor {
+ switch commonBackend(
+ commonBackend(
+ commonBackend(
+ commonBackend(origInput.handle.backend, origOutput.handle.backend),
+ outBackprop.handle.backend), rowPoolingSequence.handle.backend),
+ colPoolingSequence.handle.backend)
+ {
+ case .XLA:
+ let output_device = colPoolingSequence.device
+ let origInput = Tensor(copying: origInput, to: .defaultTFEager)
+ let origOutput = Tensor(copying: origOutput, to: .defaultTFEager)
+ let outBackprop = Tensor(copying: outBackprop, to: .defaultTFEager)
+ let rowPoolingSequence = Tensor(copying: rowPoolingSequence, to: .defaultTFEager)
+ let colPoolingSequence = Tensor(copying: colPoolingSequence, to: .defaultTFEager)
+ return Tensor(
+ copying: _RawTFEager.fractionalMaxPoolGrad(
+ origInput: origInput, origOutput: origOutput, outBackprop: outBackprop,
+ rowPoolingSequence: rowPoolingSequence, colPoolingSequence: colPoolingSequence,
+ overlapping: overlapping), to: output_device)
+ case .TF_EAGER:
+ return _RawTFEager.fractionalMaxPoolGrad(
+ origInput: origInput, origOutput: origOutput, outBackprop: outBackprop,
+ rowPoolingSequence: rowPoolingSequence, colPoolingSequence: colPoolingSequence,
+ overlapping: overlapping)
+ }
+
+ }
+
+ @inlinable @inline(__always)
+ public static func funcAttr<
+ FIn: TensorGroup,
+ FOut: TensorGroup
+ >(
+ f: (FIn) -> FOut
+ ) {
+ _RawTFEager.funcAttr(f: f)
+ }
+
+ /// Batch normalization.
+ ///
+ /// Note that the sizes of 4D Tensors are defined by either "NHWC" or "NCHW".
+ /// The size of 1D Tensors matches the dimension C of the 4D Tensors.
+ ///
+ /// - Parameters:
+ /// - x: A 4D Tensor for input data.
+ /// - scale: A 1D Tensor for scaling factor, to scale the normalized x.
+ /// - offset: A 1D Tensor for offset, to shift the normalized x.
+ /// - mean: A 1D Tensor for population mean. Used for inference only;
+ /// must be empty for training.
+ /// - variance: A 1D Tensor for population variance. Used for inference only;
+ /// must be empty for training.
+ ///
+ /// - Attrs:
+ /// - T: The data type for the elements of input and output Tensors.
+ /// - epsilon: A small float number added to the variance of x.
+ /// - data_format: The data format for x and y. Either "NHWC" (default) or "NCHW".
+ /// - is_training: A bool value to indicate the operation is for training (default)
+ /// or inference.
+ ///
+ /// - Outputs:
+ /// - y: A 4D Tensor for output data.
+ /// - batch_mean: A 1D Tensor for the computed batch mean, to be used by TensorFlow
+ /// to compute the running mean.
+ /// - batch_variance: A 1D Tensor for the computed batch variance, to be used by
+ /// TensorFlow to compute the running variance.
+ /// - reserve_space_1: A 1D Tensor for the computed batch mean, to be reused
+ /// in the gradient computation.
+ /// - reserve_space_2: A 1D Tensor for the computed batch variance (inverted variance
+ /// in the cuDNN case), to be reused in the gradient computation.
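+ ///
+ /// A minimal training-mode sketch (editor's illustration; the shapes are made
+ /// up, and the empty `mean`/`variance` tensors follow the note above):
+ ///
+ /// ```swift
+ /// let x = Tensor<Float>(randomNormal: [8, 4, 4, 16])       // NHWC input
+ /// let result = _Raw.fusedBatchNorm(
+ ///   x, scale: Tensor<Float>(ones: [16]), offset: Tensor<Float>(zeros: [16]),
+ ///   mean: Tensor<Float>([]), variance: Tensor<Float>([]),  // empty while training
+ ///   epsilon: 0.001, isTraining: true)
+ /// let normalized = result.y
+ /// ```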
+ @inlinable @inline(__always)
+ public static func fusedBatchNorm(
+ _ x: Tensor,
+ scale: Tensor,
+ offset: Tensor,
+ mean: Tensor,
+ variance: Tensor,
+ epsilon: Double = 0.0001,
+ dataFormat: DataFormat = .nhwc,
+ isTraining: Bool = true
+ ) -> (
+ y: Tensor, batchMean: Tensor, batchVariance: Tensor, reserveSpace1: Tensor,
+ reserveSpace2: Tensor
+ ) {
+ _RawTFEager.fusedBatchNorm(
+ x, scale: scale, offset: offset, mean: mean, variance: variance, epsilon: epsilon,
+ dataFormat: dataFormat, isTraining: isTraining)
+ }
+
+ /// Gradient for batch normalization.
+ ///
+ /// Note that the sizes of 4D Tensors are defined by either "NHWC" or "NCHW".
+ /// The size of 1D Tensors matches the dimension C of the 4D Tensors.
+ ///
+ /// - Parameters:
+ /// - y_backprop: A 4D Tensor for the gradient with respect to y.
+ /// - x: A 4D Tensor for input data.
+ /// - scale: A 1D Tensor for scaling factor, to scale the normalized x.
+ /// - reserve_space_1: When is_training is True, a 1D Tensor for the computed batch
+ /// mean to be reused in gradient computation. When is_training is
+ /// False, a 1D Tensor for the population mean to be reused in both
+ /// 1st and 2nd order gradient computation.
+ /// - reserve_space_2: When is_training is True, a 1D Tensor for the computed batch
+ /// variance (inverted variance in the cuDNN case) to be reused in
+ /// gradient computation. When is_training is False, a 1D Tensor
+ /// for the population variance to be reused in both 1st and 2nd
+ /// order gradient computation.
+ ///
+ /// - Attrs:
+ /// - T: The data type for the elements of input and output Tensors.
+ /// - epsilon: A small float number added to the variance of x.
+ /// - data_format: The data format for y_backprop, x, x_backprop.
+ /// Either "NHWC" (default) or "NCHW".
+ /// - is_training: A bool value to indicate the operation is for training (default)
+ /// or inference.
+ ///
+ /// - Outputs:
+ /// - x_backprop: A 4D Tensor for the gradient with respect to x.
+ /// - scale_backprop: A 1D Tensor for the gradient with respect to scale.
+ /// - offset_backprop: A 1D Tensor for the gradient with respect to offset.
+ /// - reserve_space_3: Unused placeholder to match the mean input in FusedBatchNorm.
+ /// - reserve_space_4: Unused placeholder to match the variance input
+ /// in FusedBatchNorm.
+ @inlinable @inline(__always)
+ public static func fusedBatchNormGrad(
+ yBackprop: Tensor,
+ _ x: Tensor,
+ scale: Tensor,
+ reserveSpace1: Tensor,
+ reserveSpace2: Tensor,
+ epsilon: Double = 0.0001,
+ dataFormat: DataFormat = .nhwc,
+ isTraining: Bool = true
+ ) -> (
+ xBackprop: Tensor, scaleBackprop: Tensor, offsetBackprop: Tensor,
+ reserveSpace3: Tensor, reserveSpace4: Tensor
+ ) {
+ _RawTFEager.fusedBatchNormGrad(
+ yBackprop: yBackprop, x, scale: scale, reserveSpace1: reserveSpace1,
+ reserveSpace2: reserveSpace2, epsilon: epsilon, dataFormat: dataFormat,
+ isTraining: isTraining)
+ }
+
+ /// Gradient for batch normalization.
+ ///
+ /// Note that the sizes of 4D Tensors are defined by either "NHWC" or "NCHW".
+ /// The size of 1D Tensors matches the dimension C of the 4D Tensors.
+ ///
+ /// - Parameters:
+ /// - y_backprop: A 4D Tensor for the gradient with respect to y.
+ /// - x: A 4D Tensor for input data.
+ /// - scale: A 1D Tensor for scaling factor, to scale the normalized x.
+ /// - reserve_space_1: When is_training is True, a 1D Tensor for the computed batch
+ /// mean to be reused in gradient computation.
When is_training is
+ /// False, a 1D Tensor for the population mean to be reused in both
+ /// 1st and 2nd order gradient computation.
+ /// - reserve_space_2: When is_training is True, a 1D Tensor for the computed batch
+ /// variance (inverted variance in the cuDNN case) to be reused in
+ /// gradient computation. When is_training is False, a 1D Tensor
+ /// for the population variance to be reused in both 1st and 2nd
+ /// order gradient computation.
+ ///
+ /// - Attrs:
+ /// - T: The data type for the elements of input and output Tensors.
+ /// - U: The data type for the scale, offset, mean, and variance.
+ /// - epsilon: A small float number added to the variance of x.
+ /// - data_format: The data format for y_backprop, x, x_backprop.
+ /// Either "NHWC" (default) or "NCHW".
+ /// - is_training: A bool value to indicate the operation is for training (default)
+ /// or inference.
+ ///
+ /// - Outputs:
+ /// - x_backprop: A 4D Tensor for the gradient with respect to x.
+ /// - scale_backprop: A 1D Tensor for the gradient with respect to scale.
+ /// - offset_backprop: A 1D Tensor for the gradient with respect to offset.
+ /// - reserve_space_3: Unused placeholder to match the mean input in FusedBatchNorm.
+ /// - reserve_space_4: Unused placeholder to match the variance input
+ /// in FusedBatchNorm.
+ @inlinable @inline(__always)
+ public static func fusedBatchNormGradV2<
+ T: FloatingPoint & TensorFlowScalar,
+ U: FloatingPoint & TensorFlowScalar
+ >(
+ yBackprop: Tensor,
+ _ x: Tensor,
+ scale: Tensor,
+ reserveSpace1: Tensor,
+ reserveSpace2: Tensor,
+ epsilon: Double = 0.0001,
+ dataFormat: DataFormat = .nhwc,
+ isTraining: Bool = true
+ ) -> (
+ xBackprop: Tensor, scaleBackprop: Tensor, offsetBackprop: Tensor,
+ reserveSpace3: Tensor, reserveSpace4: Tensor
+ ) {
+ _RawTFEager.fusedBatchNormGradV2(
+ yBackprop: yBackprop, x, scale: scale, reserveSpace1: reserveSpace1,
+ reserveSpace2: reserveSpace2, epsilon: epsilon, dataFormat: dataFormat,
+ isTraining: isTraining)
+ }
+
+ /// Gradient for batch normalization.
+ ///
+ /// Note that the sizes of 4D Tensors are defined by either "NHWC" or "NCHW".
+ /// The size of 1D Tensors matches the dimension C of the 4D Tensors.
+ ///
+ /// - Parameters:
+ /// - y_backprop: A 4D Tensor for the gradient with respect to y.
+ /// - x: A 4D Tensor for input data.
+ /// - scale: A 1D Tensor for scaling factor, to scale the normalized x.
+ /// - reserve_space_1: When is_training is True, a 1D Tensor for the computed batch
+ /// mean to be reused in gradient computation. When is_training is
+ /// False, a 1D Tensor for the population mean to be reused in both
+ /// 1st and 2nd order gradient computation.
+ /// - reserve_space_2: When is_training is True, a 1D Tensor for the computed batch
+ /// variance (inverted variance in the cuDNN case) to be reused in
+ /// gradient computation. When is_training is False, a 1D Tensor
+ /// for the population variance to be reused in both 1st and 2nd
+ /// order gradient computation.
+ /// - reserve_space_3: When is_training is True, a 1D Tensor for some intermediate results to be reused
+ /// in gradient computation. When is_training is False, a dummy empty Tensor will be
+ /// created.
+ ///
+ /// - Attrs:
+ /// - T: The data type for the elements of input and output Tensors.
+ /// - U: The data type for the scale, offset, mean, and variance.
+ /// - epsilon: A small float number added to the variance of x.
+ /// - data_format: The data format for y_backprop, x, x_backprop.
+ /// Either "NHWC" (default) or "NCHW".
+ /// - is_training: A bool value to indicate the operation is for training (default)
+ /// or inference.
+ ///
+ /// - Outputs:
+ /// - x_backprop: A 4D Tensor for the gradient with respect to x.
+ /// - scale_backprop: A 1D Tensor for the gradient with respect to scale.
+ /// - offset_backprop: A 1D Tensor for the gradient with respect to offset.
+ /// - reserve_space_4: Unused placeholder to match the mean input in FusedBatchNorm.
+ /// - reserve_space_5: Unused placeholder to match the variance input
+ /// in FusedBatchNorm.
+ @inlinable @inline(__always)
+ public static func fusedBatchNormGradV3<
+ T: FloatingPoint & TensorFlowScalar,
+ U: FloatingPoint & TensorFlowScalar
+ >(
+ yBackprop: Tensor,
+ _ x: Tensor,
+ scale: Tensor,
+ reserveSpace1: Tensor,
+ reserveSpace2: Tensor,
+ reserveSpace3: Tensor,
+ epsilon: Double = 0.0001,
+ dataFormat: DataFormat = .nhwc,
+ isTraining: Bool = true
+ ) -> (
+ xBackprop: Tensor, scaleBackprop: Tensor, offsetBackprop: Tensor,
+ reserveSpace4: Tensor, reserveSpace5: Tensor
+ ) {
+ _RawTFEager.fusedBatchNormGradV3(
+ yBackprop: yBackprop, x, scale: scale, reserveSpace1: reserveSpace1,
+ reserveSpace2: reserveSpace2, reserveSpace3: reserveSpace3, epsilon: epsilon,
+ dataFormat: dataFormat, isTraining: isTraining)
+ }
+
+ /// Batch normalization.
+ ///
+ /// Note that the sizes of 4D Tensors are defined by either "NHWC" or "NCHW".
+ /// The size of 1D Tensors matches the dimension C of the 4D Tensors.
+ ///
+ /// - Parameters:
+ /// - x: A 4D Tensor for input data.
+ /// - scale: A 1D Tensor for scaling factor, to scale the normalized x.
+ /// - offset: A 1D Tensor for offset, to shift the normalized x.
+ /// - mean: A 1D Tensor for population mean. Used for inference only;
+ /// must be empty for training.
+ /// - variance: A 1D Tensor for population variance. Used for inference only;
+ /// must be empty for training.
+ ///
+ /// - Attrs:
+ /// - T: The data type for the elements of input and output Tensors.
+ /// - U: The data type for the scale, offset, mean, and variance.
+ /// - epsilon: A small float number added to the variance of x.
+ /// - data_format: The data format for x and y. Either "NHWC" (default) or "NCHW".
+ /// - is_training: A bool value to indicate the operation is for training (default)
+ /// or inference.
+ ///
+ /// - Outputs:
+ /// - y: A 4D Tensor for output data.
+ /// - batch_mean: A 1D Tensor for the computed batch mean, to be used by TensorFlow
+ /// to compute the running mean.
+ /// - batch_variance: A 1D Tensor for the computed batch variance, to be used by
+ /// TensorFlow to compute the running variance.
+ /// - reserve_space_1: A 1D Tensor for the computed batch mean, to be reused
+ /// in the gradient computation.
+ /// - reserve_space_2: A 1D Tensor for the computed batch variance (inverted variance
+ /// in the cuDNN case), to be reused in the gradient computation.
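+ ///
+ /// A minimal sketch (editor's illustration; here `T == U == Float`, though the
+ /// split into two type parameters exists so activations may use a
+ /// lower-precision `T` while the statistics stay in `Float`):
+ ///
+ /// ```swift
+ /// let x = Tensor<Float>(randomNormal: [8, 4, 4, 16])
+ /// let (y, batchMean, batchVariance, _, _) = _Raw.fusedBatchNormV2(
+ ///   x, scale: Tensor<Float>(ones: [16]), offset: Tensor<Float>(zeros: [16]),
+ ///   mean: Tensor<Float>([]), variance: Tensor<Float>([]), isTraining: true)
+ /// ```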
+ @inlinable @inline(__always)
+ public static func fusedBatchNormV2<
+ T: FloatingPoint & TensorFlowScalar,
+ U: FloatingPoint & TensorFlowScalar
+ >(
+ _ x: Tensor,
+ scale: Tensor,
+ offset: Tensor,
+ mean: Tensor,
+ variance: Tensor,
+ epsilon: Double = 0.0001,
+ dataFormat: DataFormat = .nhwc,
+ isTraining: Bool = true
+ ) -> (
+ y: Tensor, batchMean: Tensor, batchVariance: Tensor, reserveSpace1: Tensor,
+ reserveSpace2: Tensor
+ ) {
+ _RawTFEager.fusedBatchNormV2(
+ x, scale: scale, offset: offset, mean: mean, variance: variance, epsilon: epsilon,
+ dataFormat: dataFormat, isTraining: isTraining)
+ }
+
+ /// Batch normalization.
+ ///
+ /// Note that the sizes of 4D Tensors are defined by either "NHWC" or "NCHW".
+ /// The size of 1D Tensors matches the dimension C of the 4D Tensors.
+ ///
+ /// - Parameters:
+ /// - x: A 4D Tensor for input data.
+ /// - scale: A 1D Tensor for scaling factor, to scale the normalized x.
+ /// - offset: A 1D Tensor for offset, to shift the normalized x.
+ /// - mean: A 1D Tensor for population mean. Used for inference only;
+ /// must be empty for training.
+ /// - variance: A 1D Tensor for population variance. Used for inference only;
+ /// must be empty for training.
+ ///
+ /// - Attrs:
+ /// - T: The data type for the elements of input and output Tensors.
+ /// - U: The data type for the scale, offset, mean, and variance.
+ /// - epsilon: A small float number added to the variance of x.
+ /// - data_format: The data format for x and y. Either "NHWC" (default) or "NCHW".
+ /// - is_training: A bool value to indicate the operation is for training (default)
+ /// or inference.
+ ///
+ /// - Outputs:
+ /// - y: A 4D Tensor for output data.
+ /// - batch_mean: A 1D Tensor for the computed batch mean, to be used by TensorFlow
+ /// to compute the running mean.
+ /// - batch_variance: A 1D Tensor for the computed batch variance, to be used by
+ /// TensorFlow to compute the running variance.
+ /// - reserve_space_1: A 1D Tensor for the computed batch mean, to be reused
+ /// in the gradient computation.
+ /// - reserve_space_2: A 1D Tensor for the computed batch variance (inverted variance
+ /// in the cuDNN case), to be reused in the gradient computation.
+ /// - reserve_space_3: A 1D Tensor for some intermediate results, to be reused in the gradient
+ /// computation for better efficiency.
+ @inlinable @inline(__always)
+ public static func fusedBatchNormV3<
+ T: FloatingPoint & TensorFlowScalar,
+ U: FloatingPoint & TensorFlowScalar
+ >(
+ _ x: Tensor,
+ scale: Tensor,
+ offset: Tensor,
+ mean: Tensor,
+ variance: Tensor,
+ epsilon: Double = 0.0001,
+ dataFormat: DataFormat = .nhwc,
+ isTraining: Bool = true
+ ) -> (
+ y: Tensor, batchMean: Tensor, batchVariance: Tensor, reserveSpace1: Tensor,
+ reserveSpace2: Tensor, reserveSpace3: Tensor
+ ) {
+ _RawTFEager.fusedBatchNormV3(
+ x, scale: scale, offset: offset, mean: mean, variance: variance, epsilon: epsilon,
+ dataFormat: dataFormat, isTraining: isTraining)
+ }
+
+ /// Performs a padding as a preprocess during a convolution.
+ ///
+ /// Similar to FusedResizeAndPadConv2d, this op allows for an optimized
+ /// implementation where the spatial padding transformation stage is fused with the
+ /// im2col lookup, but in this case without the bilinear filtering required for
+ /// resizing. Fusing the padding prevents the need to write out the intermediate
+ /// results as whole tensors, reducing memory pressure, and we can get some latency
+ /// gains by merging the transformation calculations.
+ /// The data_format attribute for Conv2D isn't supported by this op, and 'NHWC' + /// order is used instead. + /// Internally this op uses a single per-graph scratch buffer, which means that it + /// will block if multiple versions are being run in parallel. This is because this + /// operator is primarily an optimization to minimize memory usage. + /// + /// - Parameters: + /// - input: 4-D with shape `[batch, in_height, in_width, in_channels]`. + /// - paddings: A two-column matrix specifying the padding sizes. The number of + /// rows must be the same as the rank of `input`. + /// - filter: 4-D with shape + /// `[filter_height, filter_width, in_channels, out_channels]`. + /// + /// - Attrs: + /// - strides: 1-D of length 4. The stride of the sliding window for each dimension + /// of `input`. Must be in the same order as the dimension specified with format. + /// - padding: The type of padding algorithm to use. + @inlinable @inline(__always) + public static func fusedPadConv2D( + _ input: Tensor, + paddings: Tensor, + filter: Tensor, + mode: Mode1, + strides: [Int32], + padding: Padding + ) -> Tensor { + switch commonBackend( + commonBackend(input.handle.backend, paddings.handle.backend), filter.handle.backend) + { + case .XLA: + let output_device = filter.device + let input = Tensor(copying: input, to: .defaultTFEager) + let paddings = Tensor(copying: paddings, to: .defaultTFEager) + let filter = Tensor(copying: filter, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.fusedPadConv2D( + input, paddings: paddings, filter: filter, mode: mode, strides: strides, + padding: padding), to: output_device) + case .TF_EAGER: + return _RawTFEager.fusedPadConv2D( + input, paddings: paddings, filter: filter, mode: mode, strides: strides, padding: padding) + } + + } + + /// Performs a resize and padding as a preprocess during a convolution. + /// + /// It's often possible to do spatial transformations more efficiently as part of + /// the packing stage of a convolution, so this op allows for an optimized + /// implementation where these stages are fused together. This prevents the need to + /// write out the intermediate results as whole tensors, reducing memory pressure, + /// and we can get some latency gains by merging the transformation calculations. + /// The data_format attribute for Conv2D isn't supported by this op, and defaults to + /// 'NHWC' order. + /// Internally this op uses a single per-graph scratch buffer, which means that it + /// will block if multiple versions are being run in parallel. This is because this + /// operator is primarily an optimization to minimize memory usage. + /// + /// - Parameters: + /// - input: 4-D with shape `[batch, in_height, in_width, in_channels]`. + /// - size: A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The + /// new size for the images. + /// - paddings: A two-column matrix specifying the padding sizes. The number of + /// rows must be the same as the rank of `input`. + /// - filter: 4-D with shape + /// `[filter_height, filter_width, in_channels, out_channels]`. + /// + /// - Attrs: + /// - resize_align_corners: If true, the centers of the 4 corner pixels of the input and output tensors are + /// aligned, preserving the values at the corner pixels. Defaults to false. + /// - strides: 1-D of length 4. The stride of the sliding window for each dimension + /// of `input`. Must be in the same order as the dimension specified with format. + /// - padding: The type of padding algorithm to use. 
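+  /// A call sketch with illustrative shapes; the `.reflect` mirror-pad mode and
+  /// `.same` padding case names are assumed here:
+  ///
+  /// ```swift
+  /// let image = Tensor<Float>(randomNormal: [1, 28, 28, 3])  // NHWC
+  /// let filter = Tensor<Float>(randomNormal: [3, 3, 3, 8])
+  /// let size = Tensor<Int32>([32, 32])                       // resize target
+  /// let paddings: Tensor<Int32> = [[0, 0], [1, 1], [1, 1], [0, 0]]
+  /// let output = _Raw.fusedResizeAndPadConv2D(
+  ///   image, size: size, paddings: paddings, filter: filter,
+  ///   mode: .reflect, strides: [1, 1, 1, 1], padding: .same)
+  /// ```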
+  @inlinable @inline(__always)
+  public static func fusedResizeAndPadConv2D<T: TensorFlowNumeric>(
+    _ input: Tensor<T>,
+    size: Tensor<Int32>,
+    paddings: Tensor<Int32>,
+    filter: Tensor<T>,
+    resizeAlignCorners: Bool = false,
+    mode: Mode1,
+    strides: [Int32],
+    padding: Padding
+  ) -> Tensor<T> {
+    switch commonBackend(
+      commonBackend(
+        commonBackend(input.handle.backend, size.handle.backend), paddings.handle.backend),
+      filter.handle.backend)
+    {
+    case .XLA:
+      let output_device = filter.device
+      let input = Tensor(copying: input, to: .defaultTFEager)
+      let size = Tensor(copying: size, to: .defaultTFEager)
+      let paddings = Tensor(copying: paddings, to: .defaultTFEager)
+      let filter = Tensor(copying: filter, to: .defaultTFEager)
+      return Tensor(
+        copying: _RawTFEager.fusedResizeAndPadConv2D(
+          input, size: size, paddings: paddings, filter: filter,
+          resizeAlignCorners: resizeAlignCorners, mode: mode, strides: strides, padding: padding),
+        to: output_device)
+    case .TF_EAGER:
+      return _RawTFEager.fusedResizeAndPadConv2D(
+        input, size: size, paddings: paddings, filter: filter,
+        resizeAlignCorners: resizeAlignCorners, mode: mode, strides: strides, padding: padding)
+    }
+  }
+
+  /// Computes the GRU cell forward propagation for 1 time step.
+  ///
+  /// Args
+  ///     x: Input to the GRU cell.
+  ///     h_prev: State input from the previous GRU cell.
+  ///     w_ru: Weight matrix for the reset and update gate.
+  ///     w_c: Weight matrix for the cell connection gate.
+  ///     b_ru: Bias vector for the reset and update gate.
+  ///     b_c: Bias vector for the cell connection gate.
+  ///
+  /// Returns
+  ///     r: Output of the reset gate.
+  ///     u: Output of the update gate.
+  ///     c: Output of the cell connection gate.
+  ///     h: Current state of the GRU cell.
+  ///
+  /// Note on notation of the variables:
+  ///
+  /// Concatenation of a and b is represented by a_b
+  /// Element-wise (Hadamard) product of a and b is represented by ab or a \circ b
+  /// Matrix multiplication is represented by *
+  ///
+  /// Biases are initialized with:
+  /// `b_ru` - constant_initializer(1.0)
+  /// `b_c` - constant_initializer(0.0)
+  ///
+  /// This kernel op implements the following mathematical equations:
+  ///
+  /// ```
+  /// x_h_prev = [x, h_prev]
+  ///
+  /// [r_bar u_bar] = x_h_prev * w_ru + b_ru
+  ///
+  /// r = sigmoid(r_bar)
+  /// u = sigmoid(u_bar)
+  ///
+  /// h_prevr = h_prev \circ r
+  ///
+  /// x_h_prevr = [x h_prevr]
+  ///
+  /// c_bar = x_h_prevr * w_c + b_c
+  /// c = tanh(c_bar)
+  ///
+  /// h = (1-u) \circ c + u \circ h_prev
+  /// ```
+  @inlinable @inline(__always)
+  public static func gRUBlockCell<T: FloatingPoint & TensorFlowScalar>(
+    _ x: Tensor<T>,
+    hPrev: Tensor<T>,
+    wRu: Tensor<T>,
+    wC: Tensor<T>,
+    bRu: Tensor<T>,
+    bC: Tensor<T>
+  ) -> (r: Tensor<T>, u: Tensor<T>, c: Tensor<T>, h: Tensor<T>) {
+    _RawTFEager.gRUBlockCell(x, hPrev: hPrev, wRu: wRu, wC: wC, bRu: bRu, bC: bC)
+  }
+
+  /// Computes the GRU cell back-propagation for 1 time step.
+  ///
+  /// Args
+  ///     x: Input to the GRU cell.
+  ///     h_prev: State input from the previous GRU cell.
+  ///     w_ru: Weight matrix for the reset and update gate.
+  ///     w_c: Weight matrix for the cell connection gate.
+  ///     b_ru: Bias vector for the reset and update gate.
+  ///     b_c: Bias vector for the cell connection gate.
+  ///     r: Output of the reset gate.
+  ///     u: Output of the update gate.
+  ///     c: Output of the cell connection gate.
+  ///     d_h: Gradient of h_new with respect to the objective function.
+  ///
+  /// Returns
+  ///     d_x: Gradient of x with respect to the objective function.
+  ///     d_h_prev: Gradient of h with respect to the objective function.
+  ///     d_c_bar: Gradient of c_bar with respect to the objective function.
+  ///     d_r_bar_u_bar: Gradient of r_bar & u_bar with respect to the objective function.
+  ///
+  /// This kernel op implements the following mathematical equations:
+  ///
+  /// Note on notation of the variables:
+  ///
+  /// Concatenation of a and b is represented by a_b
+  /// Element-wise (Hadamard) product of a and b is represented by ab or a \circ b
+  /// Matrix multiplication is represented by *
+  ///
+  /// Additional notes for clarity:
+  ///
+  /// `w_ru` can be segmented into 4 different matrices.
+  /// ```
+  /// w_ru = [w_r_x w_u_x
+  ///         w_r_h_prev w_u_h_prev]
+  /// ```
+  /// Similarly, `w_c` can be segmented into 2 different matrices.
+  /// ```
+  /// w_c = [w_c_x w_c_h_prevr]
+  /// ```
+  /// Same goes for biases.
+  /// ```
+  /// b_ru = [b_ru_x b_ru_h]
+  /// b_c = [b_c_x b_c_h]
+  /// ```
+  /// Another note on notation:
+  /// ```
+  /// d_x = d_x_component_1 + d_x_component_2
+  ///
+  /// where d_x_component_1 = d_r_bar * w_r_x^T + d_u_bar * w_u_x^T
+  /// and d_x_component_2 = d_c_bar * w_c_x^T
+  ///
+  /// d_h_prev = d_h_prev_component_1 + d_h_prevr \circ r + d_h \circ u
+  /// where d_h_prev_component_1 = d_r_bar * w_r_h_prev^T + d_u_bar * w_u_h_prev^T
+  /// ```
+  ///
+  /// Mathematics behind the Gradients below:
+  /// ```
+  /// d_c_bar = d_h \circ (1-u) \circ (1-c \circ c)
+  /// d_u_bar = d_h \circ (h_prev-c) \circ u \circ (1-u)
+  ///
+  /// d_r_bar_u_bar = [d_r_bar d_u_bar]
+  ///
+  /// [d_x_component_1 d_h_prev_component_1] = d_r_bar_u_bar * w_ru^T
+  ///
+  /// [d_x_component_2 d_h_prevr] = d_c_bar * w_c^T
+  ///
+  /// d_x = d_x_component_1 + d_x_component_2
+  ///
+  /// d_h_prev = d_h_prev_component_1 + d_h_prevr \circ r + d_h \circ u
+  /// ```
+  /// The calculation below is performed in the Python wrapper for the gradients
+  /// (not in the gradient kernel):
+  /// ```
+  /// d_w_ru = x_h_prev^T * d_r_bar_u_bar
+  ///
+  /// d_w_c = x_h_prevr^T * d_c_bar
+  ///
+  /// d_b_ru = sum of d_r_bar_u_bar along axis = 0
+  ///
+  /// d_b_c = sum of d_c_bar along axis = 0
+  /// ```
+  @inlinable @inline(__always)
+  public static func gRUBlockCellGrad<T: FloatingPoint & TensorFlowScalar>(
+    _ x: Tensor<T>,
+    hPrev: Tensor<T>,
+    wRu: Tensor<T>,
+    wC: Tensor<T>,
+    bRu: Tensor<T>,
+    bC: Tensor<T>,
+    r: Tensor<T>,
+    u: Tensor<T>,
+    c: Tensor<T>,
+    dH: Tensor<T>
+  ) -> (dX: Tensor<T>, dHPrev: Tensor<T>, dCBar: Tensor<T>, dRBarUBar: Tensor<T>) {
+    _RawTFEager.gRUBlockCellGrad(
+      x, hPrev: hPrev, wRu: wRu, wC: wC, bRu: bRu, bC: bC, r: r, u: u, c: c, dH: dH)
+  }
+
+  /// Gather slices from `params` according to `indices`.
+  ///
+  /// `indices` must be an integer tensor of any dimension (usually 0-D or 1-D).
+  /// Produces an output tensor with shape `indices.shape + params.shape[1:]` where:
+  ///
+  /// ```python
+  /// # Scalar indices
+  /// output[:, ..., :] = params[indices, :, ... :]
+  ///
+  /// # Vector indices
+  /// output[i, :, ..., :] = params[indices[i], :, ... :]
+  ///
+  /// # Higher rank indices
+  /// output[i, ..., j, :, ... :] = params[indices[i, ..., j], :, ..., :]
+  /// ```
+  ///
+  /// If `indices` is a permutation and `len(indices) == params.shape[0]` then
+  /// this operation will permute `params` accordingly.
+  ///
+  /// `validate_indices`: DEPRECATED. If this operation is assigned to CPU, values in
+  /// `indices` are always validated to be within range. If assigned to GPU,
+  /// out-of-bound indices result in safe but unspecified behavior, which may include
+  /// raising an error.
+  ///
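+  /// A Swift sketch of the vector-indices case (illustrative values):
+  ///
+  /// ```swift
+  /// let params = Tensor<Float>([10, 20, 30, 40])
+  /// let indices = Tensor<Int32>([3, 0])
+  /// let output = _Raw.gather(params: params, indices: indices)
+  /// // output == [40.0, 10.0]
+  /// ```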
+ @inlinable @inline(__always) + public static func gather< + Tparams: TensorFlowScalar, + Tindices: TensorFlowIndex + >( + params: Tensor, + indices: Tensor, + validateIndices: Bool = true + ) -> Tensor { + switch commonBackend(params.handle.backend, indices.handle.backend) { + case .XLA: + return _RawXLA.gather(params: params, indices: indices, validateIndices: validateIndices) + case .TF_EAGER: + return _RawTFEager.gather( + params: params, indices: indices, validateIndices: validateIndices) + } + + } + + /// Gather slices from `params` into a Tensor with shape specified by `indices`. + /// + /// `indices` is a K-dimensional integer tensor, best thought of as a + /// (K-1)-dimensional tensor of indices into `params`, where each element defines a + /// slice of `params`: + /// + /// output[\\(i_0, ..., i_{K-2}\\)] = params[indices[\\(i_0, ..., i_{K-2}\\)]] + /// + /// Whereas in `tf.gather` `indices` defines slices into the `axis` + /// dimension of `params`, in `tf.gather_nd`, `indices` defines slices into the + /// first `N` dimensions of `params`, where `N = indices.shape[-1]`. + /// + /// The last dimension of `indices` can be at most the rank of + /// `params`: + /// + /// indices.shape[-1] <= params.rank + /// + /// The last dimension of `indices` corresponds to elements + /// (if `indices.shape[-1] == params.rank`) or slices + /// (if `indices.shape[-1] < params.rank`) along dimension `indices.shape[-1]` + /// of `params`. The output tensor has shape + /// + /// indices.shape[:-1] + params.shape[indices.shape[-1]:] + /// + /// Note that on CPU, if an out of bound index is found, an error is returned. + /// On GPU, if an out of bound index is found, a 0 is stored in the + /// corresponding output value. + /// + /// Some examples below. + /// + /// Simple indexing into a matrix: + /// + /// ```python + /// indices = [[0, 0], [1, 1]] + /// params = [['a', 'b'], ['c', 'd']] + /// output = ['a', 'd'] + /// ``` + /// + /// Slice indexing into a matrix: + /// + /// ```python + /// indices = [[1], [0]] + /// params = [['a', 'b'], ['c', 'd']] + /// output = [['c', 'd'], ['a', 'b']] + /// ``` + /// + /// Indexing into a 3-tensor: + /// + /// ```python + /// indices = [[1]] + /// params = [[['a0', 'b0'], ['c0', 'd0']], + /// [['a1', 'b1'], ['c1', 'd1']]] + /// output = [[['a1', 'b1'], ['c1', 'd1']]] + /// + /// + /// indices = [[0, 1], [1, 0]] + /// params = [[['a0', 'b0'], ['c0', 'd0']], + /// [['a1', 'b1'], ['c1', 'd1']]] + /// output = [['c0', 'd0'], ['a1', 'b1']] + /// + /// + /// indices = [[0, 0, 1], [1, 0, 1]] + /// params = [[['a0', 'b0'], ['c0', 'd0']], + /// [['a1', 'b1'], ['c1', 'd1']]] + /// output = ['b0', 'b1'] + /// ``` + /// + /// Batched indexing into a matrix: + /// + /// ```python + /// indices = [[[0, 0]], [[0, 1]]] + /// params = [['a', 'b'], ['c', 'd']] + /// output = [['a'], ['b']] + /// ``` + /// + /// Batched slice indexing into a matrix: + /// + /// ```python + /// indices = [[[1]], [[0]]] + /// params = [['a', 'b'], ['c', 'd']] + /// output = [[['c', 'd']], [['a', 'b']]] + /// ``` + /// + /// Batched indexing into a 3-tensor: + /// + /// ```python + /// indices = [[[1]], [[0]]] + /// params = [[['a0', 'b0'], ['c0', 'd0']], + /// [['a1', 'b1'], ['c1', 'd1']]] + /// output = [[[['a1', 'b1'], ['c1', 'd1']]], + /// [[['a0', 'b0'], ['c0', 'd0']]]] + /// + /// indices = [[[0, 1], [1, 0]], [[0, 0], [1, 1]]] + /// params = [[['a0', 'b0'], ['c0', 'd0']], + /// [['a1', 'b1'], ['c1', 'd1']]] + /// output = [[['c0', 'd0'], ['a1', 'b1']], + /// [['a0', 'b0'], ['c1', 'd1']]] + 
/// + /// + /// indices = [[[0, 0, 1], [1, 0, 1]], [[0, 1, 1], [1, 1, 0]]] + /// params = [[['a0', 'b0'], ['c0', 'd0']], + /// [['a1', 'b1'], ['c1', 'd1']]] + /// output = [['b0', 'b1'], ['d0', 'c1']] + /// ``` + /// + /// See also `tf.gather` and `tf.batch_gather`. + /// + /// - Parameters: + /// - params: The tensor from which to gather values. + /// - indices: Index tensor. + /// + /// - Output output: Values from `params` gathered from indices given by `indices`, with + /// shape `indices.shape[:-1] + params.shape[indices.shape[-1]:]`. + @inlinable @inline(__always) + public static func gatherNd< + Tparams: TensorFlowScalar, + Tindices: TensorFlowIndex + >( + params: Tensor, + indices: Tensor + ) -> Tensor { + switch commonBackend(params.handle.backend, indices.handle.backend) { + case .XLA: + let output_device = indices.device + let params = Tensor(copying: params, to: .defaultTFEager) + let indices = Tensor(copying: indices, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.gatherNd(params: params, indices: indices), to: output_device) + case .TF_EAGER: + return _RawTFEager.gatherNd(params: params, indices: indices) + } + + } + + /// Gather slices from `params` axis `axis` according to `indices`. + /// + /// `indices` must be an integer tensor of any dimension (usually 0-D or 1-D). + /// Produces an output tensor with shape `params.shape[:axis] + indices.shape + + /// params.shape[axis + 1:]` where: + /// + /// ```python + /// # Scalar indices (output is rank(params) - 1). + /// output[a_0, ..., a_n, b_0, ..., b_n] = + /// params[a_0, ..., a_n, indices, b_0, ..., b_n] + /// + /// # Vector indices (output is rank(params)). + /// output[a_0, ..., a_n, i, b_0, ..., b_n] = + /// params[a_0, ..., a_n, indices[i], b_0, ..., b_n] + /// + /// # Higher rank indices (output is rank(params) + rank(indices) - 1). + /// output[a_0, ..., a_n, i, ..., j, b_0, ... b_n] = + /// params[a_0, ..., a_n, indices[i, ..., j], b_0, ..., b_n] + /// ``` + /// + ///
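+  /// For example, gathering rows (axis 0) in Swift (illustrative values):
+  ///
+  /// ```swift
+  /// let params: Tensor<Float> = [[1, 2], [3, 4], [5, 6]]
+  /// let indices = Tensor<Int32>([2, 0])
+  /// let output = _Raw.gatherV2(params: params, indices: indices, axis: Tensor<Int32>(0))
+  /// // output == [[5.0, 6.0], [1.0, 2.0]]
+  /// ```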
+  ///
+  /// Note that on CPU, if an out of bound index is found, an error is returned.
+  /// On GPU, if an out of bound index is found, a 0 is stored in the
+  /// corresponding output value.
+  ///
+  /// See also `tf.batch_gather` and `tf.gather_nd`.
+  ///
+  /// - Parameters:
+  ///   - params: The tensor from which to gather values. Must be at least rank
+  ///     `axis + 1`.
+  ///   - indices: Index tensor. Must be in range `[0, params.shape[axis])`.
+  ///   - axis: The axis in `params` to gather `indices` from. Defaults to the first
+  ///     dimension. Supports negative indexes.
+  ///
+  /// - Output output: Values from `params` gathered from indices given by `indices`, with
+  ///   shape `params.shape[:axis] + indices.shape + params.shape[axis + 1:]`.
+  @inlinable @inline(__always)
+  public static func gatherV2<
+    Tparams: TensorFlowScalar,
+    Tindices: TensorFlowIndex,
+    Taxis: TensorFlowIndex
+  >(
+    params: Tensor<Tparams>,
+    indices: Tensor<Tindices>,
+    axis: Tensor<Taxis>,
+    batchDims: Int64 = 0
+  ) -> Tensor<Tparams> {
+    switch commonBackend(
+      commonBackend(params.handle.backend, indices.handle.backend), axis.handle.backend)
+    {
+    case .XLA:
+      return _RawXLA.gatherV2(params: params, indices: indices, axis: axis, batchDims: batchDims)
+    case .TF_EAGER:
+      return _RawTFEager.gatherV2(
+        params: params, indices: indices, axis: axis, batchDims: batchDims)
+    }
+  }
+
+  /// This op produces Regions of Interest from given bounding boxes (bbox_deltas) encoded with respect to anchors, according to eq. 2 in arXiv:1506.01497.
+  ///
+  /// The op selects the top `pre_nms_topn` scoring boxes, decodes them with respect to anchors,
+  /// applies non-maximal suppression to overlapping boxes with an
+  /// intersection-over-union (IoU) value higher than `nms_threshold`, and discards
+  /// boxes whose shorter side is less than `min_size`.
+  /// Inputs:
+  /// `scores`: A 4D tensor of shape [Batch, Height, Width, Num Anchors] containing the scores per anchor at the given position
+  /// `bbox_deltas`: A tensor of shape [Batch, Height, Width, 4 x Num Anchors] of boxes encoded with respect to each anchor
+  /// `anchors`: A 1D tensor of shape [4 x Num Anchors], representing the anchors.
+  /// Outputs:
+  /// `rois`: output RoIs, a 3D tensor of shape [Batch, post_nms_topn, 4], padded with 0 if fewer than post_nms_topn candidates are found.
+  /// `roi_probabilities`: probability scores of each roi in 'rois', a 2D tensor of shape [Batch, post_nms_topn], padded with 0 if needed, sorted by scores.
+  ///
+  /// - Parameters:
+  ///   - scores: A 4-D float tensor of shape `[num_images, height, width, num_anchors]` containing scores of the boxes for given anchors, can be unsorted.
+  ///   - bbox_deltas: A 4-D float tensor of shape `[num_images, height, width, 4 x num_anchors]` encoding boxes with respect to each anchor.
+  ///     Coordinates are given in the form [dy, dx, dh, dw].
+  ///   - image_info: A 2-D float tensor of shape `[num_images, 5]` containing image information Height, Width, Scale.
+  ///   - anchors: A 2-D float tensor of shape `[num_anchors, 4]` describing the anchor boxes. Boxes are formatted in the form [y1, x1, y2, x2].
+  ///   - nms_threshold: A scalar float tensor for the non-maximal-suppression threshold.
+  ///   - pre_nms_topn: A scalar int tensor for the number of top scoring boxes to be used as input.
+  ///   - min_size: A scalar float tensor. Any box that has a smaller size than min_size will be discarded.
+  ///
+  /// - Attr post_nms_topn: An integer. Maximum number of rois in the output.
+ /// + /// - Outputs: + /// - rois: A 3-D float tensor of shape `[num_images,post_nms_topn,4]` representing the selected + /// region of interest boxes. Sorted in descending order in scores. + /// - roi_probabilities: A 2-D float tensor of shape `[num_images, post_nms_topn]` representing the score of the + /// region of interest box in `rois` tensor at the same index. + @inlinable @inline(__always) + public static func generateBoundingBoxProposals( + scores: Tensor, + bboxDeltas: Tensor, + imageInfo: Tensor, + anchors: Tensor, + nmsThreshold: Tensor, + preNmsTopn: Tensor, + minSize: Tensor, + postNmsTopn: Int64 = 300 + ) -> (rois: Tensor, roiProbabilities: Tensor) { + _RawTFEager.generateBoundingBoxProposals( + scores: scores, bboxDeltas: bboxDeltas, imageInfo: imageInfo, anchors: anchors, + nmsThreshold: nmsThreshold, preNmsTopn: preNmsTopn, minSize: minSize, + postNmsTopn: postNmsTopn) + } + + /// Given a path to new and old vocabulary files, returns a remapping Tensor of + /// + /// length `num_new_vocab`, where `remapping[i]` contains the row number in the old + /// vocabulary that corresponds to row `i` in the new vocabulary (starting at line + /// `new_vocab_offset` and up to `num_new_vocab` entities), or `-1` if entry `i` + /// in the new vocabulary is not in the old vocabulary. The old vocabulary is + /// constrained to the first `old_vocab_size` entries if `old_vocab_size` is not the + /// default value of -1. + /// + /// `num_vocab_offset` enables + /// use in the partitioned variable case, and should generally be set through + /// examining partitioning info. The format of the files should be a text file, + /// with each line containing a single entity within the vocabulary. + /// + /// For example, with `new_vocab_file` a text file containing each of the following + /// elements on a single line: `[f0, f1, f2, f3]`, old_vocab_file = [f1, f0, f3], + /// `num_new_vocab = 3, new_vocab_offset = 1`, the returned remapping would be + /// `[0, -1, 2]`. + /// + /// The op also returns a count of how many entries in the new vocabulary + /// were present in the old vocabulary, which is used to calculate the number of + /// values to initialize in a weight matrix remapping + /// + /// This functionality can be used to remap both row vocabularies (typically, + /// features) and column vocabularies (typically, classes) from TensorFlow + /// checkpoints. Note that the partitioning logic relies on contiguous vocabularies + /// corresponding to div-partitioned variables. Moreover, the underlying remapping + /// uses an IndexTable (as opposed to an inexact CuckooTable), so client code should + /// use the corresponding index_table_from_file() as the FeatureColumn framework + /// does (as opposed to tf.feature_to_id(), which uses a CuckooTable). + /// + /// - Parameters: + /// - new_vocab_file: Path to the new vocab file. + /// - old_vocab_file: Path to the old vocab file. + /// + /// - Attrs: + /// - new_vocab_offset: How many entries into the new vocab file to start reading. + /// - num_new_vocab: Number of entries in the new vocab file to remap. + /// - old_vocab_size: Number of entries in the old vocab file to consider. If -1, + /// use the entire old vocabulary. + /// + /// - Outputs: + /// - remapping: A Tensor of length num_new_vocab where the element at index i + /// is equal to the old ID that maps to the new ID i. This element is -1 for any + /// new ID that is not found in the old vocabulary. + /// - num_present: Number of new vocab entries found in old vocab. 
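+  /// A call sketch mirroring the worked example above (file paths hypothetical):
+  ///
+  /// ```swift
+  /// let (remapping, numPresent) = _Raw.generateVocabRemapping(
+  ///   newVocabFile: StringTensor("new_vocab.txt"),  // contains f0, f1, f2, f3
+  ///   oldVocabFile: StringTensor("old_vocab.txt"),  // contains f1, f0, f3
+  ///   newVocabOffset: 1, numNewVocab: 3)
+  /// // remapping == [0, -1, 2], numPresent == 2
+  /// ```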
+ @inlinable @inline(__always) + public static func generateVocabRemapping( + newVocabFile: StringTensor, + oldVocabFile: StringTensor, + newVocabOffset: Int64, + numNewVocab: Int64, + oldVocabSize: Int64 = -1 + ) -> (remapping: Tensor, numPresent: Tensor) { + _RawTFEager.generateVocabRemapping( + newVocabFile: newVocabFile, oldVocabFile: oldVocabFile, newVocabOffset: newVocabOffset, + numNewVocab: numNewVocab, oldVocabSize: oldVocabSize) + } + + /// Creates a dataset that invokes a function to generate elements. + @inlinable @inline(__always) + public static func generatorDataset< + InitfuncIn: TensorGroup, + InitfuncOut: TensorGroup, + NextfuncIn: TensorGroup, + NextfuncOut: TensorGroup, + FinalizefuncIn: TensorGroup, + FinalizefuncOut: TensorGroup, + TinitFuncArgs: TensorArrayProtocol, + TnextFuncArgs: TensorArrayProtocol, + TfinalizeFuncArgs: TensorArrayProtocol + >( + initFuncOtherArgs: TinitFuncArgs, + nextFuncOtherArgs: TnextFuncArgs, + finalizeFuncOtherArgs: TfinalizeFuncArgs, + initFunc: (InitfuncIn) -> InitfuncOut, + nextFunc: (NextfuncIn) -> NextfuncOut, + finalizeFunc: (FinalizefuncIn) -> FinalizefuncOut, + outputTypes: [TensorDataType], + outputShapes: [TensorShape?] + ) -> VariantHandle { + _RawTFEager.generatorDataset( + initFuncOtherArgs: initFuncOtherArgs, nextFuncOtherArgs: nextFuncOtherArgs, + finalizeFuncOtherArgs: finalizeFuncOtherArgs, initFunc: initFunc, nextFunc: nextFunc, + finalizeFunc: finalizeFunc, outputTypes: outputTypes, outputShapes: outputShapes) + } + + /// Returns calibration data for the given resource name + @inlinable @inline(__always) + public static func getCalibrationDataOp( + resourceName: StringTensor + ) -> StringTensor { + _RawTFEager.getCalibrationDataOp(resourceName: resourceName) + } + + /// Store the input tensor in the state of the current session. + /// + /// - Parameter value: The tensor to be stored. + /// + /// - Output handle: The handle for the tensor stored in the session state, represented + /// as a string. + @inlinable @inline(__always) + public static func getSessionHandle( + value: Tensor + ) -> StringTensor { + _RawTFEager.getSessionHandle(value: value) + } + + /// Store the input tensor in the state of the current session. + /// + /// - Parameter value: The tensor to be stored. + /// + /// - Output handle: The handle for the tensor stored in the session state, represented + /// as a ResourceHandle object. + @inlinable @inline(__always) + public static func getSessionHandleV2( + value: Tensor + ) -> ResourceHandle { + _RawTFEager.getSessionHandleV2(value: value) + } + + /// Get the value of the tensor specified by its handle. + /// + /// - Parameter handle: The handle for a tensor stored in the session state. + /// + /// - Attr dtype: The type of the output value. + /// + /// - Output value: The tensor for the given handle. + @inlinable @inline(__always) + public static func getSessionTensor( + handle: StringTensor + ) -> Tensor { + _RawTFEager.getSessionTensor(handle: handle) + } + + @inlinable @inline(__always) + public static func graphDefVersion() -> Tensor { + _RawTFEager.graphDefVersion() + } + + /// Returns the truth value of (x > y) element-wise. + /// + /// *NOTE*: `Greater` supports broadcasting. 
More about broadcasting + /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + /// + /// Example: + /// + /// ```python + /// x = tf.constant([5, 4, 6]) + /// y = tf.constant([5, 2, 5]) + /// tf.math.greater(x, y) ==> [False, True, True] + /// + /// x = tf.constant([5, 4, 6]) + /// y = tf.constant([5]) + /// tf.math.greater(x, y) ==> [False, False, True] + /// ``` + @inlinable @inline(__always) + public static func greater( + _ x: Tensor, + _ y: Tensor + ) -> Tensor { + switch commonBackend(x.handle.backend, y.handle.backend) { + case .XLA: + return _RawXLA.greater(x, y) + case .TF_EAGER: + return _RawTFEager.greater(x, y) + } + + } + + /// Returns the truth value of (x >= y) element-wise. + /// + /// *NOTE*: `GreaterEqual` supports broadcasting. More about broadcasting + /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + /// + /// Example: + /// + /// ```python + /// x = tf.constant([5, 4, 6, 7]) + /// y = tf.constant([5, 2, 5, 10]) + /// tf.math.greater_equal(x, y) ==> [True, True, True, False] + /// + /// x = tf.constant([5, 4, 6, 7]) + /// y = tf.constant([5]) + /// tf.math.greater_equal(x, y) ==> [True, False, True, True] + /// ``` + @inlinable @inline(__always) + public static func greaterEqual( + _ x: Tensor, + _ y: Tensor + ) -> Tensor { + switch commonBackend(x.handle.backend, y.handle.backend) { + case .XLA: + return _RawXLA.greaterEqual(x, y) + case .TF_EAGER: + return _RawTFEager.greaterEqual(x, y) + } + + } + + /// Creates a dataset that computes a group-by on `input_dataset`. + /// + /// Creates a dataset that computes a group-by on `input_dataset`. + /// + /// - Parameters: + /// - input_dataset: A variant tensor representing the input dataset. + /// - key_func_other_arguments: A list of tensors, typically values that were captured when + /// building a closure for `key_func`. + /// - init_func_other_arguments: A list of tensors, typically values that were captured when + /// building a closure for `init_func`. + /// - reduce_func_other_arguments: A list of tensors, typically values that were captured when + /// building a closure for `reduce_func`. + /// - finalize_func_other_arguments: A list of tensors, typically values that were captured when + /// building a closure for `finalize_func`. + /// + /// - Attrs: + /// - key_func: A function mapping an element of `input_dataset`, concatenated + /// with `key_func_other_arguments` to a scalar value of type DT_INT64. + /// - init_func: A function mapping a key of type DT_INT64, concatenated with + /// `init_func_other_arguments` to the initial reducer state. + /// - reduce_func: A function mapping the current reducer state and an element of `input_dataset`, + /// concatenated with `reduce_func_other_arguments` to a new reducer state. + /// - finalize_func: A function mapping the final reducer state to an output element. 
+ @inlinable @inline(__always) + public static func groupByReducerDataset< + KeyfuncIn: TensorGroup, + KeyfuncOut: TensorGroup, + InitfuncIn: TensorGroup, + InitfuncOut: TensorGroup, + ReducefuncIn: TensorGroup, + ReducefuncOut: TensorGroup, + FinalizefuncIn: TensorGroup, + FinalizefuncOut: TensorGroup, + TkeyFuncOtherArguments: TensorArrayProtocol, + TinitFuncOtherArguments: TensorArrayProtocol, + TreduceFuncOtherArguments: TensorArrayProtocol, + TfinalizeFuncOtherArguments: TensorArrayProtocol + >( + inputDataset: VariantHandle, + keyFuncOtherArguments: TkeyFuncOtherArguments, + initFuncOtherArguments: TinitFuncOtherArguments, + reduceFuncOtherArguments: TreduceFuncOtherArguments, + finalizeFuncOtherArguments: TfinalizeFuncOtherArguments, + keyFunc: (KeyfuncIn) -> KeyfuncOut, + initFunc: (InitfuncIn) -> InitfuncOut, + reduceFunc: (ReducefuncIn) -> ReducefuncOut, + finalizeFunc: (FinalizefuncIn) -> FinalizefuncOut, + outputTypes: [TensorDataType], + outputShapes: [TensorShape?] + ) -> VariantHandle { + _RawTFEager.groupByReducerDataset( + inputDataset: inputDataset, keyFuncOtherArguments: keyFuncOtherArguments, + initFuncOtherArguments: initFuncOtherArguments, + reduceFuncOtherArguments: reduceFuncOtherArguments, + finalizeFuncOtherArguments: finalizeFuncOtherArguments, keyFunc: keyFunc, + initFunc: initFunc, reduceFunc: reduceFunc, finalizeFunc: finalizeFunc, + outputTypes: outputTypes, outputShapes: outputShapes) + } + + /// Creates a dataset that computes a windowed group-by on `input_dataset`. + /// + /// // TODO(mrry): Support non-int64 keys. + /// + /// - Attr key_func: A function mapping an element of `input_dataset`, concatenated + /// with `key_func_other_arguments` to a scalar value of type DT_INT64. + @inlinable @inline(__always) + public static func groupByWindowDataset< + KeyfuncIn: TensorGroup, + KeyfuncOut: TensorGroup, + ReducefuncIn: TensorGroup, + ReducefuncOut: TensorGroup, + WindowsizefuncIn: TensorGroup, + WindowsizefuncOut: TensorGroup, + TkeyFuncOtherArguments: TensorArrayProtocol, + TreduceFuncOtherArguments: TensorArrayProtocol, + TwindowSizeFuncOtherArguments: TensorArrayProtocol + >( + inputDataset: VariantHandle, + keyFuncOtherArguments: TkeyFuncOtherArguments, + reduceFuncOtherArguments: TreduceFuncOtherArguments, + windowSizeFuncOtherArguments: TwindowSizeFuncOtherArguments, + keyFunc: (KeyfuncIn) -> KeyfuncOut, + reduceFunc: (ReducefuncIn) -> ReducefuncOut, + windowSizeFunc: (WindowsizefuncIn) -> WindowsizefuncOut, + outputTypes: [TensorDataType], + outputShapes: [TensorShape?] + ) -> VariantHandle { + _RawTFEager.groupByWindowDataset( + inputDataset: inputDataset, keyFuncOtherArguments: keyFuncOtherArguments, + reduceFuncOtherArguments: reduceFuncOtherArguments, + windowSizeFuncOtherArguments: windowSizeFuncOtherArguments, keyFunc: keyFunc, + reduceFunc: reduceFunc, windowSizeFunc: windowSizeFunc, outputTypes: outputTypes, + outputShapes: outputShapes) + } + + /// Gives a guarantee to the TF runtime that the input tensor is a constant. + /// + /// The runtime is then free to make optimizations based on this. + /// + /// Only accepts value typed tensors as inputs and rejects resource variable handles + /// as input. + /// + /// Returns the input tensor without modification. 
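+  /// A minimal sketch; the call is a pass-through that only attaches the
+  /// constant-ness hint (illustrative values):
+  ///
+  /// ```swift
+  /// let table = Tensor<Float>([0.1, 0.2, 0.3])
+  /// let frozen = _Raw.guaranteeConst(table)
+  /// // frozen == table; the runtime may now constant-fold uses of `frozen`.
+  /// ```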
+ @inlinable @inline(__always) + public static func guaranteeConst( + _ input: Tensor + ) -> Tensor { + switch input.handle.backend { + case .XLA: + let output_device = input.device + let input = Tensor(copying: input, to: .defaultTFEager) + return Tensor(copying: _RawTFEager.guaranteeConst(input), to: output_device) + case .TF_EAGER: + return _RawTFEager.guaranteeConst(input) + } + + } + + /// Convert one or more images from HSV to RGB. + /// + /// Outputs a tensor of the same shape as the `images` tensor, containing the RGB + /// value of the pixels. The output is only well defined if the value in `images` + /// are in `[0,1]`. + /// + /// See `rgb_to_hsv` for a description of the HSV encoding. + /// + /// - Parameter images: 1-D or higher rank. HSV data to convert. Last dimension must be size 3. + /// + /// - Output output: `images` converted to RGB. + @inlinable @inline(__always) + public static func hSVToRGB( + images: Tensor + ) -> Tensor { + switch images.handle.backend { + case .XLA: + let output_device = images.device + let images = Tensor(copying: images, to: .defaultTFEager) + return Tensor(copying: _RawTFEager.hSVToRGB(images: images), to: output_device) + case .TF_EAGER: + return _RawTFEager.hSVToRGB(images: images) + } + + } + + /// Creates a non-initialized hash table. + /// + /// This op creates a hash table, specifying the type of its keys and values. + /// Before using the table you will have to initialize it. After initialization the + /// table will be immutable. + /// + /// - Attrs: + /// - container: If non-empty, this table is placed in the given container. + /// Otherwise, a default container is used. + /// - shared_name: If non-empty, this table is shared under the given name across + /// multiple sessions. + /// - use_node_name_sharing: If true and shared_name is empty, the table is shared + /// using the node name. + /// - key_dtype: Type of the table keys. + /// - value_dtype: Type of the table values. + /// + /// - Output table_handle: Handle to a table. + @inlinable @inline(__always) + public static func hashTableV2( + container: String, + sharedName: String, + useNodeNameSharing: Bool = false, + keyDtype: TensorDataType, + valueDtype: TensorDataType + ) -> ResourceHandle { + _RawTFEager.hashTableV2( + container: container, sharedName: sharedName, useNodeNameSharing: useNodeNameSharing, + keyDtype: keyDtype, valueDtype: valueDtype) + } + + /// Return histogram of values. + /// + /// Given the tensor `values`, this operation returns a rank 1 histogram counting + /// the number of entries in `values` that fall into every bin. The bins are + /// equal width and determined by the arguments `value_range` and `nbins`. + /// + /// ```python + /// # Bins will be: (-inf, 1), [1, 2), [2, 3), [3, 4), [4, inf) + /// nbins = 5 + /// value_range = [0.0, 5.0] + /// new_values = [-1.0, 0.0, 1.5, 2.0, 5.0, 15] + /// + /// with tf.get_default_session() as sess: + /// hist = tf.histogram_fixed_width(new_values, value_range, nbins=5) + /// variables.global_variables_initializer().run() + /// sess.run(hist) => [2, 1, 1, 0, 2] + /// ``` + /// + /// - Parameters: + /// - values: Numeric `Tensor`. + /// - value_range: Shape [2] `Tensor` of same `dtype` as `values`. + /// values <= value_range[0] will be mapped to hist[0], + /// values >= value_range[1] will be mapped to hist[-1]. + /// - nbins: Scalar `int32 Tensor`. Number of histogram bins. + /// + /// - Output out: A 1-D `Tensor` holding histogram of values. 
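+  /// The Swift equivalent of the example above; the index dtype is selected by
+  /// the result annotation:
+  ///
+  /// ```swift
+  /// let newValues = Tensor<Float>([-1.0, 0.0, 1.5, 2.0, 5.0, 15.0])
+  /// let valueRange = Tensor<Float>([0.0, 5.0])
+  /// let hist: Tensor<Int32> = _Raw.histogramFixedWidth(
+  ///   newValues, valueRange: valueRange, nbins: Tensor<Int32>(5))
+  /// // hist == [2, 1, 1, 0, 2]
+  /// ```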
+ @inlinable @inline(__always) + public static func histogramFixedWidth< + T: TensorFlowNumeric, + Dtype: TensorFlowIndex + >( + _ values: Tensor, + valueRange: Tensor, + nbins: Tensor + ) -> Tensor { + switch commonBackend( + commonBackend(values.handle.backend, valueRange.handle.backend), nbins.handle.backend) + { + case .XLA: + let output_device = nbins.device + let values = Tensor(copying: values, to: .defaultTFEager) + let valueRange = Tensor(copying: valueRange, to: .defaultTFEager) + let nbins = Tensor(copying: nbins, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.histogramFixedWidth(values, valueRange: valueRange, nbins: nbins), + to: output_device) + case .TF_EAGER: + return _RawTFEager.histogramFixedWidth(values, valueRange: valueRange, nbins: nbins) + } + + } + + /// Outputs a `Summary` protocol buffer with a histogram. + /// + /// The generated + /// [`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto) + /// has one summary value containing a histogram for `values`. + /// + /// This op reports an `InvalidArgument` error if any value is not finite. + /// + /// - Parameters: + /// - tag: Scalar. Tag to use for the `Summary.Value`. + /// - values: Any shape. Values to use to build the histogram. + /// + /// - Output summary: Scalar. Serialized `Summary` protocol buffer. + @inlinable @inline(__always) + public static func histogramSummary( + tag: StringTensor, + _ values: Tensor + ) -> StringTensor { + _RawTFEager.histogramSummary(tag: tag, values) + } + + /// Inverse fast Fourier transform. + /// + /// Computes the inverse 1-dimensional discrete Fourier transform over the + /// inner-most dimension of `input`. + /// + /// - Parameter input: A complex tensor. + /// + /// - Output output: A complex tensor of the same shape as `input`. The inner-most + /// dimension of `input` is replaced with its inverse 1D Fourier transform. + /// + /// @compatibility(numpy) + /// Equivalent to np.fft.ifft + /// @end_compatibility + @inlinable @inline(__always) + public static func iFFT( + _ input: Tensor + ) -> Tensor { + switch input.handle.backend { + case .XLA: + let output_device = input.device + let input = Tensor(copying: input, to: .defaultTFEager) + return Tensor(copying: _RawTFEager.iFFT(input), to: output_device) + case .TF_EAGER: + return _RawTFEager.iFFT(input) + } + + } + + /// Inverse 2D fast Fourier transform. + /// + /// Computes the inverse 2-dimensional discrete Fourier transform over the + /// inner-most 2 dimensions of `input`. + /// + /// - Parameter input: A complex tensor. + /// + /// - Output output: A complex tensor of the same shape as `input`. The inner-most 2 + /// dimensions of `input` are replaced with their inverse 2D Fourier transform. + /// + /// @compatibility(numpy) + /// Equivalent to np.fft.ifft2 + /// @end_compatibility + @inlinable @inline(__always) + public static func iFFT2D( + _ input: Tensor + ) -> Tensor { + switch input.handle.backend { + case .XLA: + let output_device = input.device + let input = Tensor(copying: input, to: .defaultTFEager) + return Tensor(copying: _RawTFEager.iFFT2D(input), to: output_device) + case .TF_EAGER: + return _RawTFEager.iFFT2D(input) + } + + } + + /// Inverse 3D fast Fourier transform. + /// + /// Computes the inverse 3-dimensional discrete Fourier transform over the + /// inner-most 3 dimensions of `input`. + /// + /// - Parameter input: A complex tensor. + /// + /// - Output output: A complex tensor of the same shape as `input`. 
The inner-most 3 + /// dimensions of `input` are replaced with their inverse 3D Fourier transform. + /// + /// @compatibility(numpy) + /// Equivalent to np.fft.ifftn with 3 dimensions. + /// @end_compatibility + @inlinable @inline(__always) + public static func iFFT3D( + _ input: Tensor + ) -> Tensor { + switch input.handle.backend { + case .XLA: + let output_device = input.device + let input = Tensor(copying: input, to: .defaultTFEager) + return Tensor(copying: _RawTFEager.iFFT3D(input), to: output_device) + case .TF_EAGER: + return _RawTFEager.iFFT3D(input) + } + + } + + /// Inverse real-valued fast Fourier transform. + /// + /// Computes the inverse 1-dimensional discrete Fourier transform of a real-valued + /// signal over the inner-most dimension of `input`. + /// + /// The inner-most dimension of `input` is assumed to be the result of `RFFT`: the + /// `fft_length / 2 + 1` unique components of the DFT of a real-valued signal. If + /// `fft_length` is not provided, it is computed from the size of the inner-most + /// dimension of `input` (`fft_length = 2 * (inner - 1)`). If the FFT length used to + /// compute `input` is odd, it should be provided since it cannot be inferred + /// properly. + /// + /// Along the axis `IRFFT` is computed on, if `fft_length / 2 + 1` is smaller + /// than the corresponding dimension of `input`, the dimension is cropped. If it is + /// larger, the dimension is padded with zeros. + /// + /// - Parameters: + /// - input: A complex tensor. + /// - fft_length: An int32 tensor of shape [1]. The FFT length. + /// + /// - Output output: A float32 tensor of the same rank as `input`. The inner-most + /// dimension of `input` is replaced with the `fft_length` samples of its inverse + /// 1D Fourier transform. + /// + /// @compatibility(numpy) + /// Equivalent to np.fft.irfft + /// @end_compatibility + @inlinable @inline(__always) + public static func iRFFT< + Treal: FloatingPoint & TensorFlowScalar, + Tcomplex: TensorFlowScalar + >( + _ input: Tensor, + fftLength: Tensor + ) -> Tensor { + switch commonBackend(input.handle.backend, fftLength.handle.backend) { + case .XLA: + let output_device = fftLength.device + let input = Tensor(copying: input, to: .defaultTFEager) + let fftLength = Tensor(copying: fftLength, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.iRFFT(input, fftLength: fftLength), to: output_device) + case .TF_EAGER: + return _RawTFEager.iRFFT(input, fftLength: fftLength) + } + + } + + /// Inverse 2D real-valued fast Fourier transform. + /// + /// Computes the inverse 2-dimensional discrete Fourier transform of a real-valued + /// signal over the inner-most 2 dimensions of `input`. + /// + /// The inner-most 2 dimensions of `input` are assumed to be the result of `RFFT2D`: + /// The inner-most dimension contains the `fft_length / 2 + 1` unique components of + /// the DFT of a real-valued signal. If `fft_length` is not provided, it is computed + /// from the size of the inner-most 2 dimensions of `input`. If the FFT length used + /// to compute `input` is odd, it should be provided since it cannot be inferred + /// properly. + /// + /// Along each axis `IRFFT2D` is computed on, if `fft_length` (or + /// `fft_length / 2 + 1` for the inner-most dimension) is smaller than the + /// corresponding dimension of `input`, the dimension is cropped. If it is larger, + /// the dimension is padded with zeros. + /// + /// - Parameters: + /// - input: A complex tensor. + /// - fft_length: An int32 tensor of shape [2]. The FFT length for each dimension. 
+ /// + /// - Output output: A float32 tensor of the same rank as `input`. The inner-most 2 + /// dimensions of `input` are replaced with the `fft_length` samples of their + /// inverse 2D Fourier transform. + /// + /// @compatibility(numpy) + /// Equivalent to np.fft.irfft2 + /// @end_compatibility + @inlinable @inline(__always) + public static func iRFFT2D< + Treal: FloatingPoint & TensorFlowScalar, + Tcomplex: TensorFlowScalar + >( + _ input: Tensor, + fftLength: Tensor + ) -> Tensor { + switch commonBackend(input.handle.backend, fftLength.handle.backend) { + case .XLA: + let output_device = fftLength.device + let input = Tensor(copying: input, to: .defaultTFEager) + let fftLength = Tensor(copying: fftLength, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.iRFFT2D(input, fftLength: fftLength), to: output_device) + case .TF_EAGER: + return _RawTFEager.iRFFT2D(input, fftLength: fftLength) + } + + } + + /// Inverse 3D real-valued fast Fourier transform. + /// + /// Computes the inverse 3-dimensional discrete Fourier transform of a real-valued + /// signal over the inner-most 3 dimensions of `input`. + /// + /// The inner-most 3 dimensions of `input` are assumed to be the result of `RFFT3D`: + /// The inner-most dimension contains the `fft_length / 2 + 1` unique components of + /// the DFT of a real-valued signal. If `fft_length` is not provided, it is computed + /// from the size of the inner-most 3 dimensions of `input`. If the FFT length used + /// to compute `input` is odd, it should be provided since it cannot be inferred + /// properly. + /// + /// Along each axis `IRFFT3D` is computed on, if `fft_length` (or + /// `fft_length / 2 + 1` for the inner-most dimension) is smaller than the + /// corresponding dimension of `input`, the dimension is cropped. If it is larger, + /// the dimension is padded with zeros. + /// + /// - Parameters: + /// - input: A complex tensor. + /// - fft_length: An int32 tensor of shape [3]. The FFT length for each dimension. + /// + /// - Output output: A float32 tensor of the same rank as `input`. The inner-most 3 + /// dimensions of `input` are replaced with the `fft_length` samples of their + /// inverse 3D real Fourier transform. + /// + /// @compatibility(numpy) + /// Equivalent to np.irfftn with 3 dimensions. + /// @end_compatibility + @inlinable @inline(__always) + public static func iRFFT3D< + Treal: FloatingPoint & TensorFlowScalar, + Tcomplex: TensorFlowScalar + >( + _ input: Tensor, + fftLength: Tensor + ) -> Tensor { + switch commonBackend(input.handle.backend, fftLength.handle.backend) { + case .XLA: + let output_device = fftLength.device + let input = Tensor(copying: input, to: .defaultTFEager) + let fftLength = Tensor(copying: fftLength, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.iRFFT3D(input, fftLength: fftLength), to: output_device) + case .TF_EAGER: + return _RawTFEager.iRFFT3D(input, fftLength: fftLength) + } + + } + + /// Return a tensor with the same shape and contents as the input tensor or value. + @inlinable @inline(__always) + public static func identity( + _ input: Tensor + ) -> Tensor { + switch input.handle.backend { + case .XLA: + let output_device = input.device + let input = Tensor(copying: input, to: .defaultTFEager) + return Tensor(copying: _RawTFEager.identity(input), to: output_device) + case .TF_EAGER: + return _RawTFEager.identity(input) + } + + } + + /// Returns a list of tensors with the same shapes and contents as the input + /// + /// tensors. 
+ /// + /// This op can be used to override the gradient for complicated functions. For + /// example, suppose y = f(x) and we wish to apply a custom function g for backprop + /// such that dx = g(dy). In Python, + /// + /// ```python + /// with tf.get_default_graph().gradient_override_map( + /// {'IdentityN': 'OverrideGradientWithG'}): + /// y, _ = identity_n([f(x), x]) + /// + /// @tf.RegisterGradient('OverrideGradientWithG') + /// def ApplyG(op, dy, _): + /// return [None, g(dy)] # Do not backprop to f(x). + /// ``` + @inlinable @inline(__always) + public static func identityN( + _ input: T + ) -> T { + _RawTFEager.identityN(input) + } + + /// A Reader that outputs the queued work as both the key and value. + /// + /// To use, enqueue strings in a Queue. ReaderRead will take the front + /// work string and output (work, work). + /// + /// - Attrs: + /// - container: If non-empty, this reader is placed in the given container. + /// Otherwise, a default container is used. + /// - shared_name: If non-empty, this reader is named in the given bucket + /// with this shared_name. Otherwise, the node name is used instead. + /// + /// - Output reader_handle: The handle to reference the Reader. + @inlinable @inline(__always) + public static func identityReaderV2( + container: String, + sharedName: String + ) -> ResourceHandle { + _RawTFEager.identityReaderV2(container: container, sharedName: sharedName) + } + + /// output = cond ? then_branch(input) : else_branch(input) + /// + /// - Parameters: + /// - cond: A Tensor. If the tensor is a scalar of non-boolean type, the + /// scalar is converted to a boolean according to the + /// following rule: if the scalar is a numerical value, non-zero means + /// `True` and zero means False; if the scalar is a string, non-empty + /// means `True` and empty means `False`. If the tensor is not a scalar, + /// being empty means False and being non-empty means True. + /// - input: A list of input tensors. + /// + /// - Attrs: + /// - Tin: A list of input types. + /// - Tout: A list of output types. + /// - then_branch: A function that takes 'inputs' and returns a list of tensors, whose + /// types are the same as what else_branch returns. + /// - else_branch: A function that takes 'inputs' and returns a list of tensors, whose + /// types are the same as what then_branch returns. + /// + /// - Output output: A list of return values. + @inlinable @inline(__always) + public static func if_< + Tcond: TensorFlowScalar, + Tin: TensorArrayProtocol, + Tout: TensorGroup, + ThenbranchIn: TensorGroup, + ThenbranchOut: TensorGroup, + ElsebranchIn: TensorGroup, + ElsebranchOut: TensorGroup + >( + cond: Tensor, + _ input: Tin, + thenBranch: (ThenbranchIn) -> ThenbranchOut, + elseBranch: (ElsebranchIn) -> ElsebranchOut, + outputShapes: [TensorShape?] + ) -> Tout { + _RawTFEager.if_( + cond: cond, input, thenBranch: thenBranch, elseBranch: elseBranch, + outputShapes: outputShapes) + } + + /// Compute the lower regularized incomplete Gamma function `P(a, x)`. + /// + /// The lower regularized incomplete Gamma function is defined as: + /// + /// + /// \\(P(a, x) = gamma(a, x) / Gamma(a) = 1 - Q(a, x)\\) + /// + /// where + /// + /// \\(gamma(a, x) = \\int_{0}^{x} t^{a-1} exp(-t) dt\\) + /// + /// is the lower incomplete Gamma function. + /// + /// Note, above `Q(a, x)` (`Igammac`) is the upper regularized complete + /// Gamma function. 
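+  /// For example, with a = 1 the identity P(1, x) = 1 - exp(-x) holds:
+  ///
+  /// ```swift
+  /// let a = Tensor<Float>([1.0, 1.0])
+  /// let x = Tensor<Float>([1.0, 2.0])
+  /// let p = _Raw.igamma(a, x)
+  /// // p ≈ [0.6321, 0.8647]
+  /// ```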
+ @inlinable @inline(__always) + public static func igamma( + _ a: Tensor, + _ x: Tensor + ) -> Tensor { + switch commonBackend(a.handle.backend, x.handle.backend) { + case .XLA: + let output_device = x.device + let a = Tensor(copying: a, to: .defaultTFEager) + let x = Tensor(copying: x, to: .defaultTFEager) + return Tensor(copying: _RawTFEager.igamma(a, x), to: output_device) + case .TF_EAGER: + return _RawTFEager.igamma(a, x) + } + + } + + /// Computes the gradient of `igamma(a, x)` wrt `a`. + @inlinable @inline(__always) + public static func igammaGradA( + _ a: Tensor, + _ x: Tensor + ) -> Tensor { + switch commonBackend(a.handle.backend, x.handle.backend) { + case .XLA: + let output_device = x.device + let a = Tensor(copying: a, to: .defaultTFEager) + let x = Tensor(copying: x, to: .defaultTFEager) + return Tensor(copying: _RawTFEager.igammaGradA(a, x), to: output_device) + case .TF_EAGER: + return _RawTFEager.igammaGradA(a, x) + } + + } + + /// Compute the upper regularized incomplete Gamma function `Q(a, x)`. + /// + /// The upper regularized incomplete Gamma function is defined as: + /// + /// \\(Q(a, x) = Gamma(a, x) / Gamma(a) = 1 - P(a, x)\\) + /// + /// where + /// + /// \\(Gamma(a, x) = int_{x}^{\infty} t^{a-1} exp(-t) dt\\) + /// + /// is the upper incomplete Gama function. + /// + /// Note, above `P(a, x)` (`Igamma`) is the lower regularized complete + /// Gamma function. + @inlinable @inline(__always) + public static func igammac( + _ a: Tensor, + _ x: Tensor + ) -> Tensor { + switch commonBackend(a.handle.backend, x.handle.backend) { + case .XLA: + let output_device = x.device + let a = Tensor(copying: a, to: .defaultTFEager) + let x = Tensor(copying: x, to: .defaultTFEager) + return Tensor(copying: _RawTFEager.igammac(a, x), to: output_device) + case .TF_EAGER: + return _RawTFEager.igammac(a, x) + } + + } + + /// Creates a dataset that contains the elements of `input_dataset` ignoring errors. + @inlinable @inline(__always) + public static func ignoreErrorsDataset( + inputDataset: VariantHandle, + outputTypes: [TensorDataType], + outputShapes: [TensorShape?] + ) -> VariantHandle { + _RawTFEager.ignoreErrorsDataset( + inputDataset: inputDataset, outputTypes: outputTypes, outputShapes: outputShapes) + } + + /// Returns the imaginary part of a complex number. + /// + /// Given a tensor `input` of complex numbers, this operation returns a tensor of + /// type `float` that is the imaginary part of each element in `input`. All + /// elements in `input` must be complex numbers of the form \\(a + bj\\), where *a* + /// is the real part and *b* is the imaginary part returned by this operation. + /// + /// For example: + /// + /// ``` + /// # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] + /// tf.imag(input) ==> [4.75, 5.75] + /// ``` + @inlinable @inline(__always) + public static func imag< + T: TensorFlowScalar, + Tout: FloatingPoint & TensorFlowScalar + >( + _ input: Tensor + ) -> Tensor { + switch input.handle.backend { + case .XLA: + let output_device = input.device + let input = Tensor(copying: input, to: .defaultTFEager) + return Tensor(copying: _RawTFEager.imag(input), to: output_device) + case .TF_EAGER: + return _RawTFEager.imag(input) + } + + } + + /// Returns immutable tensor from memory region. + /// + /// The current implementation memmaps the tensor from a file. + /// + /// - Attrs: + /// - dtype: Type of the returned tensor. + /// - shape: Shape of the returned tensor. 
+ /// - memory_region_name: Name of readonly memory region used by the tensor, see + /// NewReadOnlyMemoryRegionFromFile in tensorflow::Env. + @inlinable @inline(__always) + public static func immutableConst( + shape: TensorShape?, + memoryRegionName: String + ) -> Tensor { + _RawTFEager.immutableConst(shape: shape, memoryRegionName: memoryRegionName) + } + + @inlinable @inline(__always) + public static func importEvent( + writer: ResourceHandle, + event: StringTensor + ) { + _RawTFEager.importEvent(writer: writer, event: event) + } + + @inlinable @inline(__always) + public static func inPolymorphicTwice( + _ a: [Tensor], + _ b: [Tensor] + ) { + _RawTFEager.inPolymorphicTwice(a, b) + } + + /// Says whether the targets are in the top `K` predictions. + /// + /// This outputs a `batch_size` bool array, an entry `out[i]` is `true` if the + /// prediction for the target class is among the top `k` predictions among + /// all predictions for example `i`. Note that the behavior of `InTopK` differs + /// from the `TopK` op in its handling of ties; if multiple classes have the + /// same prediction value and straddle the top-`k` boundary, all of those + /// classes are considered to be in the top `k`. + /// + /// More formally, let + /// + /// \\(predictions_i\\) be the predictions for all classes for example `i`, + /// \\(targets_i\\) be the target class for example `i`, + /// \\(out_i\\) be the output for example `i`, + /// + /// $$out_i = predictions_{i, targets_i} \in TopKIncludingTies(predictions_i)$$ + /// + /// - Parameters: + /// - predictions: A `batch_size` x `classes` tensor. + /// - targets: A `batch_size` vector of class ids. + /// + /// - Attr k: Number of top elements to look at for computing precision. + /// + /// - Output precision: Computed Precision at `k` as a `bool Tensor`. + @inlinable @inline(__always) + public static func inTopK( + predictions: Tensor, + targets: Tensor, + k: Int64 + ) -> Tensor { + switch commonBackend(predictions.handle.backend, targets.handle.backend) { + case .XLA: + let output_device = targets.device + let predictions = Tensor(copying: predictions, to: .defaultTFEager) + let targets = Tensor(copying: targets, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.inTopK(predictions: predictions, targets: targets, k: k), + to: output_device) + case .TF_EAGER: + return _RawTFEager.inTopK(predictions: predictions, targets: targets, k: k) + } + + } + + /// Says whether the targets are in the top `K` predictions. + /// + /// This outputs a `batch_size` bool array, an entry `out[i]` is `true` if the + /// prediction for the target class is among the top `k` predictions among + /// all predictions for example `i`. Note that the behavior of `InTopK` differs + /// from the `TopK` op in its handling of ties; if multiple classes have the + /// same prediction value and straddle the top-`k` boundary, all of those + /// classes are considered to be in the top `k`. + /// + /// More formally, let + /// + /// \\(predictions_i\\) be the predictions for all classes for example `i`, + /// \\(targets_i\\) be the target class for example `i`, + /// \\(out_i\\) be the output for example `i`, + /// + /// $$out_i = predictions_{i, targets_i} \in TopKIncludingTies(predictions_i)$$ + /// + /// - Parameters: + /// - predictions: A `batch_size` x `classes` tensor. + /// - targets: A `batch_size` vector of class ids. + /// - k: Number of top elements to look at for computing precision. + /// + /// - Output precision: Computed precision at `k` as a `bool Tensor`. 
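+  /// A small Swift sketch (no ties; illustrative values):
+  ///
+  /// ```swift
+  /// let predictions: Tensor<Float> = [[0.1, 0.3, 0.6], [0.3, 0.5, 0.2]]
+  /// let targets = Tensor<Int32>([2, 2])
+  /// let precision = _Raw.inTopKV2(
+  ///   predictions: predictions, targets: targets, k: Tensor<Int32>(2))
+  /// // precision == [true, false]
+  /// ```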
+ @inlinable @inline(__always) + public static func inTopKV2( + predictions: Tensor, + targets: Tensor, + k: Tensor + ) -> Tensor { + switch commonBackend( + commonBackend(predictions.handle.backend, targets.handle.backend), k.handle.backend) + { + case .XLA: + let output_device = k.device + let predictions = Tensor(copying: predictions, to: .defaultTFEager) + let targets = Tensor(copying: targets, to: .defaultTFEager) + let k = Tensor(copying: k, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.inTopKV2(predictions: predictions, targets: targets, k: k), + to: output_device) + case .TF_EAGER: + return _RawTFEager.inTopKV2(predictions: predictions, targets: targets, k: k) + } + + } + + /// A placeholder op for a value that will be fed into the computation. + /// + /// - Attrs: + /// - dtype: The type of elements in the tensor. + /// - shape: The shape of the tensor. + /// + /// - Output output: A tensor that will be provided using the infeed mechanism. + @inlinable @inline(__always) + public static func infeedDequeue( + shape: TensorShape? + ) -> Tensor { + _RawTFEager.infeedDequeue(shape: shape) + } + + /// Fetches multiple values from infeed as an XLA tuple. + /// + /// - Attrs: + /// - dtypes: The element types of each element in `outputs`. + /// - shapes: The shapes of each tensor in `outputs`. + /// + /// - Output outputs: A list of tensors that will be provided using the infeed mechanism. + @inlinable @inline(__always) + public static func infeedDequeueTuple( + shapes: [TensorShape?] + ) -> Dtypes { + _RawTFEager.infeedDequeueTuple(shapes: shapes) + } + + /// An op which feeds a single Tensor value into the computation. + /// + /// - Parameter input: A tensor that will be provided using the infeed mechanism. + /// + /// - Attrs: + /// - dtype: The type of elements in the tensor. + /// - shape: The shape of the tensor. + /// - layout: A vector holding the requested layout in minor-to-major sequence. + /// If a layout attribute is passed, but its values are all -1, the layout will + /// be computed by the infeed operation. + /// - device_ordinal: The TPU device to use. This should be -1 when the Op + /// is running on a TPU device, and >= 0 when the Op is running on the CPU + /// device. + @inlinable @inline(__always) + public static func infeedEnqueue( + _ input: Tensor, + shape: TensorShape?, + layout: [Int32], + deviceOrdinal: Int64 = -1 + ) { + _RawTFEager.infeedEnqueue(input, shape: shape, layout: layout, deviceOrdinal: deviceOrdinal) + } + + /// An op which enqueues prelinearized buffer into TPU infeed. + /// + /// - Parameter input: A variant tensor representing linearized output. + /// + /// - Attr device_ordinal: The TPU device to use. This should be -1 when the Op is running on a TPU device + /// and = 0 when the Op is running on the CPU device. + @inlinable @inline(__always) + public static func infeedEnqueuePrelinearizedBuffer( + _ input: VariantHandle, + deviceOrdinal: Int64 = -1 + ) { + _RawTFEager.infeedEnqueuePrelinearizedBuffer(input, deviceOrdinal: deviceOrdinal) + } + + /// Feeds multiple Tensor values into the computation as an XLA tuple. + /// + /// - Parameter inputs: A list of tensors that will be provided using the infeed mechanism. + /// + /// - Attrs: + /// - dtypes: The element types of each element in `inputs`. + /// - shapes: The shapes of each tensor in `inputs`. + /// - layouts: A vector holding the requested layout in minor-to-major sequence for + /// all the tuple shapes, in the order the shapes appear in the "shapes" input. 
+  ///     The layout elements for a sub-shape can be set to -1, in which case the
+  ///     corresponding layout will be computed by the infeed operation.
+  ///   - device_ordinal: The TPU device to use. This should be -1 when the Op
+  ///     is running on a TPU device, and >= 0 when the Op is running on the CPU
+  ///     device.
+  @inlinable @inline(__always)
+  public static func infeedEnqueueTuple<Dtypes: TensorArrayProtocol>(
+    inputs: Dtypes,
+    shapes: [TensorShape?],
+    layouts: [Int32],
+    deviceOrdinal: Int64 = -1
+  ) {
+    _RawTFEager.infeedEnqueueTuple(
+      inputs: inputs, shapes: shapes, layouts: layouts, deviceOrdinal: deviceOrdinal)
+  }
+
+  @inlinable @inline(__always)
+  public static func initializeTRTResource(
+    resourceHandle: ResourceHandle,
+    filename: StringTensor,
+    maxCachedEnginesCount: Int64 = 1
+  ) {
+    _RawTFEager.initializeTRTResource(
+      resourceHandle: resourceHandle, filename: filename,
+      maxCachedEnginesCount: maxCachedEnginesCount)
+  }
+
+  /// Initializes a table from a text file.
+  ///
+  /// It inserts one key-value pair into the table for each line of the file.
+  /// The key and value are extracted from the whole line content, from elements of
+  /// the line split on `delimiter`, or from the line number (starting from zero).
+  /// Where to extract the key and value from a line is specified by `key_index` and
+  /// `value_index`.
+  ///
+  /// - A value of -1 means use the line number (starting from zero); this expects `int64`.
+  /// - A value of -2 means use the whole line content; this expects `string`.
+  /// - A value >= 0 means use the index (starting at zero) of the split line based
+  ///   on `delimiter`.
+  ///
+  /// - Parameters:
+  ///   - table_handle: Handle to a table which will be initialized.
+  ///   - filename: Filename of a vocabulary text file.
+  ///
+  /// - Attrs:
+  ///   - key_index: Column index in a line to get the table `key` values from.
+  ///   - value_index: Column index that represents information of a line to get the table
+  ///     `value` values from.
+  ///   - vocab_size: Number of elements of the file, use -1 if unknown.
+  ///   - delimiter: Delimiter to separate fields in a line.
+  @inlinable @inline(__always)
+  public static func initializeTableFromTextFileV2(
+    tableHandle: ResourceHandle,
+    filename: StringTensor,
+    keyIndex: Int64,
+    valueIndex: Int64,
+    vocabSize: Int64 = -1,
+    delimiter: String = "\t"
+  ) {
+    _RawTFEager.initializeTableFromTextFileV2(
+      tableHandle: tableHandle, filename: filename, keyIndex: keyIndex, valueIndex: valueIndex,
+      vocabSize: vocabSize, delimiter: delimiter)
+  }
+
+  /// Table initializer that takes two tensors for keys and values respectively.
+  ///
+  /// - Parameters:
+  ///   - table_handle: Handle to a table which will be initialized.
+  ///   - keys: Keys of type Tkey.
+  ///   - values: Values of type Tval.
+  @inlinable @inline(__always)
+  public static func initializeTableV2<
+    Tkey: TensorFlowScalar,
+    Tval: TensorFlowScalar
+  >(
+    tableHandle: ResourceHandle,
+    keys: Tensor<Tkey>,
+    _ values: Tensor<Tval>
+  ) {
+    _RawTFEager.initializeTableV2(tableHandle: tableHandle, keys: keys, values)
+  }
+
+  /// Adds v into specified rows of x.
+  ///
+  /// Computes y = x; y[i, :] += v; return y.
+  ///
+  /// - Parameters:
+  ///   - x: A `Tensor` of type T.
+  ///   - i: A vector. Indices into the left-most dimension of `x`.
+  ///   - v: A `Tensor` of type T. Same dimension sizes as x except the first dimension, which must be the same as i's size.
+  ///
+  /// - Output y: A `Tensor` of type T. An alias of `x`. The content of `y` is undefined if there are duplicates in `i`.
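+  ///
+  /// A minimal usage sketch (illustrative values only; assumes `Float` values
+  /// and `Int32` indices):
+  ///
+  /// ```swift
+  /// let x: Tensor<Float> = [[1, 1], [2, 2], [3, 3]]
+  /// let i = Tensor<Int32>([0, 2])
+  /// let v: Tensor<Float> = [[10, 10], [30, 30]]
+  /// let y = _Raw.inplaceAdd(x, i: i, v: v)
+  /// // y == [[11, 11], [2, 2], [33, 33]]
+  /// ```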
+  @inlinable @inline(__always)
+  public static func inplaceAdd<T: TensorFlowScalar>(
+    _ x: Tensor<T>,
+    i: Tensor<Int32>,
+    v: Tensor<T>
+  ) -> Tensor<T> {
+    switch commonBackend(commonBackend(x.handle.backend, i.handle.backend), v.handle.backend) {
+    case .XLA:
+      let output_device = v.device
+      let x = Tensor(copying: x, to: .defaultTFEager)
+      let i = Tensor(copying: i, to: .defaultTFEager)
+      let v = Tensor(copying: v, to: .defaultTFEager)
+      return Tensor(copying: _RawTFEager.inplaceAdd(x, i: i, v: v), to: output_device)
+    case .TF_EAGER:
+      return _RawTFEager.inplaceAdd(x, i: i, v: v)
+    }
+
+  }
+
+  /// Subtracts `v` from specified rows of `x`.
+  ///
+  /// Computes y = x; y[i, :] -= v; return y.
+  ///
+  /// - Parameters:
+  ///   - x: A `Tensor` of type T.
+  ///   - i: A vector. Indices into the left-most dimension of `x`.
+  ///   - v: A `Tensor` of type T. Same dimension sizes as x except the first dimension, which must be the same as i's size.
+  ///
+  /// - Output y: A `Tensor` of type T. An alias of `x`. The content of `y` is undefined if there are duplicates in `i`.
+  @inlinable @inline(__always)
+  public static func inplaceSub<T: TensorFlowScalar>(
+    _ x: Tensor<T>,
+    i: Tensor<Int32>,
+    v: Tensor<T>
+  ) -> Tensor<T> {
+    switch commonBackend(commonBackend(x.handle.backend, i.handle.backend), v.handle.backend) {
+    case .XLA:
+      let output_device = v.device
+      let x = Tensor(copying: x, to: .defaultTFEager)
+      let i = Tensor(copying: i, to: .defaultTFEager)
+      let v = Tensor(copying: v, to: .defaultTFEager)
+      return Tensor(copying: _RawTFEager.inplaceSub(x, i: i, v: v), to: output_device)
+    case .TF_EAGER:
+      return _RawTFEager.inplaceSub(x, i: i, v: v)
+    }
+
+  }
+
+  /// Updates specified rows with values in `v`.
+  ///
+  /// Computes `x[i, :] = v; return x`.
+  ///
+  /// - Parameters:
+  ///   - x: A tensor of type `T`.
+  ///   - i: A vector. Indices into the left-most dimension of `x`.
+  ///   - v: A `Tensor` of type T. Same dimension sizes as x except the first dimension, which must be the same as i's size.
+  ///
+  /// - Output y: A `Tensor` of type T. An alias of `x`. The content of `y` is undefined if there are duplicates in `i`.
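+  ///
+  /// A minimal usage sketch (illustrative values only):
+  ///
+  /// ```swift
+  /// let x: Tensor<Float> = [[1, 1], [2, 2], [3, 3]]
+  /// let v: Tensor<Float> = [[9, 9]]
+  /// let y = _Raw.inplaceUpdate(x, i: Tensor<Int32>([1]), v: v)
+  /// // y == [[1, 1], [9, 9], [3, 3]]
+  /// ```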
+ @inlinable @inline(__always) + public static func inplaceUpdate( + _ x: Tensor, + i: Tensor, + v: Tensor + ) -> Tensor { + switch commonBackend(commonBackend(x.handle.backend, i.handle.backend), v.handle.backend) { + case .XLA: + let output_device = v.device + let x = Tensor(copying: x, to: .defaultTFEager) + let i = Tensor(copying: i, to: .defaultTFEager) + let v = Tensor(copying: v, to: .defaultTFEager) + return Tensor(copying: _RawTFEager.inplaceUpdate(x, i: i, v: v), to: output_device) + case .TF_EAGER: + return _RawTFEager.inplaceUpdate(x, i: i, v: v) + } + + } + + @inlinable @inline(__always) + public static func int64Output() -> Tensor { + _RawTFEager.int64Output() + } + + @inlinable @inline(__always) + public static func intAttr( + foo: Int64 = 1 + ) -> Tensor { + _RawTFEager.intAttr(foo: foo) + } + + @inlinable @inline(__always) + public static func intInput( + _ a: Tensor + ) { + _RawTFEager.intInput(a) + } + + @inlinable @inline(__always) + public static func intInputFloatInput( + _ a: Tensor, + _ b: Tensor + ) { + _RawTFEager.intInputFloatInput(a, b) + } + + @inlinable @inline(__always) + public static func intInputIntOutput( + _ a: Tensor + ) -> Tensor { + switch a.handle.backend { + case .XLA: + let output_device = a.device + let a = Tensor(copying: a, to: .defaultTFEager) + return Tensor(copying: _RawTFEager.intInputIntOutput(a), to: output_device) + case .TF_EAGER: + return _RawTFEager.intInputIntOutput(a) + } + + } + + @inlinable @inline(__always) + public static func intOutput() -> Tensor { + _RawTFEager.intOutput() + } + + @inlinable @inline(__always) + public static func intOutputFloatOutput() -> (a: Tensor, b: Tensor) { + _RawTFEager.intOutputFloatOutput() + } + + /// Creates a dataset that applies `f` to the outputs of `input_dataset`. + /// + /// Unlike MapDataset, the `f` in InterleaveDataset is expected to return + /// a Dataset variant, and InterleaveDataset will flatten successive + /// results into a single Dataset. Unlike FlatMapDataset, + /// InterleaveDataset will interleave sequences of up to `block_length` + /// consecutive elements from `cycle_length` input elements. + /// + /// - Attr f: A function mapping elements of `input_dataset`, concatenated with + /// `other_arguments`, to a Dataset variant that contains elements matching + /// `output_types` and `output_shapes`. + @inlinable @inline(__always) + public static func interleaveDataset< + FIn: TensorGroup, + FOut: TensorGroup, + Targuments: TensorArrayProtocol + >( + inputDataset: VariantHandle, + otherArguments: Targuments, + cycleLength: Tensor, + blockLength: Tensor, + f: (FIn) -> FOut, + outputTypes: [TensorDataType], + outputShapes: [TensorShape?] + ) -> VariantHandle { + _RawTFEager.interleaveDataset( + inputDataset: inputDataset, otherArguments: otherArguments, cycleLength: cycleLength, + blockLength: blockLength, f: f, outputTypes: outputTypes, outputShapes: outputShapes) + } + + /// Computes the reciprocal of x element-wise. + /// + /// I.e., \\(y = 1 / x\\). + @inlinable @inline(__always) + public static func inv( + _ x: Tensor + ) -> Tensor { + switch x.handle.backend { + case .XLA: + let output_device = x.device + let x = Tensor(copying: x, to: .defaultTFEager) + return Tensor(copying: _RawTFEager.inv(x), to: output_device) + case .TF_EAGER: + return _RawTFEager.inv(x) + } + + } + + /// Computes the gradient for the inverse of `x` wrt its input. + /// + /// Specifically, `grad = -dy * y*y`, where `y = 1/x`, and `dy` + /// is the corresponding input gradient. 
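+  ///
+  /// A worked example (illustrative values only): for `x = 2`, `y = 1/x = 0.5`;
+  /// with an incoming gradient `dy = 1`, the result is `-dy * y * y = -0.25`,
+  /// which matches d(1/x)/dx = -1/x^2 evaluated at x = 2.
+  ///
+  /// ```swift
+  /// let y = Tensor<Float>([0.5])        // y = 1 / x for x = 2
+  /// let dy = Tensor<Float>([1.0])
+  /// let grad = _Raw.invGrad(y, dy: dy)  // [-0.25]
+  /// ```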
+ @inlinable @inline(__always) + public static func invGrad( + _ y: Tensor, + dy: Tensor + ) -> Tensor { + switch commonBackend(y.handle.backend, dy.handle.backend) { + case .XLA: + let output_device = dy.device + let y = Tensor(copying: y, to: .defaultTFEager) + let dy = Tensor(copying: dy, to: .defaultTFEager) + return Tensor(copying: _RawTFEager.invGrad(y, dy: dy), to: output_device) + case .TF_EAGER: + return _RawTFEager.invGrad(y, dy: dy) + } + + } + + /// Invert (flip) each bit of supported types; for example, type `uint8` value 01010101 becomes 10101010. + /// + /// Flip each bit of supported types. For example, type `int8` (decimal 2) binary 00000010 becomes (decimal -3) binary 11111101. + /// This operation is performed on each element of the tensor argument `x`. + /// + /// Example: + /// ```python + /// import tensorflow as tf + /// from tensorflow.python.ops import bitwise_ops + /// + /// # flip 2 (00000010) to -3 (11111101) + /// tf.assert_equal(-3, bitwise_ops.invert(2)) + /// + /// dtype_list = [dtypes.int8, dtypes.int16, dtypes.int32, dtypes.int64, + /// dtypes.uint8, dtypes.uint16, dtypes.uint32, dtypes.uint64] + /// + /// inputs = [0, 5, 3, 14] + /// for dtype in dtype_list: + /// # Because of issues with negative numbers, let's test this indirectly. + /// # 1. invert(a) and a = 0 + /// # 2. invert(a) or a = invert(0) + /// input_tensor = tf.constant([0, 5, 3, 14], dtype=dtype) + /// not_a_and_a, not_a_or_a, not_0 = [bitwise_ops.bitwise_and( + /// input_tensor, bitwise_ops.invert(input_tensor)), + /// bitwise_ops.bitwise_or( + /// input_tensor, bitwise_ops.invert(input_tensor)), + /// bitwise_ops.invert( + /// tf.constant(0, dtype=dtype))] + /// + /// expected = tf.constant([0, 0, 0, 0], dtype=tf.float32) + /// tf.assert_equal(tf.cast(not_a_and_a, tf.float32), expected) + /// + /// expected = tf.cast([not_0] * 4, tf.float32) + /// tf.assert_equal(tf.cast(not_a_or_a, tf.float32), expected) + /// + /// # For unsigned dtypes let's also check the result directly. + /// if dtype.is_unsigned: + /// inverted = bitwise_ops.invert(input_tensor) + /// expected = tf.constant([dtype.max - x for x in inputs], dtype=tf.float32) + /// tf.assert_equal(tf.cast(inverted, tf.float32), tf.cast(expected, tf.float32)) + /// ``` + @inlinable @inline(__always) + public static func invert( + _ x: Tensor + ) -> Tensor { + switch x.handle.backend { + case .XLA: + let output_device = x.device + let x = Tensor(copying: x, to: .defaultTFEager) + return Tensor(copying: _RawTFEager.invert(x), to: output_device) + case .TF_EAGER: + return _RawTFEager.invert(x) + } + + } + + /// Computes the inverse permutation of a tensor. + /// + /// This operation computes the inverse of an index permutation. It takes a 1-D + /// integer tensor `x`, which represents the indices of a zero-based array, and + /// swaps each value with its index position. In other words, for an output tensor + /// `y` and an input tensor `x`, this operation computes the following: + /// + /// `y[x[i]] = i for i in [0, 1, ..., len(x) - 1]` + /// + /// The values must include 0. There can be no duplicate values or negative values. + /// + /// For example: + /// + /// ``` + /// # tensor `x` is [3, 4, 0, 2, 1] + /// invert_permutation(x) ==> [2, 4, 3, 0, 1] + /// ``` + /// + /// - Parameter x: 1-D. + /// + /// - Output y: 1-D. 
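+  ///
+  /// The same example as a minimal Swift sketch (assuming `Int32` indices):
+  ///
+  /// ```swift
+  /// let x = Tensor<Int32>([3, 4, 0, 2, 1])
+  /// let y = _Raw.invertPermutation(x)
+  /// // y == [2, 4, 3, 0, 1]
+  /// ```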
+ @inlinable @inline(__always) + public static func invertPermutation( + _ x: Tensor + ) -> Tensor { + switch x.handle.backend { + case .XLA: + return _RawXLA.invertPermutation(x) + case .TF_EAGER: + return _RawTFEager.invertPermutation(x) + } + + } + + /// Checks whether a tree ensemble has been initialized. + /// + /// - Parameter tree_ensemble_handle: Handle to the tree ensemble resource. + /// + /// - Output is_initialized: output boolean on whether it is initialized or not. + @inlinable @inline(__always) + public static func isBoostedTreesEnsembleInitialized( + treeEnsembleHandle: ResourceHandle + ) -> Tensor { + _RawTFEager.isBoostedTreesEnsembleInitialized(treeEnsembleHandle: treeEnsembleHandle) + } + + /// Checks whether a quantile stream has been initialized. + /// + /// An Op that checks if quantile stream resource is initialized. + /// + /// - Parameter quantile_stream_resource_handle: resource; The reference to quantile stream resource handle. + /// + /// - Output is_initialized: bool; True if the resource is initialized, False otherwise. + @inlinable @inline(__always) + public static func isBoostedTreesQuantileStreamResourceInitialized( + quantileStreamResourceHandle: ResourceHandle + ) -> Tensor { + _RawTFEager.isBoostedTreesQuantileStreamResourceInitialized( + quantileStreamResourceHandle: quantileStreamResourceHandle) + } + + /// Returns which elements of x are finite. + /// + /// @compatibility(numpy) + /// Equivalent to np.isfinite + /// @end_compatibility + /// + /// Example: + /// + /// ```python + /// x = tf.constant([5.0, 4.8, 6.8, np.inf, np.nan]) + /// tf.math.is_finite(x) ==> [True, True, True, False, False] + /// ``` + @inlinable @inline(__always) + public static func isFinite( + _ x: Tensor + ) -> Tensor { + switch x.handle.backend { + case .XLA: + return _RawXLA.isFinite(x) + case .TF_EAGER: + return _RawTFEager.isFinite(x) + } + + } + + /// Returns which elements of x are Inf. + /// + /// @compatibility(numpy) + /// Equivalent to np.isinf + /// @end_compatibility + /// + /// Example: + /// + /// ```python + /// x = tf.constant([5.0, np.inf, 6.8, np.inf]) + /// tf.math.is_inf(x) ==> [False, True, False, True] + /// ``` + @inlinable @inline(__always) + public static func isInf( + _ x: Tensor + ) -> Tensor { + switch x.handle.backend { + case .XLA: + return _RawXLA.isInf(x) + case .TF_EAGER: + return _RawTFEager.isInf(x) + } + + } + + /// Returns which elements of x are NaN. + /// + /// @compatibility(numpy) + /// Equivalent to np.isnan + /// @end_compatibility + /// + /// Example: + /// + /// ```python + /// x = tf.constant([5.0, np.nan, 6.8, np.nan, np.inf]) + /// tf.math.is_nan(x) ==> [False, True, False, True, False] + /// ``` + @inlinable @inline(__always) + public static func isNan( + _ x: Tensor + ) -> Tensor { + switch x.handle.backend { + case .XLA: + return _RawXLA.isNan(x) + case .TF_EAGER: + return _RawTFEager.isNan(x) + } + + } + + /// A container for an iterator resource. + /// + /// - Output handle: A handle to the iterator that can be passed to a "MakeIterator" + /// or "IteratorGetNext" op. + @inlinable @inline(__always) + public static func iterator( + sharedName: String, + container: String, + outputTypes: [TensorDataType], + outputShapes: [TensorShape?] + ) -> ResourceHandle { + _RawTFEager.iterator( + sharedName: sharedName, container: container, outputTypes: outputTypes, + outputShapes: outputShapes) + } + + /// Converts the given string representing a handle to an iterator to a resource. 
+  ///
+  /// - Parameter string_handle: A string representation of the given handle.
+  ///
+  /// - Attrs:
+  ///   - output_types: If specified, defines the type of each tuple component in an
+  ///     element produced by the resulting iterator.
+  ///   - output_shapes: If specified, defines the shape of each tuple component in an
+  ///     element produced by the resulting iterator.
+  ///
+  /// - Output resource_handle: A handle to an iterator resource.
+  @inlinable @inline(__always)
+  public static func iteratorFromStringHandle(
+    stringHandle: StringTensor,
+    outputTypes: [TensorDataType],
+    outputShapes: [TensorShape?]
+  ) -> ResourceHandle {
+    _RawTFEager.iteratorFromStringHandle(
+      stringHandle: stringHandle, outputTypes: outputTypes, outputShapes: outputShapes)
+  }
+
+  @inlinable @inline(__always)
+  public static func iteratorFromStringHandleV2(
+    stringHandle: StringTensor,
+    outputTypes: [TensorDataType],
+    outputShapes: [TensorShape?]
+  ) -> ResourceHandle {
+    _RawTFEager.iteratorFromStringHandleV2(
+      stringHandle: stringHandle, outputTypes: outputTypes, outputShapes: outputShapes)
+  }
+
+  /// Returns the name of the device on which `resource` has been placed.
+  @inlinable @inline(__always)
+  public static func iteratorGetDevice(
+    resource: ResourceHandle
+  ) -> StringTensor {
+    _RawTFEager.iteratorGetDevice(resource: resource)
+  }
+
+  /// Gets the next output from the given iterator.
+  @inlinable @inline(__always)
+  public static func iteratorGetNext<OutputTypes: TensorGroup>(
+    iterator: ResourceHandle,
+    outputShapes: [TensorShape?]
+  ) -> OutputTypes {
+    _RawTFEager.iteratorGetNext(iterator: iterator, outputShapes: outputShapes)
+  }
+
+  /// Gets the next output from the given iterator as an Optional variant.
+  @inlinable @inline(__always)
+  public static func iteratorGetNextAsOptional(
+    iterator: ResourceHandle,
+    outputTypes: [TensorDataType],
+    outputShapes: [TensorShape?]
+  ) -> VariantHandle {
+    _RawTFEager.iteratorGetNextAsOptional(
+      iterator: iterator, outputTypes: outputTypes, outputShapes: outputShapes)
+  }
+
+  /// Gets the next output from the given iterator.
+  ///
+  /// This operation is a synchronous version of IteratorGetNext. It should only be used
+  /// in situations where the iterator does not block the calling thread, or where
+  /// the calling thread is not a member of the thread pool used to execute parallel
+  /// operations (e.g. in eager mode).
+  @inlinable @inline(__always)
+  public static func iteratorGetNextSync<OutputTypes: TensorGroup>(
+    iterator: ResourceHandle,
+    outputShapes: [TensorShape?]
+  ) -> OutputTypes {
+    _RawTFEager.iteratorGetNextSync(iterator: iterator, outputShapes: outputShapes)
+  }
+
+  /// Converts the given `resource_handle` representing an iterator to a string.
+  ///
+  /// - Parameter resource_handle: A handle to an iterator resource.
+  ///
+  /// - Output string_handle: A string representation of the given handle.
+  @inlinable @inline(__always)
+  public static func iteratorToStringHandle(
+    resourceHandle: ResourceHandle
+  ) -> StringTensor {
+    _RawTFEager.iteratorToStringHandle(resourceHandle: resourceHandle)
+  }
+
+  @inlinable @inline(__always)
+  public static func iteratorV2(
+    sharedName: String,
+    container: String,
+    outputTypes: [TensorDataType],
+    outputShapes: [TensorShape?]
+  ) -> ResourceHandle {
+    _RawTFEager.iteratorV2(
+      sharedName: sharedName, container: container, outputTypes: outputTypes,
+      outputShapes: outputShapes)
+  }
+
+  /// Returns the index of a data point that should be added to the seed set.
+ /// + /// Entries in distances are assumed to be squared distances of candidate points to + /// the already sampled centers in the seed set. The op constructs one Markov chain + /// of the k-MC^2 algorithm and returns the index of one candidate point to be added + /// as an additional cluster center. + /// + /// - Parameters: + /// - distances: Vector with squared distances to the closest previously sampled cluster center + /// for each candidate point. + /// - seed: Scalar. Seed for initializing the random number generator. + /// + /// - Output index: Scalar with the index of the sampled point. + @inlinable @inline(__always) + public static func kMC2ChainInitialization( + distances: Tensor, + seed: Tensor + ) -> Tensor { + switch commonBackend(distances.handle.backend, seed.handle.backend) { + case .XLA: + let output_device = seed.device + let distances = Tensor(copying: distances, to: .defaultTFEager) + let seed = Tensor(copying: seed, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.kMC2ChainInitialization(distances: distances, seed: seed), + to: output_device) + case .TF_EAGER: + return _RawTFEager.kMC2ChainInitialization(distances: distances, seed: seed) + } + + } + + @inlinable @inline(__always) + public static func kernelLabel() -> StringTensor { + _RawTFEager.kernelLabel() + } + + @inlinable @inline(__always) + public static func kernelLabelRequired( + _ input: Tensor + ) -> StringTensor { + _RawTFEager.kernelLabelRequired(input) + } + + /// Selects num_to_sample rows of input using the KMeans++ criterion. + /// + /// Rows of points are assumed to be input points. One row is selected at random. + /// Subsequent rows are sampled with probability proportional to the squared L2 + /// distance from the nearest row selected thus far till num_to_sample rows have + /// been sampled. + /// + /// - Parameters: + /// - points: Matrix of shape (n, d). Rows are assumed to be input points. + /// - num_to_sample: Scalar. The number of rows to sample. This value must not be larger than n. + /// - seed: Scalar. Seed for initializing the random number generator. + /// - num_retries_per_sample: Scalar. For each row that is sampled, this parameter + /// specifies the number of additional points to draw from the current + /// distribution before selecting the best. If a negative value is specified, a + /// heuristic is used to sample O(log(num_to_sample)) additional points. + /// + /// - Output samples: Matrix of shape (num_to_sample, d). The sampled rows. 
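+  ///
+  /// A minimal usage sketch (illustrative values only; assumes `Float` points
+  /// and `Int64` scalars, as in the underlying TensorFlow op; passing -1 for
+  /// num_retries_per_sample selects the heuristic described above):
+  ///
+  /// ```swift
+  /// let points: Tensor<Float> = [[0, 0], [0, 1], [10, 0], [10, 1]]
+  /// let samples = _Raw.kmeansPlusPlusInitialization(
+  ///   points: points, numToSample: Tensor<Int64>(2), seed: Tensor<Int64>(42),
+  ///   numRetriesPerSample: Tensor<Int64>(-1))
+  /// // `samples` has shape (2, 2): two rows drawn from `points`.
+  /// ```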
+ @inlinable @inline(__always) + public static func kmeansPlusPlusInitialization( + points: Tensor, + numToSample: Tensor, + seed: Tensor, + numRetriesPerSample: Tensor + ) -> Tensor { + switch commonBackend( + commonBackend( + commonBackend(points.handle.backend, numToSample.handle.backend), seed.handle.backend), + numRetriesPerSample.handle.backend) + { + case .XLA: + let output_device = numRetriesPerSample.device + let points = Tensor(copying: points, to: .defaultTFEager) + let numToSample = Tensor(copying: numToSample, to: .defaultTFEager) + let seed = Tensor(copying: seed, to: .defaultTFEager) + let numRetriesPerSample = Tensor(copying: numRetriesPerSample, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.kmeansPlusPlusInitialization( + points: points, numToSample: numToSample, seed: seed, + numRetriesPerSample: numRetriesPerSample), to: output_device) + case .TF_EAGER: + return _RawTFEager.kmeansPlusPlusInitialization( + points: points, numToSample: numToSample, seed: seed, + numRetriesPerSample: numRetriesPerSample) + } + + } + + /// L2 Loss. + /// + /// Computes half the L2 norm of a tensor without the `sqrt`: + /// + /// output = sum(t ** 2) / 2 + /// + /// - Parameter t: Typically 2-D, but may have any dimensions. + /// + /// - Output output: 0-D. + @inlinable @inline(__always) + public static func l2Loss( + t: Tensor + ) -> Tensor { + switch t.handle.backend { + case .XLA: + let output_device = t.device + let t = Tensor(copying: t, to: .defaultTFEager) + return Tensor(copying: _RawTFEager.l2Loss(t: t), to: output_device) + case .TF_EAGER: + return _RawTFEager.l2Loss(t: t) + } + + } + + /// Creates a dataset that emits the key-value pairs in one or more LMDB files. + /// + /// The Lightning Memory-Mapped Database Manager, or LMDB, is an embedded binary + /// key-value database. This dataset can read the contents of LMDB database files, + /// the names of which generally have the `.mdb` suffix. + /// + /// Each output element consists of a key-value pair represented as a pair of + /// scalar string `Tensor`s, where the first `Tensor` contains the key and the + /// second `Tensor` contains the value. + /// + /// LMDB uses different file formats on big- and little-endian machines. + /// `LMDBDataset` can only read files in the format of the host machine. + /// + /// - Parameter filenames: A scalar or a vector containing the name(s) of the binary file(s) to be + /// read. + @inlinable @inline(__always) + public static func lMDBDataset( + filenames: StringTensor, + outputTypes: [TensorDataType], + outputShapes: [TensorShape?] + ) -> VariantHandle { + _RawTFEager.lMDBDataset( + filenames: filenames, outputTypes: outputTypes, outputShapes: outputShapes) + } + + /// Local Response Normalization. + /// + /// The 4-D `input` tensor is treated as a 3-D array of 1-D vectors (along the last + /// dimension), and each vector is normalized independently. Within a given vector, + /// each component is divided by the weighted, squared sum of inputs within + /// `depth_radius`. In detail, + /// + /// sqr_sum[a, b, c, d] = + /// sum(input[a, b, c, d - depth_radius : d + depth_radius + 1] ** 2) + /// output = input / (bias + alpha * sqr_sum) ** beta + /// + /// For details, see [Krizhevsky et al., ImageNet classification with deep + /// convolutional neural networks (NIPS 2012)](http://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks). + /// + /// - Parameter input: 4-D. + /// + /// - Attrs: + /// - depth_radius: 0-D. 
Half-width of the 1-D normalization window. + /// - bias: An offset (usually positive to avoid dividing by 0). + /// - alpha: A scale factor, usually positive. + /// - beta: An exponent. + @inlinable @inline(__always) + public static func lRN( + _ input: Tensor, + depthRadius: Int64 = 5, + bias: Double = 1, + alpha: Double = 1, + beta: Double = 0.5 + ) -> Tensor { + switch input.handle.backend { + case .XLA: + let output_device = input.device + let input = Tensor(copying: input, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.lRN( + input, depthRadius: depthRadius, bias: bias, alpha: alpha, beta: beta), + to: output_device) + case .TF_EAGER: + return _RawTFEager.lRN( + input, depthRadius: depthRadius, bias: bias, alpha: alpha, beta: beta) + } + + } + + /// Gradients for Local Response Normalization. + /// + /// - Parameters: + /// - input_grads: 4-D with shape `[batch, height, width, channels]`. + /// - input_image: 4-D with shape `[batch, height, width, channels]`. + /// - output_image: 4-D with shape `[batch, height, width, channels]`. + /// + /// - Attrs: + /// - depth_radius: A depth radius. + /// - bias: An offset (usually > 0 to avoid dividing by 0). + /// - alpha: A scale factor, usually positive. + /// - beta: An exponent. + /// + /// - Output output: The gradients for LRN. + @inlinable @inline(__always) + public static func lRNGrad( + inputGrads: Tensor, + inputImage: Tensor, + outputImage: Tensor, + depthRadius: Int64 = 5, + bias: Double = 1, + alpha: Double = 1, + beta: Double = 0.5 + ) -> Tensor { + switch commonBackend( + commonBackend(inputGrads.handle.backend, inputImage.handle.backend), + outputImage.handle.backend) + { + case .XLA: + let output_device = outputImage.device + let inputGrads = Tensor(copying: inputGrads, to: .defaultTFEager) + let inputImage = Tensor(copying: inputImage, to: .defaultTFEager) + let outputImage = Tensor(copying: outputImage, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.lRNGrad( + inputGrads: inputGrads, inputImage: inputImage, outputImage: outputImage, + depthRadius: depthRadius, bias: bias, alpha: alpha, beta: beta), to: output_device) + case .TF_EAGER: + return _RawTFEager.lRNGrad( + inputGrads: inputGrads, inputImage: inputImage, outputImage: outputImage, + depthRadius: depthRadius, bias: bias, alpha: alpha, beta: beta) + } + + } + + /// Computes the LSTM cell forward propagation for 1 time step. + /// + /// This implementation uses 1 weight matrix and 1 bias vector, and there's an + /// optional peephole connection. + /// + /// This kernel op implements the following mathematical equations: + /// + /// ```python + /// xh = [x, h_prev] + /// [i, f, ci, o] = xh * w + b + /// f = f + forget_bias + /// + /// if not use_peephole: + /// wci = wcf = wco = 0 + /// + /// i = sigmoid(cs_prev * wci + i) + /// f = sigmoid(cs_prev * wcf + f) + /// ci = tanh(ci) + /// + /// cs = ci .* i + cs_prev .* f + /// cs = clip(cs, cell_clip) + /// + /// o = sigmoid(cs * wco + o) + /// co = tanh(cs) + /// h = co .* o + /// ``` + /// + /// - Parameters: + /// - x: The input to the LSTM cell, shape (batch_size, num_inputs). + /// - cs_prev: Value of the cell state at previous time step. + /// - h_prev: Output of the previous cell at previous time step. + /// - w: The weight matrix. + /// - wci: The weight matrix for input gate peephole connection. + /// - wcf: The weight matrix for forget gate peephole connection. + /// - wco: The weight matrix for output gate peephole connection. + /// - b: The bias vector. 
+  ///
+  /// - Attrs:
+  ///   - forget_bias: The forget gate bias.
+  ///   - cell_clip: Value to clip the 'cs' value to.
+  ///   - use_peephole: Whether to use peephole weights.
+  ///
+  /// - Outputs:
+  ///   - i: The input gate.
+  ///   - cs: The cell state before the tanh.
+  ///   - f: The forget gate.
+  ///   - o: The output gate.
+  ///   - ci: The cell input.
+  ///   - co: The cell after the tanh.
+  ///   - h: The output h vector.
+  @inlinable @inline(__always)
+  public static func lSTMBlockCell<T: FloatingPoint & TensorFlowScalar>(
+    _ x: Tensor<T>,
+    csPrev: Tensor<T>,
+    hPrev: Tensor<T>,
+    w: Tensor<T>,
+    wci: Tensor<T>,
+    wcf: Tensor<T>,
+    wco: Tensor<T>,
+    _ b: Tensor<T>,
+    forgetBias: Double = 1,
+    cellClip: Double = 3,
+    usePeephole: Bool = false
+  ) -> (
+    i: Tensor<T>, cs: Tensor<T>, f: Tensor<T>, o: Tensor<T>, ci: Tensor<T>, co: Tensor<T>,
+    h: Tensor<T>
+  ) {
+    _RawTFEager.lSTMBlockCell(
+      x, csPrev: csPrev, hPrev: hPrev, w: w, wci: wci, wcf: wcf, wco: wco, b,
+      forgetBias: forgetBias, cellClip: cellClip, usePeephole: usePeephole)
+  }
+
+  /// Computes the LSTM cell backward propagation for 1 timestep.
+  ///
+  /// This implementation is to be used in conjunction with LSTMBlockCell.
+  ///
+  /// - Parameters:
+  ///   - x: The input to the LSTM cell, shape (batch_size, num_inputs).
+  ///   - cs_prev: The previous cell state.
+  ///   - h_prev: The previous h state.
+  ///   - w: The weight matrix.
+  ///   - wci: The weight matrix for input gate peephole connection.
+  ///   - wcf: The weight matrix for forget gate peephole connection.
+  ///   - wco: The weight matrix for output gate peephole connection.
+  ///   - b: The bias vector.
+  ///   - i: The input gate.
+  ///   - cs: The cell state before the tanh.
+  ///   - f: The forget gate.
+  ///   - o: The output gate.
+  ///   - ci: The cell input.
+  ///   - co: The cell after the tanh.
+  ///   - cs_grad: The current gradient of cs.
+  ///   - h_grad: The gradient of h vector.
+  ///
+  /// - Attr use_peephole: Whether the cell uses peephole connections.
+  ///
+  /// - Outputs:
+  ///   - cs_prev_grad: The gradient of cs to be back-propped.
+  ///   - dicfo: The derivative with respect to [i, cs, f, o].
+  ///   - wci_grad: The gradient for wci to be back-propped.
+  ///   - wcf_grad: The gradient for wcf to be back-propped.
+  ///   - wco_grad: The gradient for wco to be back-propped.
+  @inlinable @inline(__always)
+  public static func lSTMBlockCellGrad<T: FloatingPoint & TensorFlowScalar>(
+    _ x: Tensor<T>,
+    csPrev: Tensor<T>,
+    hPrev: Tensor<T>,
+    w: Tensor<T>,
+    wci: Tensor<T>,
+    wcf: Tensor<T>,
+    wco: Tensor<T>,
+    _ b: Tensor<T>,
+    i: Tensor<T>,
+    cs: Tensor<T>,
+    f: Tensor<T>,
+    o: Tensor<T>,
+    ci: Tensor<T>,
+    co: Tensor<T>,
+    csGrad: Tensor<T>,
+    hGrad: Tensor<T>,
+    usePeephole: Bool
+  ) -> (
+    csPrevGrad: Tensor<T>, dicfo: Tensor<T>, wciGrad: Tensor<T>, wcfGrad: Tensor<T>,
+    wcoGrad: Tensor<T>
+  ) {
+    _RawTFEager.lSTMBlockCellGrad(
+      x, csPrev: csPrev, hPrev: hPrev, w: w, wci: wci, wcf: wcf, wco: wco, b, i: i, cs: cs, f: f,
+      o: o, ci: ci, co: co, csGrad: csGrad, hGrad: hGrad, usePeephole: usePeephole)
+  }
+
+  /// Records the latency of producing `input_dataset` elements in a StatsAggregator.
+  @inlinable @inline(__always)
+  public static func latencyStatsDataset(
+    inputDataset: VariantHandle,
+    tag: StringTensor,
+    outputTypes: [TensorDataType],
+    outputShapes: [TensorShape?]
+  ) -> VariantHandle {
+    _RawTFEager.latencyStatsDataset(
+      inputDataset: inputDataset, tag: tag, outputTypes: outputTypes, outputShapes: outputShapes)
+  }
+
+  /// Computes rectified linear: `max(features, features * alpha)`.
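+  ///
+  /// A minimal usage sketch (illustrative values only):
+  ///
+  /// ```swift
+  /// let features = Tensor<Float>([-1, 0, 2])
+  /// let y = _Raw.leakyRelu(features: features, alpha: 0.2)
+  /// // y == [-0.2, 0, 2]
+  /// ```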
+  @inlinable @inline(__always)
+  public static func leakyRelu<T: FloatingPoint & TensorFlowScalar>(
+    features: Tensor<T>,
+    alpha: Double = 0.2
+  ) -> Tensor<T> {
+    switch features.handle.backend {
+    case .XLA:
+      return _RawXLA.leakyRelu(features: features, alpha: alpha)
+    case .TF_EAGER:
+      return _RawTFEager.leakyRelu(features: features, alpha: alpha)
+    }
+
+  }
+
+  /// Computes rectified linear gradients for a LeakyRelu operation.
+  ///
+  /// - Parameters:
+  ///   - gradients: The backpropagated gradients to the corresponding LeakyRelu operation.
+  ///   - features: The features passed as input to the corresponding LeakyRelu operation,
+  ///     OR the outputs of that operation (both work equivalently).
+  ///
+  /// - Output backprops: `gradients * (features > 0) + alpha * gradients * (features <= 0)`.
+  @inlinable @inline(__always)
+  public static func leakyReluGrad<T: FloatingPoint & TensorFlowScalar>(
+    gradients: Tensor<T>,
+    features: Tensor<T>,
+    alpha: Double = 0.2
+  ) -> Tensor<T> {
+    switch commonBackend(gradients.handle.backend, features.handle.backend) {
+    case .XLA:
+      return _RawXLA.leakyReluGrad(gradients: gradients, features: features, alpha: alpha)
+    case .TF_EAGER:
+      return _RawTFEager.leakyReluGrad(gradients: gradients, features: features, alpha: alpha)
+    }
+
+  }
+
+  /// Generates labels for candidate sampling with a learned unigram distribution.
+  ///
+  /// See explanations of candidate sampling and the data formats at
+  /// go/candidate-sampling.
+  ///
+  /// For each batch, this op picks a single set of sampled candidate labels.
+  ///
+  /// The advantages of sampling candidates per-batch are simplicity and the
+  /// possibility of efficient dense matrix multiplication. The disadvantage is that
+  /// the sampled candidates must be chosen independently of the context and of the
+  /// true labels.
+  ///
+  /// - Parameter true_classes: A batch_size * num_true matrix, in which each row contains the
+  ///   IDs of the num_true target_classes in the corresponding original label.
+  ///
+  /// - Attrs:
+  ///   - num_true: Number of true labels per context.
+  ///   - num_sampled: Number of candidates to randomly sample.
+  ///   - unique: If unique is true, we sample with rejection, so that all sampled
+  ///     candidates in a batch are unique. This requires some approximation to
+  ///     estimate the post-rejection sampling probabilities.
+  ///   - range_max: The sampler will sample integers from the interval [0, range_max).
+  ///   - seed: If either seed or seed2 are set to be non-zero, the random number
+  ///     generator is seeded by the given seed. Otherwise, it is seeded by a
+  ///     random seed.
+  ///   - seed2: A second seed to avoid seed collision.
+  ///
+  /// - Outputs:
+  ///   - sampled_candidates: A vector of length num_sampled, in which each element is
+  ///     the ID of a sampled candidate.
+  ///   - true_expected_count: A batch_size * num_true matrix, representing
+  ///     the number of times each candidate is expected to occur in a batch
+  ///     of sampled candidates. If unique=true, then this is a probability.
+  ///   - sampled_expected_count: A vector of length num_sampled, for each sampled
+  ///     candidate representing the number of times the candidate is expected
+  ///     to occur in a batch of sampled candidates. If unique=true, then this is a
+  ///     probability.
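+  ///
+  /// A minimal usage sketch (illustrative values only; the sampled IDs are
+  /// random unless the seeds are fixed):
+  ///
+  /// ```swift
+  /// let trueClasses: Tensor<Int64> = [[0], [3]]  // batch_size = 2, num_true = 1
+  /// let (sampled, trueExpected, sampledExpected) = _Raw.learnedUnigramCandidateSampler(
+  ///   trueClasses: trueClasses, numTrue: 1, numSampled: 4, unique: true,
+  ///   rangeMax: 10, seed: 1, seed2: 1)
+  /// // `sampled` holds 4 distinct candidate IDs drawn from [0, 10).
+  /// ```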
+ @inlinable @inline(__always) + public static func learnedUnigramCandidateSampler( + trueClasses: Tensor, + numTrue: Int64, + numSampled: Int64, + unique: Bool, + rangeMax: Int64, + seed: Int64 = 0, + seed2: Int64 = 0 + ) -> ( + sampledCandidates: Tensor, trueExpectedCount: Tensor, + sampledExpectedCount: Tensor + ) { + _RawTFEager.learnedUnigramCandidateSampler( + trueClasses: trueClasses, numTrue: numTrue, numSampled: numSampled, unique: unique, + rangeMax: rangeMax, seed: seed, seed2: seed2) + } + + /// Elementwise computes the bitwise left-shift of `x` and `y`. + /// + /// If `y` is negative, or greater than or equal to the width of `x` in bits the + /// result is implementation defined. + /// + /// Example: + /// + /// ```python + /// import tensorflow as tf + /// from tensorflow.python.ops import bitwise_ops + /// import numpy as np + /// dtype_list = [tf.int8, tf.int16, tf.int32, tf.int64] + /// + /// for dtype in dtype_list: + /// lhs = tf.constant([-1, -5, -3, -14], dtype=dtype) + /// rhs = tf.constant([5, 0, 7, 11], dtype=dtype) + /// + /// left_shift_result = bitwise_ops.left_shift(lhs, rhs) + /// + /// print(left_shift_result) + /// + /// # This will print: + /// # tf.Tensor([ -32 -5 -128 0], shape=(4,), dtype=int8) + /// # tf.Tensor([ -32 -5 -384 -28672], shape=(4,), dtype=int16) + /// # tf.Tensor([ -32 -5 -384 -28672], shape=(4,), dtype=int32) + /// # tf.Tensor([ -32 -5 -384 -28672], shape=(4,), dtype=int64) + /// + /// lhs = np.array([-2, 64, 101, 32], dtype=np.int8) + /// rhs = np.array([-1, -5, -3, -14], dtype=np.int8) + /// bitwise_ops.left_shift(lhs, rhs) + /// # + /// ``` + /// + @inlinable @inline(__always) + public static func leftShift( + _ x: Tensor, + _ y: Tensor + ) -> Tensor { + switch commonBackend(x.handle.backend, y.handle.backend) { + case .XLA: + let output_device = y.device + let x = Tensor(copying: x, to: .defaultTFEager) + let y = Tensor(copying: y, to: .defaultTFEager) + return Tensor(copying: _RawTFEager.leftShift(x, y), to: output_device) + case .TF_EAGER: + return _RawTFEager.leftShift(x, y) + } + + } + + /// Returns the truth value of (x < y) element-wise. + /// + /// *NOTE*: `Less` supports broadcasting. More about broadcasting + /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + /// + /// Example: + /// + /// ```python + /// x = tf.constant([5, 4, 6]) + /// y = tf.constant([5]) + /// tf.math.less(x, y) ==> [False, True, False] + /// + /// x = tf.constant([5, 4, 6]) + /// y = tf.constant([5, 6, 7]) + /// tf.math.less(x, y) ==> [False, True, True] + /// ``` + @inlinable @inline(__always) + public static func less( + _ x: Tensor, + _ y: Tensor + ) -> Tensor { + switch commonBackend(x.handle.backend, y.handle.backend) { + case .XLA: + return _RawXLA.less(x, y) + case .TF_EAGER: + return _RawTFEager.less(x, y) + } + + } + + /// Returns the truth value of (x <= y) element-wise. + /// + /// *NOTE*: `LessEqual` supports broadcasting. 
More about broadcasting
+  /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
+  ///
+  /// Example:
+  ///
+  /// ```python
+  /// x = tf.constant([5, 4, 6])
+  /// y = tf.constant([5])
+  /// tf.math.less_equal(x, y) ==> [True, True, False]
+  ///
+  /// x = tf.constant([5, 4, 6])
+  /// y = tf.constant([5, 6, 6])
+  /// tf.math.less_equal(x, y) ==> [True, True, True]
+  /// ```
+  @inlinable @inline(__always)
+  public static func lessEqual<T: TensorFlowNumeric>(
+    _ x: Tensor<T>,
+    _ y: Tensor<T>
+  ) -> Tensor<Bool> {
+    switch commonBackend(x.handle.backend, y.handle.backend) {
+    case .XLA:
+      return _RawXLA.lessEqual(x, y)
+    case .TF_EAGER:
+      return _RawTFEager.lessEqual(x, y)
+    }
+
+  }
+
+  /// Computes the log of the absolute value of `Gamma(x)` element-wise.
+  ///
+  /// For positive numbers, this function computes log((input - 1)!) for every element in the tensor.
+  /// `lgamma(5) = log((5-1)!) = log(4!) = log(24) = 3.1780539`
+  ///
+  /// Example:
+  ///
+  /// ```python
+  /// x = tf.constant([0, 0.5, 1, 4.5, -4, -5.6])
+  /// tf.math.lgamma(x) ==> [inf, 0.5723649, 0., 2.4537368, inf, -4.6477685]
+  /// ```
+  @inlinable @inline(__always)
+  public static func lgamma<T: FloatingPoint & TensorFlowScalar>(
+    _ x: Tensor<T>
+  ) -> Tensor<T> {
+    switch x.handle.backend {
+    case .XLA:
+      let output_device = x.device
+      let x = Tensor(copying: x, to: .defaultTFEager)
+      return Tensor(copying: _RawTFEager.lgamma(x), to: output_device)
+    case .TF_EAGER:
+      return _RawTFEager.lgamma(x)
+    }
+
+  }
+
+  /// Generates values in an interval.
+  ///
+  /// A sequence of `num` evenly-spaced values are generated beginning at `start`.
+  /// If `num > 1`, the values in the sequence increase by `(stop - start) / (num - 1)`,
+  /// so that the last one is exactly `stop`.
+  ///
+  /// For example:
+  ///
+  /// ```
+  /// tf.linspace(10.0, 12.0, 3, name="linspace") => [ 10.0 11.0 12.0]
+  /// ```
+  ///
+  /// - Parameters:
+  ///   - start: 0-D tensor. First entry in the range.
+  ///   - stop: 0-D tensor. Last entry in the range.
+  ///   - num: 0-D tensor. Number of values to generate.
+  ///
+  /// - Output output: 1-D. The generated values.
+  @inlinable @inline(__always)
+  public static func linSpace<
+    T: FloatingPoint & TensorFlowScalar,
+    Tidx: TensorFlowIndex
+  >(
+    start: Tensor<T>,
+    stop: Tensor<T>,
+    num: Tensor<Tidx>
+  ) -> Tensor<T> {
+    switch commonBackend(
+      commonBackend(start.handle.backend, stop.handle.backend), num.handle.backend)
+    {
+    case .XLA:
+      return _RawXLA.linSpace(start: start, stop: stop, num: num)
+    case .TF_EAGER:
+      return _RawTFEager.linSpace(start: start, stop: stop, num: num)
+    }
+
+  }
+
+  /// Computes the difference between two lists of numbers or strings.
+  ///
+  /// Given a list `x` and a list `y`, this operation returns a list `out` that
+  /// represents all values that are in `x` but not in `y`. The returned list `out`
+  /// is sorted in the same order that the numbers appear in `x` (duplicates are
+  /// preserved). This operation also returns a list `idx` that represents the
+  /// position of each `out` element in `x`. In other words:
+  ///
+  /// `out[i] = x[idx[i]] for i in [0, 1, ..., len(out) - 1]`
+  ///
+  /// For example, given this input:
+  ///
+  /// ```
+  /// x = [1, 2, 3, 4, 5, 6]
+  /// y = [1, 3, 5]
+  /// ```
+  ///
+  /// This operation would return:
+  ///
+  /// ```
+  /// out ==> [2, 4, 6]
+  /// idx ==> [1, 3, 5]
+  /// ```
+  ///
+  /// - Parameters:
+  ///   - x: 1-D. Values to keep.
+  ///   - y: 1-D. Values to remove.
+  ///
+  /// - Outputs:
+  ///   - out: 1-D. Values present in `x` but not in `y`.
+  ///   - idx: 1-D. Positions of `x` values preserved in `out`.
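+  ///
+  /// The same example as a minimal Swift sketch (assuming `Int32` values and
+  /// an `Int32` index type, the TensorFlow op's default):
+  ///
+  /// ```swift
+  /// let x = Tensor<Int32>([1, 2, 3, 4, 5, 6])
+  /// let y = Tensor<Int32>([1, 3, 5])
+  /// let (out, idx): (Tensor<Int32>, Tensor<Int32>) = _Raw.listDiff(x, y)
+  /// // out == [2, 4, 6], idx == [1, 3, 5]
+  /// ```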
+ @inlinable @inline(__always) + public static func listDiff< + T: TensorFlowScalar, + OutIdx: TensorFlowIndex + >( + _ x: Tensor, + _ y: Tensor + ) -> (out: Tensor, idx: Tensor) { + _RawTFEager.listDiff(x, y) + } + + @inlinable @inline(__always) + public static func listInput( + _ a: [Tensor] + ) { + _RawTFEager.listInput(a) + } + + @inlinable @inline(__always) + public static func listOutput() -> T { + _RawTFEager.listOutput() + } + + /// Loads a 2-D (matrix) `Tensor` with name `old_tensor_name` from the checkpoint + /// + /// at `ckpt_path` and potentially reorders its rows and columns using the + /// specified remappings. + /// + /// Most users should use one of the wrapper initializers (such as + /// `tf.contrib.framework.load_and_remap_matrix_initializer`) instead of this + /// function directly. + /// + /// The remappings are 1-D tensors with the following properties: + /// + /// * `row_remapping` must have exactly `num_rows` entries. Row `i` of the output + /// matrix will be initialized from the row corresponding to index + /// `row_remapping[i]` in the old `Tensor` from the checkpoint. + /// * `col_remapping` must have either 0 entries (indicating that no column + /// reordering is needed) or `num_cols` entries. If specified, column `j` of the + /// output matrix will be initialized from the column corresponding to index + /// `col_remapping[j]` in the old `Tensor` from the checkpoint. + /// * A value of -1 in either of the remappings signifies a "missing" entry. In that + /// case, values from the `initializing_values` tensor will be used to fill that + /// missing row or column. If `row_remapping` has `r` missing entries and + /// `col_remapping` has `c` missing entries, then the following condition must be + /// true: + /// + /// `(r * num_cols) + (c * num_rows) - (r * c) == len(initializing_values)` + /// + /// The remapping tensors can be generated using the GenerateVocabRemapping op. + /// + /// As an example, with row_remapping = [1, 0, -1], col_remapping = [0, 2, -1], + /// initializing_values = [0.5, -0.5, 0.25, -0.25, 42], and w(i, j) representing + /// the value from row i, column j of the old tensor in the checkpoint, the output + /// matrix will look like the following: + /// + /// [[w(1, 0), w(1, 2), 0.5], + /// [w(0, 0), w(0, 2), -0.5], + /// [0.25, -0.25, 42]] + /// + /// - Parameters: + /// - ckpt_path: Path to the TensorFlow checkpoint (version 2, `TensorBundle`) from + /// which the old matrix `Tensor` will be loaded. + /// - old_tensor_name: Name of the 2-D `Tensor` to load from checkpoint. + /// - row_remapping: An int `Tensor` of row remappings (generally created by + /// `generate_vocab_remapping`). Even if no row remapping is needed, this must + /// still be an index-valued Tensor (e.g. [0, 1, 2, ...]), or a shifted + /// index-valued `Tensor` (e.g. [8, 9, 10, ...], for partitioned `Variables`). + /// - col_remapping: An int `Tensor` of column remappings (generally created by + /// `generate_vocab_remapping`). May be a size-0 `Tensor` if only row remapping + /// is to be done (e.g. column ordering is the same). + /// - initializing_values: A float `Tensor` containing values to fill in for cells + /// in the output matrix that are not loaded from the checkpoint. Length must be + /// exactly the same as the number of missing / new cells. + /// + /// - Attrs: + /// - num_rows: Number of rows (length of the 1st dimension) in the output matrix. + /// - num_cols: Number of columns (length of the 2nd dimension) in the output matrix. 
+ /// - max_rows_in_memory: The maximum number of rows to load from the checkpoint at + /// once. If less than or equal to 0, the entire matrix will be loaded into + /// memory. Setting this arg trades increased disk reads for lower memory usage. + /// + /// - Output output_matrix: Output matrix containing existing values loaded from the + /// checkpoint, and with any missing values filled in from initializing_values. + @inlinable @inline(__always) + public static func loadAndRemapMatrix( + ckptPath: StringTensor, + oldTensorName: StringTensor, + rowRemapping: Tensor, + colRemapping: Tensor, + initializingValues: Tensor, + numRows: Int64, + numCols: Int64, + maxRowsInMemory: Int64 = -1 + ) -> Tensor { + switch commonBackend( + commonBackend(rowRemapping.handle.backend, colRemapping.handle.backend), + initializingValues.handle.backend) + { + case .XLA: + let output_device = initializingValues.device + let rowRemapping = Tensor(copying: rowRemapping, to: .defaultTFEager) + let colRemapping = Tensor(copying: colRemapping, to: .defaultTFEager) + let initializingValues = Tensor(copying: initializingValues, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.loadAndRemapMatrix( + ckptPath: ckptPath, oldTensorName: oldTensorName, rowRemapping: rowRemapping, + colRemapping: colRemapping, initializingValues: initializingValues, numRows: numRows, + numCols: numCols, maxRowsInMemory: maxRowsInMemory), to: output_device) + case .TF_EAGER: + return _RawTFEager.loadAndRemapMatrix( + ckptPath: ckptPath, oldTensorName: oldTensorName, rowRemapping: rowRemapping, + colRemapping: colRemapping, initializingValues: initializingValues, numRows: numRows, + numCols: numCols, maxRowsInMemory: maxRowsInMemory) + } + + } + + /// Load ADAM embedding parameters. + /// + /// An op that loads optimization parameters into HBM for embedding. Must be + /// preceded by a ConfigureTPUEmbeddingHost op that sets up the correct + /// embedding table configuration. For example, this op is used to install + /// parameters that are loaded from a checkpoint before a training loop is + /// executed. + /// + /// - Parameters: + /// - parameters: Value of parameters used in the ADAM optimization algorithm. + /// - momenta: Value of momenta used in the ADAM optimization algorithm. + /// - velocities: Value of velocities used in the ADAM optimization algorithm. + @inlinable @inline(__always) + public static func loadTPUEmbeddingADAMParameters( + parameters: Tensor, + momenta: Tensor, + velocities: Tensor, + tableId: Int64 = -1, + tableName: String, + numShards: Int64, + shardId: Int64, + config: String + ) { + _RawTFEager.loadTPUEmbeddingADAMParameters( + parameters: parameters, momenta: momenta, velocities: velocities, tableId: tableId, + tableName: tableName, numShards: numShards, shardId: shardId, config: config) + } + + /// Load ADAM embedding parameters with debug support. + /// + /// An op that loads optimization parameters into HBM for embedding. Must be + /// preceded by a ConfigureTPUEmbeddingHost op that sets up the correct + /// embedding table configuration. For example, this op is used to install + /// parameters that are loaded from a checkpoint before a training loop is + /// executed. + /// + /// - Parameters: + /// - parameters: Value of parameters used in the ADAM optimization algorithm. + /// - momenta: Value of momenta used in the ADAM optimization algorithm. + /// - velocities: Value of velocities used in the ADAM optimization algorithm. 
+ /// - gradient_accumulators: Value of gradient_accumulators used in the ADAM optimization algorithm. + @inlinable @inline(__always) + public static func loadTPUEmbeddingADAMParametersGradAccumDebug( + parameters: Tensor, + momenta: Tensor, + velocities: Tensor, + gradientAccumulators: Tensor, + tableId: Int64 = -1, + tableName: String, + numShards: Int64, + shardId: Int64, + config: String + ) { + _RawTFEager.loadTPUEmbeddingADAMParametersGradAccumDebug( + parameters: parameters, momenta: momenta, velocities: velocities, + gradientAccumulators: gradientAccumulators, tableId: tableId, tableName: tableName, + numShards: numShards, shardId: shardId, config: config) + } + + /// Load Adadelta embedding parameters. + /// + /// An op that loads optimization parameters into HBM for embedding. Must be + /// preceded by a ConfigureTPUEmbeddingHost op that sets up the correct + /// embedding table configuration. For example, this op is used to install + /// parameters that are loaded from a checkpoint before a training loop is + /// executed. + /// + /// - Parameters: + /// - parameters: Value of parameters used in the Adadelta optimization algorithm. + /// - accumulators: Value of accumulators used in the Adadelta optimization algorithm. + /// - updates: Value of updates used in the Adadelta optimization algorithm. + @inlinable @inline(__always) + public static func loadTPUEmbeddingAdadeltaParameters( + parameters: Tensor, + accumulators: Tensor, + updates: Tensor, + tableId: Int64 = -1, + tableName: String, + numShards: Int64, + shardId: Int64, + config: String + ) { + _RawTFEager.loadTPUEmbeddingAdadeltaParameters( + parameters: parameters, accumulators: accumulators, updates: updates, tableId: tableId, + tableName: tableName, numShards: numShards, shardId: shardId, config: config) + } + + /// Load Adadelta parameters with debug support. + /// + /// An op that loads optimization parameters into HBM for embedding. Must be + /// preceded by a ConfigureTPUEmbeddingHost op that sets up the correct + /// embedding table configuration. For example, this op is used to install + /// parameters that are loaded from a checkpoint before a training loop is + /// executed. + /// + /// - Parameters: + /// - parameters: Value of parameters used in the Adadelta optimization algorithm. + /// - accumulators: Value of accumulators used in the Adadelta optimization algorithm. + /// - updates: Value of updates used in the Adadelta optimization algorithm. + /// - gradient_accumulators: Value of gradient_accumulators used in the Adadelta optimization algorithm. + @inlinable @inline(__always) + public static func loadTPUEmbeddingAdadeltaParametersGradAccumDebug( + parameters: Tensor, + accumulators: Tensor, + updates: Tensor, + gradientAccumulators: Tensor, + tableId: Int64 = -1, + tableName: String, + numShards: Int64, + shardId: Int64, + config: String + ) { + _RawTFEager.loadTPUEmbeddingAdadeltaParametersGradAccumDebug( + parameters: parameters, accumulators: accumulators, updates: updates, + gradientAccumulators: gradientAccumulators, tableId: tableId, tableName: tableName, + numShards: numShards, shardId: shardId, config: config) + } + + /// Load Adagrad embedding parameters. + /// + /// An op that loads optimization parameters into HBM for embedding. Must be + /// preceded by a ConfigureTPUEmbeddingHost op that sets up the correct + /// embedding table configuration. For example, this op is used to install + /// parameters that are loaded from a checkpoint before a training loop is + /// executed. 
+ /// + /// - Parameters: + /// - parameters: Value of parameters used in the Adagrad optimization algorithm. + /// - accumulators: Value of accumulators used in the Adagrad optimization algorithm. + @inlinable @inline(__always) + public static func loadTPUEmbeddingAdagradParameters( + parameters: Tensor, + accumulators: Tensor, + tableId: Int64 = -1, + tableName: String, + numShards: Int64, + shardId: Int64, + config: String + ) { + _RawTFEager.loadTPUEmbeddingAdagradParameters( + parameters: parameters, accumulators: accumulators, tableId: tableId, tableName: tableName, + numShards: numShards, shardId: shardId, config: config) + } + + /// Load Adagrad embedding parameters with debug support. + /// + /// An op that loads optimization parameters into HBM for embedding. Must be + /// preceded by a ConfigureTPUEmbeddingHost op that sets up the correct + /// embedding table configuration. For example, this op is used to install + /// parameters that are loaded from a checkpoint before a training loop is + /// executed. + /// + /// - Parameters: + /// - parameters: Value of parameters used in the Adagrad optimization algorithm. + /// - accumulators: Value of accumulators used in the Adagrad optimization algorithm. + /// - gradient_accumulators: Value of gradient_accumulators used in the Adagrad optimization algorithm. + @inlinable @inline(__always) + public static func loadTPUEmbeddingAdagradParametersGradAccumDebug( + parameters: Tensor, + accumulators: Tensor, + gradientAccumulators: Tensor, + tableId: Int64 = -1, + tableName: String, + numShards: Int64, + shardId: Int64, + config: String + ) { + _RawTFEager.loadTPUEmbeddingAdagradParametersGradAccumDebug( + parameters: parameters, accumulators: accumulators, + gradientAccumulators: gradientAccumulators, tableId: tableId, tableName: tableName, + numShards: numShards, shardId: shardId, config: config) + } + + /// Load centered RMSProp embedding parameters. + /// + /// An op that loads optimization parameters into HBM for embedding. Must be + /// preceded by a ConfigureTPUEmbeddingHost op that sets up the correct + /// embedding table configuration. For example, this op is used to install + /// parameters that are loaded from a checkpoint before a training loop is + /// executed. + /// + /// - Parameters: + /// - parameters: Value of parameters used in the centered RMSProp optimization algorithm. + /// - ms: Value of ms used in the centered RMSProp optimization algorithm. + /// - mom: Value of mom used in the centered RMSProp optimization algorithm. + /// - mg: Value of mg used in the centered RMSProp optimization algorithm. + @inlinable @inline(__always) + public static func loadTPUEmbeddingCenteredRMSPropParameters( + parameters: Tensor, + ms: Tensor, + mom: Tensor, + mg: Tensor, + tableId: Int64 = -1, + tableName: String, + numShards: Int64, + shardId: Int64, + config: String + ) { + _RawTFEager.loadTPUEmbeddingCenteredRMSPropParameters( + parameters: parameters, ms: ms, mom: mom, mg: mg, tableId: tableId, tableName: tableName, + numShards: numShards, shardId: shardId, config: config) + } + + /// Load FTRL embedding parameters. + /// + /// An op that loads optimization parameters into HBM for embedding. Must be + /// preceded by a ConfigureTPUEmbeddingHost op that sets up the correct + /// embedding table configuration. For example, this op is used to install + /// parameters that are loaded from a checkpoint before a training loop is + /// executed. 
+ /// + /// - Parameters: + /// - parameters: Value of parameters used in the FTRL optimization algorithm. + /// - accumulators: Value of accumulators used in the FTRL optimization algorithm. + /// - linears: Value of linears used in the FTRL optimization algorithm. + @inlinable @inline(__always) + public static func loadTPUEmbeddingFTRLParameters( + parameters: Tensor, + accumulators: Tensor, + linears: Tensor, + tableId: Int64 = -1, + tableName: String, + numShards: Int64, + shardId: Int64, + config: String + ) { + _RawTFEager.loadTPUEmbeddingFTRLParameters( + parameters: parameters, accumulators: accumulators, linears: linears, tableId: tableId, + tableName: tableName, numShards: numShards, shardId: shardId, config: config) + } + + /// Load FTRL embedding parameters with debug support. + /// + /// An op that loads optimization parameters into HBM for embedding. Must be + /// preceded by a ConfigureTPUEmbeddingHost op that sets up the correct + /// embedding table configuration. For example, this op is used to install + /// parameters that are loaded from a checkpoint before a training loop is + /// executed. + /// + /// - Parameters: + /// - parameters: Value of parameters used in the FTRL optimization algorithm. + /// - accumulators: Value of accumulators used in the FTRL optimization algorithm. + /// - linears: Value of linears used in the FTRL optimization algorithm. + /// - gradient_accumulators: Value of gradient_accumulators used in the FTRL optimization algorithm. + @inlinable @inline(__always) + public static func loadTPUEmbeddingFTRLParametersGradAccumDebug( + parameters: Tensor, + accumulators: Tensor, + linears: Tensor, + gradientAccumulators: Tensor, + tableId: Int64 = -1, + tableName: String, + numShards: Int64, + shardId: Int64, + config: String + ) { + _RawTFEager.loadTPUEmbeddingFTRLParametersGradAccumDebug( + parameters: parameters, accumulators: accumulators, linears: linears, + gradientAccumulators: gradientAccumulators, tableId: tableId, tableName: tableName, + numShards: numShards, shardId: shardId, config: config) + } + + /// Load MDL Adagrad Light embedding parameters. + /// + /// An op that loads optimization parameters into HBM for embedding. Must be + /// preceded by a ConfigureTPUEmbeddingHost op that sets up the correct + /// embedding table configuration. For example, this op is used to install + /// parameters that are loaded from a checkpoint before a training loop is + /// executed. + /// + /// - Parameters: + /// - parameters: Value of parameters used in the MDL Adagrad Light optimization algorithm. + /// - accumulators: Value of accumulators used in the MDL Adagrad Light optimization algorithm. + /// - weights: Value of weights used in the MDL Adagrad Light optimization algorithm. + /// - benefits: Value of benefits used in the MDL Adagrad Light optimization algorithm. + @inlinable @inline(__always) + public static func loadTPUEmbeddingMDLAdagradLightParameters( + parameters: Tensor, + accumulators: Tensor, + weights: Tensor, + benefits: Tensor, + tableId: Int64 = -1, + tableName: String, + numShards: Int64, + shardId: Int64, + config: String + ) { + _RawTFEager.loadTPUEmbeddingMDLAdagradLightParameters( + parameters: parameters, accumulators: accumulators, weights: weights, benefits: benefits, + tableId: tableId, tableName: tableName, numShards: numShards, shardId: shardId, + config: config) + } + + /// Load Momentum embedding parameters. + /// + /// An op that loads optimization parameters into HBM for embedding. 
Must be + /// preceded by a ConfigureTPUEmbeddingHost op that sets up the correct + /// embedding table configuration. For example, this op is used to install + /// parameters that are loaded from a checkpoint before a training loop is + /// executed. + /// + /// - Parameters: + /// - parameters: Value of parameters used in the Momentum optimization algorithm. + /// - momenta: Value of momenta used in the Momentum optimization algorithm. + @inlinable @inline(__always) + public static func loadTPUEmbeddingMomentumParameters( + parameters: Tensor, + momenta: Tensor, + tableId: Int64 = -1, + tableName: String, + numShards: Int64, + shardId: Int64, + config: String + ) { + _RawTFEager.loadTPUEmbeddingMomentumParameters( + parameters: parameters, momenta: momenta, tableId: tableId, tableName: tableName, + numShards: numShards, shardId: shardId, config: config) + } + + /// Load Momentum embedding parameters with debug support. + /// + /// An op that loads optimization parameters into HBM for embedding. Must be + /// preceded by a ConfigureTPUEmbeddingHost op that sets up the correct + /// embedding table configuration. For example, this op is used to install + /// parameters that are loaded from a checkpoint before a training loop is + /// executed. + /// + /// - Parameters: + /// - parameters: Value of parameters used in the Momentum optimization algorithm. + /// - momenta: Value of momenta used in the Momentum optimization algorithm. + /// - gradient_accumulators: Value of gradient_accumulators used in the Momentum optimization algorithm. + @inlinable @inline(__always) + public static func loadTPUEmbeddingMomentumParametersGradAccumDebug( + parameters: Tensor, + momenta: Tensor, + gradientAccumulators: Tensor, + tableId: Int64 = -1, + tableName: String, + numShards: Int64, + shardId: Int64, + config: String + ) { + _RawTFEager.loadTPUEmbeddingMomentumParametersGradAccumDebug( + parameters: parameters, momenta: momenta, gradientAccumulators: gradientAccumulators, + tableId: tableId, tableName: tableName, numShards: numShards, shardId: shardId, + config: config) + } + + /// Load proximal Adagrad embedding parameters. + /// + /// An op that loads optimization parameters into HBM for embedding. Must be + /// preceded by a ConfigureTPUEmbeddingHost op that sets up the correct + /// embedding table configuration. For example, this op is used to install + /// parameters that are loaded from a checkpoint before a training loop is + /// executed. + /// + /// - Parameters: + /// - parameters: Value of parameters used in the proximal Adagrad optimization algorithm. + /// - accumulators: Value of accumulators used in the proximal Adagrad optimization algorithm. + @inlinable @inline(__always) + public static func loadTPUEmbeddingProximalAdagradParameters( + parameters: Tensor, + accumulators: Tensor, + tableId: Int64 = -1, + tableName: String, + numShards: Int64, + shardId: Int64, + config: String + ) { + _RawTFEager.loadTPUEmbeddingProximalAdagradParameters( + parameters: parameters, accumulators: accumulators, tableId: tableId, tableName: tableName, + numShards: numShards, shardId: shardId, config: config) + } + + /// Load proximal Adagrad embedding parameters with debug support. + /// + /// An op that loads optimization parameters into HBM for embedding. Must be + /// preceded by a ConfigureTPUEmbeddingHost op that sets up the correct + /// embedding table configuration. For example, this op is used to install + /// parameters that are loaded from a checkpoint before a training loop is + /// executed. 
+ /// + /// - Parameters: + /// - parameters: Value of parameters used in the proximal Adagrad optimization algorithm. + /// - accumulators: Value of accumulators used in the proximal Adagrad optimization algorithm. + /// - gradient_accumulators: Value of gradient_accumulators used in the proximal Adagrad optimization algorithm. + @inlinable @inline(__always) + public static func loadTPUEmbeddingProximalAdagradParametersGradAccumDebug( + parameters: Tensor, + accumulators: Tensor, + gradientAccumulators: Tensor, + tableId: Int64 = -1, + tableName: String, + numShards: Int64, + shardId: Int64, + config: String + ) { + _RawTFEager.loadTPUEmbeddingProximalAdagradParametersGradAccumDebug( + parameters: parameters, accumulators: accumulators, + gradientAccumulators: gradientAccumulators, tableId: tableId, tableName: tableName, + numShards: numShards, shardId: shardId, config: config) + } + + /// Load RMSProp embedding parameters. + /// + /// An op that loads optimization parameters into HBM for embedding. Must be + /// preceded by a ConfigureTPUEmbeddingHost op that sets up the correct + /// embedding table configuration. For example, this op is used to install + /// parameters that are loaded from a checkpoint before a training loop is + /// executed. + /// + /// - Parameters: + /// - parameters: Value of parameters used in the RMSProp optimization algorithm. + /// - ms: Value of ms used in the RMSProp optimization algorithm. + /// - mom: Value of mom used in the RMSProp optimization algorithm. + @inlinable @inline(__always) + public static func loadTPUEmbeddingRMSPropParameters( + parameters: Tensor, + ms: Tensor, + mom: Tensor, + tableId: Int64 = -1, + tableName: String, + numShards: Int64, + shardId: Int64, + config: String + ) { + _RawTFEager.loadTPUEmbeddingRMSPropParameters( + parameters: parameters, ms: ms, mom: mom, tableId: tableId, tableName: tableName, + numShards: numShards, shardId: shardId, config: config) + } + + /// Load RMSProp embedding parameters with debug support. + /// + /// An op that loads optimization parameters into HBM for embedding. Must be + /// preceded by a ConfigureTPUEmbeddingHost op that sets up the correct + /// embedding table configuration. For example, this op is used to install + /// parameters that are loaded from a checkpoint before a training loop is + /// executed. + /// + /// - Parameters: + /// - parameters: Value of parameters used in the RMSProp optimization algorithm. + /// - ms: Value of ms used in the RMSProp optimization algorithm. + /// - mom: Value of mom used in the RMSProp optimization algorithm. + /// - gradient_accumulators: Value of gradient_accumulators used in the RMSProp optimization algorithm. + @inlinable @inline(__always) + public static func loadTPUEmbeddingRMSPropParametersGradAccumDebug( + parameters: Tensor, + ms: Tensor, + mom: Tensor, + gradientAccumulators: Tensor, + tableId: Int64 = -1, + tableName: String, + numShards: Int64, + shardId: Int64, + config: String + ) { + _RawTFEager.loadTPUEmbeddingRMSPropParametersGradAccumDebug( + parameters: parameters, ms: ms, mom: mom, gradientAccumulators: gradientAccumulators, + tableId: tableId, tableName: tableName, numShards: numShards, shardId: shardId, + config: config) + } + + /// Load SGD embedding parameters. + /// + /// An op that loads optimization parameters into HBM for embedding. Must be + /// preceded by a ConfigureTPUEmbeddingHost op that sets up the correct + /// embedding table configuration. 
For example, this op is used to install + /// parameters that are loaded from a checkpoint before a training loop is + /// executed. + /// + /// - Parameter parameters: Value of parameters used in the stochastic gradient descent optimization algorithm. + @inlinable @inline(__always) + public static func loadTPUEmbeddingStochasticGradientDescentParameters( + parameters: Tensor, + tableId: Int64 = -1, + tableName: String, + numShards: Int64, + shardId: Int64, + config: String + ) { + _RawTFEager.loadTPUEmbeddingStochasticGradientDescentParameters( + parameters: parameters, tableId: tableId, tableName: tableName, numShards: numShards, + shardId: shardId, config: config) + } + + /// Computes natural logarithm of x element-wise. + /// + /// I.e., \\(y = \log_e x\\). + /// + /// Example: + /// + /// ```python + /// x = tf.constant([0, 0.5, 1, 5]) + /// tf.math.log(x) ==> [-inf, -0.6931472, 0. , 1.609438] + /// ``` + @inlinable @inline(__always) + public static func log( + _ x: Tensor + ) -> Tensor { + switch x.handle.backend { + case .XLA: + return _RawXLA.log(x) + case .TF_EAGER: + return _RawTFEager.log(x) + } + + } + + /// Computes natural logarithm of (1 + x) element-wise. + /// + /// I.e., \\(y = \log_e (1 + x)\\). + /// + /// Example: + /// + /// ```python + /// x = tf.constant([0, 0.5, 1, 5]) + /// tf.math.log1p(x) ==> [0., 0.4054651, 0.6931472, 1.7917595] + /// ``` + @inlinable @inline(__always) + public static func log1p( + _ x: Tensor + ) -> Tensor { + switch x.handle.backend { + case .XLA: + return _RawXLA.log1p(x) + case .TF_EAGER: + return _RawTFEager.log1p(x) + } + + } + + /// Computes the sign and the log of the absolute value of the determinant of + /// + /// one or more square matrices. + /// + /// The input is a tensor of shape `[N, M, M]` whose inner-most 2 dimensions + /// form square matrices. The outputs are two tensors containing the signs and + /// absolute values of the log determinants for all N input submatrices + /// `[..., :, :]` such that the determinant = sign*exp(log_abs_determinant). + /// The log_abs_determinant is computed as det(P)*sum(log(diag(LU))) where LU + /// is the LU decomposition of the input and P is the corresponding + /// permutation matrix. + /// + /// - Parameter input: Shape is `[N, M, M]`. + /// + /// - Outputs: + /// - sign: The signs of the log determinants of the inputs. Shape is `[N]`. + /// - log_abs_determinant: The logs of the absolute values of the determinants + /// of the N input matrices. Shape is `[N]`. + @inlinable @inline(__always) + public static func logMatrixDeterminant( + _ input: Tensor + ) -> (sign: Tensor, logAbsDeterminant: Tensor) { + _RawTFEager.logMatrixDeterminant(input) + } + + /// Computes log softmax activations. + /// + /// For each batch `i` and class `j` we have + /// + /// logsoftmax[i, j] = logits[i, j] - log(sum(exp(logits[i]))) + /// + /// - Parameter logits: 2-D with shape `[batch_size, num_classes]`. + /// + /// - Output logsoftmax: Same shape as `logits`. + @inlinable @inline(__always) + public static func logSoftmax( + logits: Tensor + ) -> Tensor { + switch logits.handle.backend { + case .XLA: + return _RawXLA.logSoftmax(logits: logits) + case .TF_EAGER: + return _RawTFEager.logSoftmax(logits: logits) + } + + } + + /// Generates labels for candidate sampling with a log-uniform distribution. + /// + /// See explanations of candidate sampling and the data formats at + /// go/candidate-sampling. + /// + /// For each batch, this op picks a single set of sampled candidate labels. 
+  ///
+  /// The advantages of sampling candidates per-batch are simplicity and the
+  /// possibility of efficient dense matrix multiplication. The disadvantage is that
+  /// the sampled candidates must be chosen independently of the context and of the
+  /// true labels.
+  ///
+  /// - Parameter true_classes: A batch_size * num_true matrix, in which each row contains the
+  ///   IDs of the num_true target_classes in the corresponding original label.
+  ///
+  /// - Attrs:
+  ///   - num_true: Number of true labels per context.
+  ///   - num_sampled: Number of candidates to randomly sample.
+  ///   - unique: If unique is true, we sample with rejection, so that all sampled
+  ///     candidates in a batch are unique. This requires some approximation to
+  ///     estimate the post-rejection sampling probabilities.
+  ///   - range_max: The sampler will sample integers from the interval [0, range_max).
+  ///   - seed: If either seed or seed2 are set to be non-zero, the random number
+  ///     generator is seeded by the given seed. Otherwise, it is seeded by a
+  ///     random seed.
+  ///   - seed2: A second seed to avoid seed collision.
+  ///
+  /// - Outputs:
+  ///   - sampled_candidates: A vector of length num_sampled, in which each element is
+  ///     the ID of a sampled candidate.
+  ///   - true_expected_count: A batch_size * num_true matrix, representing
+  ///     the number of times each candidate is expected to occur in a batch
+  ///     of sampled candidates. If unique=true, then this is a probability.
+  ///   - sampled_expected_count: A vector of length num_sampled, for each sampled
+  ///     candidate representing the number of times the candidate is expected
+  ///     to occur in a batch of sampled candidates. If unique=true, then this is a
+  ///     probability.
+  @inlinable @inline(__always)
+  public static func logUniformCandidateSampler(
+    trueClasses: Tensor<Int64>,
+    numTrue: Int64,
+    numSampled: Int64,
+    unique: Bool,
+    rangeMax: Int64,
+    seed: Int64 = 0,
+    seed2: Int64 = 0
+  ) -> (
+    sampledCandidates: Tensor<Int64>, trueExpectedCount: Tensor<Float>,
+    sampledExpectedCount: Tensor<Float>
+  ) {
+    _RawTFEager.logUniformCandidateSampler(
+      trueClasses: trueClasses, numTrue: numTrue, numSampled: numSampled, unique: unique,
+      rangeMax: rangeMax, seed: seed, seed2: seed2)
+  }
+
+  /// Returns the truth value of x AND y element-wise.
+  ///
+  /// *NOTE*: `LogicalAnd` supports broadcasting. More about broadcasting
+  /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
+  @inlinable @inline(__always)
+  public static func logicalAnd(
+    _ x: Tensor<Bool>,
+    _ y: Tensor<Bool>
+  ) -> Tensor<Bool> {
+    switch commonBackend(x.handle.backend, y.handle.backend) {
+    case .XLA:
+      return _RawXLA.logicalAnd(x, y)
+    case .TF_EAGER:
+      return _RawTFEager.logicalAnd(x, y)
+    }
+
+  }
+
+  /// Returns the truth value of `NOT x` element-wise.
+  ///
+  /// - Parameter x: A `Tensor` of type `bool`.
+  ///
+  /// - Output y: A `Tensor` of type `bool` with the same shape as `x`. The logical negation of `x`.
+  @inlinable @inline(__always)
+  public static func logicalNot(
+    _ x: Tensor<Bool>
+  ) -> Tensor<Bool> {
+    switch x.handle.backend {
+    case .XLA:
+      return _RawXLA.logicalNot(x)
+    case .TF_EAGER:
+      return _RawTFEager.logicalNot(x)
+    }
+
+  }
+
+  /// Returns the truth value of x OR y element-wise.
+  ///
+  /// *NOTE*: `LogicalOr` supports broadcasting. More about broadcasting
+  /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
+  @inlinable @inline(__always)
+  public static func logicalOr(
+    _ x: Tensor<Bool>,
+    _ y: Tensor<Bool>
+  ) -> Tensor<Bool> {
+    switch commonBackend(x.handle.backend, y.handle.backend) {
+    case .XLA:
+      return _RawXLA.logicalOr(x, y)
+    case .TF_EAGER:
+      return _RawTFEager.logicalOr(x, y)
+    }
+
+  }
+
+  /// Outputs all keys and values in the table.
+  ///
+  /// - Parameter table_handle: Handle to the table.
+  ///
+  /// - Outputs:
+  ///   - keys: Vector of all keys present in the table.
+  ///   - values: Tensor of all values in the table. Indexed in parallel with `keys`.
+  @inlinable @inline(__always)
+  public static func lookupTableExportV2<
+    Tkeys: TensorFlowScalar,
+    Tvalues: TensorFlowScalar
+  >(
+    tableHandle: ResourceHandle
+  ) -> (keys: Tensor<Tkeys>, values: Tensor<Tvalues>) {
+    _RawTFEager.lookupTableExportV2(tableHandle: tableHandle)
+  }
+
+  /// Looks up keys in a table, outputs the corresponding values.
+  ///
+  /// The tensor `keys` must be of the same type as the keys of the table.
+  /// The output `values` is of the type of the table values.
+  ///
+  /// The scalar `default_value` is the value output for keys not present in the
+  /// table. It must also be of the same type as the table values.
+  ///
+  /// - Parameters:
+  ///   - table_handle: Handle to the table.
+  ///   - keys: Any shape. Keys to look up.
+  ///
+  /// - Output values: Same shape as `keys`. Values found in the table, or `default_value`
+  ///   for missing keys.
+  @inlinable @inline(__always)
+  public static func lookupTableFindV2<
+    Tin: TensorFlowScalar,
+    Tout: TensorFlowScalar
+  >(
+    tableHandle: ResourceHandle,
+    keys: Tensor<Tin>,
+    defaultValue: Tensor<Tout>
+  ) -> Tensor<Tout> {
+    switch commonBackend(keys.handle.backend, defaultValue.handle.backend) {
+    case .XLA:
+      let output_device = defaultValue.device
+      let keys = Tensor(copying: keys, to: .defaultTFEager)
+      let defaultValue = Tensor(copying: defaultValue, to: .defaultTFEager)
+      return Tensor(
+        copying: _RawTFEager.lookupTableFindV2(
+          tableHandle: tableHandle, keys: keys, defaultValue: defaultValue), to: output_device)
+    case .TF_EAGER:
+      return _RawTFEager.lookupTableFindV2(
+        tableHandle: tableHandle, keys: keys, defaultValue: defaultValue)
+    }
+
+  }
+
+  /// Replaces the contents of the table with the specified keys and values.
+  ///
+  /// The tensor `keys` must be of the same type as the keys of the table.
+  /// The tensor `values` must be of the type of the table values.
+  ///
+  /// - Parameters:
+  ///   - table_handle: Handle to the table.
+  ///   - keys: Any shape. Keys to look up.
+  ///   - values: Values to associate with keys.
+  @inlinable @inline(__always)
+  public static func lookupTableImportV2<
+    Tin: TensorFlowScalar,
+    Tout: TensorFlowScalar
+  >(
+    tableHandle: ResourceHandle,
+    keys: Tensor<Tin>,
+    _ values: Tensor<Tout>
+  ) {
+    _RawTFEager.lookupTableImportV2(tableHandle: tableHandle, keys: keys, values)
+  }
+
+  /// Updates the table to associate keys with values.
+  ///
+  /// The tensor `keys` must be of the same type as the keys of the table.
+  /// The tensor `values` must be of the type of the table values.
+  ///
+  /// - Parameters:
+  ///   - table_handle: Handle to the table.
+  ///   - keys: Any shape. Keys to look up.
+  ///   - values: Values to associate with keys.
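+  ///
+  /// A hedged usage sketch (assumes `tableHandle` was created elsewhere, e.g.
+  /// by a table-construction op such as `hashTableV2`; the key/value dtypes
+  /// here are illustrative):
+  ///
+  /// ```swift
+  /// let keys = Tensor<Int64>([1, 2, 3])
+  /// let values = Tensor<Float>([10, 20, 30])
+  /// _Raw.lookupTableInsertV2(tableHandle: tableHandle, keys: keys, values)
+  /// ```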
+  @inlinable @inline(__always)
+  public static func lookupTableInsertV2<
+    Tin: TensorFlowScalar,
+    Tout: TensorFlowScalar
+  >(
+    tableHandle: ResourceHandle,
+    keys: Tensor<Tin>,
+    _ values: Tensor<Tout>
+  ) {
+    _RawTFEager.lookupTableInsertV2(tableHandle: tableHandle, keys: keys, values)
+  }
+
+  /// Removes keys and their associated values from a table.
+  ///
+  /// The tensor `keys` must be of the same type as the keys of the table. Keys not
+  /// already in the table are silently ignored.
+  ///
+  /// - Parameters:
+  ///   - table_handle: Handle to the table.
+  ///   - keys: Any shape. Keys of the elements to remove.
+  @inlinable @inline(__always)
+  public static func lookupTableRemoveV2<Tin: TensorFlowScalar>(
+    tableHandle: ResourceHandle,
+    keys: Tensor<Tin>
+  ) {
+    _RawTFEager.lookupTableRemoveV2(tableHandle: tableHandle, keys: keys)
+  }
+
+  /// Computes the number of elements in the given table.
+  ///
+  /// - Parameter table_handle: Handle to the table.
+  ///
+  /// - Output size: Scalar that contains the number of elements in the table.
+  @inlinable @inline(__always)
+  public static func lookupTableSizeV2(
+    tableHandle: ResourceHandle
+  ) -> Tensor<Int64> {
+    _RawTFEager.lookupTableSizeV2(tableHandle: tableHandle)
+  }
+
+  /// Forwards the input to the output.
+  ///
+  /// This operator represents the loop termination condition used by the
+  /// "pivot" switches of a loop.
+  ///
+  /// - Parameter input: A boolean scalar, representing the branch predicate of the Switch op.
+  ///
+  /// - Output output: The same tensor as `input`.
+  @inlinable @inline(__always)
+  public static func loopCond(
+    _ input: Tensor<Bool>
+  ) -> Tensor<Bool> {
+    switch input.handle.backend {
+    case .XLA:
+      let output_device = input.device
+      let input = Tensor(copying: input, to: .defaultTFEager)
+      return Tensor(copying: _RawTFEager.loopCond(input), to: output_device)
+    case .TF_EAGER:
+      return _RawTFEager.loopCond(input)
+    }
+
+  }
+
+  /// Applies lower_bound(sorted_search_values, values) along each row.
+  ///
+  /// Each set of rows with the same index in (sorted_inputs, values) is treated
+  /// independently. The resulting row is the equivalent of calling
+  /// `np.searchsorted(sorted_inputs, values, side='left')`.
+  ///
+  /// The result is not a global index to the entire
+  /// `Tensor`, but rather just the index in the last dimension.
+  ///
+  /// A 2-D example:
+  ///   sorted_sequence = [[0, 3, 9, 9, 10],
+  ///                      [1, 2, 3, 4, 5]]
+  ///   values = [[2, 4, 9],
+  ///             [0, 2, 6]]
+  ///
+  ///   result = LowerBound(sorted_sequence, values)
+  ///
+  ///   result == [[1, 2, 2],
+  ///              [0, 1, 5]]
+  ///
+  /// - Parameters:
+  ///   - sorted_inputs: 2-D Tensor where each row is ordered.
+  ///   - values: 2-D Tensor with the same numbers of rows as `sorted_search_values`. Contains
+  ///     the values that will be searched for in `sorted_search_values`.
+  ///
+  /// - Output output: A `Tensor` with the same shape as `values`. It contains the first scalar index
+  ///   into the last dimension where values can be inserted without changing the
+  ///   ordered property.
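+  ///
+  /// A Swift sketch of the 2-D example above (choosing `Int32` is one valid
+  /// instantiation of `OutType`):
+  ///
+  /// ```swift
+  /// let sorted = Tensor<Float>([[0, 3, 9, 9, 10],
+  ///                             [1, 2, 3, 4, 5]])
+  /// let values = Tensor<Float>([[2, 4, 9],
+  ///                             [0, 2, 6]])
+  /// let result: Tensor<Int32> = _Raw.lowerBound(sortedInputs: sorted, values)
+  /// // result == [[1, 2, 2], [0, 1, 5]]
+  /// ```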
+  @inlinable @inline(__always)
+  public static func lowerBound<
+    T: TensorFlowScalar,
+    OutType: TensorFlowIndex
+  >(
+    sortedInputs: Tensor<T>,
+    _ values: Tensor<T>
+  ) -> Tensor<OutType> {
+    switch commonBackend(sortedInputs.handle.backend, values.handle.backend) {
+    case .XLA:
+      let output_device = values.device
+      let sortedInputs = Tensor(copying: sortedInputs, to: .defaultTFEager)
+      let values = Tensor(copying: values, to: .defaultTFEager)
+      return Tensor(
+        copying: _RawTFEager.lowerBound(sortedInputs: sortedInputs, values), to: output_device)
+    case .TF_EAGER:
+      return _RawTFEager.lowerBound(sortedInputs: sortedInputs, values)
+    }
+
+  }
+
+  /// Computes the LU decomposition of one or more square matrices.
+  ///
+  /// The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions
+  /// form square matrices.
+  ///
+  /// The input has to be invertible.
+  ///
+  /// The output consists of two tensors LU and P containing the LU decomposition
+  /// of all input submatrices `[..., :, :]`. LU encodes the lower triangular and
+  /// upper triangular factors.
+  ///
+  /// For each input submatrix of shape `[M, M]`, L is a lower triangular matrix of
+  /// shape `[M, M]` with unit diagonal whose entries correspond to the strictly lower
+  /// triangular part of LU. U is an upper triangular matrix of shape `[M, M]` whose
+  /// entries correspond to the upper triangular part, including the diagonal, of LU.
+  ///
+  /// P represents a permutation matrix encoded as a list of indices each between `0`
+  /// and `M-1`, inclusive. If P_mat denotes the permutation matrix corresponding to
+  /// P, then L, U and P satisfy P_mat * input = L * U.
+  ///
+  /// - Parameter input: A tensor of shape `[..., M, M]` whose inner-most 2 dimensions form matrices of
+  ///   size `[M, M]`.
+  ///
+  /// - Outputs:
+  ///   - lu: A tensor of shape `[..., M, M]` whose strictly lower triangular part denotes the
+  ///     lower triangular factor `L` with unit diagonal, and whose upper triangular part
+  ///     denotes the upper triangular factor `U`.
+  ///   - p: Permutation of the rows encoded as a list of indices in `0..M-1`. Shape is
+  ///     `[..., M]`.
+  ///     @compatibility(scipy)
+  ///     Similar to `scipy.linalg.lu`, except the triangular factors `L` and `U` are
+  ///     packed into a single tensor, the permutation is applied to `input` instead of
+  ///     the right hand side and the permutation `P` is returned as a list of indices
+  ///     instead of a permutation matrix.
+  ///     @end_compatibility
+  @inlinable @inline(__always)
+  public static func lu<
+    T: FloatingPoint & TensorFlowScalar,
+    OutputIdxType: TensorFlowIndex
+  >(
+    _ input: Tensor<T>
+  ) -> (lu: Tensor<T>, p: Tensor<OutputIdxType>) {
+    _RawTFEager.lu(input)
+  }
+
+  /// Makes a new iterator from the given `dataset` and stores it in `iterator`.
+  ///
+  /// This operation may be executed multiple times. Each execution will reset the
+  /// iterator in `iterator` to the first element of `dataset`.
+  @inlinable @inline(__always)
+  public static func makeIterator(
+    dataset: VariantHandle,
+    iterator: ResourceHandle
+  ) {
+    _RawTFEager.makeIterator(dataset: dataset, iterator: iterator)
+  }
+
+  /// Creates a dataset that fuses mapping with batching.
+  ///
+  /// Creates a dataset that applies `f` to the outputs of `input_dataset` and then
+  /// batches `batch_size` of them.
+  ///
+  /// Unlike a "MapDataset", which applies `f` sequentially, this dataset invokes up
+  /// to `batch_size * num_parallel_batches` copies of `f` in parallel.
+ /// + /// - Parameters: + /// - input_dataset: A variant tensor representing the input dataset. + /// - other_arguments: A list of tensors, typically values that were captured when building a closure + /// for `f`. + /// - batch_size: A scalar representing the number of elements to accumulate in a + /// batch. It determines the number of concurrent invocations of `f` that process + /// elements from `input_dataset` in parallel. + /// - num_parallel_calls: A scalar representing the maximum number of parallel invocations of the `map_fn` + /// function. Applying the `map_fn` on consecutive input elements in parallel has + /// the potential to improve input pipeline throughput. + /// - drop_remainder: A scalar representing whether the last batch should be dropped in case its size + /// is smaller than desired. + /// + /// - Attr f: A function to apply to the outputs of `input_dataset`. + @inlinable @inline(__always) + public static func mapAndBatchDataset< + FIn: TensorGroup, + FOut: TensorGroup, + Targuments: TensorArrayProtocol + >( + inputDataset: VariantHandle, + otherArguments: Targuments, + batchSize: Tensor, + numParallelCalls: Tensor, + dropRemainder: Tensor, + f: (FIn) -> FOut, + outputTypes: [TensorDataType], + outputShapes: [TensorShape?], + preserveCardinality: Bool = false + ) -> VariantHandle { + _RawTFEager.mapAndBatchDataset( + inputDataset: inputDataset, otherArguments: otherArguments, batchSize: batchSize, + numParallelCalls: numParallelCalls, dropRemainder: dropRemainder, f: f, + outputTypes: outputTypes, outputShapes: outputShapes, + preserveCardinality: preserveCardinality) + } + + /// Op removes all elements in the underlying container. + @inlinable @inline(__always) + public static func mapClear( + capacity: Int64 = 0, + memoryLimit: Int64 = 0, + dtypes: [TensorDataType], + container: String, + sharedName: String + ) { + _RawTFEager.mapClear( + capacity: capacity, memoryLimit: memoryLimit, dtypes: dtypes, container: container, + sharedName: sharedName) + } + + /// Creates a dataset that applies `f` to the outputs of `input_dataset`. + @inlinable @inline(__always) + public static func mapDataset< + FIn: TensorGroup, + FOut: TensorGroup, + Targuments: TensorArrayProtocol + >( + inputDataset: VariantHandle, + otherArguments: Targuments, + f: (FIn) -> FOut, + outputTypes: [TensorDataType], + outputShapes: [TensorShape?], + useInterOpParallelism: Bool = true, + preserveCardinality: Bool = false + ) -> VariantHandle { + _RawTFEager.mapDataset( + inputDataset: inputDataset, otherArguments: otherArguments, f: f, outputTypes: outputTypes, + outputShapes: outputShapes, useInterOpParallelism: useInterOpParallelism, + preserveCardinality: preserveCardinality) + } + + /// Maps a function on the list of tensors unpacked from arguments on dimension 0. + /// The function given by `f` is assumed to be stateless, and is executed + /// concurrently on all the slices; up to batch_size (i.e. the size of the 0th + /// dimension of each argument) functions will be scheduled at once. + /// + /// The `max_intra_op_parallelism` attr, which defaults to 1, can be used to + /// limit the intra op parallelism. To limit inter-op parallelism, a user can + /// set a private threadpool on the dataset using `tf.data.Options`'s + /// `ThreadingOptions`. + /// + /// Note that this op is not exposed to users directly, but is invoked in tf.data + /// rewrites. 
+  ///
+  /// - Parameters:
+  ///   - arguments: A list of tensors whose types are `Targuments`, corresponding to the inputs
+  ///     the function should be mapped over.
+  ///   - captured_inputs: A list of tensors whose types are `Tcaptured`, corresponding to the captured
+  ///     inputs of the defun.
+  ///
+  /// - Attrs:
+  ///   - Targuments: A list of types.
+  ///   - Tcaptured: A list of types.
+  ///   - output_types: A list of types.
+  ///   - output_shapes: A list of shapes.
+  ///
+  /// - Output output: A list of output tensors whose types are `output_types` and whose dimensions
+  ///   0 are the same as the dimensions 0 of the tensors in `arguments`, and whose
+  ///   remaining dimensions correspond to those in `output_shapes`.
+  @inlinable @inline(__always)
+  public static func mapDefun<
+    Targuments: TensorArrayProtocol,
+    Tcaptured: TensorArrayProtocol,
+    OutputTypes: TensorGroup,
+    FIn: TensorGroup,
+    FOut: TensorGroup
+  >(
+    arguments: Targuments,
+    capturedInputs: Tcaptured,
+    outputShapes: [TensorShape?],
+    f: (FIn) -> FOut,
+    maxIntraOpParallelism: Int64 = 1
+  ) -> OutputTypes {
+    _RawTFEager.mapDefun(
+      arguments: arguments, capturedInputs: capturedInputs, outputShapes: outputShapes, f: f,
+      maxIntraOpParallelism: maxIntraOpParallelism)
+  }
+
+  /// Op returns the number of incomplete elements in the underlying container.
+  @inlinable @inline(__always)
+  public static func mapIncompleteSize(
+    capacity: Int64 = 0,
+    memoryLimit: Int64 = 0,
+    dtypes: [TensorDataType],
+    container: String,
+    sharedName: String
+  ) -> Tensor<Int32> {
+    _RawTFEager.mapIncompleteSize(
+      capacity: capacity, memoryLimit: memoryLimit, dtypes: dtypes, container: container,
+      sharedName: sharedName)
+  }
+
+  /// Op peeks at the values at the specified key.
+  ///
+  /// If the underlying container does not contain this key,
+  /// this op will block until it does.
+  @inlinable @inline(__always)
+  public static func mapPeek<Dtypes: TensorGroup>(
+    key: Tensor<Int64>,
+    indices: Tensor<Int32>,
+    capacity: Int64 = 0,
+    memoryLimit: Int64 = 0,
+    container: String,
+    sharedName: String
+  ) -> Dtypes {
+    _RawTFEager.mapPeek(
+      key: key, indices: indices, capacity: capacity, memoryLimit: memoryLimit,
+      container: container, sharedName: sharedName)
+  }
+
+  /// Op returns the number of elements in the underlying container.
+  @inlinable @inline(__always)
+  public static func mapSize(
+    capacity: Int64 = 0,
+    memoryLimit: Int64 = 0,
+    dtypes: [TensorDataType],
+    container: String,
+    sharedName: String
+  ) -> Tensor<Int32> {
+    _RawTFEager.mapSize(
+      capacity: capacity, memoryLimit: memoryLimit, dtypes: dtypes, container: container,
+      sharedName: sharedName)
+  }
+
+  /// Stage (key, values) in the underlying container which behaves like a hashtable.
+  ///
+  /// - Parameters:
+  ///   - key: int64
+  ///   - values: A list of tensors. The data types of inserted values must adhere
+  ///     to `dtypes`.
+  ///
+  /// - Attrs:
+  ///   - capacity: Maximum number of elements in the Staging Area. If > 0, inserts
+  ///     on the container will block when the capacity is reached.
+  ///   - container: If non-empty, this queue is placed in the given container. Otherwise,
+  ///     a default container is used.
+  ///   - shared_name: It is necessary to match this name to the matching Unstage Op.
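+  ///
+  /// A hedged usage sketch (values are illustrative; the staged element is
+  /// retrieved later by a matching unstage op with the same `sharedName`):
+  ///
+  /// ```swift
+  /// let value = Tensor<Float>([1, 2, 3])
+  /// _Raw.mapStage(
+  ///   key: Tensor<Int64>(0), indices: Tensor<Int32>([0]), [value],
+  ///   dtypes: [Float.tensorFlowDataType], container: "", sharedName: "stage_0")
+  /// ```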
+  @inlinable @inline(__always)
+  public static func mapStage<FakeDtypes: TensorArrayProtocol>(
+    key: Tensor<Int64>,
+    indices: Tensor<Int32>,
+    _ values: FakeDtypes,
+    capacity: Int64 = 0,
+    memoryLimit: Int64 = 0,
+    dtypes: [TensorDataType],
+    container: String,
+    sharedName: String
+  ) {
+    _RawTFEager.mapStage(
+      key: key, indices: indices, values, capacity: capacity, memoryLimit: memoryLimit,
+      dtypes: dtypes, container: container, sharedName: sharedName)
+  }
+
+  /// Op removes and returns the values associated with the key from the
+  /// underlying container.
+  ///
+  /// If the underlying container does not contain this key, the op will block
+  /// until it does.
+  @inlinable @inline(__always)
+  public static func mapUnstage<Dtypes: TensorGroup>(
+    key: Tensor<Int64>,
+    indices: Tensor<Int32>,
+    capacity: Int64 = 0,
+    memoryLimit: Int64 = 0,
+    container: String,
+    sharedName: String
+  ) -> Dtypes {
+    _RawTFEager.mapUnstage(
+      key: key, indices: indices, capacity: capacity, memoryLimit: memoryLimit,
+      container: container, sharedName: sharedName)
+  }
+
+  /// Op removes and returns a random (key, value) from the underlying
+  /// container.
+  ///
+  /// If the underlying container does not contain elements, the op will block
+  /// until it does.
+  @inlinable @inline(__always)
+  public static func mapUnstageNoKey<Dtypes: TensorGroup>(
+    indices: Tensor<Int32>,
+    capacity: Int64 = 0,
+    memoryLimit: Int64 = 0,
+    container: String,
+    sharedName: String
+  ) -> (key: Tensor<Int64>, values: Dtypes) {
+    _RawTFEager.mapUnstageNoKey(
+      indices: indices, capacity: capacity, memoryLimit: memoryLimit, container: container,
+      sharedName: sharedName)
+  }
+
+  /// Multiply the matrix "a" by the matrix "b".
+  ///
+  /// The inputs must be two-dimensional matrices and the inner dimension of
+  /// "a" (after being transposed if transpose_a is true) must match the
+  /// outer dimension of "b" (after being transposed if transpose_b is
+  /// true).
+  ///
+  /// *Note*: The default kernel implementation for MatMul on GPUs uses
+  /// cublas.
+  ///
+  /// - Attrs:
+  ///   - transpose_a: If true, "a" is transposed before multiplication.
+  ///   - transpose_b: If true, "b" is transposed before multiplication.
+  @inlinable @inline(__always)
+  public static func matMul<T: TensorFlowNumeric>(
+    _ a: Tensor<T>,
+    _ b: Tensor<T>,
+    transposeA: Bool = false,
+    transposeB: Bool = false
+  ) -> Tensor<T> {
+    switch commonBackend(a.handle.backend, b.handle.backend) {
+    case .XLA:
+      return _RawXLA.matMul(a, b, transposeA: transposeA, transposeB: transposeB)
+    case .TF_EAGER:
+      return _RawTFEager.matMul(a, b, transposeA: transposeA, transposeB: transposeB)
+    }
+
+  }
+
+  /// Returns the set of files matching one or more glob patterns.
+  ///
+  /// Note that this routine only supports wildcard characters in the
+  /// basename portion of the pattern, not in the directory portion.
+  /// Note also that the order of filenames returned is deterministic.
+  ///
+  /// - Parameter pattern: Shell wildcard pattern(s). Scalar or vector of type string.
+  ///
+  /// - Output filenames: A vector of matching filenames.
+  @inlinable @inline(__always)
+  public static func matchingFiles(
+    pattern: StringTensor
+  ) -> StringTensor {
+    _RawTFEager.matchingFiles(pattern: pattern)
+  }
+
+  @inlinable @inline(__always)
+  public static func matchingFilesDataset(
+    patterns: StringTensor
+  ) -> VariantHandle {
+    _RawTFEager.matchingFilesDataset(patterns: patterns)
+  }
+
+  /// Copy a tensor, setting everything outside a central band in each innermost matrix to zero.
+  ///
+  /// The `band` part is computed as follows:
+  /// Assume `input` has `k` dimensions `[I, J, K, ..., M, N]`, then the output is a
+  /// tensor with the same shape where
+  ///
+  /// `band[i, j, k, ..., m, n] = in_band(m, n) * input[i, j, k, ..., m, n]`.
+  ///
+  /// The indicator function
+  ///
+  /// `in_band(m, n) = (num_lower < 0 || (m-n) <= num_lower) &&
+  ///                  (num_upper < 0 || (n-m) <= num_upper)`.
+  ///
+  /// For example:
+  ///
+  /// ```
+  /// # if 'input' is [[ 0,  1,  2, 3]
+  ///                  [-1,  0,  1, 2]
+  ///                  [-2, -1,  0, 1]
+  ///                  [-3, -2, -1, 0]],
+  ///
+  /// tf.matrix_band_part(input, 1, -1) ==> [[ 0,  1,  2, 3]
+  ///                                        [-1,  0,  1, 2]
+  ///                                        [ 0, -1,  0, 1]
+  ///                                        [ 0,  0, -1, 0]],
+  ///
+  /// tf.matrix_band_part(input, 2, 1) ==> [[ 0,  1,  0, 0]
+  ///                                       [-1,  0,  1, 0]
+  ///                                       [-2, -1,  0, 1]
+  ///                                       [ 0, -2, -1, 0]]
+  /// ```
+  ///
+  /// Useful special cases:
+  ///
+  /// ```
+  /// tf.matrix_band_part(input, 0, -1) ==> Upper triangular part.
+  /// tf.matrix_band_part(input, -1, 0) ==> Lower triangular part.
+  /// tf.matrix_band_part(input, 0, 0) ==> Diagonal.
+  /// ```
+  ///
+  /// - Parameters:
+  ///   - input: Rank `k` tensor.
+  ///   - num_lower: 0-D tensor. Number of subdiagonals to keep. If negative, keep entire
+  ///     lower triangle.
+  ///   - num_upper: 0-D tensor. Number of superdiagonals to keep. If negative, keep
+  ///     entire upper triangle.
+  ///
+  /// - Output band: Rank `k` tensor of the same shape as input. The extracted banded tensor.
+  @inlinable @inline(__always)
+  public static func matrixBandPart<
+    T: TensorFlowScalar,
+    Tindex: TensorFlowIndex
+  >(
+    _ input: Tensor<T>,
+    numLower: Tensor<Tindex>,
+    numUpper: Tensor<Tindex>
+  ) -> Tensor<T> {
+    switch commonBackend(
+      commonBackend(input.handle.backend, numLower.handle.backend), numUpper.handle.backend)
+    {
+    case .XLA:
+      let output_device = numUpper.device
+      let input = Tensor(copying: input, to: .defaultTFEager)
+      let numLower = Tensor(copying: numLower, to: .defaultTFEager)
+      let numUpper = Tensor(copying: numUpper, to: .defaultTFEager)
+      return Tensor(
+        copying: _RawTFEager.matrixBandPart(input, numLower: numLower, numUpper: numUpper),
+        to: output_device)
+    case .TF_EAGER:
+      return _RawTFEager.matrixBandPart(input, numLower: numLower, numUpper: numUpper)
+    }
+
+  }
+
+  /// Computes the determinant of one or more square matrices.
+  ///
+  /// The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions
+  /// form square matrices. The output is a tensor containing the determinants
+  /// for all input submatrices `[..., :, :]`.
+  ///
+  /// - Parameter input: Shape is `[..., M, M]`.
+  ///
+  /// - Output output: Shape is `[...]`.
+  @inlinable @inline(__always)
+  public static func matrixDeterminant<T: FloatingPoint & TensorFlowScalar>(
+    _ input: Tensor<T>
+  ) -> Tensor<T> {
+    switch input.handle.backend {
+    case .XLA:
+      let output_device = input.device
+      let input = Tensor(copying: input, to: .defaultTFEager)
+      return Tensor(copying: _RawTFEager.matrixDeterminant(input), to: output_device)
+    case .TF_EAGER:
+      return _RawTFEager.matrixDeterminant(input)
+    }
+
+  }
+
+  /// Returns a batched diagonal tensor with given batched diagonal values.
+  ///
+  /// Given a `diagonal`, this operation returns a tensor with the `diagonal` and
+  /// everything else padded with zeros. The diagonal is computed as follows:
+  ///
+  /// Assume `diagonal` has `k` dimensions `[I, J, K, ..., N]`, then the output is a
+  /// tensor of rank `k+1` with dimensions `[I, J, K, ..., N, N]` where:
+  ///
+  /// `output[i, j, k, ..., m, n] = 1{m=n} * diagonal[i, j, k, ..., n]`.
+ /// + /// For example: + /// + /// ``` + /// # 'diagonal' is [[1, 2, 3, 4], [5, 6, 7, 8]] + /// + /// and diagonal.shape = (2, 4) + /// + /// tf.matrix_diag(diagonal) ==> [[[1, 0, 0, 0] + /// [0, 2, 0, 0] + /// [0, 0, 3, 0] + /// [0, 0, 0, 4]], + /// [[5, 0, 0, 0] + /// [0, 6, 0, 0] + /// [0, 0, 7, 0] + /// [0, 0, 0, 8]]] + /// + /// which has shape (2, 4, 4) + /// ``` + /// + /// - Parameter diagonal: Rank `k`, where `k >= 1`. + /// + /// - Output output: Rank `k+1`, with `output.shape = diagonal.shape + [diagonal.shape[-1]]`. + @inlinable @inline(__always) + public static func matrixDiag( + diagonal: Tensor + ) -> Tensor { + switch diagonal.handle.backend { + case .XLA: + let output_device = diagonal.device + let diagonal = Tensor(copying: diagonal, to: .defaultTFEager) + return Tensor(copying: _RawTFEager.matrixDiag(diagonal: diagonal), to: output_device) + case .TF_EAGER: + return _RawTFEager.matrixDiag(diagonal: diagonal) + } + + } + + /// Returns the batched diagonal part of a batched tensor. + /// + /// This operation returns a tensor with the `diagonal` part + /// of the batched `input`. The `diagonal` part is computed as follows: + /// + /// Assume `input` has `k` dimensions `[I, J, K, ..., M, N]`, then the output is a + /// tensor of rank `k - 1` with dimensions `[I, J, K, ..., min(M, N)]` where: + /// + /// `diagonal[i, j, k, ..., n] = input[i, j, k, ..., n, n]`. + /// + /// The input must be at least a matrix. + /// + /// For example: + /// + /// ``` + /// # 'input' is [[[1, 0, 0, 0] + /// [0, 2, 0, 0] + /// [0, 0, 3, 0] + /// [0, 0, 0, 4]], + /// [[5, 0, 0, 0] + /// [0, 6, 0, 0] + /// [0, 0, 7, 0] + /// [0, 0, 0, 8]]] + /// + /// and input.shape = (2, 4, 4) + /// + /// tf.matrix_diag_part(input) ==> [[1, 2, 3, 4], [5, 6, 7, 8]] + /// + /// which has shape (2, 4) + /// ``` + /// + /// - Parameter input: Rank `k` tensor where `k >= 2`. + /// + /// - Output diagonal: The extracted diagonal(s) having shape + /// `diagonal.shape = input.shape[:-2] + [min(input.shape[-2:])]`. + @inlinable @inline(__always) + public static func matrixDiagPart( + _ input: Tensor + ) -> Tensor { + switch input.handle.backend { + case .XLA: + let output_device = input.device + let input = Tensor(copying: input, to: .defaultTFEager) + return Tensor(copying: _RawTFEager.matrixDiagPart(input), to: output_device) + case .TF_EAGER: + return _RawTFEager.matrixDiagPart(input) + } + + } + + /// Returns the batched diagonal part of a batched tensor. + /// + /// Returns a tensor with the `k[0]`-th to `k[1]`-th diagonals of the batched + /// `input`. + /// + /// Assume `input` has `r` dimensions `[I, J, ..., L, M, N]`. + /// Let `max_diag_len` be the maximum length among all diagonals to be extracted, + /// `max_diag_len = min(M + min(k[1], 0), N + min(-k[0], 0))` + /// Let `num_diags` be the number of diagonals to extract, + /// `num_diags = k[1] - k[0] + 1`. + /// + /// If `num_diags == 1`, the output tensor is of rank `r - 1` with shape + /// `[I, J, ..., L, max_diag_len]` and values: + /// + /// ``` + /// diagonal[i, j, ..., l, n] + /// = input[i, j, ..., l, n+y, n+x] ; if 0 <= n+y < M and 0 <= n+x < N, + /// padding_value ; otherwise. + /// ``` + /// where `y = max(-k[1], 0)`, `x = max(k[1], 0)`. + /// + /// Otherwise, the output tensor has rank `r` with dimensions + /// `[I, J, ..., L, num_diags, max_diag_len]` with values: + /// + /// ``` + /// diagonal[i, j, ..., l, m, n] + /// = input[i, j, ..., l, n+y, n+x] ; if 0 <= n+y < M and 0 <= n+x < N, + /// padding_value ; otherwise. 
+ /// ``` + /// where `d = k[1] - m`, `y = max(-d, 0)`, and `x = max(d, 0)`. + /// + /// The input must be at least a matrix. + /// + /// For example: + /// + /// ``` + /// input = np.array([[[1, 2, 3, 4], # Input shape: (2, 3, 4) + /// [5, 6, 7, 8], + /// [9, 8, 7, 6]], + /// [[5, 4, 3, 2], + /// [1, 2, 3, 4], + /// [5, 6, 7, 8]]]) + /// + /// # A main diagonal from each batch. + /// tf.matrix_diag_part(input) ==> [[1, 6, 7], # Output shape: (2, 3) + /// [5, 2, 7]] + /// + /// # A superdiagonal from each batch. + /// tf.matrix_diag_part(input, k = 1) + /// ==> [[2, 7, 6], # Output shape: (2, 3) + /// [4, 3, 8]] + /// + /// # A tridiagonal band from each batch. + /// tf.matrix_diag_part(input, k = (-1, 1)) + /// ==> [[[2, 7, 6], # Output shape: (2, 3, 3) + /// [1, 6, 7], + /// [5, 8, 0]], + /// [[4, 3, 8], + /// [5, 2, 7], + /// [1, 6, 0]]] + /// + /// # Padding value = 9 + /// tf.matrix_diag_part(input, k = (1, 3), padding_value = 9) + /// ==> [[[4, 9, 9], # Output shape: (2, 3, 3) + /// [3, 8, 9], + /// [2, 7, 6]], + /// [[2, 9, 9], + /// [3, 4, 9], + /// [4, 3, 8]]] + /// ``` + /// + /// - Parameters: + /// - input: Rank `r` tensor where `r >= 2`. + /// - k: Diagonal offset(s). Positive value means superdiagonal, 0 refers to the main + /// diagonal, and negative value means subdiagonals. `k` can be a single integer + /// (for a single diagonal) or a pair of integers specifying the low and high ends + /// of a matrix band. `k[0]` must not be larger than `k[1]`. + /// - padding_value: The value to fill the area outside the specified diagonal band with. + /// Default is 0. + /// + /// - Output diagonal: The extracted diagonal(s). + @inlinable @inline(__always) + public static func matrixDiagPartV2( + _ input: Tensor, + k: Tensor, + paddingValue: Tensor + ) -> Tensor { + switch commonBackend( + commonBackend(input.handle.backend, k.handle.backend), paddingValue.handle.backend) + { + case .XLA: + let output_device = paddingValue.device + let input = Tensor(copying: input, to: .defaultTFEager) + let k = Tensor(copying: k, to: .defaultTFEager) + let paddingValue = Tensor(copying: paddingValue, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.matrixDiagPartV2(input, k: k, paddingValue: paddingValue), + to: output_device) + case .TF_EAGER: + return _RawTFEager.matrixDiagPartV2(input, k: k, paddingValue: paddingValue) + } + + } + + /// Returns a batched diagonal tensor with given batched diagonal values. + /// + /// Returns a tensor with the contents in `diagonal` as `k[0]`-th to `k[1]`-th + /// diagonals of a matrix, with everything else padded with `padding`. `num_rows` + /// and `num_cols` specify the dimension of the innermost matrix of the output. If + /// both are not specified, the op assumes the innermost matrix is square and infers + /// its size from `k` and the innermost dimension of `diagonal`. If only one of them + /// is specified, the op assumes the unspecified value is the smallest possible + /// based on other criteria. + /// + /// Let `diagonal` have `r` dimensions `[I, J, ..., L, M, N]`. The output tensor has + /// rank `r+1` with shape `[I, J, ..., L, M, num_rows, num_cols]` when only one + /// diagonal is given (`k` is an integer or `k[0] == k[1]`). Otherwise, it has rank + /// `r` with shape `[I, J, ..., L, num_rows, num_cols]`. + /// + /// The second innermost dimension of `diagonal` has double meaning. 
+ /// When `k` is scalar or `k[0] == k[1]`, `M` is part of the batch size + /// [I, J, ..., M], and the output tensor is: + /// + /// ``` + /// output[i, j, ..., l, m, n] + /// = diagonal[i, j, ..., l, n-max(d_upper, 0)] ; if n - m == d_upper + /// padding_value ; otherwise + /// ``` + /// + /// Otherwise, `M` is treated as the number of diagonals for the matrix in the + /// same batch (`M = k[1]-k[0]+1`), and the output tensor is: + /// + /// ``` + /// output[i, j, ..., l, m, n] + /// = diagonal[i, j, ..., l, diag_index, index_in_diag] ; if k[0] <= d <= k[1] + /// padding_value ; otherwise + /// ``` + /// where `d = n - m`, `diag_index = k[1] - d`, and `index_in_diag = n - max(d, 0)`. + /// + /// For example: + /// + /// ``` + /// # The main diagonal. + /// diagonal = np.array([[1, 2, 3, 4], # Input shape: (2, 4) + /// [5, 6, 7, 8]]) + /// tf.matrix_diag(diagonal) ==> [[[1, 0, 0, 0], # Output shape: (2, 4, 4) + /// [0, 2, 0, 0], + /// [0, 0, 3, 0], + /// [0, 0, 0, 4]], + /// [[5, 0, 0, 0], + /// [0, 6, 0, 0], + /// [0, 0, 7, 0], + /// [0, 0, 0, 8]]] + /// + /// # A superdiagonal (per batch). + /// diagonal = np.array([[1, 2, 3], # Input shape: (2, 3) + /// [4, 5, 6]]) + /// tf.matrix_diag(diagonal, k = 1) + /// ==> [[[0, 1, 0, 0], # Output shape: (2, 4, 4) + /// [0, 0, 2, 0], + /// [0, 0, 0, 3], + /// [0, 0, 0, 0]], + /// [[0, 4, 0, 0], + /// [0, 0, 5, 0], + /// [0, 0, 0, 6], + /// [0, 0, 0, 0]]] + /// + /// # A band of diagonals. + /// diagonals = np.array([[[1, 2, 3], # Input shape: (2, 2, 3) + /// [4, 5, 0]], + /// [[6, 7, 9], + /// [9, 1, 0]]]) + /// tf.matrix_diag(diagonals, k = (-1, 0)) + /// ==> [[[1, 0, 0], # Output shape: (2, 3, 3) + /// [4, 2, 0], + /// [0, 5, 3]], + /// [[6, 0, 0], + /// [9, 7, 0], + /// [0, 1, 9]]] + /// + /// # Rectangular matrix. + /// diagonal = np.array([1, 2]) # Input shape: (2) + /// tf.matrix_diag(diagonal, k = -1, num_rows = 3, num_cols = 4) + /// ==> [[0, 0, 0, 0], # Output shape: (3, 4) + /// [1, 0, 0, 0], + /// [0, 2, 0, 0]] + /// + /// # Rectangular matrix with inferred num_cols and padding_value = 9. + /// tf.matrix_diag(diagonal, k = -1, num_rows = 3, padding_value = 9) + /// ==> [[9, 9], # Output shape: (3, 2) + /// [1, 9], + /// [9, 2]] + /// ``` + /// + /// - Parameters: + /// - diagonal: Rank `r`, where `r >= 1` + /// - k: Diagonal offset(s). Positive value means superdiagonal, 0 refers to the main + /// diagonal, and negative value means subdiagonals. `k` can be a single integer + /// (for a single diagonal) or a pair of integers specifying the low and high ends + /// of a matrix band. `k[0]` must not be larger than `k[1]`. + /// - num_rows: The number of rows of the output matrix. If it is not provided, the op assumes + /// the output matrix is a square matrix and infers the matrix size from k and the + /// innermost dimension of `diagonal`. + /// - num_cols: The number of columns of the output matrix. If it is not provided, the op + /// assumes the output matrix is a square matrix and infers the matrix size from + /// k and the innermost dimension of `diagonal`. + /// - padding_value: The number to fill the area outside the specified diagonal band with. + /// Default is 0. + /// + /// - Output output: Has rank `r+1` when `k` is an integer or `k[0] == k[1]`, rank `r` otherwise. 
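+  ///
+  /// A Swift sketch of the main-diagonal case above (passing -1 for
+  /// `numRows`/`numCols` lets the op infer a square output):
+  ///
+  /// ```swift
+  /// let diagonal = Tensor<Float>([[1, 2, 3, 4], [5, 6, 7, 8]])
+  /// let out = _Raw.matrixDiagV2(
+  ///   diagonal: diagonal, k: Tensor<Int32>(0),
+  ///   numRows: Tensor<Int32>(-1), numCols: Tensor<Int32>(-1),
+  ///   paddingValue: Tensor<Float>(0))
+  /// // out.shape == [2, 4, 4]; out[0] has [1, 2, 3, 4] on its main diagonal.
+  /// ```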
+ @inlinable @inline(__always) + public static func matrixDiagV2( + diagonal: Tensor, + k: Tensor, + numRows: Tensor, + numCols: Tensor, + paddingValue: Tensor + ) -> Tensor { + switch commonBackend( + commonBackend( + commonBackend( + commonBackend(diagonal.handle.backend, k.handle.backend), numRows.handle.backend), + numCols.handle.backend), paddingValue.handle.backend) + { + case .XLA: + let output_device = paddingValue.device + let diagonal = Tensor(copying: diagonal, to: .defaultTFEager) + let k = Tensor(copying: k, to: .defaultTFEager) + let numRows = Tensor(copying: numRows, to: .defaultTFEager) + let numCols = Tensor(copying: numCols, to: .defaultTFEager) + let paddingValue = Tensor(copying: paddingValue, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.matrixDiagV2( + diagonal: diagonal, k: k, numRows: numRows, numCols: numCols, paddingValue: paddingValue + ), to: output_device) + case .TF_EAGER: + return _RawTFEager.matrixDiagV2( + diagonal: diagonal, k: k, numRows: numRows, numCols: numCols, paddingValue: paddingValue) + } + + } + + /// Deprecated, use python implementation tf.linalg.matrix_exponential. + @inlinable @inline(__always) + public static func matrixExponential( + _ input: Tensor + ) -> Tensor { + switch input.handle.backend { + case .XLA: + let output_device = input.device + let input = Tensor(copying: input, to: .defaultTFEager) + return Tensor(copying: _RawTFEager.matrixExponential(input), to: output_device) + case .TF_EAGER: + return _RawTFEager.matrixExponential(input) + } + + } + + /// Computes the inverse of one or more square invertible matrices or their + /// + /// adjoints (conjugate transposes). + /// + /// The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions + /// form square matrices. The output is a tensor of the same shape as the input + /// containing the inverse for all input submatrices `[..., :, :]`. + /// + /// The op uses LU decomposition with partial pivoting to compute the inverses. + /// + /// If a matrix is not invertible there is no guarantee what the op does. It + /// may detect the condition and raise an exception or it may simply return a + /// garbage result. + /// + /// - Parameter input: Shape is `[..., M, M]`. + /// + /// - Output output: Shape is `[..., M, M]`. + /// + /// @compatibility(numpy) + /// Equivalent to np.linalg.inv + /// @end_compatibility + @inlinable @inline(__always) + public static func matrixInverse( + _ input: Tensor, + adjoint: Bool = false + ) -> Tensor { + switch input.handle.backend { + case .XLA: + let output_device = input.device + let input = Tensor(copying: input, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.matrixInverse(input, adjoint: adjoint), to: output_device) + case .TF_EAGER: + return _RawTFEager.matrixInverse(input, adjoint: adjoint) + } + + } + + /// Computes the matrix logarithm of one or more square matrices: + /// + /// + /// \\(log(exp(A)) = A\\) + /// + /// This op is only defined for complex matrices. If A is positive-definite and + /// real, then casting to a complex matrix, taking the logarithm and casting back + /// to a real matrix will give the correct result. + /// + /// This function computes the matrix logarithm using the Schur-Parlett algorithm. + /// Details of the algorithm can be found in Section 11.6.2 of: + /// Nicholas J. Higham, Functions of Matrices: Theory and Computation, SIAM 2008. + /// ISBN 978-0-898716-46-7. + /// + /// The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions + /// form square matrices. 
+  /// The output is a tensor of the same shape as the input
+  /// containing the logarithm for all input submatrices `[..., :, :]`.
+  ///
+  /// - Parameter input: Shape is `[..., M, M]`.
+  ///
+  /// - Output output: Shape is `[..., M, M]`.
+  ///
+  /// @compatibility(scipy)
+  /// Equivalent to scipy.linalg.logm
+  /// @end_compatibility
+  @inlinable @inline(__always)
+  public static func matrixLogarithm<T: TensorFlowScalar>(
+    _ input: Tensor<T>
+  ) -> Tensor<T> {
+    switch input.handle.backend {
+    case .XLA:
+      let output_device = input.device
+      let input = Tensor(copying: input, to: .defaultTFEager)
+      return Tensor(copying: _RawTFEager.matrixLogarithm(input), to: output_device)
+    case .TF_EAGER:
+      return _RawTFEager.matrixLogarithm(input)
+    }
+
+  }
+
+  /// Returns a batched matrix tensor with new batched diagonal values.
+  ///
+  /// Given `input` and `diagonal`, this operation returns a tensor with the
+  /// same shape and values as `input`, except for the main diagonal of the
+  /// innermost matrices. These will be overwritten by the values in `diagonal`.
+  ///
+  /// The output is computed as follows:
+  ///
+  /// Assume `input` has `k+1` dimensions `[I, J, K, ..., M, N]` and `diagonal` has
+  /// `k` dimensions `[I, J, K, ..., min(M, N)]`. Then the output is a
+  /// tensor of rank `k+1` with dimensions `[I, J, K, ..., M, N]` where:
+  ///
+  /// * `output[i, j, k, ..., m, n] = diagonal[i, j, k, ..., n]` for `m == n`.
+  /// * `output[i, j, k, ..., m, n] = input[i, j, k, ..., m, n]` for `m != n`.
+  ///
+  /// - Parameters:
+  ///   - input: Rank `k+1`, where `k >= 1`.
+  ///   - diagonal: Rank `k`, where `k >= 1`.
+  ///
+  /// - Output output: Rank `k+1`, with `output.shape = input.shape`.
+  @inlinable @inline(__always)
+  public static func matrixSetDiag<T: TensorFlowScalar>(
+    _ input: Tensor<T>,
+    diagonal: Tensor<T>
+  ) -> Tensor<T> {
+    switch commonBackend(input.handle.backend, diagonal.handle.backend) {
+    case .XLA:
+      let output_device = diagonal.device
+      let input = Tensor(copying: input, to: .defaultTFEager)
+      let diagonal = Tensor(copying: diagonal, to: .defaultTFEager)
+      return Tensor(
+        copying: _RawTFEager.matrixSetDiag(input, diagonal: diagonal), to: output_device)
+    case .TF_EAGER:
+      return _RawTFEager.matrixSetDiag(input, diagonal: diagonal)
+    }
+
+  }
+
+  /// Returns a batched matrix tensor with new batched diagonal values.
+  ///
+  /// Given `input` and `diagonal`, this operation returns a tensor with the
+  /// same shape and values as `input`, except for the specified diagonals of the
+  /// innermost matrices. These will be overwritten by the values in `diagonal`.
+  ///
+  /// `input` has `r+1` dimensions `[I, J, ..., L, M, N]`. When `k` is scalar or
+  /// `k[0] == k[1]`, `diagonal` has `r` dimensions `[I, J, ..., L, max_diag_len]`.
+  /// Otherwise, it has `r+1` dimensions `[I, J, ..., L, num_diags, max_diag_len]`.
+  /// `num_diags` is the number of diagonals, `num_diags = k[1] - k[0] + 1`.
+  /// `max_diag_len` is the longest diagonal in the range `[k[0], k[1]]`,
+  /// `max_diag_len = min(M + min(k[1], 0), N + min(-k[0], 0))`
+  ///
+  /// The output is a tensor of rank `r+1` with dimensions `[I, J, ..., L, M, N]`.
+  /// If `k` is scalar or `k[0] == k[1]`:
+  ///
+  /// ```
+  /// output[i, j, ..., l, m, n]
+  ///   = diagonal[i, j, ..., l, n-max(k[1], 0)] ; if n - m == k[1]
+  ///     input[i, j, ..., l, m, n]              ; otherwise
+  /// ```
+  ///
+  /// Otherwise,
+  ///
+  /// ```
+  /// output[i, j, ..., l, m, n]
+  ///   = diagonal[i, j, ..., l, diag_index, index_in_diag] ; if k[0] <= d <= k[1]
+  ///     input[i, j, ..., l, m, n]                         ; otherwise
+  /// ```
+  /// where `d = n - m`, `diag_index = k[1] - d`, and `index_in_diag = n - max(d, 0)`.
+  ///
+  /// For example:
+  ///
+  /// ```
+  /// # The main diagonal.
+  /// input = np.array([[[7, 7, 7, 7],  # Input shape: (2, 3, 4)
+  ///                    [7, 7, 7, 7],
+  ///                    [7, 7, 7, 7]],
+  ///                   [[7, 7, 7, 7],
+  ///                    [7, 7, 7, 7],
+  ///                    [7, 7, 7, 7]]])
+  /// diagonal = np.array([[1, 2, 3],   # Diagonal shape: (2, 3)
+  ///                      [4, 5, 6]])
+  /// tf.matrix_set_diag(input, diagonal)
+  ///   ==> [[[1, 7, 7, 7],  # Output shape: (2, 3, 4)
+  ///         [7, 2, 7, 7],
+  ///         [7, 7, 3, 7]],
+  ///        [[4, 7, 7, 7],
+  ///         [7, 5, 7, 7],
+  ///         [7, 7, 6, 7]]]
+  ///
+  /// # A superdiagonal (per batch).
+  /// tf.matrix_set_diag(input, diagonal, k = 1)
+  ///   ==> [[[7, 1, 7, 7],  # Output shape: (2, 3, 4)
+  ///         [7, 7, 2, 7],
+  ///         [7, 7, 7, 3]],
+  ///        [[7, 4, 7, 7],
+  ///         [7, 7, 5, 7],
+  ///         [7, 7, 7, 6]]]
+  ///
+  /// # A band of diagonals.
+  /// diagonals = np.array([[[1, 2, 3],  # Diagonal shape: (2, 2, 3)
+  ///                        [4, 5, 0]],
+  ///                       [[6, 1, 2],
+  ///                        [3, 4, 0]]])
+  /// tf.matrix_set_diag(input, diagonals, k = (-1, 0))
+  ///   ==> [[[1, 7, 7, 7],  # Output shape: (2, 3, 4)
+  ///         [4, 2, 7, 7],
+  ///         [0, 5, 3, 7]],
+  ///        [[6, 7, 7, 7],
+  ///         [3, 1, 7, 7],
+  ///         [7, 4, 2, 7]]]
+  ///
+  /// ```
+  ///
+  /// - Parameters:
+  ///   - input: Rank `r+1`, where `r >= 1`.
+  ///   - diagonal: Rank `r` when `k` is an integer or `k[0] == k[1]`. Otherwise, it has rank `r+1`.
+  ///     `k >= 1`.
+  ///   - k: Diagonal offset(s). Positive value means superdiagonal, 0 refers to the main
+  ///     diagonal, and negative value means subdiagonals. `k` can be a single integer
+  ///     (for a single diagonal) or a pair of integers specifying the low and high ends
+  ///     of a matrix band. `k[0]` must not be larger than `k[1]`.
+  ///
+  /// - Output output: Rank `r+1`, with `output.shape = input.shape`.
+  @inlinable @inline(__always)
+  public static func matrixSetDiagV2<T: TensorFlowScalar>(
+    _ input: Tensor<T>,
+    diagonal: Tensor<T>,
+    k: Tensor<Int32>
+  ) -> Tensor<T> {
+    switch commonBackend(
+      commonBackend(input.handle.backend, diagonal.handle.backend), k.handle.backend)
+    {
+    case .XLA:
+      let output_device = k.device
+      let input = Tensor(copying: input, to: .defaultTFEager)
+      let diagonal = Tensor(copying: diagonal, to: .defaultTFEager)
+      let k = Tensor(copying: k, to: .defaultTFEager)
+      return Tensor(
+        copying: _RawTFEager.matrixSetDiagV2(input, diagonal: diagonal, k: k), to: output_device)
+    case .TF_EAGER:
+      return _RawTFEager.matrixSetDiagV2(input, diagonal: diagonal, k: k)
+    }
+
+  }
+
+  /// Solves systems of linear equations.
+  ///
+  /// `Matrix` is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions
+  /// form square matrices. `Rhs` is a tensor of shape `[..., M, K]`. The `output` is
+  /// a tensor of shape `[..., M, K]`. If `adjoint` is `False` then each output matrix
+  /// satisfies `matrix[..., :, :] * output[..., :, :] = rhs[..., :, :]`.
+  /// If `adjoint` is `True` then each output matrix satisfies
+  /// `adjoint(matrix[..., :, :]) * output[..., :, :] = rhs[..., :, :]`.
+  ///
+  /// - Parameters:
+  ///   - matrix: Shape is `[..., M, M]`.
+  ///   - rhs: Shape is `[..., M, K]`.
+  ///
+  /// - Attr adjoint: Boolean indicating whether to solve with `matrix` or its (block-wise)
+  ///   adjoint.
+  ///
+  /// - Output output: Shape is `[..., M, K]`.
+  @inlinable @inline(__always)
+  public static func matrixSolve(
+    matrix: Tensor,
+    rhs: Tensor,
+    adjoint: Bool = false
+  ) -> Tensor {
+    switch commonBackend(matrix.handle.backend, rhs.handle.backend) {
+    case .XLA:
+      let output_device = rhs.device
+      let matrix = Tensor(copying: matrix, to: .defaultTFEager)
+      let rhs = Tensor(copying: rhs, to: .defaultTFEager)
+      return Tensor(
+        copying: _RawTFEager.matrixSolve(matrix: matrix, rhs: rhs, adjoint: adjoint),
+        to: output_device)
+    case .TF_EAGER:
+      return _RawTFEager.matrixSolve(matrix: matrix, rhs: rhs, adjoint: adjoint)
+    }
+
+  }
+
+  /// Solves one or more linear least-squares problems.
+  ///
+  /// `matrix` is a tensor of shape `[..., M, N]` whose inner-most 2 dimensions
+  /// form real or complex matrices of size `[M, N]`. `Rhs` is a tensor of the same
+  /// type as `matrix` and shape `[..., M, K]`.
+  /// The output is a tensor of shape `[..., N, K]` where each output matrix solves
+  /// each of the equations
+  /// `matrix[..., :, :]` * `output[..., :, :]` = `rhs[..., :, :]`
+  /// in the least squares sense.
+  ///
+  /// We use the following notation for (complex) matrix and right-hand sides
+  /// in the batch:
+  ///
+  /// `matrix`=\\(A \in \mathbb{C}^{m \times n}\\),
+  /// `rhs`=\\(B \in \mathbb{C}^{m \times k}\\),
+  /// `output`=\\(X \in \mathbb{C}^{n \times k}\\),
+  /// `l2_regularizer`=\\(\lambda \in \mathbb{R}\\).
+  ///
+  /// If `fast` is `True`, then the solution is computed by solving the normal
+  /// equations using Cholesky decomposition. Specifically, if \\(m \ge n\\) then
+  /// \\(X = (A^H A + \lambda I)^{-1} A^H B\\), which solves the least-squares
+  /// problem \\(X = \mathrm{argmin}_{Z \in \mathbb{C}^{n \times k} } ||A Z - B||_F^2 + \lambda ||Z||_F^2\\).
+  /// If \\(m \lt n\\) then `output` is computed as
+  /// \\(X = A^H (A A^H + \lambda I)^{-1} B\\), which (for \\(\lambda = 0\\)) is the
+  /// minimum-norm solution to the under-determined linear system, i.e.
+  /// \\(X = \mathrm{argmin}_{Z \in \mathbb{C}^{n \times k} } ||Z||_F^2 \\),
+  /// subject to \\(A Z = B\\). Notice that the fast path is only numerically stable
+  /// when \\(A\\) is numerically full rank and has a condition number
+  /// \\(\mathrm{cond}(A) \lt \frac{1}{\sqrt{\epsilon_{mach} } }\\) or \\(\lambda\\) is
+  /// sufficiently large.
+  ///
+  /// If `fast` is `False` an algorithm based on the numerically robust complete
+  /// orthogonal decomposition is used. This computes the minimum-norm
+  /// least-squares solution, even when \\(A\\) is rank deficient. This path is
+  /// typically 6-7 times slower than the fast path. If `fast` is `False` then
+  /// `l2_regularizer` is ignored.
+  ///
+  /// - Parameters:
+  ///   - matrix: Shape is `[..., M, N]`.
+  ///   - rhs: Shape is `[..., M, K]`.
+  ///   - l2_regularizer: Scalar tensor.
+  ///
+  /// @compatibility(numpy)
+  /// Equivalent to np.linalg.lstsq
+  /// @end_compatibility
+  ///
+  /// - Output output: Shape is `[..., N, K]`.
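+  ///
+  /// For instance, an ordinary least-squares fit can be expressed directly with
+  /// this op. A minimal Swift sketch (the values are illustrative; per the
+  /// TensorFlow op definition, `l2Regularizer` is a scalar `Double` tensor):
+  ///
+  /// ```
+  /// // Overdetermined system: fit x in A x ≈ b, with A of shape [3, 2].
+  /// let a: Tensor<Float> = [[1, 0], [1, 1], [1, 2]]
+  /// let b: Tensor<Float> = [[0], [1], [2]]
+  /// // fast: true solves the normal equations via a Cholesky factorization.
+  /// let x = _Raw.matrixSolveLs(
+  ///   matrix: a, rhs: b, l2Regularizer: Tensor<Double>(0), fast: true)
+  /// // x has shape [2, 1]; the points lie on a line, so x is ~[[0], [1]].
+  /// ```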
+  @inlinable @inline(__always)
+  public static func matrixSolveLs(
+    matrix: Tensor,
+    rhs: Tensor,
+    l2Regularizer: Tensor,
+    fast: Bool = true
+  ) -> Tensor {
+    switch commonBackend(
+      commonBackend(matrix.handle.backend, rhs.handle.backend), l2Regularizer.handle.backend)
+    {
+    case .XLA:
+      let output_device = l2Regularizer.device
+      let matrix = Tensor(copying: matrix, to: .defaultTFEager)
+      let rhs = Tensor(copying: rhs, to: .defaultTFEager)
+      let l2Regularizer = Tensor(copying: l2Regularizer, to: .defaultTFEager)
+      return Tensor(
+        copying: _RawTFEager.matrixSolveLs(
+          matrix: matrix, rhs: rhs, l2Regularizer: l2Regularizer, fast: fast), to: output_device)
+    case .TF_EAGER:
+      return _RawTFEager.matrixSolveLs(
+        matrix: matrix, rhs: rhs, l2Regularizer: l2Regularizer, fast: fast)
+    }
+
+  }
+
+  /// Computes the matrix square root of one or more square matrices:
+  ///
+  /// matmul(sqrtm(A), sqrtm(A)) = A
+  ///
+  /// The input matrix should be invertible. If the input matrix is real, it should
+  /// have no eigenvalues which are real and negative (pairs of complex conjugate
+  /// eigenvalues are allowed).
+  ///
+  /// The matrix square root is computed by first reducing the matrix to
+  /// quasi-triangular form with the real Schur decomposition. The square root
+  /// of the quasi-triangular matrix is then computed directly. Details of
+  /// the algorithm can be found in: Nicholas J. Higham, "Computing real
+  /// square roots of a real matrix", Linear Algebra Appl., 1987.
+  ///
+  /// The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions
+  /// form square matrices. The output is a tensor of the same shape as the input
+  /// containing the matrix square root for all input submatrices `[..., :, :]`.
+  ///
+  /// - Parameter input: Shape is `[..., M, M]`.
+  ///
+  /// - Output output: Shape is `[..., M, M]`.
+  ///
+  /// @compatibility(scipy)
+  /// Equivalent to scipy.linalg.sqrtm
+  /// @end_compatibility
+  @inlinable @inline(__always)
+  public static func matrixSquareRoot(
+    _ input: Tensor
+  ) -> Tensor {
+    switch input.handle.backend {
+    case .XLA:
+      let output_device = input.device
+      let input = Tensor(copying: input, to: .defaultTFEager)
+      return Tensor(copying: _RawTFEager.matrixSquareRoot(input), to: output_device)
+    case .TF_EAGER:
+      return _RawTFEager.matrixSquareRoot(input)
+    }
+
+  }
+
+  /// Solves systems of linear equations with upper or lower triangular matrices by backsubstitution.
+  ///
+  /// `matrix` is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions form
+  /// square matrices. If `lower` is `True` then the strictly upper triangular part
+  /// of each inner-most matrix is assumed to be zero and not accessed.
+  /// If `lower` is False then the strictly lower triangular part of each inner-most
+  /// matrix is assumed to be zero and not accessed.
+  /// `rhs` is a tensor of shape `[..., M, K]`.
+  ///
+  /// The output is a tensor of shape `[..., M, K]`. If `adjoint` is
+  /// `False` then the innermost matrices in `output` satisfy matrix equations
+  /// `matrix[..., :, :] * output[..., :, :] = rhs[..., :, :]`.
+  /// If `adjoint` is `True` then the innermost matrices in `output` satisfy
+  /// matrix equations
+  /// `adjoint(matrix[..., i, k]) * output[..., k, j] = rhs[..., i, j]`.
+  ///
+  /// Note, the batch shapes for the inputs only need to broadcast.
+  ///
+  /// Example:
+  /// ```python
+  ///
+  /// a = tf.constant([[3, 0, 0, 0],
+  ///                  [2, 1, 0, 0],
+  ///                  [1, 0, 1, 0],
+  ///                  [1, 1, 1, 1]], dtype=tf.float32)
+  ///
+  /// b = tf.constant([[4],
+  ///                  [2],
+  ///                  [4],
+  ///                  [2]], dtype=tf.float32)
+  ///
+  /// x = tf.linalg.triangular_solve(a, b, lower=True)
+  /// x
+  /// # x is approximately [[1.3333334], [-0.6666667], [2.6666667], [-1.3333333]];
+  /// # the exact solution is [4/3, -2/3, 8/3, -4/3].
+  ///
+  /// # in python3 one can use `a@x`
+  /// tf.matmul(a, x)
+  /// # recovers b, [[4.], [2.], [4.], [2.]], up to floating-point rounding.
+  /// ```
+  ///
+  /// - Parameters:
+  ///   - matrix: Shape is `[..., M, M]`.
+  ///   - rhs: Shape is `[..., M, K]`.
+  ///
+  /// - Attrs:
+  ///   - lower: Boolean indicating whether the innermost matrices in `matrix` are
+  ///     lower or upper triangular.
+  ///   - adjoint: Boolean indicating whether to solve with `matrix` or its (block-wise)
+  ///     adjoint.
+  ///
+  /// @compatibility(scipy)
+  /// Equivalent to scipy.linalg.solve_triangular
+  /// @end_compatibility
+  ///
+  /// - Output output: Shape is `[..., M, K]`.
+  @inlinable @inline(__always)
+  public static func matrixTriangularSolve(
+    matrix: Tensor,
+    rhs: Tensor,
+    lower: Bool = true,
+    adjoint: Bool = false
+  ) -> Tensor {
+    switch commonBackend(matrix.handle.backend, rhs.handle.backend) {
+    case .XLA:
+      let output_device = rhs.device
+      let matrix = Tensor(copying: matrix, to: .defaultTFEager)
+      let rhs = Tensor(copying: rhs, to: .defaultTFEager)
+      return Tensor(
+        copying: _RawTFEager.matrixTriangularSolve(
+          matrix: matrix, rhs: rhs, lower: lower, adjoint: adjoint), to: output_device)
+    case .TF_EAGER:
+      return _RawTFEager.matrixTriangularSolve(
+        matrix: matrix, rhs: rhs, lower: lower, adjoint: adjoint)
+    }
+
+  }
+
+  /// Computes the maximum of elements across dimensions of a tensor.
+  ///
+  /// Reduces `input` along the dimensions given in `axis`. Unless
+  /// `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
+  /// `axis`. If `keep_dims` is true, the reduced dimensions are
+  /// retained with length 1.
+  ///
+  /// - Parameters:
+  ///   - input: The tensor to reduce.
+  ///   - reduction_indices: The dimensions to reduce. Must be in the range
+  ///     `[-rank(input), rank(input))`.
+  ///
+  /// - Attr keep_dims: If true, retain reduced dimensions with length 1.
+  ///
+  /// - Output output: The reduced tensor.
+  @inlinable @inline(__always)
+  public static func max<
+    T: TensorFlowNumeric,
+    Tidx: TensorFlowIndex
+  >(
+    _ input: Tensor,
+    reductionIndices: Tensor,
+    keepDims: Bool = false
+  ) -> Tensor {
+    switch commonBackend(input.handle.backend, reductionIndices.handle.backend) {
+    case .XLA:
+      return _RawXLA.max(input, reductionIndices: reductionIndices, keepDims: keepDims)
+    case .TF_EAGER:
+      return _RawTFEager.max(input, reductionIndices: reductionIndices, keepDims: keepDims)
+    }
+
+  }
+
+  /// Creates a dataset that overrides the maximum intra-op parallelism.
+  ///
+  /// - Parameter max_intra_op_parallelism: Identifies the maximum intra-op parallelism to use.
+  @inlinable @inline(__always)
+  public static func maxIntraOpParallelismDataset(
+    inputDataset: VariantHandle,
+    maxIntraOpParallelism: Tensor,
+    outputTypes: [TensorDataType],
+    outputShapes: [TensorShape?]
+  ) -> VariantHandle {
+    _RawTFEager.maxIntraOpParallelismDataset(
+      inputDataset: inputDataset, maxIntraOpParallelism: maxIntraOpParallelism,
+      outputTypes: outputTypes, outputShapes: outputShapes)
+  }
+
+  /// Performs max pooling on the input.
+  ///
+  /// - Parameter input: 4-D input to pool over.
+  ///
+  /// - Attrs:
+  ///   - ksize: The size of the window for each dimension of the input tensor.
+ /// - strides: The stride of the sliding window for each dimension of the + /// input tensor. + /// - padding: The type of padding algorithm to use. + /// - data_format: Specify the data format of the input and output data. With the + /// default format "NHWC", the data is stored in the order of: + /// [batch, in_height, in_width, in_channels]. + /// Alternatively, the format could be "NCHW", the data storage order of: + /// [batch, in_channels, in_height, in_width]. + /// + /// - Output output: The max pooled output tensor. + @inlinable @inline(__always) + public static func maxPool( + _ input: Tensor, + ksize: [Int32], + strides: [Int32], + padding: Padding, + dataFormat: DataFormat2 = .nhwc + ) -> Tensor { + switch input.handle.backend { + case .XLA: + let output_device = input.device + let input = Tensor(copying: input, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.maxPool( + input, ksize: ksize, strides: strides, padding: padding, dataFormat: dataFormat), + to: output_device) + case .TF_EAGER: + return _RawTFEager.maxPool( + input, ksize: ksize, strides: strides, padding: padding, dataFormat: dataFormat) + } + + } + + /// Performs 3D max pooling on the input. + /// + /// - Parameter input: Shape `[batch, depth, rows, cols, channels]` tensor to pool over. + /// + /// - Attrs: + /// - ksize: 1-D tensor of length 5. The size of the window for each dimension of + /// the input tensor. Must have `ksize[0] = ksize[4] = 1`. + /// - strides: 1-D tensor of length 5. The stride of the sliding window for each + /// dimension of `input`. Must have `strides[0] = strides[4] = 1`. + /// - padding: The type of padding algorithm to use. + /// - data_format: The data format of the input and output data. With the + /// default format "NDHWC", the data is stored in the order of: + /// [batch, in_depth, in_height, in_width, in_channels]. + /// Alternatively, the format could be "NCDHW", the data storage order is: + /// [batch, in_channels, in_depth, in_height, in_width]. + /// + /// - Output output: The max pooled output tensor. + @inlinable @inline(__always) + public static func maxPool3D( + _ input: Tensor, + ksize: [Int32], + strides: [Int32], + padding: Padding, + dataFormat: DataFormat1 = .ndhwc + ) -> Tensor { + switch input.handle.backend { + case .XLA: + return _RawXLA.maxPool3D( + input, ksize: ksize, strides: strides, padding: padding, dataFormat: dataFormat) + case .TF_EAGER: + return _RawTFEager.maxPool3D( + input, ksize: ksize, strides: strides, padding: padding, dataFormat: dataFormat) + } + + } + + /// Computes gradients of max pooling function. + /// + /// - Parameters: + /// - orig_input: The original input tensor. + /// - orig_output: The original output tensor. + /// - grad: Output backprop of shape `[batch, depth, rows, cols, channels]`. + /// + /// - Attrs: + /// - ksize: 1-D tensor of length 5. The size of the window for each dimension of + /// the input tensor. Must have `ksize[0] = ksize[4] = 1`. + /// - strides: 1-D tensor of length 5. The stride of the sliding window for each + /// dimension of `input`. Must have `strides[0] = strides[4] = 1`. + /// - padding: The type of padding algorithm to use. + /// - data_format: The data format of the input and output data. With the + /// default format "NDHWC", the data is stored in the order of: + /// [batch, in_depth, in_height, in_width, in_channels]. + /// Alternatively, the format could be "NCDHW", the data storage order is: + /// [batch, in_channels, in_depth, in_height, in_width]. 
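+  ///
+  /// A minimal Swift sketch of how the length-5 window arguments line up between
+  /// the forward and backward ops (the shapes and values are illustrative, and the
+  /// scalars are assumed to be `Float`):
+  ///
+  /// ```
+  /// let x = Tensor<Float>(ones: [1, 4, 4, 4, 1])  // [batch, depth, rows, cols, channels]
+  /// let y = _Raw.maxPool3D(
+  ///   x, ksize: [1, 2, 2, 2, 1], strides: [1, 2, 2, 2, 1], padding: .valid)
+  /// // The gradient op takes the original input, the forward output, and the
+  /// // incoming gradient, with the same window parameters:
+  /// let dx = _Raw.maxPool3DGrad(
+  ///   origInput: x, origOutput: y, grad: Tensor<Float>(ones: y.shape),
+  ///   ksize: [1, 2, 2, 2, 1], strides: [1, 2, 2, 2, 1], padding: .valid)
+  /// ```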
+ @inlinable @inline(__always) + public static func maxPool3DGrad< + T: FloatingPoint & TensorFlowScalar, + Tinput: FloatingPoint & TensorFlowScalar + >( + origInput: Tensor, + origOutput: Tensor, + grad: Tensor, + ksize: [Int32], + strides: [Int32], + padding: Padding, + dataFormat: DataFormat1 = .ndhwc + ) -> Tensor { + switch commonBackend( + commonBackend(origInput.handle.backend, origOutput.handle.backend), grad.handle.backend) + { + case .XLA: + return _RawXLA.maxPool3DGrad( + origInput: origInput, origOutput: origOutput, grad: grad, ksize: ksize, strides: strides, + padding: padding, dataFormat: dataFormat) + case .TF_EAGER: + return _RawTFEager.maxPool3DGrad( + origInput: origInput, origOutput: origOutput, grad: grad, ksize: ksize, strides: strides, + padding: padding, dataFormat: dataFormat) + } + + } + + /// Computes second-order gradients of the maxpooling function. + /// + /// - Parameters: + /// - orig_input: The original input tensor. + /// - orig_output: The original output tensor. + /// - grad: Output backprop of shape `[batch, depth, rows, cols, channels]`. + /// + /// - Attrs: + /// - ksize: 1-D tensor of length 5. The size of the window for each dimension of + /// the input tensor. Must have `ksize[0] = ksize[4] = 1`. + /// - strides: 1-D tensor of length 5. The stride of the sliding window for each + /// dimension of `input`. Must have `strides[0] = strides[4] = 1`. + /// - padding: The type of padding algorithm to use. + /// - data_format: The data format of the input and output data. With the + /// default format "NDHWC", the data is stored in the order of: + /// [batch, in_depth, in_height, in_width, in_channels]. + /// Alternatively, the format could be "NCDHW", the data storage order is: + /// [batch, in_channels, in_depth, in_height, in_width]. + /// + /// - Output output: Gradients of gradients w.r.t. the input to `max_pool`. + @inlinable @inline(__always) + public static func maxPool3DGradGrad( + origInput: Tensor, + origOutput: Tensor, + grad: Tensor, + ksize: [Int32], + strides: [Int32], + padding: Padding, + dataFormat: DataFormat1 = .ndhwc + ) -> Tensor { + switch commonBackend( + commonBackend(origInput.handle.backend, origOutput.handle.backend), grad.handle.backend) + { + case .XLA: + let output_device = grad.device + let origInput = Tensor(copying: origInput, to: .defaultTFEager) + let origOutput = Tensor(copying: origOutput, to: .defaultTFEager) + let grad = Tensor(copying: grad, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.maxPool3DGradGrad( + origInput: origInput, origOutput: origOutput, grad: grad, ksize: ksize, + strides: strides, padding: padding, dataFormat: dataFormat), to: output_device) + case .TF_EAGER: + return _RawTFEager.maxPool3DGradGrad( + origInput: origInput, origOutput: origOutput, grad: grad, ksize: ksize, strides: strides, + padding: padding, dataFormat: dataFormat) + } + + } + + /// Computes gradients of the maxpooling function. + /// + /// - Parameters: + /// - orig_input: The original input tensor. + /// - orig_output: The original output tensor. + /// - grad: 4-D. Gradients w.r.t. the output of `max_pool`. + /// + /// - Attrs: + /// - ksize: The size of the window for each dimension of the input tensor. + /// - strides: The stride of the sliding window for each dimension of the + /// input tensor. + /// - padding: The type of padding algorithm to use. + /// - data_format: Specify the data format of the input and output data. 
With the + /// default format "NHWC", the data is stored in the order of: + /// [batch, in_height, in_width, in_channels]. + /// Alternatively, the format could be "NCHW", the data storage order of: + /// [batch, in_channels, in_height, in_width]. + /// + /// - Output output: Gradients w.r.t. the input to `max_pool`. + @inlinable @inline(__always) + public static func maxPoolGrad( + origInput: Tensor, + origOutput: Tensor, + grad: Tensor, + ksize: [Int32], + strides: [Int32], + padding: Padding, + dataFormat: DataFormat = .nhwc + ) -> Tensor { + switch commonBackend( + commonBackend(origInput.handle.backend, origOutput.handle.backend), grad.handle.backend) + { + case .XLA: + let output_device = grad.device + let origInput = Tensor(copying: origInput, to: .defaultTFEager) + let origOutput = Tensor(copying: origOutput, to: .defaultTFEager) + let grad = Tensor(copying: grad, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.maxPoolGrad( + origInput: origInput, origOutput: origOutput, grad: grad, ksize: ksize, + strides: strides, padding: padding, dataFormat: dataFormat), to: output_device) + case .TF_EAGER: + return _RawTFEager.maxPoolGrad( + origInput: origInput, origOutput: origOutput, grad: grad, ksize: ksize, strides: strides, + padding: padding, dataFormat: dataFormat) + } + + } + + /// Computes second-order gradients of the maxpooling function. + /// + /// - Parameters: + /// - orig_input: The original input tensor. + /// - orig_output: The original output tensor. + /// - grad: 4-D. Gradients of gradients w.r.t. the input of `max_pool`. + /// + /// - Attrs: + /// - ksize: The size of the window for each dimension of the input tensor. + /// - strides: The stride of the sliding window for each dimension of the + /// input tensor. + /// - padding: The type of padding algorithm to use. + /// - data_format: Specify the data format of the input and output data. With the + /// default format "NHWC", the data is stored in the order of: + /// [batch, in_height, in_width, in_channels]. + /// Alternatively, the format could be "NCHW", the data storage order of: + /// [batch, in_channels, in_height, in_width]. + /// + /// - Output output: Gradients of gradients w.r.t. the input to `max_pool`. + @inlinable @inline(__always) + public static func maxPoolGradGrad( + origInput: Tensor, + origOutput: Tensor, + grad: Tensor, + ksize: [Int32], + strides: [Int32], + padding: Padding, + dataFormat: DataFormat = .nhwc + ) -> Tensor { + switch commonBackend( + commonBackend(origInput.handle.backend, origOutput.handle.backend), grad.handle.backend) + { + case .XLA: + let output_device = grad.device + let origInput = Tensor(copying: origInput, to: .defaultTFEager) + let origOutput = Tensor(copying: origOutput, to: .defaultTFEager) + let grad = Tensor(copying: grad, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.maxPoolGradGrad( + origInput: origInput, origOutput: origOutput, grad: grad, ksize: ksize, + strides: strides, padding: padding, dataFormat: dataFormat), to: output_device) + case .TF_EAGER: + return _RawTFEager.maxPoolGradGrad( + origInput: origInput, origOutput: origOutput, grad: grad, ksize: ksize, strides: strides, + padding: padding, dataFormat: dataFormat) + } + + } + + /// Computes second-order gradients of the maxpooling function. + /// + /// - Parameters: + /// - orig_input: The original input tensor. + /// - orig_output: The original output tensor. + /// - grad: 4-D. Gradients of gradients w.r.t. the input of `max_pool`. 
+ /// - ksize: The size of the window for each dimension of the input tensor. + /// - strides: The stride of the sliding window for each dimension of the + /// input tensor. + /// + /// - Attrs: + /// - padding: The type of padding algorithm to use. + /// - data_format: Specify the data format of the input and output data. With the + /// default format "NHWC", the data is stored in the order of: + /// [batch, in_height, in_width, in_channels]. + /// Alternatively, the format could be "NCHW", the data storage order of: + /// [batch, in_channels, in_height, in_width]. + /// + /// - Output output: Gradients of gradients w.r.t. the input to `max_pool`. + @inlinable @inline(__always) + public static func maxPoolGradGradV2( + origInput: Tensor, + origOutput: Tensor, + grad: Tensor, + ksize: Tensor, + strides: Tensor, + padding: Padding, + dataFormat: DataFormat = .nhwc + ) -> Tensor { + switch commonBackend( + commonBackend( + commonBackend( + commonBackend(origInput.handle.backend, origOutput.handle.backend), grad.handle.backend), + ksize.handle.backend), strides.handle.backend) + { + case .XLA: + let output_device = strides.device + let origInput = Tensor(copying: origInput, to: .defaultTFEager) + let origOutput = Tensor(copying: origOutput, to: .defaultTFEager) + let grad = Tensor(copying: grad, to: .defaultTFEager) + let ksize = Tensor(copying: ksize, to: .defaultTFEager) + let strides = Tensor(copying: strides, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.maxPoolGradGradV2( + origInput: origInput, origOutput: origOutput, grad: grad, ksize: ksize, + strides: strides, padding: padding, dataFormat: dataFormat), to: output_device) + case .TF_EAGER: + return _RawTFEager.maxPoolGradGradV2( + origInput: origInput, origOutput: origOutput, grad: grad, ksize: ksize, strides: strides, + padding: padding, dataFormat: dataFormat) + } + + } + + /// Computes second-order gradients of the maxpooling function. + /// + /// - Parameters: + /// - input: The original input. + /// - grad: 4-D with shape `[batch, height, width, channels]`. Gradients w.r.t. the + /// input of `max_pool`. + /// - argmax: The indices of the maximum values chosen for each output of `max_pool`. + /// + /// - Attrs: + /// - ksize: The size of the window for each dimension of the input tensor. + /// - strides: The stride of the sliding window for each dimension of the + /// input tensor. + /// - padding: The type of padding algorithm to use. + /// - include_batch_in_index: Whether to include batch dimension in flattened index of `argmax`. + /// + /// - Output output: Gradients of gradients w.r.t. the input of `max_pool`. 
+ @inlinable @inline(__always) + public static func maxPoolGradGradWithArgmax< + Targmax: TensorFlowIndex, + T: TensorFlowNumeric + >( + _ input: Tensor, + grad: Tensor, + argmax: Tensor, + ksize: [Int32], + strides: [Int32], + padding: Padding, + includeBatchInIndex: Bool = false + ) -> Tensor { + switch commonBackend( + commonBackend(input.handle.backend, grad.handle.backend), argmax.handle.backend) + { + case .XLA: + let output_device = argmax.device + let input = Tensor(copying: input, to: .defaultTFEager) + let grad = Tensor(copying: grad, to: .defaultTFEager) + let argmax = Tensor(copying: argmax, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.maxPoolGradGradWithArgmax( + input, grad: grad, argmax: argmax, ksize: ksize, strides: strides, padding: padding, + includeBatchInIndex: includeBatchInIndex), to: output_device) + case .TF_EAGER: + return _RawTFEager.maxPoolGradGradWithArgmax( + input, grad: grad, argmax: argmax, ksize: ksize, strides: strides, padding: padding, + includeBatchInIndex: includeBatchInIndex) + } + + } + + /// Computes gradients of the maxpooling function. + /// + /// - Parameters: + /// - orig_input: The original input tensor. + /// - orig_output: The original output tensor. + /// - grad: 4-D. Gradients w.r.t. the output of `max_pool`. + /// - ksize: The size of the window for each dimension of the input tensor. + /// - strides: The stride of the sliding window for each dimension of the + /// input tensor. + /// + /// - Attrs: + /// - padding: The type of padding algorithm to use. + /// - data_format: Specify the data format of the input and output data. With the + /// default format "NHWC", the data is stored in the order of: + /// [batch, in_height, in_width, in_channels]. + /// Alternatively, the format could be "NCHW", the data storage order of: + /// [batch, in_channels, in_height, in_width]. + /// + /// - Output output: Gradients w.r.t. the input to `max_pool`. + @inlinable @inline(__always) + public static func maxPoolGradV2( + origInput: Tensor, + origOutput: Tensor, + grad: Tensor, + ksize: Tensor, + strides: Tensor, + padding: Padding, + dataFormat: DataFormat = .nhwc + ) -> Tensor { + switch commonBackend( + commonBackend( + commonBackend( + commonBackend(origInput.handle.backend, origOutput.handle.backend), grad.handle.backend), + ksize.handle.backend), strides.handle.backend) + { + case .XLA: + return _RawXLA.maxPoolGradV2( + origInput: origInput, origOutput: origOutput, grad: grad, ksize: ksize, strides: strides, + padding: padding, dataFormat: dataFormat) + case .TF_EAGER: + return _RawTFEager.maxPoolGradV2( + origInput: origInput, origOutput: origOutput, grad: grad, ksize: ksize, strides: strides, + padding: padding, dataFormat: dataFormat) + } + + } + + /// Computes gradients of the maxpooling function. + /// + /// - Parameters: + /// - input: The original input. + /// - grad: 4-D with shape `[batch, height, width, channels]`. Gradients w.r.t. the + /// output of `max_pool`. + /// - argmax: The indices of the maximum values chosen for each output of `max_pool`. + /// + /// - Attrs: + /// - ksize: The size of the window for each dimension of the input tensor. + /// - strides: The stride of the sliding window for each dimension of the + /// input tensor. + /// - padding: The type of padding algorithm to use. + /// - include_batch_in_index: Whether to include batch dimension in flattened index of `argmax`. + /// + /// - Output output: Gradients w.r.t. the input of `max_pool`. 
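+  ///
+  /// A minimal Swift sketch (illustrative shapes; `Float` data with `Int32`
+  /// indices is one valid instantiation). The `argmax` tensor is the one produced
+  /// by `maxPoolWithArgmax` for the same window configuration:
+  ///
+  /// ```
+  /// let x = Tensor<Float>(ones: [1, 4, 4, 1])
+  /// let (y, argmax): (Tensor<Float>, Tensor<Int32>) = _Raw.maxPoolWithArgmax(
+  ///   x, ksize: [1, 2, 2, 1], strides: [1, 2, 2, 1], padding: .valid)
+  /// // Routes the incoming gradient back to the positions recorded in `argmax`.
+  /// let dx = _Raw.maxPoolGradWithArgmax(
+  ///   x, grad: Tensor<Float>(ones: y.shape), argmax: argmax,
+  ///   ksize: [1, 2, 2, 1], strides: [1, 2, 2, 1], padding: .valid)
+  /// ```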
+ @inlinable @inline(__always) + public static func maxPoolGradWithArgmax< + Targmax: TensorFlowIndex, + T: TensorFlowNumeric + >( + _ input: Tensor, + grad: Tensor, + argmax: Tensor, + ksize: [Int32], + strides: [Int32], + padding: Padding, + includeBatchInIndex: Bool = false + ) -> Tensor { + switch commonBackend( + commonBackend(input.handle.backend, grad.handle.backend), argmax.handle.backend) + { + case .XLA: + let output_device = argmax.device + let input = Tensor(copying: input, to: .defaultTFEager) + let grad = Tensor(copying: grad, to: .defaultTFEager) + let argmax = Tensor(copying: argmax, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.maxPoolGradWithArgmax( + input, grad: grad, argmax: argmax, ksize: ksize, strides: strides, padding: padding, + includeBatchInIndex: includeBatchInIndex), to: output_device) + case .TF_EAGER: + return _RawTFEager.maxPoolGradWithArgmax( + input, grad: grad, argmax: argmax, ksize: ksize, strides: strides, padding: padding, + includeBatchInIndex: includeBatchInIndex) + } + + } + + /// Performs max pooling on the input. + /// + /// - Parameters: + /// - input: 4-D input to pool over. + /// - ksize: The size of the window for each dimension of the input tensor. + /// - strides: The stride of the sliding window for each dimension of the + /// input tensor. + /// + /// - Attrs: + /// - padding: The type of padding algorithm to use. + /// - data_format: Specify the data format of the input and output data. With the + /// default format "NHWC", the data is stored in the order of: + /// [batch, in_height, in_width, in_channels]. + /// Alternatively, the format could be "NCHW", the data storage order of: + /// [batch, in_channels, in_height, in_width]. + /// + /// - Output output: The max pooled output tensor. + @inlinable @inline(__always) + public static func maxPoolV2( + _ input: Tensor, + ksize: Tensor, + strides: Tensor, + padding: Padding, + dataFormat: DataFormat2 = .nhwc + ) -> Tensor { + switch commonBackend( + commonBackend(input.handle.backend, ksize.handle.backend), strides.handle.backend) + { + case .XLA: + return _RawXLA.maxPoolV2( + input, ksize: ksize, strides: strides, padding: padding, dataFormat: dataFormat) + case .TF_EAGER: + return _RawTFEager.maxPoolV2( + input, ksize: ksize, strides: strides, padding: padding, dataFormat: dataFormat) + } + + } + + /// Performs max pooling on the input and outputs both max values and indices. + /// + /// The indices in `argmax` are flattened, so that a maximum value at position + /// `[b, y, x, c]` becomes flattened index: + /// `(y * width + x) * channels + c` if `include_batch_in_index` is False; + /// `((b * height + y) * width + x) * channels + c` if `include_batch_in_index` is True. + /// + /// The indices returned are always in `[0, height) x [0, width)` before flattening, + /// even if padding is involved and the mathematically correct answer is outside + /// (either negative or too large). This is a bug, but fixing it is difficult to do + /// in a safe backwards compatible way, especially due to flattening. + /// + /// - Parameter input: 4-D with shape `[batch, height, width, channels]`. Input to pool over. + /// + /// - Attrs: + /// - ksize: The size of the window for each dimension of the input tensor. + /// - strides: The stride of the sliding window for each dimension of the + /// input tensor. + /// - padding: The type of padding algorithm to use. + /// - include_batch_in_index: Whether to include batch dimension in flattened index of `argmax`. 
+ /// + /// - Outputs: + /// - output: The max pooled output tensor. + /// - argmax: 4-D. The flattened indices of the max values chosen for each output. + @inlinable @inline(__always) + public static func maxPoolWithArgmax< + Targmax: TensorFlowIndex, + T: TensorFlowNumeric + >( + _ input: Tensor, + ksize: [Int32], + strides: [Int32], + padding: Padding, + includeBatchInIndex: Bool = false + ) -> (output: Tensor, argmax: Tensor) { + _RawTFEager.maxPoolWithArgmax( + input, ksize: ksize, strides: strides, padding: padding, + includeBatchInIndex: includeBatchInIndex) + } + + /// Returns the max of x and y (i.e. x > y ? x : y) element-wise. + /// + /// *NOTE*: `Maximum` supports broadcasting. More about broadcasting + /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + @inlinable @inline(__always) + public static func maximum( + _ x: Tensor, + _ y: Tensor + ) -> Tensor { + switch commonBackend(x.handle.backend, y.handle.backend) { + case .XLA: + return _RawXLA.maximum(x, y) + case .TF_EAGER: + return _RawTFEager.maximum(x, y) + } + + } + + /// Computes the mean of elements across dimensions of a tensor. + /// + /// Reduces `input` along the dimensions given in `axis`. Unless + /// `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in + /// `axis`. If `keep_dims` is true, the reduced dimensions are + /// retained with length 1. + /// + /// - Parameters: + /// - input: The tensor to reduce. + /// - reduction_indices: The dimensions to reduce. Must be in the range + /// `[-rank(input), rank(input))`. + /// + /// - Attr keep_dims: If true, retain reduced dimensions with length 1. + /// + /// - Output output: The reduced tensor. + @inlinable @inline(__always) + public static func mean< + T: TensorFlowNumeric, + Tidx: TensorFlowIndex + >( + _ input: Tensor, + reductionIndices: Tensor, + keepDims: Bool = false + ) -> Tensor { + switch commonBackend(input.handle.backend, reductionIndices.handle.backend) { + case .XLA: + return _RawXLA.mean(input, reductionIndices: reductionIndices, keepDims: keepDims) + case .TF_EAGER: + return _RawTFEager.mean(input, reductionIndices: reductionIndices, keepDims: keepDims) + } + + } + + /// Forwards the value of an available tensor from `inputs` to `output`. + /// + /// `Merge` waits for at least one of the tensors in `inputs` to become available. + /// It is usually combined with `Switch` to implement branching. + /// + /// `Merge` forwards the first tensor to become available to `output`, and sets + /// `value_index` to its index in `inputs`. + /// + /// - Parameter inputs: The input tensors, exactly one of which will become available. + /// + /// - Outputs: + /// - output: Will be set to the available input tensor. + /// - value_index: The index of the chosen input tensor in `inputs`. + @inlinable @inline(__always) + public static func merge( + inputs: [Tensor] + ) -> (output: Tensor, valueIndex: Tensor) { + _RawTFEager.merge(inputs: inputs) + } + + /// Merges summaries. + /// + /// This op creates a + /// [`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto) + /// protocol buffer that contains the union of all the values in the input + /// summaries. + /// + /// When the Op is run, it reports an `InvalidArgument` error if multiple values + /// in the summaries to merge use the same tag. + /// + /// - Parameter inputs: Can be of any shape. Each must contain serialized `Summary` protocol + /// buffers. + /// + /// - Output summary: Scalar. Serialized `Summary` protocol buffer. 
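+  ///
+  /// A minimal Swift sketch (illustrative; it assumes `s1` and `s2` are already
+  /// string tensors holding serialized `Summary` protos with distinct tags, e.g.
+  /// produced by summary-writing ops):
+  ///
+  /// ```
+  /// let merged = _Raw.mergeSummary(inputs: [s1, s2])
+  /// // `merged` is a scalar string tensor containing the union of both summaries.
+  /// ```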
+  @inlinable @inline(__always)
+  public static func mergeSummary(
+    inputs: [StringTensor]
+  ) -> StringTensor {
+    _RawTFEager.mergeSummary(inputs: inputs)
+  }
+
+  /// V2 format specific: merges the metadata files of sharded checkpoints. The
+  ///
+  /// result is one logical checkpoint, with one physical metadata file and renamed
+  /// data files.
+  ///
+  /// Intended for "grouping" multiple checkpoints in a sharded checkpoint setup.
+  ///
+  /// If delete_old_dirs is true, attempts to delete recursively the dirname of each
+  /// path in the input checkpoint_prefixes. This is useful when those paths are
+  /// non-user-facing temporary locations.
+  ///
+  /// - Parameters:
+  ///   - checkpoint_prefixes: prefixes of V2 checkpoints to merge.
+  ///   - destination_prefix: scalar. The desired final prefix. Allowed to be the same
+  ///     as one of the checkpoint_prefixes.
+  ///
+  /// - Attr delete_old_dirs: see above.
+  @inlinable @inline(__always)
+  public static func mergeV2Checkpoints(
+    checkpointPrefixes: StringTensor,
+    destinationPrefix: StringTensor,
+    deleteOldDirs: Bool = true
+  ) {
+    _RawTFEager.mergeV2Checkpoints(
+      checkpointPrefixes: checkpointPrefixes, destinationPrefix: destinationPrefix,
+      deleteOldDirs: deleteOldDirs)
+  }
+
+  /// Transforms a spectrogram into a form that's useful for speech recognition.
+  ///
+  /// Mel Frequency Cepstral Coefficients are a way of representing audio data that's
+  /// been effective as an input feature for machine learning. They are created by
+  /// taking the spectrum of a spectrogram (a 'cepstrum'), and discarding some of the
+  /// higher frequencies that are less significant to the human ear. They have a long
+  /// history in the speech recognition world, and https://en.wikipedia.org/wiki/Mel-frequency_cepstrum
+  /// is a good resource to learn more.
+  ///
+  /// - Parameters:
+  ///   - spectrogram: Typically produced by the Spectrogram op, with magnitude_squared
+  ///     set to true.
+  ///   - sample_rate: How many samples per second the source audio used.
+  ///
+  /// - Attrs:
+  ///   - upper_frequency_limit: The highest frequency to use when calculating the
+  ///     cepstrum.
+  ///   - lower_frequency_limit: The lowest frequency to use when calculating the
+  ///     cepstrum.
+  ///   - filterbank_channel_count: Resolution of the Mel bank used internally.
+  ///   - dct_coefficient_count: How many output channels to produce per time slice.
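+  ///
+  /// A minimal Swift sketch (the sizes are illustrative; a real spectrogram would
+  /// typically come from the Spectrogram op with magnitude_squared set to true):
+  ///
+  /// ```
+  /// // One audio channel, 10 time slices, 129 frequency bins.
+  /// let spectrogram = Tensor<Float>(ones: [1, 10, 129])
+  /// let cepstra = _Raw.mfcc(
+  ///   spectrogram: spectrogram, sampleRate: Tensor<Int32>(16000),
+  ///   upperFrequencyLimit: 4000, lowerFrequencyLimit: 20,
+  ///   filterbankChannelCount: 40, dctCoefficientCount: 13)
+  /// // `cepstra` carries dct_coefficient_count (13) output channels per time slice.
+  /// ```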
+  @inlinable @inline(__always)
+  public static func mfcc(
+    spectrogram: Tensor,
+    sampleRate: Tensor,
+    upperFrequencyLimit: Double = 4000,
+    lowerFrequencyLimit: Double = 20,
+    filterbankChannelCount: Int64 = 40,
+    dctCoefficientCount: Int64 = 13
+  ) -> Tensor {
+    switch commonBackend(spectrogram.handle.backend, sampleRate.handle.backend) {
+    case .XLA:
+      let output_device = sampleRate.device
+      let spectrogram = Tensor(copying: spectrogram, to: .defaultTFEager)
+      let sampleRate = Tensor(copying: sampleRate, to: .defaultTFEager)
+      return Tensor(
+        copying: _RawTFEager.mfcc(
+          spectrogram: spectrogram, sampleRate: sampleRate,
+          upperFrequencyLimit: upperFrequencyLimit, lowerFrequencyLimit: lowerFrequencyLimit,
+          filterbankChannelCount: filterbankChannelCount, dctCoefficientCount: dctCoefficientCount
+        ), to: output_device)
+    case .TF_EAGER:
+      return _RawTFEager.mfcc(
+        spectrogram: spectrogram, sampleRate: sampleRate,
+        upperFrequencyLimit: upperFrequencyLimit, lowerFrequencyLimit: lowerFrequencyLimit,
+        filterbankChannelCount: filterbankChannelCount, dctCoefficientCount: dctCoefficientCount)
+    }
+
+  }
+
+  /// Computes the minimum of elements across dimensions of a tensor.
+  ///
+  /// Reduces `input` along the dimensions given in `axis`. Unless
+  /// `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
+  /// `axis`. If `keep_dims` is true, the reduced dimensions are
+  /// retained with length 1.
+  ///
+  /// - Parameters:
+  ///   - input: The tensor to reduce.
+  ///   - reduction_indices: The dimensions to reduce. Must be in the range
+  ///     `[-rank(input), rank(input))`.
+  ///
+  /// - Attr keep_dims: If true, retain reduced dimensions with length 1.
+  ///
+  /// - Output output: The reduced tensor.
+  @inlinable @inline(__always)
+  public static func min<
+    T: TensorFlowNumeric,
+    Tidx: TensorFlowIndex
+  >(
+    _ input: Tensor,
+    reductionIndices: Tensor,
+    keepDims: Bool = false
+  ) -> Tensor {
+    switch commonBackend(input.handle.backend, reductionIndices.handle.backend) {
+    case .XLA:
+      return _RawXLA.min(input, reductionIndices: reductionIndices, keepDims: keepDims)
+    case .TF_EAGER:
+      return _RawTFEager.min(input, reductionIndices: reductionIndices, keepDims: keepDims)
+    }
+
+  }
+
+  /// Returns the min of x and y (i.e. x < y ? x : y) element-wise.
+  ///
+  /// *NOTE*: `Minimum` supports broadcasting. More about broadcasting
+  /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
+  @inlinable @inline(__always)
+  public static func minimum(
+    _ x: Tensor,
+    _ y: Tensor
+  ) -> Tensor {
+    switch commonBackend(x.handle.backend, y.handle.backend) {
+    case .XLA:
+      return _RawXLA.minimum(x, y)
+    case .TF_EAGER:
+      return _RawTFEager.minimum(x, y)
+    }
+
+  }
+
+  /// Pads a tensor with mirrored values.
+  ///
+  /// This operation pads `input` with mirrored values according to the `paddings`
+  /// you specify. `paddings` is an integer tensor with shape `[n, 2]`, where n is
+  /// the rank of `input`. For each dimension D of `input`, `paddings[D, 0]` indicates
+  /// how many values to add before the contents of `input` in that dimension, and
+  /// `paddings[D, 1]` indicates how many values to add after the contents of `input`
+  /// in that dimension. Both `paddings[D, 0]` and `paddings[D, 1]` must be no greater
+  /// than `input.dim_size(D)` (or `input.dim_size(D) - 1`) if `copy_border` is true
+  /// (if false, respectively).
+  ///
+  /// The padded size of each dimension D of the output is:
+  ///
+  /// `paddings(D, 0) + input.dim_size(D) + paddings(D, 1)`
+  ///
+  /// For example:
+  ///
+  /// ```
+  /// # 't' is [[1, 2, 3], [4, 5, 6]].
+  /// # 'paddings' is [[1, 1], [2, 2]].
+  /// # 'mode' is SYMMETRIC.
+  /// # rank of 't' is 2.
+  /// pad(t, paddings) ==> [[2, 1, 1, 2, 3, 3, 2]
+  ///                       [2, 1, 1, 2, 3, 3, 2]
+  ///                       [5, 4, 4, 5, 6, 6, 5]
+  ///                       [5, 4, 4, 5, 6, 6, 5]]
+  /// ```
+  ///
+  /// - Parameters:
+  ///   - input: The input tensor to be padded.
+  ///   - paddings: A two-column matrix specifying the padding sizes. The number of
+  ///     rows must be the same as the rank of `input`.
+  ///
+  /// - Attr mode: Either `REFLECT` or `SYMMETRIC`. In reflect mode the padded regions
+  ///   do not include the borders, while in symmetric mode the padded regions
+  ///   do include the borders. For example, if `input` is `[1, 2, 3]` and `paddings`
+  ///   is `[0, 2]`, then the output is `[1, 2, 3, 2, 1]` in reflect mode, and
+  ///   it is `[1, 2, 3, 3, 2]` in symmetric mode.
+  ///
+  /// - Output output: The padded tensor.
+  @inlinable @inline(__always)
+  public static func mirrorPad<
+    T: TensorFlowScalar,
+    Tpaddings: TensorFlowIndex
+  >(
+    _ input: Tensor,
+    paddings: Tensor,
+    mode: Mode1
+  ) -> Tensor {
+    switch commonBackend(input.handle.backend, paddings.handle.backend) {
+    case .XLA:
+      return _RawXLA.mirrorPad(input, paddings: paddings, mode: mode)
+    case .TF_EAGER:
+      return _RawTFEager.mirrorPad(input, paddings: paddings, mode: mode)
+    }
+
+  }
+
+  /// Gradient op for `MirrorPad` op. This op folds a mirror-padded tensor.
+  ///
+  /// This operation folds the padded areas of `input` by `MirrorPad` according to the
+  /// `paddings` you specify. `paddings` must be the same as `paddings` argument
+  /// given to the corresponding `MirrorPad` op.
+  ///
+  /// The folded size of each dimension D of the output is:
+  ///
+  /// `input.dim_size(D) - paddings(D, 0) - paddings(D, 1)`
+  ///
+  /// For example:
+  ///
+  /// ```
+  /// # 't' is [[1, 2, 3], [4, 5, 6], [7, 8, 9]].
+  /// # 'paddings' is [[0, 1], [0, 1]].
+  /// # 'mode' is SYMMETRIC.
+  /// # rank of 't' is 2.
+  /// pad(t, paddings) ==> [[ 1,  5]
+  ///                       [11, 28]]
+  /// ```
+  ///
+  /// - Parameters:
+  ///   - input: The input tensor to be folded.
+  ///   - paddings: A two-column matrix specifying the padding sizes. The number of
+  ///     rows must be the same as the rank of `input`.
+  ///
+  /// - Attr mode: The mode used in the `MirrorPad` op.
+  ///
+  /// - Output output: The folded tensor.
+  @inlinable @inline(__always)
+  public static func mirrorPadGrad<
+    T: TensorFlowScalar,
+    Tpaddings: TensorFlowIndex
+  >(
+    _ input: Tensor,
+    paddings: Tensor,
+    mode: Mode1
+  ) -> Tensor {
+    switch commonBackend(input.handle.backend, paddings.handle.backend) {
+    case .XLA:
+      return _RawXLA.mirrorPadGrad(input, paddings: paddings, mode: mode)
+    case .TF_EAGER:
+      return _RawTFEager.mirrorPadGrad(input, paddings: paddings, mode: mode)
+    }
+
+  }
+
+  @inlinable @inline(__always)
+  public static func mixedStruct(
+    nA: Int64
+  ) -> (a: [Tensor], b: Tensor) {
+    _RawTFEager.mixedStruct(nA: nA)
+  }
+
+  /// Wraps an arbitrary MLIR computation expressed as a module with a main() function.
+  ///
+  /// This operation does not have an associated kernel and is not intended to be
+  /// executed in a regular TensorFlow session. Instead it is intended to be used for
+  /// testing or for special cases where a user intends to pass custom MLIR computation
+  /// through a TensorFlow graph with the intent of having custom tooling processing
+  /// it downstream (when targeting a different environment, like TensorFlow lite for
+  /// example).
+  /// The MLIR module is expected to have a main() function that will be used as an
+  /// entry point. The inputs to the operations will be passed as argument to the
+  /// main() function and the returned values of the main function mapped to the
+  /// outputs.
+  /// Example usage:
+  ///
+  /// ```
+  /// import tensorflow as tf
+  /// from tensorflow.compiler.mlir.tensorflow.gen_mlir_passthrough_op import mlir_passthrough_op
+  ///
+  /// mlir_module = '''python
+  /// func @main(%arg0 : tensor<10xf32>, %arg1 : tensor<10xf32>) -> tensor<10x10xf32> {
+  ///   %ret = "magic.op"(%arg0, %arg1) : (tensor<10xf32>, tensor<10xf32>) -> tensor<10x10xf32>
+  ///   return %ret : tensor<10x10xf32>
+  /// }
+  /// '''
+  ///
+  /// @tf.function
+  /// def foo(x, y):
+  ///   return mlir_passthrough_op([x, y], mlir_module, Toutputs=[tf.float32])
+  ///
+  /// graph_def = foo.get_concrete_function(tf.TensorSpec([10], tf.float32), tf.TensorSpec([10], tf.float32)).graph.as_graph_def()
+  /// ```
+  @inlinable @inline(__always)
+  public static func mlirPassthroughOp<
+    Tinputs: TensorArrayProtocol,
+    Toutputs: TensorGroup
+  >(
+    inputs: Tinputs,
+    mlirModule: String
+  ) -> Toutputs {
+    _RawTFEager.mlirPassthroughOp(inputs: inputs, mlirModule: mlirModule)
+  }
+
+  /// Returns element-wise remainder of division. This emulates C semantics in that
+  ///
+  /// the result here is consistent with a truncating divide. E.g.
+  /// `tf.truncatediv(x, y) * y + truncate_mod(x, y) = x`.
+  ///
+  /// *NOTE*: `Mod` supports broadcasting. More about broadcasting
+  /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
+  @inlinable @inline(__always)
+  public static func mod(
+    _ x: Tensor,
+    _ y: Tensor
+  ) -> Tensor {
+    switch commonBackend(x.handle.backend, y.handle.backend) {
+    case .XLA:
+      return _RawXLA.mod(x, y)
+    case .TF_EAGER:
+      return _RawTFEager.mod(x, y)
+    }
+
+  }
+
+  /// Identity transformation that models performance.
+  ///
+  /// - Parameter input_dataset: A variant tensor representing the input dataset.
+  @inlinable @inline(__always)
+  public static func modelDataset(
+    inputDataset: VariantHandle,
+    algorithm: Int64 = 0,
+    cpuBudget: Int64 = 0,
+    outputTypes: [TensorDataType],
+    outputShapes: [TensorShape?]
+  ) -> VariantHandle {
+    _RawTFEager.modelDataset(
+      inputDataset: inputDataset, algorithm: algorithm, cpuBudget: cpuBudget,
+      outputTypes: outputTypes, outputShapes: outputShapes)
+  }
+
+  /// Returns x * y element-wise.
+  ///
+  /// *NOTE*: `Multiply` supports broadcasting. More about broadcasting
+  /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
+  @inlinable @inline(__always)
+  public static func mul(
+    _ x: Tensor,
+    _ y: Tensor
+  ) -> Tensor {
+    switch commonBackend(x.handle.backend, y.handle.backend) {
+    case .XLA:
+      return _RawXLA.mul(x, y)
+    case .TF_EAGER:
+      return _RawTFEager.mul(x, y)
+    }
+
+  }
+
+  /// Returns x * y element-wise. Returns zero if y is zero, even if x is infinite or NaN.
+  ///
+  /// *NOTE*: `MulNoNan` supports broadcasting.
More about broadcasting + /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + @inlinable @inline(__always) + public static func mulNoNan( + _ x: Tensor, + _ y: Tensor + ) -> Tensor { + switch commonBackend(x.handle.backend, y.handle.backend) { + case .XLA: + let output_device = y.device + let x = Tensor(copying: x, to: .defaultTFEager) + let y = Tensor(copying: y, to: .defaultTFEager) + return Tensor(copying: _RawTFEager.mulNoNan(x, y), to: output_device) + case .TF_EAGER: + return _RawTFEager.mulNoNan(x, y) + } + + } + + /// Creates a MultiDeviceIterator resource. + /// + /// - Attrs: + /// - devices: A list of devices the iterator works across. + /// - shared_name: If non-empty, this resource will be shared under the given name + /// across multiple sessions. + /// - container: If non-empty, this resource is placed in the given container. + /// Otherwise, a default container is used. + /// - output_types: The type list for the return values. + /// - output_shapes: The list of shapes being produced. + /// + /// - Output handle: Handle to the resource created. + @inlinable @inline(__always) + public static func multiDeviceIterator( + devices: [String], + sharedName: String, + container: String, + outputTypes: [TensorDataType], + outputShapes: [TensorShape?] + ) -> ResourceHandle { + _RawTFEager.multiDeviceIterator( + devices: devices, sharedName: sharedName, container: container, outputTypes: outputTypes, + outputShapes: outputShapes) + } + + /// Generates a MultiDeviceIterator resource from its provided string handle. + /// + /// - Parameter string_handle: String representing the resource. + /// + /// - Attrs: + /// - output_types: The type list for the return values. + /// - output_shapes: The list of shapes being produced. + /// + /// - Output multi_device_iterator: A MultiDeviceIterator resource. + @inlinable @inline(__always) + public static func multiDeviceIteratorFromStringHandle( + stringHandle: StringTensor, + outputTypes: [TensorDataType], + outputShapes: [TensorShape?] + ) -> ResourceHandle { + _RawTFEager.multiDeviceIteratorFromStringHandle( + stringHandle: stringHandle, outputTypes: outputTypes, outputShapes: outputShapes) + } + + /// Gets next element for the provided shard number. + /// + /// - Parameters: + /// - multi_device_iterator: A MultiDeviceIterator resource. + /// - shard_num: Integer representing which shard to fetch data for. + /// - incarnation_id: Which incarnation of the MultiDeviceIterator is running. + /// + /// - Attrs: + /// - output_types: The type list for the return values. + /// - output_shapes: The list of shapes being produced. + /// + /// - Output components: Result of the get_next on the dataset. + @inlinable @inline(__always) + public static func multiDeviceIteratorGetNextFromShard( + multiDeviceIterator: ResourceHandle, + shardNum: Tensor, + incarnationId: Tensor, + outputShapes: [TensorShape?] + ) -> OutputTypes { + _RawTFEager.multiDeviceIteratorGetNextFromShard( + multiDeviceIterator: multiDeviceIterator, shardNum: shardNum, incarnationId: incarnationId, + outputShapes: outputShapes) + } + + /// Initializes the multi device iterator with the given dataset. + /// + /// - Parameters: + /// - dataset: Dataset to be iterated upon. + /// - multi_device_iterator: A MultiDeviceIteratorResource. + /// - max_buffer_size: The maximum size of the host side per device buffer to keep. + /// + /// - Output incarnation_id: An int64 indicating which incarnation of the MultiDeviceIterator + /// is running. 
+ @inlinable @inline(__always) + public static func multiDeviceIteratorInit( + dataset: VariantHandle, + multiDeviceIterator: ResourceHandle, + maxBufferSize: Tensor + ) -> Tensor { + switch maxBufferSize.handle.backend { + case .XLA: + let output_device = maxBufferSize.device + let maxBufferSize = Tensor(copying: maxBufferSize, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.multiDeviceIteratorInit( + dataset: dataset, multiDeviceIterator: multiDeviceIterator, maxBufferSize: maxBufferSize + ), to: output_device) + case .TF_EAGER: + return _RawTFEager.multiDeviceIteratorInit( + dataset: dataset, multiDeviceIterator: multiDeviceIterator, maxBufferSize: maxBufferSize) + } + + } + + /// Produces a string handle for the given MultiDeviceIterator. + /// + /// - Parameter multi_device_iterator: A MultiDeviceIterator resource. + /// + /// - Output string_handle: A string representing the resource. + @inlinable @inline(__always) + public static func multiDeviceIteratorToStringHandle( + multiDeviceIterator: ResourceHandle + ) -> StringTensor { + _RawTFEager.multiDeviceIteratorToStringHandle(multiDeviceIterator: multiDeviceIterator) + } + + /// Draws samples from a multinomial distribution. + /// + /// - Parameters: + /// - logits: 2-D Tensor with shape `[batch_size, num_classes]`. Each slice `[i, :]` + /// represents the unnormalized log probabilities for all classes. + /// - num_samples: 0-D. Number of independent samples to draw for each row slice. + /// + /// - Attrs: + /// - seed: If either seed or seed2 is set to be non-zero, the internal random number + /// generator is seeded by the given seed. Otherwise, a random seed is used. + /// - seed2: A second seed to avoid seed collision. + /// + /// - Output output: 2-D Tensor with shape `[batch_size, num_samples]`. Each slice `[i, :]` + /// contains the drawn class labels with range `[0, num_classes)`. + @inlinable @inline(__always) + public static func multinomial< + T: TensorFlowNumeric, + OutputDtype: TensorFlowIndex + >( + logits: Tensor, + numSamples: Tensor, + seed: Int64 = 0, + seed2: Int64 = 0 + ) -> Tensor { + switch commonBackend(logits.handle.backend, numSamples.handle.backend) { + case .XLA: + let output_device = numSamples.device + let logits = Tensor(copying: logits, to: .defaultTFEager) + let numSamples = Tensor(copying: numSamples, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.multinomial( + logits: logits, numSamples: numSamples, seed: seed, seed2: seed2), to: output_device) + case .TF_EAGER: + return _RawTFEager.multinomial( + logits: logits, numSamples: numSamples, seed: seed, seed2: seed2) + } + + } + + /// Creates an empty hash table that uses tensors as the backing store. + /// + /// It uses "open addressing" with quadratic reprobing to resolve + /// collisions. + /// + /// This op creates a mutable hash table, specifying the type of its keys and + /// values. Each value must be a scalar. Data can be inserted into the table using + /// the insert operations. It does not support the initialization operation. + /// + /// - Parameter empty_key: The key used to represent empty key buckets internally. Must not + /// be used in insert or lookup operations. + /// + /// - Attrs: + /// - container: If non-empty, this table is placed in the given container. + /// Otherwise, a default container is used. + /// - shared_name: If non-empty, this table is shared under the given name across + /// multiple sessions. + /// - key_dtype: Type of the table keys. + /// - value_dtype: Type of the table values. 
+  ///   - value_shape: The shape of each value.
+  ///   - initial_num_buckets: The initial number of hash table buckets. Must be a power
+  ///     of 2.
+  ///   - max_load_factor: The maximum ratio between number of entries and number of
+  ///     buckets before growing the table. Must be between 0 and 1.
+  ///
+  /// - Output table_handle: Handle to a table.
+  @inlinable @inline(__always)
+  public static func mutableDenseHashTableV2(
+    emptyKey: Tensor,
+    deletedKey: Tensor,
+    container: String,
+    sharedName: String,
+    useNodeNameSharing: Bool = false,
+    valueDtype: TensorDataType,
+    valueShape: TensorShape?,
+    initialNumBuckets: Int64 = 131072,
+    maxLoadFactor: Double = 0.8
+  ) -> ResourceHandle {
+    _RawTFEager.mutableDenseHashTableV2(
+      emptyKey: emptyKey, deletedKey: deletedKey, container: container, sharedName: sharedName,
+      useNodeNameSharing: useNodeNameSharing, valueDtype: valueDtype, valueShape: valueShape,
+      initialNumBuckets: initialNumBuckets, maxLoadFactor: maxLoadFactor)
+  }
+
+  /// Creates an empty hash table.
+  ///
+  /// This op creates a mutable hash table, specifying the type of its keys and
+  /// values. Each value must be a vector. Data can be inserted into the table using
+  /// the insert operations. It does not support the initialization operation.
+  ///
+  /// - Attrs:
+  ///   - container: If non-empty, this table is placed in the given container.
+  ///     Otherwise, a default container is used.
+  ///   - shared_name: If non-empty, this table is shared under the given name across
+  ///     multiple sessions.
+  ///   - key_dtype: Type of the table keys.
+  ///   - value_dtype: Type of the table values.
+  ///
+  /// - Output table_handle: Handle to a table.
+  @inlinable @inline(__always)
+  public static func mutableHashTableOfTensorsV2(
+    container: String,
+    sharedName: String,
+    useNodeNameSharing: Bool = false,
+    keyDtype: TensorDataType,
+    valueDtype: TensorDataType,
+    valueShape: TensorShape?
+  ) -> ResourceHandle {
+    _RawTFEager.mutableHashTableOfTensorsV2(
+      container: container, sharedName: sharedName, useNodeNameSharing: useNodeNameSharing,
+      keyDtype: keyDtype, valueDtype: valueDtype, valueShape: valueShape)
+  }
+
+  /// Creates an empty hash table.
+  ///
+  /// This op creates a mutable hash table, specifying the type of its keys and
+  /// values. Each value must be a scalar. Data can be inserted into the table using
+  /// the insert operations. It does not support the initialization operation.
+  ///
+  /// - Attrs:
+  ///   - container: If non-empty, this table is placed in the given container.
+  ///     Otherwise, a default container is used.
+  ///   - shared_name: If non-empty, this table is shared under the given name across
+  ///     multiple sessions.
+  ///   - use_node_name_sharing: If true and shared_name is empty, the table is shared
+  ///     using the node name.
+  ///   - key_dtype: Type of the table keys.
+  ///   - value_dtype: Type of the table values.
+  ///
+  /// - Output table_handle: Handle to a table.
+  @inlinable @inline(__always)
+  public static func mutableHashTableV2(
+    container: String,
+    sharedName: String,
+    useNodeNameSharing: Bool = false,
+    keyDtype: TensorDataType,
+    valueDtype: TensorDataType
+  ) -> ResourceHandle {
+    _RawTFEager.mutableHashTableV2(
+      container: container, sharedName: sharedName, useNodeNameSharing: useNodeNameSharing,
+      keyDtype: keyDtype, valueDtype: valueDtype)
+  }
+
+  /// Locks a mutex resource. The output is the lock. So long as the lock tensor
+  ///
+  /// is alive, any other request to use `MutexLock` with this mutex will wait.
+ /// + /// This is particularly useful for creating a critical section when used in + /// conjunction with `MutexLockIdentity`: + /// + /// ```python + /// + /// mutex = mutex_v2( + /// shared_name=handle_name, container=container, name=name) + /// + /// def execute_in_critical_section(fn, *args, **kwargs): + /// lock = gen_resource_variable_ops.mutex_lock(mutex) + /// + /// with ops.control_dependencies([lock]): + /// r = fn(*args, **kwargs) + /// + /// with ops.control_dependencies(nest.flatten(r)): + /// with ops.colocate_with(mutex): + /// ensure_lock_exists = mutex_lock_identity(lock) + /// + /// # Make sure that if any element of r is accessed, all of + /// # them are executed together. + /// r = nest.map_structure(tf.identity, r) + /// + /// with ops.control_dependencies([ensure_lock_exists]): + /// return nest.map_structure(tf.identity, r) + /// ``` + /// + /// While `fn` is running in the critical section, no other functions which wish to + /// use this critical section may run. + /// + /// Often the use case is that two executions of the same graph, in parallel, + /// wish to run `fn`; and we wish to ensure that only one of them executes + /// at a time. This is especially important if `fn` modifies one or more + /// variables at a time. + /// + /// It is also useful if two separate functions must share a resource, but we + /// wish to ensure the usage is exclusive. + /// + /// - Parameter mutex: The mutex resource to lock. + /// + /// - Output mutex_lock: A tensor that keeps a shared pointer to a lock on the mutex; + /// when the Tensor is destroyed, the use count on the shared pointer is decreased + /// by 1. When it reaches 0, the lock is released. + @inlinable @inline(__always) + public static func mutexLock( + mutex: ResourceHandle + ) -> VariantHandle { + _RawTFEager.mutexLock(mutex: mutex) + } + + /// Creates a Mutex resource that can be locked by `MutexLock`. + /// + /// - Attrs: + /// - container: If non-empty, this variable is placed in the given container. + /// Otherwise, a default container is used. + /// - shared_name: If non-empty, this variable is named in the given bucket + /// with this shared_name. Otherwise, the node name is used instead. + /// + /// - Output resource: The mutex resource. 
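+  ///
+  /// A minimal Swift sketch of pairing this op with `mutexLock` (the shared name
+  /// is illustrative):
+  ///
+  /// ```
+  /// let mutex = _Raw.mutexV2(container: "", sharedName: "my_mutex")
+  /// // While `lock` stays alive, any other `MutexLock` on this mutex blocks.
+  /// let lock = _Raw.mutexLock(mutex: mutex)
+  /// ```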
+  @inlinable @inline(__always)
+  public static func mutexV2(
+    container: String,
+    sharedName: String
+  ) -> ResourceHandle {
+    _RawTFEager.mutexV2(container: container, sharedName: sharedName)
+  }
+
+  @inlinable @inline(__always)
+  public static func nInPolymorphicTwice<T: TensorFlowScalar>(
+    _ a: [Tensor<T>],
+    _ b: [Tensor<T>]
+  ) {
+    _RawTFEager.nInPolymorphicTwice(a, b)
+  }
+
+  @inlinable @inline(__always)
+  public static func nInTwice(
+    _ a: [Tensor<Int32>],
+    _ b: [StringTensor]
+  ) {
+    _RawTFEager.nInTwice(a, b)
+  }
+
+  @inlinable @inline(__always)
+  public static func nInTwoTypeVariables<
+    S: TensorFlowScalar,
+    T: TensorFlowScalar
+  >(
+    _ a: [Tensor<S>],
+    _ b: [Tensor<T>]
+  ) {
+    _RawTFEager.nInTwoTypeVariables(a, b)
+  }
+
+  @inlinable @inline(__always)
+  public static func nIntsIn(
+    _ a: [Tensor<Int32>]
+  ) {
+    _RawTFEager.nIntsIn(a)
+  }
+
+  @inlinable @inline(__always)
+  public static func nIntsOut(
+    n: Int64
+  ) -> [Tensor<Int32>] {
+    _RawTFEager.nIntsOut(n: n)
+  }
+
+  @inlinable @inline(__always)
+  public static func nIntsOutDefault(
+    n: Int64 = 3
+  ) -> [Tensor<Int32>] {
+    _RawTFEager.nIntsOutDefault(n: n)
+  }
+
+  @inlinable @inline(__always)
+  public static func nPolymorphicIn<T: TensorFlowScalar>(
+    _ a: [Tensor<T>]
+  ) {
+    _RawTFEager.nPolymorphicIn(a)
+  }
+
+  @inlinable @inline(__always)
+  public static func nPolymorphicOut<T: TensorFlowScalar>(
+    n: Int64
+  ) -> [Tensor<T>] {
+    _RawTFEager.nPolymorphicOut(n: n)
+  }
+
+  @inlinable @inline(__always)
+  public static func nPolymorphicOutDefault<T: TensorFlowScalar>(
+    n: Int64 = 2
+  ) -> [Tensor<T>] {
+    _RawTFEager.nPolymorphicOutDefault(n: n)
+  }
+
+  @inlinable @inline(__always)
+  public static func nPolymorphicRestrictIn<T: TensorFlowScalar>(
+    _ a: [Tensor<T>]
+  ) {
+    _RawTFEager.nPolymorphicRestrictIn(a)
+  }
+
+  @inlinable @inline(__always)
+  public static func nPolymorphicRestrictIn(
+    _ a: [StringTensor]
+  ) {
+    _RawTFEager.nPolymorphicRestrictIn(a)
+  }
+
+  @inlinable @inline(__always)
+  public static func nPolymorphicRestrictOut<T: TensorFlowScalar>(
+    n: Int64
+  ) -> [Tensor<T>] {
+    _RawTFEager.nPolymorphicRestrictOut(n: n)
+  }
+
+  @inlinable @inline(__always)
+  public static func nPolymorphicRestrictOut(
+    n: Int64
+  ) -> [StringTensor] {
+    _RawTFEager.nPolymorphicRestrictOut(n: n)
+  }
+
+  @inlinable @inline(__always)
+  public static func namespaceTestStringOutput(
+    _ input: Tensor<Float>
+  ) -> (output1: Tensor<Float>, output2: StringTensor) {
+    _RawTFEager.namespaceTestStringOutput(input)
+  }
+
+  /// Outputs a tensor containing the reduction across all input tensors.
+  ///
+  /// Outputs a tensor containing the reduction across all input tensors passed to ops
+  /// within the same `shared_name`.
+  ///
+  /// The graph should be constructed so that if one op runs with shared_name value `c`,
+  /// then `num_devices` ops will run with shared_name value `c`. Failure to do so
+  /// will cause the graph execution to fail to complete.
+  ///
+  /// input: the input to the reduction
+  /// data: the value of the reduction across all `num_devices` devices.
+  /// reduction: the reduction operation to perform.
+  /// num_devices: The number of devices participating in this reduction.
+  /// shared_name: Identifier that is shared between ops of the same reduction.
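+  ///
+  /// A hedged sketch, assuming the generated `Reduction` enum exposes a `.sum`
+  /// case and that `gradient` is an existing `Tensor<Float>`; each of the
+  /// `numDevices` participants must use the same `sharedName`:
+  ///
+  /// ```swift
+  /// let summed = _Raw.ncclAllReduce(
+  ///   gradient, reduction: .sum, numDevices: 2, sharedName: "allreduce_0")
+  /// ```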
+ @inlinable @inline(__always) + public static func ncclAllReduce( + _ input: Tensor, + reduction: Reduction, + numDevices: Int64, + sharedName: String + ) -> Tensor { + switch input.handle.backend { + case .XLA: + let output_device = input.device + let input = Tensor(copying: input, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.ncclAllReduce( + input, reduction: reduction, numDevices: numDevices, sharedName: sharedName), + to: output_device) + case .TF_EAGER: + return _RawTFEager.ncclAllReduce( + input, reduction: reduction, numDevices: numDevices, sharedName: sharedName) + } + + } + + /// Sends `input` to all devices that are connected to the output. + /// + /// Sends `input` to all devices that are connected to the output. + /// + /// The graph should be constructed so that all ops connected to the output have a + /// valid device assignment, and the op itself is assigned one of these devices. + /// + /// input: The input to the broadcast. + /// output: The same as input. + /// shape: The shape of the input tensor. + /// + @inlinable @inline(__always) + public static func ncclBroadcast( + _ input: Tensor, + shape: TensorShape? + ) -> Tensor { + switch input.handle.backend { + case .XLA: + let output_device = input.device + let input = Tensor(copying: input, to: .defaultTFEager) + return Tensor(copying: _RawTFEager.ncclBroadcast(input, shape: shape), to: output_device) + case .TF_EAGER: + return _RawTFEager.ncclBroadcast(input, shape: shape) + } + + } + + /// Reduces `input` from `num_devices` using `reduction` to a single device. + /// + /// Reduces `input` from `num_devices` using `reduction` to a single device. + /// + /// The graph should be constructed so that all inputs have a valid device + /// assignment, and the op itself is assigned one of these devices. + /// + /// input: The input to the reduction. + /// data: the value of the reduction across all `num_devices` devices. + /// reduction: the reduction operation to perform. + @inlinable @inline(__always) + public static func ncclReduce( + _ input: [Tensor], + reduction: Reduction + ) -> Tensor { + _RawTFEager.ncclReduce(input, reduction: reduction) + } + + @inlinable @inline(__always) + public static func ndtri( + _ x: Tensor + ) -> Tensor { + switch x.handle.backend { + case .XLA: + let output_device = x.device + let x = Tensor(copying: x, to: .defaultTFEager) + return Tensor(copying: _RawTFEager.ndtri(x), to: output_device) + case .TF_EAGER: + return _RawTFEager.ndtri(x) + } + + } + + /// Selects the k nearest centers for each point. + /// + /// Rows of points are assumed to be input points. Rows of centers are assumed to be + /// the list of candidate centers. For each point, the k centers that have least L2 + /// distance to it are computed. + /// + /// - Parameters: + /// - points: Matrix of shape (n, d). Rows are assumed to be input points. + /// - centers: Matrix of shape (m, d). Rows are assumed to be centers. + /// - k: Number of nearest centers to return for each point. If k is larger than m, then + /// only m centers are returned. + /// + /// - Outputs: + /// - nearest_center_indices: Matrix of shape (n, min(m, k)). Each row contains the indices of the centers + /// closest to the corresponding point, ordered by increasing distance. + /// - nearest_center_distances: Matrix of shape (n, min(m, k)). Each row contains the squared L2 distance to the + /// corresponding center in nearest_center_indices. 
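+  ///
+  /// For illustration (values chosen arbitrarily):
+  ///
+  /// ```swift
+  /// let points: Tensor<Float> = [[0, 0], [9, 9]]            // n = 2, d = 2
+  /// let centers: Tensor<Float> = [[1, 1], [8, 8], [5, 5]]   // m = 3
+  /// let (indices, distances) = _Raw.nearestNeighbors(
+  ///   points: points, centers: centers, k: Tensor<Int64>(2))
+  /// // indices == [[0, 2], [1, 2]]; each row is ordered by increasing distance.
+  /// ```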
+ @inlinable @inline(__always) + public static func nearestNeighbors( + points: Tensor, + centers: Tensor, + k: Tensor + ) -> (nearestCenterIndices: Tensor, nearestCenterDistances: Tensor) { + _RawTFEager.nearestNeighbors(points: points, centers: centers, k: k) + } + + /// Computes numerical negative value element-wise. + /// + /// I.e., \\(y = -x\\). + @inlinable @inline(__always) + public static func neg( + _ x: Tensor + ) -> Tensor { + switch x.handle.backend { + case .XLA: + return _RawXLA.neg(x) + case .TF_EAGER: + return _RawTFEager.neg(x) + } + + } + + /// Returns the next representable value of `x1` in the direction of `x2`, element-wise. + /// + /// This operation returns the same result as the C++ std::nextafter function. + /// + /// It can also return a subnormal number. + /// + /// @compatibility(cpp) + /// Equivalent to C++ std::nextafter function. + /// @end_compatibility + @inlinable @inline(__always) + public static func nextAfter( + x1: Tensor, + x2: Tensor + ) -> Tensor { + switch commonBackend(x1.handle.backend, x2.handle.backend) { + case .XLA: + let output_device = x2.device + let x1 = Tensor(copying: x1, to: .defaultTFEager) + let x2 = Tensor(copying: x2, to: .defaultTFEager) + return Tensor(copying: _RawTFEager.nextAfter(x1: x1, x2: x2), to: output_device) + case .TF_EAGER: + return _RawTFEager.nextAfter(x1: x1, x2: x2) + } + + } + + /// Makes its input available to the next iteration. + /// + /// - Parameter data: The tensor to be made available to the next iteration. + /// + /// - Output output: The same tensor as `data`. + @inlinable @inline(__always) + public static func nextIteration( + data: Tensor + ) -> Tensor { + switch data.handle.backend { + case .XLA: + let output_device = data.device + let data = Tensor(copying: data, to: .defaultTFEager) + return Tensor(copying: _RawTFEager.nextIteration(data: data), to: output_device) + case .TF_EAGER: + return _RawTFEager.nextIteration(data: data) + } + + } + + /// Does nothing. Only useful as a placeholder for control edges. + @inlinable @inline(__always) + public static func noOp() { + _RawTFEager.noOp() + } + + /// Non-deterministically generates some integers. + /// + /// This op may use some OS-provided source of non-determinism (e.g. an RNG), so each execution will give different results. + /// + /// - Parameter shape: The shape of the output tensor. + /// + /// - Attr dtype: The type of the output. + /// + /// - Output output: Non-deterministic integer values with specified shape. + @inlinable @inline(__always) + public static func nonDeterministicInts< + Dtype: TensorFlowScalar, + ShapeDtype: TensorFlowScalar + >( + shape: Tensor + ) -> Tensor { + switch shape.handle.backend { + case .XLA: + let output_device = shape.device + let shape = Tensor(copying: shape, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.nonDeterministicInts(shape: shape), to: output_device) + case .TF_EAGER: + return _RawTFEager.nonDeterministicInts(shape: shape) + } + + } + + /// Greedily selects a subset of bounding boxes in descending order of score, + /// + /// pruning away boxes that have high intersection-over-union (IOU) overlap + /// with previously selected boxes. Bounding boxes are supplied as + /// [y1, x1, y2, x2], where (y1, x1) and (y2, x2) are the coordinates of any + /// diagonal pair of box corners and the coordinates can be provided as normalized + /// (i.e., lying in the interval [0, 1]) or absolute. Note that this algorithm + /// is agnostic to where the origin is in the coordinate system. 
Note that this + /// algorithm is invariant to orthogonal transformations and translations + /// of the coordinate system; thus translating or reflections of the coordinate + /// system result in the same boxes being selected by the algorithm. + /// The output of this operation is a set of integers indexing into the input + /// collection of bounding boxes representing the selected boxes. The bounding + /// box coordinates corresponding to the selected indices can then be obtained + /// using the `tf.gather operation`. For example: + /// selected_indices = tf.image.non_max_suppression( + /// boxes, scores, max_output_size, iou_threshold) + /// selected_boxes = tf.gather(boxes, selected_indices) + /// + /// - Parameters: + /// - boxes: A 2-D float tensor of shape `[num_boxes, 4]`. + /// - scores: A 1-D float tensor of shape `[num_boxes]` representing a single + /// score corresponding to each box (each row of boxes). + /// - max_output_size: A scalar integer tensor representing the maximum number of + /// boxes to be selected by non max suppression. + /// + /// - Attr iou_threshold: A float representing the threshold for deciding whether boxes + /// overlap too much with respect to IOU. + /// + /// - Output selected_indices: A 1-D integer tensor of shape `[M]` representing the selected + /// indices from the boxes tensor, where `M <= max_output_size`. + @inlinable @inline(__always) + public static func nonMaxSuppression( + boxes: Tensor, + scores: Tensor, + maxOutputSize: Tensor, + iouThreshold: Double = 0.5 + ) -> Tensor { + switch commonBackend( + commonBackend(boxes.handle.backend, scores.handle.backend), maxOutputSize.handle.backend) + { + case .XLA: + let output_device = maxOutputSize.device + let boxes = Tensor(copying: boxes, to: .defaultTFEager) + let scores = Tensor(copying: scores, to: .defaultTFEager) + let maxOutputSize = Tensor(copying: maxOutputSize, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.nonMaxSuppression( + boxes: boxes, scores: scores, maxOutputSize: maxOutputSize, iouThreshold: iouThreshold), + to: output_device) + case .TF_EAGER: + return _RawTFEager.nonMaxSuppression( + boxes: boxes, scores: scores, maxOutputSize: maxOutputSize, iouThreshold: iouThreshold) + } + + } + + /// Greedily selects a subset of bounding boxes in descending order of score, + /// + /// pruning away boxes that have high intersection-over-union (IOU) overlap + /// with previously selected boxes. Bounding boxes are supplied as + /// [y1, x1, y2, x2], where (y1, x1) and (y2, x2) are the coordinates of any + /// diagonal pair of box corners and the coordinates can be provided as normalized + /// (i.e., lying in the interval [0, 1]) or absolute. Note that this algorithm + /// is agnostic to where the origin is in the coordinate system. Note that this + /// algorithm is invariant to orthogonal transformations and translations + /// of the coordinate system; thus translating or reflections of the coordinate + /// system result in the same boxes being selected by the algorithm. + /// + /// The output of this operation is a set of integers indexing into the input + /// collection of bounding boxes representing the selected boxes. The bounding + /// box coordinates corresponding to the selected indices can then be obtained + /// using the `tf.gather operation`. 
For example: + /// + /// selected_indices = tf.image.non_max_suppression_v2( + /// boxes, scores, max_output_size, iou_threshold) + /// selected_boxes = tf.gather(boxes, selected_indices) + /// + /// - Parameters: + /// - boxes: A 2-D float tensor of shape `[num_boxes, 4]`. + /// - scores: A 1-D float tensor of shape `[num_boxes]` representing a single + /// score corresponding to each box (each row of boxes). + /// - max_output_size: A scalar integer tensor representing the maximum number of + /// boxes to be selected by non max suppression. + /// - iou_threshold: A 0-D float tensor representing the threshold for deciding whether + /// boxes overlap too much with respect to IOU. + /// + /// - Output selected_indices: A 1-D integer tensor of shape `[M]` representing the selected + /// indices from the boxes tensor, where `M <= max_output_size`. + @inlinable @inline(__always) + public static func nonMaxSuppressionV2< + T: FloatingPoint & TensorFlowScalar, + TThreshold: FloatingPoint & TensorFlowScalar + >( + boxes: Tensor, + scores: Tensor, + maxOutputSize: Tensor, + iouThreshold: Tensor + ) -> Tensor { + switch commonBackend( + commonBackend( + commonBackend(boxes.handle.backend, scores.handle.backend), maxOutputSize.handle.backend), + iouThreshold.handle.backend) + { + case .XLA: + let output_device = iouThreshold.device + let boxes = Tensor(copying: boxes, to: .defaultTFEager) + let scores = Tensor(copying: scores, to: .defaultTFEager) + let maxOutputSize = Tensor(copying: maxOutputSize, to: .defaultTFEager) + let iouThreshold = Tensor(copying: iouThreshold, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.nonMaxSuppressionV2( + boxes: boxes, scores: scores, maxOutputSize: maxOutputSize, iouThreshold: iouThreshold), + to: output_device) + case .TF_EAGER: + return _RawTFEager.nonMaxSuppressionV2( + boxes: boxes, scores: scores, maxOutputSize: maxOutputSize, iouThreshold: iouThreshold) + } + + } + + /// Greedily selects a subset of bounding boxes in descending order of score, + /// + /// pruning away boxes that have high intersection-over-union (IOU) overlap + /// with previously selected boxes. Bounding boxes with score less than + /// `score_threshold` are removed. Bounding boxes are supplied as + /// [y1, x1, y2, x2], where (y1, x1) and (y2, x2) are the coordinates of any + /// diagonal pair of box corners and the coordinates can be provided as normalized + /// (i.e., lying in the interval [0, 1]) or absolute. Note that this algorithm + /// is agnostic to where the origin is in the coordinate system and more + /// generally is invariant to orthogonal transformations and translations + /// of the coordinate system; thus translating or reflections of the coordinate + /// system result in the same boxes being selected by the algorithm. + /// The output of this operation is a set of integers indexing into the input + /// collection of bounding boxes representing the selected boxes. The bounding + /// box coordinates corresponding to the selected indices can then be obtained + /// using the `tf.gather operation`. For example: + /// selected_indices = tf.image.non_max_suppression_v2( + /// boxes, scores, max_output_size, iou_threshold, score_threshold) + /// selected_boxes = tf.gather(boxes, selected_indices) + /// + /// - Parameters: + /// - boxes: A 2-D float tensor of shape `[num_boxes, 4]`. + /// - scores: A 1-D float tensor of shape `[num_boxes]` representing a single + /// score corresponding to each box (each row of boxes). 
+ /// - max_output_size: A scalar integer tensor representing the maximum number of + /// boxes to be selected by non max suppression. + /// - iou_threshold: A 0-D float tensor representing the threshold for deciding whether + /// boxes overlap too much with respect to IOU. + /// - score_threshold: A 0-D float tensor representing the threshold for deciding when to remove + /// boxes based on score. + /// + /// - Output selected_indices: A 1-D integer tensor of shape `[M]` representing the selected + /// indices from the boxes tensor, where `M <= max_output_size`. + @inlinable @inline(__always) + public static func nonMaxSuppressionV3< + T: FloatingPoint & TensorFlowScalar, + TThreshold: FloatingPoint & TensorFlowScalar + >( + boxes: Tensor, + scores: Tensor, + maxOutputSize: Tensor, + iouThreshold: Tensor, + scoreThreshold: Tensor + ) -> Tensor { + switch commonBackend( + commonBackend( + commonBackend( + commonBackend(boxes.handle.backend, scores.handle.backend), maxOutputSize.handle.backend + ), iouThreshold.handle.backend), scoreThreshold.handle.backend) + { + case .XLA: + let output_device = scoreThreshold.device + let boxes = Tensor(copying: boxes, to: .defaultTFEager) + let scores = Tensor(copying: scores, to: .defaultTFEager) + let maxOutputSize = Tensor(copying: maxOutputSize, to: .defaultTFEager) + let iouThreshold = Tensor(copying: iouThreshold, to: .defaultTFEager) + let scoreThreshold = Tensor(copying: scoreThreshold, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.nonMaxSuppressionV3( + boxes: boxes, scores: scores, maxOutputSize: maxOutputSize, iouThreshold: iouThreshold, + scoreThreshold: scoreThreshold), to: output_device) + case .TF_EAGER: + return _RawTFEager.nonMaxSuppressionV3( + boxes: boxes, scores: scores, maxOutputSize: maxOutputSize, iouThreshold: iouThreshold, + scoreThreshold: scoreThreshold) + } + + } + + /// Greedily selects a subset of bounding boxes in descending order of score, + /// + /// pruning away boxes that have high intersection-over-union (IOU) overlap + /// with previously selected boxes. Bounding boxes with score less than + /// `score_threshold` are removed. Bounding boxes are supplied as + /// [y1, x1, y2, x2], where (y1, x1) and (y2, x2) are the coordinates of any + /// diagonal pair of box corners and the coordinates can be provided as normalized + /// (i.e., lying in the interval [0, 1]) or absolute. Note that this algorithm + /// is agnostic to where the origin is in the coordinate system and more + /// generally is invariant to orthogonal transformations and translations + /// of the coordinate system; thus translating or reflections of the coordinate + /// system result in the same boxes being selected by the algorithm. + /// The output of this operation is a set of integers indexing into the input + /// collection of bounding boxes representing the selected boxes. The bounding + /// box coordinates corresponding to the selected indices can then be obtained + /// using the `tf.gather operation`. For example: + /// selected_indices = tf.image.non_max_suppression_v2( + /// boxes, scores, max_output_size, iou_threshold, score_threshold) + /// selected_boxes = tf.gather(boxes, selected_indices) + /// + /// - Parameters: + /// - boxes: A 2-D float tensor of shape `[num_boxes, 4]`. + /// - scores: A 1-D float tensor of shape `[num_boxes]` representing a single + /// score corresponding to each box (each row of boxes). 
+ /// - max_output_size: A scalar integer tensor representing the maximum number of + /// boxes to be selected by non max suppression. + /// - iou_threshold: A 0-D float tensor representing the threshold for deciding whether + /// boxes overlap too much with respect to IOU. + /// - score_threshold: A 0-D float tensor representing the threshold for deciding when to remove + /// boxes based on score. + /// + /// - Attr pad_to_max_output_size: If true, the output `selected_indices` is padded to be of length + /// `max_output_size`. Defaults to false. + /// + /// - Outputs: + /// - selected_indices: A 1-D integer tensor of shape `[M]` representing the selected + /// indices from the boxes tensor, where `M <= max_output_size`. + /// - valid_outputs: A 0-D integer tensor representing the number of valid elements in + /// `selected_indices`, with the valid elements appearing first. + @inlinable @inline(__always) + public static func nonMaxSuppressionV4< + T: FloatingPoint & TensorFlowScalar, + TThreshold: FloatingPoint & TensorFlowScalar + >( + boxes: Tensor, + scores: Tensor, + maxOutputSize: Tensor, + iouThreshold: Tensor, + scoreThreshold: Tensor, + padToMaxOutputSize: Bool = false + ) -> (selectedIndices: Tensor, validOutputs: Tensor) { + _RawTFEager.nonMaxSuppressionV4( + boxes: boxes, scores: scores, maxOutputSize: maxOutputSize, iouThreshold: iouThreshold, + scoreThreshold: scoreThreshold, padToMaxOutputSize: padToMaxOutputSize) + } + + /// Greedily selects a subset of bounding boxes in descending order of score, + /// + /// pruning away boxes that have high intersection-over-union (IOU) overlap + /// with previously selected boxes. Bounding boxes with score less than + /// `score_threshold` are removed. Bounding boxes are supplied as + /// [y1, x1, y2, x2], where (y1, x1) and (y2, x2) are the coordinates of any + /// diagonal pair of box corners and the coordinates can be provided as normalized + /// (i.e., lying in the interval [0, 1]) or absolute. Note that this algorithm + /// is agnostic to where the origin is in the coordinate system and more + /// generally is invariant to orthogonal transformations and translations + /// of the coordinate system; thus translating or reflections of the coordinate + /// system result in the same boxes being selected by the algorithm. + /// The output of this operation is a set of integers indexing into the input + /// collection of bounding boxes representing the selected boxes. The bounding + /// box coordinates corresponding to the selected indices can then be obtained + /// using the `tf.gather operation`. For example: + /// selected_indices = tf.image.non_max_suppression_v2( + /// boxes, scores, max_output_size, iou_threshold, score_threshold) + /// selected_boxes = tf.gather(boxes, selected_indices) + /// This op also supports a Soft-NMS (with Gaussian weighting) mode (c.f. + /// Bodla et al, https://arxiv.org/abs/1704.04503) where boxes reduce the score + /// of other overlapping boxes instead of directly causing them to be pruned. + /// To enable this Soft-NMS mode, set the `soft_nms_sigma` parameter to be + /// larger than 0. + /// + /// - Parameters: + /// - boxes: A 2-D float tensor of shape `[num_boxes, 4]`. + /// - scores: A 1-D float tensor of shape `[num_boxes]` representing a single + /// score corresponding to each box (each row of boxes). + /// - max_output_size: A scalar integer tensor representing the maximum number of + /// boxes to be selected by non max suppression. 
+  ///   - iou_threshold: A 0-D float tensor representing the threshold for deciding whether
+  ///     boxes overlap too much with respect to IOU.
+  ///   - score_threshold: A 0-D float tensor representing the threshold for deciding when to remove
+  ///     boxes based on score.
+  ///   - soft_nms_sigma: A 0-D float tensor representing the sigma parameter for Soft NMS; see Bodla et
+  ///     al (cf. https://arxiv.org/abs/1704.04503). When `soft_nms_sigma=0.0` (which
+  ///     is default), we fall back to standard (hard) NMS.
+  ///
+  /// - Attr pad_to_max_output_size: If true, the output `selected_indices` is padded to be of length
+  ///   `max_output_size`. Defaults to false.
+  ///
+  /// - Outputs:
+  ///   - selected_indices: A 1-D integer tensor of shape `[M]` representing the selected
+  ///     indices from the boxes tensor, where `M <= max_output_size`.
+  ///   - selected_scores: A 1-D float tensor of shape `[M]` representing the corresponding
+  ///     scores for each selected box, where `M <= max_output_size`. Scores only differ
+  ///     from corresponding input scores when using Soft NMS (i.e. when
+  ///     `soft_nms_sigma>0`).
+  ///   - valid_outputs: A 0-D integer tensor representing the number of valid elements in
+  ///     `selected_indices`, with the valid elements appearing first.
+  @inlinable @inline(__always)
+  public static func nonMaxSuppressionV5<T: FloatingPoint & TensorFlowScalar>(
+    boxes: Tensor<T>,
+    scores: Tensor<T>,
+    maxOutputSize: Tensor<Int32>,
+    iouThreshold: Tensor<T>,
+    scoreThreshold: Tensor<T>,
+    softNmsSigma: Tensor<T>,
+    padToMaxOutputSize: Bool = false
+  ) -> (selectedIndices: Tensor<Int32>, selectedScores: Tensor<T>, validOutputs: Tensor<Int32>) {
+    _RawTFEager.nonMaxSuppressionV5(
+      boxes: boxes, scores: scores, maxOutputSize: maxOutputSize, iouThreshold: iouThreshold,
+      scoreThreshold: scoreThreshold, softNmsSigma: softNmsSigma,
+      padToMaxOutputSize: padToMaxOutputSize)
+  }
+
+  /// Greedily selects a subset of bounding boxes in descending order of score,
+  ///
+  /// pruning away boxes that have high overlaps
+  /// with previously selected boxes. Bounding boxes with score less than
+  /// `score_threshold` are removed. N-by-n overlap values are supplied as a square matrix,
+  /// which allows for defining a custom overlap criterion (e.g. intersection over union,
+  /// intersection over area, etc.).
+  ///
+  /// The output of this operation is a set of integers indexing into the input
+  /// collection of bounding boxes representing the selected boxes. The bounding
+  /// box coordinates corresponding to the selected indices can then be obtained
+  /// using the `tf.gather` operation. For example:
+  ///
+  ///     selected_indices = tf.image.non_max_suppression_with_overlaps(
+  ///         overlaps, scores, max_output_size, overlap_threshold, score_threshold)
+  ///     selected_boxes = tf.gather(boxes, selected_indices)
+  ///
+  /// - Parameters:
+  ///   - overlaps: A 2-D float tensor of shape `[num_boxes, num_boxes]` representing
+  ///     the n-by-n box overlap values.
+  ///   - scores: A 1-D float tensor of shape `[num_boxes]` representing a single
+  ///     score corresponding to each box (each row of boxes).
+  ///   - max_output_size: A scalar integer tensor representing the maximum number of
+  ///     boxes to be selected by non max suppression.
+  ///   - overlap_threshold: A 0-D float tensor representing the threshold for deciding whether
+  ///     boxes overlap too much.
+  ///   - score_threshold: A 0-D float tensor representing the threshold for deciding when to remove
+  ///     boxes based on score.
+ /// + /// - Output selected_indices: A 1-D integer tensor of shape `[M]` representing the selected + /// indices from the boxes tensor, where `M <= max_output_size`. + @inlinable @inline(__always) + public static func nonMaxSuppressionWithOverlaps( + overlaps: Tensor, + scores: Tensor, + maxOutputSize: Tensor, + overlapThreshold: Tensor, + scoreThreshold: Tensor + ) -> Tensor { + switch commonBackend( + commonBackend( + commonBackend( + commonBackend(overlaps.handle.backend, scores.handle.backend), + maxOutputSize.handle.backend), overlapThreshold.handle.backend), + scoreThreshold.handle.backend) + { + case .XLA: + let output_device = scoreThreshold.device + let overlaps = Tensor(copying: overlaps, to: .defaultTFEager) + let scores = Tensor(copying: scores, to: .defaultTFEager) + let maxOutputSize = Tensor(copying: maxOutputSize, to: .defaultTFEager) + let overlapThreshold = Tensor(copying: overlapThreshold, to: .defaultTFEager) + let scoreThreshold = Tensor(copying: scoreThreshold, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.nonMaxSuppressionWithOverlaps( + overlaps: overlaps, scores: scores, maxOutputSize: maxOutputSize, + overlapThreshold: overlapThreshold, scoreThreshold: scoreThreshold), to: output_device) + case .TF_EAGER: + return _RawTFEager.nonMaxSuppressionWithOverlaps( + overlaps: overlaps, scores: scores, maxOutputSize: maxOutputSize, + overlapThreshold: overlapThreshold, scoreThreshold: scoreThreshold) + } + + } + + @inlinable @inline(__always) + public static func nonSerializableDataset( + inputDataset: VariantHandle, + outputTypes: [TensorDataType], + outputShapes: [TensorShape?] + ) -> VariantHandle { + _RawTFEager.nonSerializableDataset( + inputDataset: inputDataset, outputTypes: outputTypes, outputShapes: outputShapes) + } + + @inlinable @inline(__always) + public static func none() { + _RawTFEager.none() + } + + /// Returns the truth value of (x != y) element-wise. + /// + /// *NOTE*: `NotEqual` supports broadcasting. More about broadcasting + /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + @inlinable @inline(__always) + public static func notEqual( + _ x: Tensor, + _ y: Tensor, + incompatibleShapeError: Bool = true + ) -> Tensor { + switch commonBackend(x.handle.backend, y.handle.backend) { + case .XLA: + return _RawXLA.notEqual(x, y, incompatibleShapeError: incompatibleShapeError) + case .TF_EAGER: + return _RawTFEager.notEqual(x, y, incompatibleShapeError: incompatibleShapeError) + } + + } + + /// Returns the truth value of (x != y) element-wise. + /// + /// *NOTE*: `NotEqual` supports broadcasting. More about broadcasting + /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + @inlinable @inline(__always) + public static func notEqual( + _ x: StringTensor, + _ y: StringTensor, + incompatibleShapeError: Bool = true + ) -> Tensor { + _RawTFEager.notEqual(x, y, incompatibleShapeError: incompatibleShapeError) + } + + /// Finds values of the `n`-th order statistic for the last dimension. + /// + /// If the input is a vector (rank-1), finds the entries which is the nth-smallest + /// value in the vector and outputs their values as scalar tensor. + /// + /// For matrices (resp. higher rank input), computes the entries which is the + /// nth-smallest value in each row (resp. vector along the last dimension). Thus, + /// + /// values.shape = input.shape[:-1] + /// + /// - Parameters: + /// - input: 1-D or higher with last dimension at least `n+1`. + /// - n: 0-D. 
Position of sorted vector to select along the last dimension (along + /// each row for matrices). Valid range of n is `[0, input.shape[:-1])` + /// + /// - Attr reverse: When set to True, find the nth-largest value in the vector and vice + /// versa. + /// + /// - Output values: The `n`-th order statistic along each last dimensional slice. + @inlinable @inline(__always) + public static func nthElement( + _ input: Tensor, + n: Tensor, + reverse: Bool = false + ) -> Tensor { + switch commonBackend(input.handle.backend, n.handle.backend) { + case .XLA: + let output_device = n.device + let input = Tensor(copying: input, to: .defaultTFEager) + let n = Tensor(copying: n, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.nthElement(input, n: n, reverse: reverse), to: output_device) + case .TF_EAGER: + return _RawTFEager.nthElement(input, n: n, reverse: reverse) + } + + } + + @inlinable @inline(__always) + public static func old() { + _RawTFEager.old() + } + + /// Returns a one-hot tensor. + /// + /// The locations represented by indices in `indices` take value `on_value`, + /// while all other locations take value `off_value`. + /// + /// If the input `indices` is rank `N`, the output will have rank `N+1`, + /// The new axis is created at dimension `axis` (default: the new axis is + /// appended at the end). + /// + /// If `indices` is a scalar the output shape will be a vector of length `depth`. + /// + /// If `indices` is a vector of length `features`, the output shape will be: + /// ``` + /// features x depth if axis == -1 + /// depth x features if axis == 0 + /// ``` + /// + /// If `indices` is a matrix (batch) with shape `[batch, features]`, + /// the output shape will be: + /// ``` + /// batch x features x depth if axis == -1 + /// batch x depth x features if axis == 1 + /// depth x batch x features if axis == 0 + /// ``` + /// + /// + /// Examples + /// ========= + /// + /// Suppose that + /// ``` + /// indices = [0, 2, -1, 1] + /// depth = 3 + /// on_value = 5.0 + /// off_value = 0.0 + /// axis = -1 + /// ``` + /// + /// Then output is `[4 x 3]`: + /// ``` + /// output = + /// [5.0 0.0 0.0] // one_hot(0) + /// [0.0 0.0 5.0] // one_hot(2) + /// [0.0 0.0 0.0] // one_hot(-1) + /// [0.0 5.0 0.0] // one_hot(1) + /// ``` + /// + /// Suppose that + /// ``` + /// indices = [0, 2, -1, 1] + /// depth = 3 + /// on_value = 0.0 + /// off_value = 3.0 + /// axis = 0 + /// ``` + /// + /// Then output is `[3 x 4]`: + /// ``` + /// output = + /// [0.0 3.0 3.0 3.0] + /// [3.0 3.0 3.0 0.0] + /// [3.0 3.0 3.0 3.0] + /// [3.0 0.0 3.0 3.0] + /// // ^ one_hot(0) + /// // ^ one_hot(2) + /// // ^ one_hot(-1) + /// // ^ one_hot(1) + /// ``` + /// + /// Suppose that + /// ``` + /// indices = [[0, 2], [1, -1]] + /// depth = 3 + /// on_value = 1.0 + /// off_value = 0.0 + /// axis = -1 + /// ``` + /// + /// Then output is `[2 x 2 x 3]`: + /// ``` + /// output = + /// [ + /// [1.0, 0.0, 0.0] // one_hot(0) + /// [0.0, 0.0, 1.0] // one_hot(2) + /// ][ + /// [0.0, 1.0, 0.0] // one_hot(1) + /// [0.0, 0.0, 0.0] // one_hot(-1) + /// ] + /// ``` + /// + /// - Parameters: + /// - indices: A tensor of indices. + /// - depth: A scalar defining the depth of the one hot dimension. + /// - on_value: A scalar defining the value to fill in output when `indices[j] = i`. + /// - off_value: A scalar defining the value to fill in output when `indices[j] != i`. + /// + /// - Attr axis: The axis to fill (default: -1, a new inner-most axis). + /// + /// - Output output: The one-hot tensor. 
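+  ///
+  /// The first example above, expressed against this binding:
+  ///
+  /// ```swift
+  /// let output = _Raw.oneHot(
+  ///   indices: Tensor<Int32>([0, 2, -1, 1]),
+  ///   depth: Tensor<Int32>(3),
+  ///   onValue: Tensor<Float>(5.0),
+  ///   offValue: Tensor<Float>(0.0),
+  ///   axis: -1)
+  /// // output has shape [4, 3]; rows are one_hot(0), one_hot(2), one_hot(-1), one_hot(1).
+  /// ```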
+ @inlinable @inline(__always) + public static func oneHot< + T: TensorFlowScalar, + Ti: TensorFlowInteger + >( + indices: Tensor, + depth: Tensor, + onValue: Tensor, + offValue: Tensor, + axis: Int64 = -1 + ) -> Tensor { + switch commonBackend( + commonBackend( + commonBackend(indices.handle.backend, depth.handle.backend), onValue.handle.backend), + offValue.handle.backend) + { + case .XLA: + return _RawXLA.oneHot( + indices: indices, depth: depth, onValue: onValue, offValue: offValue, axis: axis) + case .TF_EAGER: + return _RawTFEager.oneHot( + indices: indices, depth: depth, onValue: onValue, offValue: offValue, axis: axis) + } + + } + + /// Makes a "one-shot" iterator that can be iterated only once. + /// + /// A one-shot iterator bundles the logic for defining the dataset and + /// the state of the iterator in a single op, which allows simple input + /// pipelines to be defined without an additional initialization + /// ("MakeIterator") step. + /// + /// One-shot iterators have the following limitations: + /// + /// * They do not support parameterization: all logic for creating the underlying + /// dataset must be bundled in the `dataset_factory` function. + /// * They are not resettable. Once a one-shot iterator reaches the end of its + /// underlying dataset, subsequent "IteratorGetNext" operations on that + /// iterator will always produce an `OutOfRange` error. + /// + /// For greater flexibility, use "Iterator" and "MakeIterator" to define + /// an iterator using an arbitrary subgraph, which may capture tensors + /// (including fed values) as parameters, and which may be reset multiple + /// times by rerunning "MakeIterator". + /// + /// - Attr dataset_factory: A function of type `() -> DT_VARIANT`, where the returned + /// DT_VARIANT is a dataset. + /// + /// - Output handle: A handle to the iterator that can be passed to an "IteratorGetNext" + /// op. + @inlinable @inline(__always) + public static func oneShotIterator< + DatasetfactoryIn: TensorGroup, + DatasetfactoryOut: TensorGroup + >( + datasetFactory: (DatasetfactoryIn) -> DatasetfactoryOut, + outputTypes: [TensorDataType], + outputShapes: [TensorShape?], + container: String, + sharedName: String + ) -> ResourceHandle { + _RawTFEager.oneShotIterator( + datasetFactory: datasetFactory, outputTypes: outputTypes, outputShapes: outputShapes, + container: container, sharedName: sharedName) + } + + /// Returns a tensor of ones with the same shape and type as x. + /// + /// - Parameter x: a tensor of type T. + /// + /// - Output y: a tensor of the same shape and type as x but filled with ones. + @inlinable @inline(__always) + public static func onesLike( + _ x: Tensor + ) -> Tensor { + switch x.handle.backend { + case .XLA: + return _RawXLA.onesLike(x) + case .TF_EAGER: + return _RawTFEager.onesLike(x) + } + + } + + @inlinable @inline(__always) + public static func opWithDefaultAttr( + defaultFloat: Double = 123 + ) -> Tensor { + _RawTFEager.opWithDefaultAttr(defaultFloat: defaultFloat) + } + + @inlinable @inline(__always) + public static func opWithFutureDefaultAttr() { + _RawTFEager.opWithFutureDefaultAttr() + } + + /// Creates a dataset by applying optimizations to `input_dataset`. + /// + /// Creates a dataset by applying optimizations to `input_dataset`. + /// + /// - Parameters: + /// - input_dataset: A variant tensor representing the input dataset. + /// - optimizations: A `tf.string` vector `tf.Tensor` identifying optimizations to use. 
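+  ///
+  /// A hedged sketch; `dataset` is assumed to be an existing `VariantHandle`
+  /// for a dataset of scalar `Float` elements, and the optimization name below
+  /// is only illustrative:
+  ///
+  /// ```swift
+  /// let optimized = _Raw.optimizeDataset(
+  ///   inputDataset: dataset,
+  ///   optimizations: StringTensor(["map_and_batch_fusion"]),
+  ///   outputTypes: [Float.tensorFlowDataType],
+  ///   outputShapes: [TensorShape([])],
+  ///   optimizationConfigs: [])
+  /// ```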
+ @inlinable @inline(__always) + public static func optimizeDataset( + inputDataset: VariantHandle, + optimizations: StringTensor, + outputTypes: [TensorDataType], + outputShapes: [TensorShape?], + optimizationConfigs: [String] + ) -> VariantHandle { + _RawTFEager.optimizeDataset( + inputDataset: inputDataset, optimizations: optimizations, outputTypes: outputTypes, + outputShapes: outputShapes, optimizationConfigs: optimizationConfigs) + } + + /// Constructs an Optional variant from a tuple of tensors. + @inlinable @inline(__always) + public static func optionalFromValue( + components: ToutputTypes + ) -> VariantHandle { + _RawTFEager.optionalFromValue(components: components) + } + + /// Returns the value stored in an Optional variant or raises an error if none exists. + @inlinable @inline(__always) + public static func optionalGetValue( + optional: VariantHandle, + outputShapes: [TensorShape?] + ) -> OutputTypes { + _RawTFEager.optionalGetValue(optional: optional, outputShapes: outputShapes) + } + + /// Returns true if and only if the given Optional variant has a value. + @inlinable @inline(__always) + public static func optionalHasValue( + optional: VariantHandle + ) -> Tensor { + _RawTFEager.optionalHasValue(optional: optional) + } + + /// Creates an Optional variant with no value. + @inlinable @inline(__always) + public static func optionalNone() -> VariantHandle { + _RawTFEager.optionalNone() + } + + /// Op removes all elements in the underlying container. + @inlinable @inline(__always) + public static func orderedMapClear( + capacity: Int64 = 0, + memoryLimit: Int64 = 0, + dtypes: [TensorDataType], + container: String, + sharedName: String + ) { + _RawTFEager.orderedMapClear( + capacity: capacity, memoryLimit: memoryLimit, dtypes: dtypes, container: container, + sharedName: sharedName) + } + + /// Op returns the number of incomplete elements in the underlying container. + @inlinable @inline(__always) + public static func orderedMapIncompleteSize( + capacity: Int64 = 0, + memoryLimit: Int64 = 0, + dtypes: [TensorDataType], + container: String, + sharedName: String + ) -> Tensor { + _RawTFEager.orderedMapIncompleteSize( + capacity: capacity, memoryLimit: memoryLimit, dtypes: dtypes, container: container, + sharedName: sharedName) + } + + /// Op peeks at the values at the specified key. If the + /// + /// underlying container does not contain this key + /// this op will block until it does. This Op is optimized for + /// performance. + @inlinable @inline(__always) + public static func orderedMapPeek( + key: Tensor, + indices: Tensor, + capacity: Int64 = 0, + memoryLimit: Int64 = 0, + container: String, + sharedName: String + ) -> Dtypes { + _RawTFEager.orderedMapPeek( + key: key, indices: indices, capacity: capacity, memoryLimit: memoryLimit, + container: container, sharedName: sharedName) + } + + /// Op returns the number of elements in the underlying container. + @inlinable @inline(__always) + public static func orderedMapSize( + capacity: Int64 = 0, + memoryLimit: Int64 = 0, + dtypes: [TensorDataType], + container: String, + sharedName: String + ) -> Tensor { + _RawTFEager.orderedMapSize( + capacity: capacity, memoryLimit: memoryLimit, dtypes: dtypes, container: container, + sharedName: sharedName) + } + + /// Stage (key, values) in the underlying container which behaves like a ordered + /// + /// associative container. Elements are ordered by key. 
+ /// + /// - Parameters: + /// - key: int64 + /// - values: a list of tensors + /// dtypes A list of data types that inserted values should adhere to. + /// + /// - Attrs: + /// - capacity: Maximum number of elements in the Staging Area. If > 0, inserts + /// on the container will block when the capacity is reached. + /// - container: If non-empty, this queue is placed in the given container. Otherwise, + /// a default container is used. + /// - shared_name: It is necessary to match this name to the matching Unstage Op. + @inlinable @inline(__always) + public static func orderedMapStage( + key: Tensor, + indices: Tensor, + _ values: FakeDtypes, + capacity: Int64 = 0, + memoryLimit: Int64 = 0, + dtypes: [TensorDataType], + container: String, + sharedName: String + ) { + _RawTFEager.orderedMapStage( + key: key, indices: indices, values, capacity: capacity, memoryLimit: memoryLimit, + dtypes: dtypes, container: container, sharedName: sharedName) + } + + /// Op removes and returns the values associated with the key + /// + /// from the underlying container. If the underlying container + /// does not contain this key, the op will block until it does. + @inlinable @inline(__always) + public static func orderedMapUnstage( + key: Tensor, + indices: Tensor, + capacity: Int64 = 0, + memoryLimit: Int64 = 0, + container: String, + sharedName: String + ) -> Dtypes { + _RawTFEager.orderedMapUnstage( + key: key, indices: indices, capacity: capacity, memoryLimit: memoryLimit, + container: container, sharedName: sharedName) + } + + /// Op removes and returns the (key, value) element with the smallest + /// + /// key from the underlying container. If the underlying container + /// does not contain elements, the op will block until it does. + @inlinable @inline(__always) + public static func orderedMapUnstageNoKey( + indices: Tensor, + capacity: Int64 = 0, + memoryLimit: Int64 = 0, + container: String, + sharedName: String + ) -> (key: Tensor, values: Dtypes) { + _RawTFEager.orderedMapUnstageNoKey( + indices: indices, capacity: capacity, memoryLimit: memoryLimit, container: container, + sharedName: sharedName) + } + + @inlinable @inline(__always) + public static func outT() -> Tensor { + _RawTFEager.outT() + } + + @inlinable @inline(__always) + public static func outTypeList() -> T { + _RawTFEager.outTypeList() + } + + @inlinable @inline(__always) + public static func outTypeListRestrict() -> T { + _RawTFEager.outTypeListRestrict() + } + + /// Retrieves a single tensor from the computation outfeed. + /// + /// This operation will block indefinitely until data is available. + /// + /// - Attrs: + /// - dtype: The type of elements in the tensor. + /// - shape: The shape of the tensor. + /// - device_ordinal: The TPU device to use. This should be -1 when the Op + /// is running on a TPU device, and >= 0 when the Op is running on the CPU + /// device. + /// + /// - Output output: A tensor that will be read from the device outfeed. + @inlinable @inline(__always) + public static func outfeedDequeue( + shape: TensorShape?, + deviceOrdinal: Int64 = -1 + ) -> Tensor { + _RawTFEager.outfeedDequeue(shape: shape, deviceOrdinal: deviceOrdinal) + } + + /// Retrieve multiple values from the computation outfeed. + /// + /// This operation will block indefinitely until data is available. Output `i` + /// corresponds to XLA tuple element `i`. + /// + /// - Attrs: + /// - dtypes: The element types of each element in `outputs`. + /// - shapes: The shapes of each tensor in `outputs`. 
+ /// - device_ordinal: The TPU device to use. This should be -1 when the Op + /// is running on a TPU device, and >= 0 when the Op is running on the CPU + /// device. + /// + /// - Output outputs: A list of tensors that will be read from the outfeed. + @inlinable @inline(__always) + public static func outfeedDequeueTuple( + shapes: [TensorShape?], + deviceOrdinal: Int64 = -1 + ) -> Dtypes { + _RawTFEager.outfeedDequeueTuple(shapes: shapes, deviceOrdinal: deviceOrdinal) + } + + /// Enqueue a Tensor on the computation outfeed. + /// + /// - Parameter input: A tensor that will be inserted into the outfeed queue. + @inlinable @inline(__always) + public static func outfeedEnqueue( + _ input: Tensor + ) { + _RawTFEager.outfeedEnqueue(input) + } + + /// Enqueue multiple Tensor values on the computation outfeed. + /// + /// - Parameter inputs: A list of tensors that will be inserted into the outfeed queue as an + /// XLA tuple. + @inlinable @inline(__always) + public static func outfeedEnqueueTuple( + inputs: Dtypes + ) { + _RawTFEager.outfeedEnqueueTuple(inputs: inputs) + } + + /// Packs a list of `N` rank-`R` tensors into one rank-`(R+1)` tensor. + /// + /// Packs the `N` tensors in `values` into a tensor with rank one higher than each + /// tensor in `values`, by packing them along the `axis` dimension. + /// Given a list of tensors of shape `(A, B, C)`; + /// + /// if `axis == 0` then the `output` tensor will have the shape `(N, A, B, C)`. + /// if `axis == 1` then the `output` tensor will have the shape `(A, N, B, C)`. + /// Etc. + /// + /// For example: + /// + /// ``` + /// # 'x' is [1, 4] + /// # 'y' is [2, 5] + /// # 'z' is [3, 6] + /// pack([x, y, z]) => [[1, 4], [2, 5], [3, 6]] # Pack along first dim. + /// pack([x, y, z], axis=1) => [[1, 2, 3], [4, 5, 6]] + /// ``` + /// + /// This is the opposite of `unpack`. + /// + /// - Parameter values: Must be of same shape and type. + /// + /// - Attr axis: Dimension along which to pack. Negative values wrap around, so the + /// valid range is `[-(R+1), R+1)`. + /// + /// - Output output: The packed tensor. + @inlinable @inline(__always) + public static func pack( + _ values: [Tensor], + axis: Int64 = 0 + ) -> Tensor { + switch commonBackend(values) { + case .XLA: + return _RawXLA.pack(values, axis: axis) + case .TF_EAGER: + return _RawTFEager.pack(values, axis: axis) + } + + } + + /// Pads a tensor with zeros. + /// + /// This operation pads a `input` with zeros according to the `paddings` you + /// specify. `paddings` is an integer tensor with shape `[Dn, 2]`, where n is the + /// rank of `input`. For each dimension D of `input`, `paddings[D, 0]` indicates + /// how many zeros to add before the contents of `input` in that dimension, and + /// `paddings[D, 1]` indicates how many zeros to add after the contents of `input` + /// in that dimension. 
+ /// + /// The padded size of each dimension D of the output is: + /// + /// `paddings(D, 0) + input.dim_size(D) + paddings(D, 1)` + /// + /// For example: + /// + /// ``` + /// # 't' is [[1, 1], [2, 2]] + /// # 'paddings' is [[1, 1], [2, 2]] + /// # rank of 't' is 2 + /// pad(t, paddings) ==> [[0, 0, 0, 0, 0, 0] + /// [0, 0, 1, 1, 0, 0] + /// [0, 0, 2, 2, 0, 0] + /// [0, 0, 0, 0, 0, 0]] + /// ``` + /// + @inlinable @inline(__always) + public static func pad< + T: TensorFlowScalar, + Tpaddings: TensorFlowIndex + >( + _ input: Tensor, + paddings: Tensor + ) -> Tensor { + switch commonBackend(input.handle.backend, paddings.handle.backend) { + case .XLA: + return _RawXLA.pad(input, paddings: paddings) + case .TF_EAGER: + return _RawTFEager.pad(input, paddings: paddings) + } + + } + + /// Pads a tensor. + /// + /// This operation pads `input` according to the `paddings` and `constant_values` + /// you specify. `paddings` is an integer tensor with shape `[Dn, 2]`, where n is + /// the rank of `input`. For each dimension D of `input`, `paddings[D, 0]` indicates + /// how many padding values to add before the contents of `input` in that dimension, + /// and `paddings[D, 1]` indicates how many padding values to add after the contents + /// of `input` in that dimension. `constant_values` is a scalar tensor of the same + /// type as `input` that indicates the value to use for padding `input`. + /// + /// The padded size of each dimension D of the output is: + /// + /// `paddings(D, 0) + input.dim_size(D) + paddings(D, 1)` + /// + /// For example: + /// + /// ``` + /// # 't' is [[1, 1], [2, 2]] + /// # 'paddings' is [[1, 1], [2, 2]] + /// # 'constant_values' is 0 + /// # rank of 't' is 2 + /// pad(t, paddings) ==> [[0, 0, 0, 0, 0, 0] + /// [0, 0, 1, 1, 0, 0] + /// [0, 0, 2, 2, 0, 0] + /// [0, 0, 0, 0, 0, 0]] + /// ``` + @inlinable @inline(__always) + public static func padV2< + T: TensorFlowScalar, + Tpaddings: TensorFlowIndex + >( + _ input: Tensor, + paddings: Tensor, + constantValues: Tensor + ) -> Tensor { + switch commonBackend( + commonBackend(input.handle.backend, paddings.handle.backend), constantValues.handle.backend) + { + case .XLA: + return _RawXLA.padV2(input, paddings: paddings, constantValues: constantValues) + case .TF_EAGER: + return _RawTFEager.padV2(input, paddings: paddings, constantValues: constantValues) + } + + } + + /// Creates a dataset that batches and pads `batch_size` elements from the input. + /// + /// - Parameters: + /// - batch_size: A scalar representing the number of elements to accumulate in a + /// batch. + /// - padded_shapes: A list of int64 tensors representing the desired padded shapes + /// of the corresponding output components. These shapes may be partially + /// specified, using `-1` to indicate that a particular dimension should be + /// padded to the maximum size of all batch elements. + /// - padding_values: A list of scalars containing the padding value to use for + /// each of the outputs. + @inlinable @inline(__always) + public static func paddedBatchDataset( + inputDataset: VariantHandle, + batchSize: Tensor, + paddedShapes: [Tensor], + paddingValues: ToutputTypes, + outputShapes: [TensorShape?] + ) -> VariantHandle { + _RawTFEager.paddedBatchDataset( + inputDataset: inputDataset, batchSize: batchSize, paddedShapes: paddedShapes, + paddingValues: paddingValues, outputShapes: outputShapes) + } + + /// Creates a dataset that batches and pads `batch_size` elements from the input. 
+ /// + /// - Parameters: + /// - batch_size: A scalar representing the number of elements to accumulate in a + /// batch. + /// - padded_shapes: A list of int64 tensors representing the desired padded shapes + /// of the corresponding output components. These shapes may be partially + /// specified, using `-1` to indicate that a particular dimension should be + /// padded to the maximum size of all batch elements. + /// - padding_values: A list of scalars containing the padding value to use for + /// each of the outputs. + /// - drop_remainder: A scalar representing whether the last batch should be dropped in case its size + /// is smaller than desired. + @inlinable @inline(__always) + public static func paddedBatchDatasetV2( + inputDataset: VariantHandle, + batchSize: Tensor, + paddedShapes: [Tensor], + paddingValues: ToutputTypes, + dropRemainder: Tensor, + parallelCopy: Bool = false, + outputShapes: [TensorShape?] + ) -> VariantHandle { + _RawTFEager.paddedBatchDatasetV2( + inputDataset: inputDataset, batchSize: batchSize, paddedShapes: paddedShapes, + paddingValues: paddingValues, dropRemainder: dropRemainder, parallelCopy: parallelCopy, + outputShapes: outputShapes) + } + + /// A queue that produces elements in first-in first-out order. + /// + /// Variable-size shapes are allowed by setting the corresponding shape dimensions + /// to 0 in the shape attr. In this case DequeueMany will pad up to the maximum + /// size of any given element in the minibatch. See below for details. + /// + /// - Attrs: + /// - component_types: The type of each component in a value. + /// - shapes: The shape of each component in a value. The length of this attr must + /// be either 0 or the same as the length of component_types. + /// Shapes of fixed rank but variable size are allowed by setting + /// any shape dimension to -1. In this case, the inputs' shape may vary along + /// the given dimension, and DequeueMany will pad the given dimension with + /// zeros up to the maximum shape of all elements in the given batch. + /// If the length of this attr is 0, different queue elements may have + /// different ranks and shapes, but only one element may be dequeued at a time. + /// - capacity: The upper bound on the number of elements in this queue. + /// Negative numbers mean no limit. + /// - container: If non-empty, this queue is placed in the given container. + /// Otherwise, a default container is used. + /// - shared_name: If non-empty, this queue will be shared under the given name + /// across multiple sessions. + /// + /// - Output handle: The handle to the queue. + @inlinable @inline(__always) + public static func paddingFIFOQueueV2( + componentTypes: [TensorDataType], + shapes: [TensorShape?], + capacity: Int64 = -1, + container: String, + sharedName: String + ) -> ResourceHandle { + _RawTFEager.paddingFIFOQueueV2( + componentTypes: componentTypes, shapes: shapes, capacity: capacity, container: container, + sharedName: sharedName) + } + + /// Concatenates a list of `N` tensors along the first dimension. + /// + /// The input tensors are all required to have size 1 in the first dimension. + /// + /// For example: + /// + /// ``` + /// # 'x' is [[1, 4]] + /// # 'y' is [[2, 5]] + /// # 'z' is [[3, 6]] + /// parallel_concat([x, y, z]) => [[1, 4], [2, 5], [3, 6]] # Pack along first dim. 
+ /// ``` + /// + /// The difference between concat and parallel_concat is that concat requires all + /// of the inputs be computed before the operation will begin but doesn't require + /// that the input shapes be known during graph construction. Parallel concat + /// will copy pieces of the input into the output as they become available, in + /// some situations this can provide a performance benefit. + /// + /// - Parameter values: Tensors to be concatenated. All must have size 1 in the first dimension + /// and same shape. + /// + /// - Attr shape: the final shape of the result; should be equal to the shapes of any input + /// but with the number of input values in the first dimension. + /// + /// - Output output: The concatenated tensor. + @inlinable @inline(__always) + public static func parallelConcat( + _ values: [Tensor], + shape: TensorShape? + ) -> Tensor { + _RawTFEager.parallelConcat(values, shape: shape) + } + + /// Interleave the values from the `data` tensors into a single tensor. + /// + /// Builds a merged tensor such that + /// + /// ```python + /// merged[indices[m][i, ..., j], ...] = data[m][i, ..., j, ...] + /// ``` + /// + /// For example, if each `indices[m]` is scalar or vector, we have + /// + /// ```python + /// # Scalar indices: + /// merged[indices[m], ...] = data[m][...] + /// + /// # Vector indices: + /// merged[indices[m][i], ...] = data[m][i, ...] + /// ``` + /// + /// Each `data[i].shape` must start with the corresponding `indices[i].shape`, + /// and the rest of `data[i].shape` must be constant w.r.t. `i`. That is, we + /// must have `data[i].shape = indices[i].shape + constant`. In terms of this + /// `constant`, the output shape is + /// + /// merged.shape = [max(indices)] + constant + /// + /// Values may be merged in parallel, so if an index appears in both `indices[m][i]` + /// and `indices[n][j]`, the result may be invalid. This differs from the normal + /// DynamicStitch operator that defines the behavior in that case. + /// + /// For example: + /// + /// ```python + /// indices[0] = 6 + /// indices[1] = [4, 1] + /// indices[2] = [[5, 2], [0, 3]] + /// data[0] = [61, 62] + /// data[1] = [[41, 42], [11, 12]] + /// data[2] = [[[51, 52], [21, 22]], [[1, 2], [31, 32]]] + /// merged = [[1, 2], [11, 12], [21, 22], [31, 32], [41, 42], + /// [51, 52], [61, 62]] + /// ``` + /// + /// This method can be used to merge partitions created by `dynamic_partition` + /// as illustrated on the following example: + /// + /// ```python + /// # Apply function (increments x_i) on elements for which a certain condition + /// # apply (x_i != -1 in this example). + /// x=tf.constant([0.1, -1., 5.2, 4.3, -1., 7.4]) + /// condition_mask=tf.not_equal(x,tf.constant(-1.)) + /// partitioned_data = tf.dynamic_partition( + /// x, tf.cast(condition_mask, tf.int32) , 2) + /// partitioned_data[1] = partitioned_data[1] + 1.0 + /// condition_indices = tf.dynamic_partition( + /// tf.range(tf.shape(x)[0]), tf.cast(condition_mask, tf.int32) , 2) + /// x = tf.dynamic_stitch(condition_indices, partitioned_data) + /// # Here x=[1.1, -1., 6.2, 5.3, -1, 8.4], the -1. values remain + /// # unchanged. + /// ``` + /// + ///
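+  /// A minimal Swift-level sketch of the vector-indices case (the literal
+  /// values here are illustrative, not part of the original op documentation):
+  ///
+  /// ```swift
+  /// let indices: [Tensor<Int32>] = [[0, 2], [1]]
+  /// let data: [Tensor<Float>] = [[[10, 10], [30, 30]], [[20, 20]]]
+  /// let merged = _Raw.parallelDynamicStitch(indices: indices, data: data)
+  /// // merged == [[10, 10], [20, 20], [30, 30]]
+  /// ```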
+  @inlinable @inline(__always)
+  public static func parallelDynamicStitch<T: TensorFlowScalar>(
+    indices: [Tensor<Int32>],
+    data: [Tensor<T>]
+  ) -> Tensor<T> {
+    _RawTFEager.parallelDynamicStitch(indices: indices, data: data)
+  }
+
+  /// Creates a dataset that applies `f` to the outputs of `input_dataset`.
+  ///
+  /// The resulting dataset is similar to the `InterleaveDataset`, with the exception
+  /// that if retrieving the next value from a dataset would cause the requester to
+  /// block, it will skip that input dataset. This dataset is especially useful
+  /// when loading data from variable-latency datastores (e.g. HDFS, GCS), as it
+  /// allows the training step to proceed so long as some data is available.
+  ///
+  /// !! WARNING !! If the `sloppy` parameter is set to `True`, the operation of this
+  /// dataset will not be deterministic!
+  ///
+  /// This dataset has been superseded by `ParallelInterleaveDatasetV2`. New code
+  /// should use `ParallelInterleaveDatasetV2`.
+  ///
+  /// The Python API `tf.data.experimental.parallel_interleave` creates instances of
+  /// this op. `tf.data.experimental.parallel_interleave` is a deprecated API.
+  ///
+  /// - Parameters:
+  ///   - input_dataset: Dataset that produces a stream of arguments for the function `f`.
+  ///   - other_arguments: Additional arguments to pass to `f` beyond those produced by `input_dataset`.
+  ///     Evaluated once when the dataset is instantiated.
+  ///   - cycle_length: Number of datasets (each created by applying `f` to the elements of
+  ///     `input_dataset`) among which the `ParallelInterleaveDataset` will cycle in a
+  ///     round-robin fashion.
+  ///   - block_length: Number of elements at a time to produce from each interleaved invocation of a
+  ///     dataset returned by `f`.
+  ///   - sloppy: If `True`, return elements as they become available, even if that means returning
+  ///     these elements in a non-deterministic order. Sloppy operation may result in better
+  ///     performance in the presence of stragglers, but the dataset will still block if
+  ///     all of its open streams are blocked.
+  ///     If `False`, always return elements in a deterministic order.
+  ///   - buffer_output_elements: The number of elements each iterator being interleaved should buffer (similar
+  ///     to the `.prefetch()` transformation for each interleaved iterator).
+  ///   - prefetch_input_elements: Determines the number of iterators to prefetch, allowing buffers to warm up and
+  ///     data to be pre-fetched without blocking the main thread.
+  ///
+  /// - Attrs:
+  ///   - f: A function mapping elements of `input_dataset`, concatenated with
+  ///     `other_arguments`, to a Dataset variant that contains elements matching
+  ///     `output_types` and `output_shapes`.
+  ///   - Targuments: Types of the elements of `other_arguments`.
+  @inlinable @inline(__always)
+  public static func parallelInterleaveDataset<
+    FIn: TensorGroup,
+    FOut: TensorGroup,
+    Targuments: TensorArrayProtocol
+  >(
+    inputDataset: VariantHandle,
+    otherArguments: Targuments,
+    cycleLength: Tensor<Int64>,
+    blockLength: Tensor<Int64>,
+    sloppy: Tensor<Bool>,
+    bufferOutputElements: Tensor<Int64>,
+    prefetchInputElements: Tensor<Int64>,
+    f: (FIn) -> FOut,
+    outputTypes: [TensorDataType],
+    outputShapes: [TensorShape?]
+ ) -> VariantHandle { + _RawTFEager.parallelInterleaveDataset( + inputDataset: inputDataset, otherArguments: otherArguments, cycleLength: cycleLength, + blockLength: blockLength, sloppy: sloppy, bufferOutputElements: bufferOutputElements, + prefetchInputElements: prefetchInputElements, f: f, outputTypes: outputTypes, + outputShapes: outputShapes) + } + + /// Creates a dataset that applies `f` to the outputs of `input_dataset`. + /// + /// The resulting dataset is similar to the `InterleaveDataset`, except that the + /// dataset will fetch records from the interleaved datasets in parallel. + /// + /// The `tf.data` Python API creates instances of this op from + /// `Dataset.interleave()` when the `num_parallel_calls` parameter of that method + /// is set to any value other than `None`. + /// + /// By default, the output of this dataset will be deterministic, which may result + /// in the dataset blocking if the next data item to be returned isn't available. + /// In order to avoid head-of-line blocking, one can set the + /// `experimental_deterministic` parameter of `tf.data.Options` to `False`, + /// which can improve performance at the expense of non-determinism. + /// + /// - Parameters: + /// - input_dataset: Dataset that produces a stream of arguments for the function `f`. + /// - other_arguments: Additional arguments to pass to `f` beyond those produced by `input_dataset`. + /// Evaluated once when the dataset is instantiated. + /// - cycle_length: Number of datasets (each created by applying `f` to the elements of + /// `input_dataset`) among which the `ParallelInterleaveDatasetV2` will cycle in a + /// round-robin fashion. + /// - block_length: Number of elements at a time to produce from each interleaved invocation of a + /// dataset returned by `f`. + /// - num_parallel_calls: Determines the number of threads that should be used for fetching data from + /// input datasets in parallel. The Python API `tf.data.experimental.AUTOTUNE` + /// constant can be used to indicate that the level of parallelism should be autotuned. + /// + /// - Attrs: + /// - f: A function mapping elements of `input_dataset`, concatenated with + /// `other_arguments`, to a Dataset variant that contains elements matching + /// `output_types` and `output_shapes`. + /// - Targuments: Types of the elements of `other_arguments`. + @inlinable @inline(__always) + public static func parallelInterleaveDatasetV2< + FIn: TensorGroup, + FOut: TensorGroup, + Targuments: TensorArrayProtocol + >( + inputDataset: VariantHandle, + otherArguments: Targuments, + cycleLength: Tensor, + blockLength: Tensor, + numParallelCalls: Tensor, + f: (FIn) -> FOut, + outputTypes: [TensorDataType], + outputShapes: [TensorShape?], + sloppy: Bool = false + ) -> VariantHandle { + _RawTFEager.parallelInterleaveDatasetV2( + inputDataset: inputDataset, otherArguments: otherArguments, cycleLength: cycleLength, + blockLength: blockLength, numParallelCalls: numParallelCalls, f: f, + outputTypes: outputTypes, outputShapes: outputShapes, sloppy: sloppy) + } + + /// Creates a dataset that applies `f` to the outputs of `input_dataset`. + /// + /// Unlike a "MapDataset", which applies `f` sequentially, this dataset invokes up + /// to `num_parallel_calls` copies of `f` in parallel. + /// + /// - Parameter num_parallel_calls: The number of concurrent invocations of `f` that process + /// elements from `input_dataset` in parallel. 
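+  ///
+  /// A minimal sketch of how this op surfaces in the high-level API, assuming
+  /// the `Dataset` wrapper and an integer-tensor dataset (both illustrative):
+  ///
+  /// ```swift
+  /// let doubled = dataset.map(parallelCallCount: 4) { $0 * 2 }
+  /// ```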
+ @inlinable @inline(__always) + public static func parallelMapDataset< + FIn: TensorGroup, + FOut: TensorGroup, + Targuments: TensorArrayProtocol + >( + inputDataset: VariantHandle, + otherArguments: Targuments, + numParallelCalls: Tensor, + f: (FIn) -> FOut, + outputTypes: [TensorDataType], + outputShapes: [TensorShape?], + useInterOpParallelism: Bool = true, + sloppy: Bool = false, + preserveCardinality: Bool = false + ) -> VariantHandle { + _RawTFEager.parallelMapDataset( + inputDataset: inputDataset, otherArguments: otherArguments, + numParallelCalls: numParallelCalls, f: f, outputTypes: outputTypes, + outputShapes: outputShapes, useInterOpParallelism: useInterOpParallelism, sloppy: sloppy, + preserveCardinality: preserveCardinality) + } + + /// Outputs random values from a normal distribution. The parameters may each be a + /// + /// scalar which applies to the entire output, or a vector of length shape[0] which + /// stores the parameters for each batch. + /// + /// - Parameters: + /// - shape: The shape of the output tensor. Batches are indexed by the 0th dimension. + /// - means: The mean parameter of each batch. + /// - stdevs: The standard deviation parameter of each batch. Must be greater than 0. + /// - minvals: The minimum cutoff. May be -infinity. + /// - maxvals: The maximum cutoff. May be +infinity, and must be more than the minval + /// for each batch. + /// + /// - Attrs: + /// - seed: If either `seed` or `seed2` are set to be non-zero, the random number + /// generator is seeded by the given seed. Otherwise, it is seeded by a + /// random seed. + /// - seed2: A second seed to avoid seed collision. + /// - dtype: The type of the output. + /// + /// - Output output: A matrix of shape num_batches x samples_per_batch, filled with random + /// truncated normal values using the parameters for each row. + @inlinable @inline(__always) + public static func parameterizedTruncatedNormal< + Dtype: FloatingPoint & TensorFlowScalar, + T: TensorFlowIndex + >( + shape: Tensor, + means: Tensor, + stdevs: Tensor, + minvals: Tensor, + maxvals: Tensor, + seed: Int64 = 0, + seed2: Int64 = 0 + ) -> Tensor { + switch commonBackend( + commonBackend( + commonBackend( + commonBackend(shape.handle.backend, means.handle.backend), stdevs.handle.backend), + minvals.handle.backend), maxvals.handle.backend) + { + case .XLA: + let output_device = maxvals.device + let shape = Tensor(copying: shape, to: .defaultTFEager) + let means = Tensor(copying: means, to: .defaultTFEager) + let stdevs = Tensor(copying: stdevs, to: .defaultTFEager) + let minvals = Tensor(copying: minvals, to: .defaultTFEager) + let maxvals = Tensor(copying: maxvals, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.parameterizedTruncatedNormal( + shape: shape, means: means, stdevs: stdevs, minvals: minvals, maxvals: maxvals, + seed: seed, seed2: seed2), to: output_device) + case .TF_EAGER: + return _RawTFEager.parameterizedTruncatedNormal( + shape: shape, means: means, stdevs: stdevs, minvals: minvals, maxvals: maxvals, + seed: seed, seed2: seed2) + } + + } + + /// Transforms a vector of brain.Example protos (as strings) into typed tensors. + /// + /// - Parameters: + /// - serialized: A vector containing a batch of binary serialized Example protos. + /// - names: A vector containing the names of the serialized protos. + /// May contain, for example, table key (descriptive) names for the + /// corresponding serialized protos. 
These are purely useful for debugging + /// purposes, and the presence of values here has no effect on the output. + /// May also be an empty vector if no names are available. + /// If non-empty, this vector must be the same length as "serialized". + /// - sparse_keys: A list of Nsparse string Tensors (scalars). + /// The keys expected in the Examples' features associated with sparse values. + /// - dense_keys: A list of Ndense string Tensors (scalars). + /// The keys expected in the Examples' features associated with dense values. + /// - dense_defaults: A list of Ndense Tensors (some may be empty). + /// dense_defaults[j] provides default values + /// when the example's feature_map lacks dense_key[j]. If an empty Tensor is + /// provided for dense_defaults[j], then the Feature dense_keys[j] is required. + /// The input type is inferred from dense_defaults[j], even when it's empty. + /// If dense_defaults[j] is not empty, and dense_shapes[j] is fully defined, + /// then the shape of dense_defaults[j] must match that of dense_shapes[j]. + /// If dense_shapes[j] has an undefined major dimension (variable strides dense + /// feature), dense_defaults[j] must contain a single element: + /// the padding element. + /// + /// - Attrs: + /// - sparse_types: A list of Nsparse types; the data types of data in each Feature + /// given in sparse_keys. + /// Currently the ParseExample supports DT_FLOAT (FloatList), + /// DT_INT64 (Int64List), and DT_STRING (BytesList). + /// - dense_shapes: A list of Ndense shapes; the shapes of data in each Feature + /// given in dense_keys. + /// The number of elements in the Feature corresponding to dense_key[j] + /// must always equal dense_shapes[j].NumEntries(). + /// If dense_shapes[j] == (D0, D1, ..., DN) then the shape of output + /// Tensor dense_values[j] will be (|serialized|, D0, D1, ..., DN): + /// The dense outputs are just the inputs row-stacked by batch. + /// This works for dense_shapes[j] = (-1, D1, ..., DN). In this case + /// the shape of the output Tensor dense_values[j] will be + /// (|serialized|, M, D1, .., DN), where M is the maximum number of blocks + /// of elements of length D1 * .... * DN, across all minibatch entries + /// in the input. Any minibatch entry with less than M blocks of elements of + /// length D1 * ... * DN will be padded with the corresponding default_value + /// scalar element along the second dimension. + @inlinable @inline(__always) + public static func parseExample< + SparseTypes: TensorGroup, + Tdense: TensorArrayProtocol + >( + serialized: StringTensor, + names: StringTensor, + sparseKeys: [StringTensor], + denseKeys: [StringTensor], + denseDefaults: Tdense, + denseShapes: [TensorShape?] + ) -> ( + sparseIndices: [Tensor], sparseValues: SparseTypes, sparseShapes: [Tensor], + denseValues: Tdense + ) { + _RawTFEager.parseExample( + serialized: serialized, names: names, sparseKeys: sparseKeys, denseKeys: denseKeys, + denseDefaults: denseDefaults, denseShapes: denseShapes) + } + + /// Transforms `input_dataset` containing `Example` protos as vectors of DT_STRING into a dataset of `Tensor` or `SparseTensor` objects representing the parsed features. + /// + /// - Parameter dense_defaults: A dict mapping string keys to `Tensor`s. + /// The keys of the dict must match the dense_keys of the feature. + /// + /// - Attrs: + /// - sparse_keys: A list of string keys in the examples features. + /// The results for these keys will be returned as `SparseTensor` objects. + /// - dense_keys: A list of Ndense string Tensors (scalars). 
+ /// The keys expected in the Examples features associated with dense values. + /// - sparse_types: A list of `DTypes` of the same length as `sparse_keys`. + /// Only `tf.float32` (`FloatList`), `tf.int64` (`Int64List`), + /// and `tf.string` (`BytesList`) are supported. + /// - Tdense: A list of DTypes of the same length as `dense_keys`. + /// Only `tf.float32` (`FloatList`), `tf.int64` (`Int64List`), + /// and `tf.string` (`BytesList`) are supported. + /// + /// - dense_shapes: List of tuples with the same length as `dense_keys`. + /// The shape of the data for each dense feature referenced by `dense_keys`. + /// Required for any input tensors identified by `dense_keys`. Must be + /// either fully defined, or may contain an unknown first dimension. + /// An unknown first dimension means the feature is treated as having + /// a variable number of blocks, and the output shape along this dimension + /// is considered unknown at graph build time. Padding is applied for + /// minibatch elements smaller than the maximum number of blocks for the + /// given feature along this dimension. + /// - output_types: The type list for the return values. + /// - output_shapes: The list of shapes being produced. + @inlinable @inline(__always) + public static func parseExampleDataset( + inputDataset: VariantHandle, + numParallelCalls: Tensor, + denseDefaults: Tdense, + sparseKeys: [String], + denseKeys: [String], + sparseTypes: [TensorDataType], + denseShapes: [TensorShape?], + outputTypes: [TensorDataType], + outputShapes: [TensorShape?], + sloppy: Bool = false, + raggedKeys: [String], + raggedValueTypes: [TensorDataType], + raggedSplitTypes: [TensorDataType] + ) -> VariantHandle { + _RawTFEager.parseExampleDataset( + inputDataset: inputDataset, numParallelCalls: numParallelCalls, + denseDefaults: denseDefaults, sparseKeys: sparseKeys, denseKeys: denseKeys, + sparseTypes: sparseTypes, denseShapes: denseShapes, outputTypes: outputTypes, + outputShapes: outputShapes, sloppy: sloppy, raggedKeys: raggedKeys, + raggedValueTypes: raggedValueTypes, raggedSplitTypes: raggedSplitTypes) + } + + /// Transforms a vector of tf.Example protos (as strings) into typed tensors. + /// + /// - Parameters: + /// - serialized: A scalar or vector containing binary serialized Example protos. + /// - names: A tensor containing the names of the serialized protos. + /// Corresponds 1:1 with the `serialized` tensor. + /// May contain, for example, table key (descriptive) names for the + /// corresponding serialized protos. These are purely useful for debugging + /// purposes, and the presence of values here has no effect on the output. + /// May also be an empty vector if no names are available. + /// If non-empty, this tensor must have the same shape as "serialized". + /// - sparse_keys: Vector of strings. + /// The keys expected in the Examples' features associated with sparse values. + /// - dense_keys: Vector of strings. + /// The keys expected in the Examples' features associated with dense values. + /// - ragged_keys: Vector of strings. + /// The keys expected in the Examples' features associated with ragged values. + /// - dense_defaults: A list of Tensors (some may be empty). Corresponds 1:1 with `dense_keys`. + /// dense_defaults[j] provides default values + /// when the example's feature_map lacks dense_key[j]. If an empty Tensor is + /// provided for dense_defaults[j], then the Feature dense_keys[j] is required. + /// The input type is inferred from dense_defaults[j], even when it's empty. 
+ /// If dense_defaults[j] is not empty, and dense_shapes[j] is fully defined, + /// then the shape of dense_defaults[j] must match that of dense_shapes[j]. + /// If dense_shapes[j] has an undefined major dimension (variable strides dense + /// feature), dense_defaults[j] must contain a single element: + /// the padding element. + /// + /// - Attrs: + /// - num_sparse: The number of sparse keys. + /// - sparse_types: A list of `num_sparse` types; the data types of data in each Feature + /// given in sparse_keys. + /// Currently the ParseExample supports DT_FLOAT (FloatList), + /// DT_INT64 (Int64List), and DT_STRING (BytesList). + /// - ragged_value_types: A list of `num_ragged` types; the data types of data in each Feature + /// given in ragged_keys (where `num_ragged = sparse_keys.size()`). + /// Currently the ParseExample supports DT_FLOAT (FloatList), + /// DT_INT64 (Int64List), and DT_STRING (BytesList). + /// - ragged_split_types: A list of `num_ragged` types; the data types of row_splits in each Feature + /// given in ragged_keys (where `num_ragged = sparse_keys.size()`). + /// May be DT_INT32 or DT_INT64. + /// - dense_shapes: A list of `num_dense` shapes; the shapes of data in each Feature + /// given in dense_keys (where `num_dense = dense_keys.size()`). + /// The number of elements in the Feature corresponding to dense_key[j] + /// must always equal dense_shapes[j].NumEntries(). + /// If dense_shapes[j] == (D0, D1, ..., DN) then the shape of output + /// Tensor dense_values[j] will be (|serialized|, D0, D1, ..., DN): + /// The dense outputs are just the inputs row-stacked by batch. + /// This works for dense_shapes[j] = (-1, D1, ..., DN). In this case + /// the shape of the output Tensor dense_values[j] will be + /// (|serialized|, M, D1, .., DN), where M is the maximum number of blocks + /// of elements of length D1 * .... * DN, across all minibatch entries + /// in the input. Any minibatch entry with less than M blocks of elements of + /// length D1 * ... * DN will be padded with the corresponding default_value + /// scalar element along the second dimension. + @inlinable @inline(__always) + public static func parseExampleV2< + Tdense: TensorArrayProtocol, + SparseTypes: TensorGroup, + RaggedValueTypes: TensorGroup, + RaggedSplitTypes: TensorGroup + >( + serialized: StringTensor, + names: StringTensor, + sparseKeys: StringTensor, + denseKeys: StringTensor, + raggedKeys: StringTensor, + denseDefaults: Tdense, + numSparse: Int64, + denseShapes: [TensorShape?] + ) -> ( + sparseIndices: [Tensor], sparseValues: SparseTypes, sparseShapes: [Tensor], + denseValues: Tdense, raggedValues: RaggedValueTypes, raggedRowSplits: RaggedSplitTypes + ) { + _RawTFEager.parseExampleV2( + serialized: serialized, names: names, sparseKeys: sparseKeys, denseKeys: denseKeys, + raggedKeys: raggedKeys, denseDefaults: denseDefaults, numSparse: numSparse, + denseShapes: denseShapes) + } + + /// Transforms a vector of brain.SequenceExample protos (as strings) into typed tensors. + /// + /// - Parameters: + /// - serialized: A vector containing binary serialized SequenceExample protos. + /// - debug_name: A vector containing the names of the serialized protos. + /// May contain, for example, table key (descriptive) name for the + /// corresponding serialized proto. This is purely useful for debugging + /// purposes, and the presence of values here has no effect on the output. + /// May also be an empty vector if no name is available. 
+ /// - context_dense_defaults: A list of Ncontext_dense Tensors (some may be empty). + /// context_dense_defaults[j] provides default values + /// when the SequenceExample's context map lacks context_dense_key[j]. + /// If an empty Tensor is provided for context_dense_defaults[j], + /// then the Feature context_dense_keys[j] is required. + /// The input type is inferred from context_dense_defaults[j], even when it's + /// empty. If context_dense_defaults[j] is not empty, its shape must match + /// context_dense_shapes[j]. + /// + /// - Attrs: + /// - feature_list_dense_missing_assumed_empty: A vector listing the + /// FeatureList keys which may be missing from the SequenceExamples. If the + /// associated FeatureList is missing, it is treated as empty. By default, + /// any FeatureList not listed in this vector must exist in the SequenceExamples. + /// - context_sparse_keys: A list of Ncontext_sparse string Tensors (scalars). + /// The keys expected in the Examples' features associated with context_sparse + /// values. + /// - context_dense_keys: A list of Ncontext_dense string Tensors (scalars). + /// The keys expected in the SequenceExamples' context features associated with + /// dense values. + /// - feature_list_sparse_keys: A list of Nfeature_list_sparse string Tensors + /// (scalars). The keys expected in the FeatureLists associated with sparse + /// values. + /// - feature_list_dense_keys: A list of Nfeature_list_dense string Tensors (scalars). + /// The keys expected in the SequenceExamples' feature_lists associated + /// with lists of dense values. + /// - context_sparse_types: A list of Ncontext_sparse types; the data types of data in + /// each context Feature given in context_sparse_keys. + /// Currently the ParseSingleSequenceExample supports DT_FLOAT (FloatList), + /// DT_INT64 (Int64List), and DT_STRING (BytesList). + /// - context_dense_shapes: A list of Ncontext_dense shapes; the shapes of data in + /// each context Feature given in context_dense_keys. + /// The number of elements in the Feature corresponding to context_dense_key[j] + /// must always equal context_dense_shapes[j].NumEntries(). + /// The shape of context_dense_values[j] will match context_dense_shapes[j]. + /// - feature_list_sparse_types: A list of Nfeature_list_sparse types; the data types + /// of data in each FeatureList given in feature_list_sparse_keys. + /// Currently the ParseSingleSequenceExample supports DT_FLOAT (FloatList), + /// DT_INT64 (Int64List), and DT_STRING (BytesList). + /// - feature_list_dense_shapes: A list of Nfeature_list_dense shapes; the shapes of + /// data in each FeatureList given in feature_list_dense_keys. + /// The shape of each Feature in the FeatureList corresponding to + /// feature_list_dense_key[j] must always equal + /// feature_list_dense_shapes[j].NumEntries(). + @inlinable @inline(__always) + public static func parseSequenceExample< + ContextSparseTypes: TensorGroup, + TcontextDense: TensorArrayProtocol, + FeatureListDenseTypes: TensorGroup, + FeatureListSparseTypes: TensorGroup + >( + serialized: StringTensor, + debugName: StringTensor, + contextDenseDefaults: TcontextDense, + featureListDenseMissingAssumedEmpty: [String], + contextSparseKeys: [String], + contextDenseKeys: [String], + featureListSparseKeys: [String], + featureListDenseKeys: [String], + ncontextSparse: Int64 = 0, + ncontextDense: Int64 = 0, + nfeatureListSparse: Int64 = 0, + nfeatureListDense: Int64 = 0, + contextDenseShapes: [TensorShape?], + featureListDenseShapes: [TensorShape?] 
+  ) -> (
+    contextSparseIndices: [Tensor<Int64>], contextSparseValues: ContextSparseTypes,
+    contextSparseShapes: [Tensor<Int64>], contextDenseValues: TcontextDense,
+    featureListSparseIndices: [Tensor<Int64>], featureListSparseValues: FeatureListSparseTypes,
+    featureListSparseShapes: [Tensor<Int64>], featureListDenseValues: FeatureListDenseTypes,
+    featureListDenseLengths: [Tensor<Int64>]
+  ) {
+    _RawTFEager.parseSequenceExample(
+      serialized: serialized, debugName: debugName, contextDenseDefaults: contextDenseDefaults,
+      featureListDenseMissingAssumedEmpty: featureListDenseMissingAssumedEmpty,
+      contextSparseKeys: contextSparseKeys, contextDenseKeys: contextDenseKeys,
+      featureListSparseKeys: featureListSparseKeys, featureListDenseKeys: featureListDenseKeys,
+      ncontextSparse: ncontextSparse, ncontextDense: ncontextDense,
+      nfeatureListSparse: nfeatureListSparse, nfeatureListDense: nfeatureListDense,
+      contextDenseShapes: contextDenseShapes, featureListDenseShapes: featureListDenseShapes)
+  }
+
+  /// Transforms a vector of tf.io.SequenceExample protos (as strings) into
+  /// typed tensors.
+  ///
+  /// - Parameters:
+  ///   - serialized: A scalar or vector containing binary serialized SequenceExample protos.
+  ///   - debug_name: A scalar or vector containing the names of the serialized protos.
+  ///     May contain, for example, table key (descriptive) name for the
+  ///     corresponding serialized proto. This is purely useful for debugging
+  ///     purposes, and the presence of values here has no effect on the output.
+  ///     May also be an empty vector if no name is available.
+  ///   - context_sparse_keys: The keys expected in the Examples' features associated with context_sparse
+  ///     values.
+  ///   - context_dense_keys: The keys expected in the SequenceExamples' context features associated with
+  ///     dense values.
+  ///   - context_ragged_keys: The keys expected in the Examples' features associated with context_ragged
+  ///     values.
+  ///   - feature_list_sparse_keys: The keys expected in the FeatureLists associated with sparse values.
+  ///   - feature_list_dense_keys: The keys expected in the SequenceExamples' feature_lists associated
+  ///     with lists of dense values.
+  ///   - feature_list_ragged_keys: The keys expected in the FeatureLists associated with ragged values.
+  ///   - feature_list_dense_missing_assumed_empty: A vector corresponding 1:1 with feature_list_dense_keys, indicating which
+  ///     features may be missing from the SequenceExamples. If the associated
+  ///     FeatureList is missing, it is treated as empty.
+  ///   - context_dense_defaults: A list of Ncontext_dense Tensors (some may be empty).
+  ///     context_dense_defaults[j] provides default values
+  ///     when the SequenceExample's context map lacks context_dense_key[j].
+  ///     If an empty Tensor is provided for context_dense_defaults[j],
+  ///     then the Feature context_dense_keys[j] is required.
+  ///     The input type is inferred from context_dense_defaults[j], even when it's
+  ///     empty. If context_dense_defaults[j] is not empty, its shape must match
+  ///     context_dense_shapes[j].
+  ///
+  /// - Attrs:
+  ///   - context_sparse_types: A list of Ncontext_sparse types; the data types of data in
+  ///     each context Feature given in context_sparse_keys.
+  ///     Currently the ParseSingleSequenceExample supports DT_FLOAT (FloatList),
+  ///     DT_INT64 (Int64List), and DT_STRING (BytesList).
+  ///   - context_ragged_value_types: RaggedTensor.value dtypes for the ragged context features.
+  ///   - context_ragged_split_types: RaggedTensor.row_split dtypes for the ragged context features.
+ /// - context_dense_shapes: A list of Ncontext_dense shapes; the shapes of data in + /// each context Feature given in context_dense_keys. + /// The number of elements in the Feature corresponding to context_dense_key[j] + /// must always equal context_dense_shapes[j].NumEntries(). + /// The shape of context_dense_values[j] will match context_dense_shapes[j]. + /// - feature_list_sparse_types: A list of Nfeature_list_sparse types; the data types + /// of data in each FeatureList given in feature_list_sparse_keys. + /// Currently the ParseSingleSequenceExample supports DT_FLOAT (FloatList), + /// DT_INT64 (Int64List), and DT_STRING (BytesList). + /// - feature_list_ragged_value_types: RaggedTensor.value dtypes for the ragged FeatureList features. + /// - feature_list_ragged_split_types: RaggedTensor.row_split dtypes for the ragged FeatureList features. + /// - feature_list_dense_shapes: A list of Nfeature_list_dense shapes; the shapes of + /// data in each FeatureList given in feature_list_dense_keys. + /// The shape of each Feature in the FeatureList corresponding to + /// feature_list_dense_key[j] must always equal + /// feature_list_dense_shapes[j].NumEntries(). + @inlinable @inline(__always) + public static func parseSequenceExampleV2< + TcontextDense: TensorArrayProtocol, + ContextSparseTypes: TensorGroup, + ContextRaggedValueTypes: TensorGroup, + ContextRaggedSplitTypes: TensorGroup, + FeatureListDenseTypes: TensorGroup, + FeatureListSparseTypes: TensorGroup, + FeatureListRaggedValueTypes: TensorGroup, + FeatureListRaggedSplitTypes: TensorGroup + >( + serialized: StringTensor, + debugName: StringTensor, + contextSparseKeys: StringTensor, + contextDenseKeys: StringTensor, + contextRaggedKeys: StringTensor, + featureListSparseKeys: StringTensor, + featureListDenseKeys: StringTensor, + featureListRaggedKeys: StringTensor, + featureListDenseMissingAssumedEmpty: Tensor, + contextDenseDefaults: TcontextDense, + ncontextSparse: Int64 = 0, + contextDenseShapes: [TensorShape?], + nfeatureListSparse: Int64 = 0, + nfeatureListDense: Int64 = 0, + featureListDenseShapes: [TensorShape?] + ) -> ( + contextSparseIndices: [Tensor], contextSparseValues: ContextSparseTypes, + contextSparseShapes: [Tensor], contextDenseValues: TcontextDense, + contextRaggedValues: ContextRaggedValueTypes, contextRaggedRowSplits: ContextRaggedSplitTypes, + featureListSparseIndices: [Tensor], featureListSparseValues: FeatureListSparseTypes, + featureListSparseShapes: [Tensor], featureListDenseValues: FeatureListDenseTypes, + featureListDenseLengths: [Tensor], + featureListRaggedValues: FeatureListRaggedValueTypes, + featureListRaggedOuterSplits: FeatureListRaggedSplitTypes, + featureListRaggedInnerSplits: FeatureListRaggedSplitTypes + ) { + _RawTFEager.parseSequenceExampleV2( + serialized: serialized, debugName: debugName, contextSparseKeys: contextSparseKeys, + contextDenseKeys: contextDenseKeys, contextRaggedKeys: contextRaggedKeys, + featureListSparseKeys: featureListSparseKeys, featureListDenseKeys: featureListDenseKeys, + featureListRaggedKeys: featureListRaggedKeys, + featureListDenseMissingAssumedEmpty: featureListDenseMissingAssumedEmpty, + contextDenseDefaults: contextDenseDefaults, ncontextSparse: ncontextSparse, + contextDenseShapes: contextDenseShapes, nfeatureListSparse: nfeatureListSparse, + nfeatureListDense: nfeatureListDense, featureListDenseShapes: featureListDenseShapes) + } + + /// Transforms a tf.Example proto (as a string) into typed tensors. 
+ /// + /// - Parameters: + /// - serialized: A vector containing a batch of binary serialized Example protos. + /// - dense_defaults: A list of Tensors (some may be empty), whose length matches + /// the length of `dense_keys`. dense_defaults[j] provides default values + /// when the example's feature_map lacks dense_key[j]. If an empty Tensor is + /// provided for dense_defaults[j], then the Feature dense_keys[j] is required. + /// The input type is inferred from dense_defaults[j], even when it's empty. + /// If dense_defaults[j] is not empty, and dense_shapes[j] is fully defined, + /// then the shape of dense_defaults[j] must match that of dense_shapes[j]. + /// If dense_shapes[j] has an undefined major dimension (variable strides dense + /// feature), dense_defaults[j] must contain a single element: + /// the padding element. + /// + /// - Attrs: + /// - num_sparse: The number of sparse features to be parsed from the example. This + /// must match the lengths of `sparse_keys` and `sparse_types`. + /// - sparse_keys: A list of `num_sparse` strings. + /// The keys expected in the Examples' features associated with sparse values. + /// - dense_keys: The keys expected in the Examples' features associated with dense + /// values. + /// - sparse_types: A list of `num_sparse` types; the data types of data in each + /// Feature given in sparse_keys. + /// Currently the ParseSingleExample op supports DT_FLOAT (FloatList), + /// DT_INT64 (Int64List), and DT_STRING (BytesList). + /// - Tdense: The data types of data in each Feature given in dense_keys. + /// The length of this list must match the length of `dense_keys`. + /// Currently the ParseSingleExample op supports DT_FLOAT (FloatList), + /// DT_INT64 (Int64List), and DT_STRING (BytesList). + /// - dense_shapes: The shapes of data in each Feature given in dense_keys. + /// The length of this list must match the length of `dense_keys`. The + /// number of elements in the Feature corresponding to dense_key[j] must + /// always equal dense_shapes[j].NumEntries(). If dense_shapes[j] == + /// (D0, D1, ..., DN) then the shape of output Tensor dense_values[j] + /// will be (D0, D1, ..., DN): In the case dense_shapes[j] = (-1, D1, + /// ..., DN), the shape of the output Tensor dense_values[j] will be (M, + /// D1, .., DN), where M is the number of blocks of elements of length + /// D1 * .... * DN, in the input. + @inlinable @inline(__always) + public static func parseSingleExample< + SparseTypes: TensorGroup, + Tdense: TensorArrayProtocol + >( + serialized: StringTensor, + denseDefaults: Tdense, + numSparse: Int64, + sparseKeys: [String], + denseKeys: [String], + denseShapes: [TensorShape?] + ) -> ( + sparseIndices: [Tensor], sparseValues: SparseTypes, sparseShapes: [Tensor], + denseValues: Tdense + ) { + _RawTFEager.parseSingleExample( + serialized: serialized, denseDefaults: denseDefaults, numSparse: numSparse, + sparseKeys: sparseKeys, denseKeys: denseKeys, denseShapes: denseShapes) + } + + /// Transforms a scalar brain.SequenceExample proto (as strings) into typed tensors. + /// + /// - Parameters: + /// - serialized: A scalar containing a binary serialized SequenceExample proto. + /// - feature_list_dense_missing_assumed_empty: A vector listing the + /// FeatureList keys which may be missing from the SequenceExample. If the + /// associated FeatureList is missing, it is treated as empty. By default, + /// any FeatureList not listed in this vector must exist in the SequenceExample. 
+ /// - context_sparse_keys: A list of Ncontext_sparse string Tensors (scalars). + /// The keys expected in the Examples' features associated with context_sparse + /// values. + /// - context_dense_keys: A list of Ncontext_dense string Tensors (scalars). + /// The keys expected in the SequenceExamples' context features associated with + /// dense values. + /// - feature_list_sparse_keys: A list of Nfeature_list_sparse string Tensors + /// (scalars). The keys expected in the FeatureLists associated with sparse + /// values. + /// - feature_list_dense_keys: A list of Nfeature_list_dense string Tensors (scalars). + /// The keys expected in the SequenceExamples' feature_lists associated + /// with lists of dense values. + /// - context_dense_defaults: A list of Ncontext_dense Tensors (some may be empty). + /// context_dense_defaults[j] provides default values + /// when the SequenceExample's context map lacks context_dense_key[j]. + /// If an empty Tensor is provided for context_dense_defaults[j], + /// then the Feature context_dense_keys[j] is required. + /// The input type is inferred from context_dense_defaults[j], even when it's + /// empty. If context_dense_defaults[j] is not empty, its shape must match + /// context_dense_shapes[j]. + /// - debug_name: A scalar containing the name of the serialized proto. + /// May contain, for example, table key (descriptive) name for the + /// corresponding serialized proto. This is purely useful for debugging + /// purposes, and the presence of values here has no effect on the output. + /// May also be an empty scalar if no name is available. + /// + /// - Attrs: + /// - context_sparse_types: A list of Ncontext_sparse types; the data types of data in + /// each context Feature given in context_sparse_keys. + /// Currently the ParseSingleSequenceExample supports DT_FLOAT (FloatList), + /// DT_INT64 (Int64List), and DT_STRING (BytesList). + /// - context_dense_shapes: A list of Ncontext_dense shapes; the shapes of data in + /// each context Feature given in context_dense_keys. + /// The number of elements in the Feature corresponding to context_dense_key[j] + /// must always equal context_dense_shapes[j].NumEntries(). + /// The shape of context_dense_values[j] will match context_dense_shapes[j]. + /// - feature_list_sparse_types: A list of Nfeature_list_sparse types; the data types + /// of data in each FeatureList given in feature_list_sparse_keys. + /// Currently the ParseSingleSequenceExample supports DT_FLOAT (FloatList), + /// DT_INT64 (Int64List), and DT_STRING (BytesList). + /// - feature_list_dense_shapes: A list of Nfeature_list_dense shapes; the shapes of + /// data in each FeatureList given in feature_list_dense_keys. + /// The shape of each Feature in the FeatureList corresponding to + /// feature_list_dense_key[j] must always equal + /// feature_list_dense_shapes[j].NumEntries(). + @inlinable @inline(__always) + public static func parseSingleSequenceExample< + ContextSparseTypes: TensorGroup, + TcontextDense: TensorArrayProtocol, + FeatureListDenseTypes: TensorGroup, + FeatureListSparseTypes: TensorGroup + >( + serialized: StringTensor, + featureListDenseMissingAssumedEmpty: StringTensor, + contextSparseKeys: [StringTensor], + contextDenseKeys: [StringTensor], + featureListSparseKeys: [StringTensor], + featureListDenseKeys: [StringTensor], + contextDenseDefaults: TcontextDense, + debugName: StringTensor, + contextDenseShapes: [TensorShape?], + featureListDenseShapes: [TensorShape?] 
+ ) -> ( + contextSparseIndices: [Tensor], contextSparseValues: ContextSparseTypes, + contextSparseShapes: [Tensor], contextDenseValues: TcontextDense, + featureListSparseIndices: [Tensor], featureListSparseValues: FeatureListSparseTypes, + featureListSparseShapes: [Tensor], featureListDenseValues: FeatureListDenseTypes + ) { + _RawTFEager.parseSingleSequenceExample( + serialized: serialized, + featureListDenseMissingAssumedEmpty: featureListDenseMissingAssumedEmpty, + contextSparseKeys: contextSparseKeys, contextDenseKeys: contextDenseKeys, + featureListSparseKeys: featureListSparseKeys, featureListDenseKeys: featureListDenseKeys, + contextDenseDefaults: contextDenseDefaults, debugName: debugName, + contextDenseShapes: contextDenseShapes, featureListDenseShapes: featureListDenseShapes) + } + + /// Transforms a serialized tensorflow.TensorProto proto into a Tensor. + /// + /// - Parameter serialized: A scalar string containing a serialized TensorProto proto. + /// + /// - Attr out_type: The type of the serialized tensor. The provided type must match the + /// type of the serialized tensor and no implicit conversion will take place. + /// + /// - Output output: A Tensor of type `out_type`. + @inlinable @inline(__always) + public static func parseTensor( + serialized: StringTensor + ) -> Tensor { + _RawTFEager.parseTensor(serialized: serialized) + } + + /// returns `f(inputs)`, where `f`'s body is placed and partitioned. + /// + /// - Parameter args: A list of input tensors. + /// + /// - Attrs: + /// - Tin: A list of input types. + /// - Tout: A list of output types. + /// - f: A function that takes 'args', a list of tensors, and returns 'output', + /// another list of tensors. Input and output types are specified by 'Tin' + /// and 'Tout'. The function body of f will be placed and partitioned across + /// devices, setting this op apart from the regular Call op. + /// + /// - Output output: A list of return values. + @inlinable @inline(__always) + public static func partitionedCall< + Tin: TensorArrayProtocol, + Tout: TensorGroup, + FIn: TensorGroup, + FOut: TensorGroup + >( + args: Tin, + f: (FIn) -> FOut, + config: String, + configProto: String, + executorType: String + ) -> Tout { + _RawTFEager.partitionedCall( + args: args, f: f, config: config, configProto: configProto, executorType: executorType) + } + + /// A placeholder op for a value that will be fed into the computation. + /// + /// N.B. This operation will fail with an error if it is executed. It is + /// intended as a way to represent a value that will always be fed, and to + /// provide attrs that enable the fed value to be checked at runtime. + /// + /// - Attrs: + /// - dtype: The type of elements in the tensor. + /// - shape: (Optional) The shape of the tensor. If the shape has 0 dimensions, the + /// shape is unconstrained. + /// + /// - Output output: A placeholder tensor that must be replaced using the feed mechanism. + @inlinable @inline(__always) + public static func placeholder( + shape: TensorShape? + ) -> Tensor { + _RawTFEager.placeholder(shape: shape) + } + + /// A placeholder op for a value that will be fed into the computation. + /// + /// N.B. This operation will fail with an error if it is executed. It is + /// intended as a way to represent a value that will always be fed, and to + /// provide attrs that enable the fed value to be checked at runtime. + /// + /// - Attrs: + /// - dtype: The type of elements in the tensor. + /// - shape: The shape of the tensor. 
The shape can be any partially-specified
+  /// shape. To be unconstrained, pass in a shape with unknown rank.
+  ///
+  /// - Output output: A placeholder tensor that must be replaced using the feed mechanism.
+  @inlinable @inline(__always)
+  public static func placeholderV2<Dtype: TensorFlowScalar>(
+    shape: TensorShape?
+  ) -> Tensor<Dtype> {
+    _RawTFEager.placeholderV2(shape: shape)
+  }
+
+  /// A placeholder op that passes through `input` when its output is not fed.
+  ///
+  /// - Parameter input: The default value to produce when `output` is not fed.
+  ///
+  /// - Attrs:
+  ///   - dtype: The type of elements in the tensor.
+  ///   - shape: The (possibly partial) shape of the tensor.
+  ///
+  /// - Output output: A placeholder tensor that defaults to `input` if it is not fed.
+  @inlinable @inline(__always)
+  public static func placeholderWithDefault<Dtype: TensorFlowScalar>(
+    _ input: Tensor<Dtype>,
+    shape: TensorShape?
+  ) -> Tensor<Dtype> {
+    switch input.handle.backend {
+    case .XLA:
+      let output_device = input.device
+      let input = Tensor(copying: input, to: .defaultTFEager)
+      return Tensor(
+        copying: _RawTFEager.placeholderWithDefault(input, shape: shape), to: output_device)
+    case .TF_EAGER:
+      return _RawTFEager.placeholderWithDefault(input, shape: shape)
+    }
+  }
+
+  /// Compute the polygamma function \\(\psi^{(n)}(x)\\).
+  ///
+  /// The polygamma function is defined as:
+  ///
+  /// \\(\psi^{(a)}(x) = \frac{d^a}{dx^a} \psi(x)\\)
+  ///
+  /// where \\(\psi(x)\\) is the digamma function.
+  /// The polygamma function is defined only for non-negative integer orders \\(a\\).
+  @inlinable @inline(__always)
+  public static func polygamma<T: FloatingPoint & TensorFlowScalar>(
+    _ a: Tensor<T>,
+    _ x: Tensor<T>
+  ) -> Tensor<T> {
+    switch commonBackend(a.handle.backend, x.handle.backend) {
+    case .XLA:
+      let output_device = x.device
+      let a = Tensor(copying: a, to: .defaultTFEager)
+      let x = Tensor(copying: x, to: .defaultTFEager)
+      return Tensor(copying: _RawTFEager.polygamma(a, x), to: output_device)
+    case .TF_EAGER:
+      return _RawTFEager.polygamma(a, x)
+    }
+  }
+
+  @inlinable @inline(__always)
+  public static func polymorphic<T: TensorFlowScalar>(
+    _ a: Tensor<T>
+  ) -> Tensor<T> {
+    switch a.handle.backend {
+    case .XLA:
+      let output_device = a.device
+      let a = Tensor(copying: a, to: .defaultTFEager)
+      return Tensor(copying: _RawTFEager.polymorphic(a), to: output_device)
+    case .TF_EAGER:
+      return _RawTFEager.polymorphic(a)
+    }
+  }
+
+  @inlinable @inline(__always)
+  public static func polymorphicDefaultOut<T: TensorFlowScalar>() -> Tensor<T> {
+    _RawTFEager.polymorphicDefaultOut()
+  }
+
+  @inlinable @inline(__always)
+  public static func polymorphicOut<T: TensorFlowScalar>() -> Tensor<T> {
+    _RawTFEager.polymorphicOut()
+  }
+
+  /// Computes element-wise population count (a.k.a. popcount, bitsum, bitcount).
+  ///
+  /// For each entry in `x`, calculates the number of `1` (on) bits in the binary
+  /// representation of that entry.
+  ///
+  /// **NOTE**: It is more efficient to first `tf.bitcast` your tensors into
+  /// `int32` or `int64` and perform the bitcount on the result, than to feed in
+  /// 8- or 16-bit inputs and then aggregate the resulting counts.
+  @inlinable @inline(__always)
+  public static func populationCount<T: TensorFlowInteger>(
+    _ x: Tensor<T>
+  ) -> Tensor<UInt8> {
+    switch x.handle.backend {
+    case .XLA:
+      let output_device = x.device
+      let x = Tensor(copying: x, to: .defaultTFEager)
+      return Tensor(copying: _RawTFEager.populationCount(x), to: output_device)
+    case .TF_EAGER:
+      return _RawTFEager.populationCount(x)
+    }
+  }
+
+  /// Computes the power of one value to another.
+  ///
+  /// Given a tensor `x` and a tensor `y`, this operation computes \\(x^y\\) for
+  /// corresponding elements in `x` and `y`. For example:
+  ///
+  /// ```
+  /// # tensor 'x' is [[2, 2], [3, 3]]
+  /// # tensor 'y' is [[8, 16], [2, 3]]
+  /// tf.pow(x, y) ==> [[256, 65536], [9, 27]]
+  /// ```
+  @inlinable @inline(__always)
+  public static func pow<T: TensorFlowNumeric>(
+    _ x: Tensor<T>,
+    _ y: Tensor<T>
+  ) -> Tensor<T> {
+    switch commonBackend(x.handle.backend, y.handle.backend) {
+    case .XLA:
+      return _RawXLA.pow(x, y)
+    case .TF_EAGER:
+      return _RawTFEager.pow(x, y)
+    }
+  }
+
+  /// Creates a dataset that asynchronously prefetches elements from `input_dataset`.
+  ///
+  /// - Parameter buffer_size: The maximum number of elements to buffer in an iterator over
+  ///   this dataset.
+  @inlinable @inline(__always)
+  public static func prefetchDataset(
+    inputDataset: VariantHandle,
+    bufferSize: Tensor<Int64>,
+    outputTypes: [TensorDataType],
+    outputShapes: [TensorShape?],
+    slackPeriod: Int64 = 0,
+    legacyAutotune: Bool = true
+  ) -> VariantHandle {
+    _RawTFEager.prefetchDataset(
+      inputDataset: inputDataset, bufferSize: bufferSize, outputTypes: outputTypes,
+      outputShapes: outputShapes, slackPeriod: slackPeriod, legacyAutotune: legacyAutotune)
+  }
+
+  /// An op which linearizes one Tensor value to an opaque variant tensor.
+  ///
+  /// - Parameter input: A tensor that will be linearized.
+  ///
+  /// - Attrs:
+  ///   - dtype: The type of elements in the tensor.
+  ///   - shape: The shape of the tensor.
+  ///   - layout: A vector holding the requested layout in minor-to-major sequence. If a layout
+  ///     attribute is passed but its values are all -1 the layout will be computed by
+  ///     the infeed operation.
+  @inlinable @inline(__always)
+  public static func prelinearize<Dtype: TensorFlowScalar>(
+    _ input: Tensor<Dtype>,
+    shape: TensorShape?,
+    layout: [Int32]
+  ) -> VariantHandle {
+    _RawTFEager.prelinearize(input, shape: shape, layout: layout)
+  }
+
+  /// An op which linearizes multiple Tensor values to an opaque variant tensor.
+  ///
+  /// - Parameter inputs: A list of tensors that will be provided using the infeed mechanism.
+  ///
+  /// - Attrs:
+  ///   - dtypes: The element types of each element in `inputs`.
+  ///   - shapes: The shapes of each tensor in `inputs`.
+  ///   - layouts: A vector holding the requested layout in minor-to-major sequence for all the
+  ///     tuple shapes in the order the shapes appear in the "shapes" input. The layout
+  ///     elements for a sub-shape can be set to -1 in which case the corresponding layout
+  ///     will be computed by the infeed operation.
+  @inlinable @inline(__always)
+  public static func prelinearizeTuple<Dtypes: TensorArrayProtocol>(
+    inputs: Dtypes,
+    shapes: [TensorShape?],
+    layouts: [Int32]
+  ) -> VariantHandle {
+    _RawTFEager.prelinearizeTuple(inputs: inputs, shapes: shapes, layouts: layouts)
+  }
+
+  /// An identity op that triggers an error if a gradient is requested.
+  ///
+  /// When executed in a graph, this op outputs its input tensor as-is.
+  ///
+  /// When building ops to compute gradients, the TensorFlow gradient system
+  /// will return an error when trying to look up the gradient of this op,
+  /// because no gradient must ever be registered for this function. This
+  /// op exists to prevent subtle bugs from silently returning unimplemented
+  /// gradients in some corner cases.
+  ///
+  /// - Parameter input: any tensor.
+  ///
+  /// - Attr message: Will be printed in the error when anyone tries to differentiate
+  ///   this operation.
+  ///
+  /// - Output output: the same input tensor.
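+  ///
+  /// A minimal usage sketch (`logits` and the message are illustrative):
+  ///
+  /// ```swift
+  /// let guarded = _Raw.preventGradient(logits, message: "gradient is intentionally undefined")
+  /// ```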
+ @inlinable @inline(__always) + public static func preventGradient( + _ input: Tensor, + message: String + ) -> Tensor { + switch input.handle.backend { + case .XLA: + let output_device = input.device + let input = Tensor(copying: input, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.preventGradient(input, message: message), to: output_device) + case .TF_EAGER: + return _RawTFEager.preventGradient(input, message: message) + } + + } + + /// Prints a list of tensors. + /// + /// Passes `input` through to `output` and prints `data` when evaluating. + /// + /// - Parameters: + /// - input: The tensor passed to `output` + /// - data: A list of tensors to print out when op is evaluated. + /// + /// - Attrs: + /// - message: A string, prefix of the error message. + /// - first_n: Only log `first_n` number of times. -1 disables logging. + /// - summarize: Only print this many entries of each tensor. + /// + /// - Output output: = The unmodified `input` tensor + @inlinable @inline(__always) + public static func print< + T: TensorFlowScalar, + U: TensorArrayProtocol + >( + _ input: Tensor, + data: U, + message: String, + firstN: Int64 = -1, + summarize: Int64 = 3 + ) -> Tensor { + switch input.handle.backend { + case .XLA: + let output_device = input.device + let input = Tensor(copying: input, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.print( + input, data: data, message: message, firstN: firstN, summarize: summarize), + to: output_device) + case .TF_EAGER: + return _RawTFEager.print( + input, data: data, message: message, firstN: firstN, summarize: summarize) + } + + } + + /// Prints a string scalar. + /// + /// Prints a string scalar to the desired output_stream. + /// + /// - Parameter input: The string scalar to print. + /// + /// - Attr output_stream: A string specifying the output stream or logging level to print to. + @inlinable @inline(__always) + public static func printV2( + _ input: StringTensor, + outputStream: String = "stderr", + end: String = "\n" + ) { + _RawTFEager.printV2(input, outputStream: outputStream, end: end) + } + + /// A queue that produces elements sorted by the first component value. + /// + /// Note that the PriorityQueue requires the first component of any element + /// to be a scalar int64, in addition to the other elements declared by + /// component_types. Therefore calls to Enqueue and EnqueueMany (resp. Dequeue + /// and DequeueMany) on a PriorityQueue will all require (resp. output) one extra + /// entry in their input (resp. output) lists. + /// + /// - Attrs: + /// - component_types: The type of each component in a value. + /// - shapes: The shape of each component in a value. The length of this attr must + /// be either 0 or the same as the length of component_types. If the length of + /// this attr is 0, the shapes of queue elements are not constrained, and + /// only one element may be dequeued at a time. + /// - capacity: The upper bound on the number of elements in this queue. + /// Negative numbers mean no limit. + /// - container: If non-empty, this queue is placed in the given container. + /// Otherwise, a default container is used. + /// - shared_name: If non-empty, this queue will be shared under the given name + /// across multiple sessions. + /// + /// - Output handle: The handle to the queue. 
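+  ///
+  /// A minimal creation sketch (the single float component is illustrative;
+  /// the scalar int64 priority travels as an extra leading entry at
+  /// enqueue/dequeue time, not in `componentTypes`):
+  ///
+  /// ```swift
+  /// let queue = _Raw.priorityQueueV2(
+  ///   componentTypes: [Float.tensorFlowDataType],
+  ///   shapes: [TensorShape([])],
+  ///   container: "", sharedName: "")
+  /// ```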
+ @inlinable @inline(__always) + public static func priorityQueueV2( + componentTypes: [TensorDataType], + shapes: [TensorShape?], + capacity: Int64 = -1, + container: String, + sharedName: String + ) -> ResourceHandle { + _RawTFEager.priorityQueueV2( + componentTypes: componentTypes, shapes: shapes, capacity: capacity, container: container, + sharedName: sharedName) + } + + /// Creates a dataset that uses a custom thread pool to compute `input_dataset`. + /// + /// - Parameter num_threads: Identifies the number of threads to use for the private threadpool. + @inlinable @inline(__always) + public static func privateThreadPoolDataset( + inputDataset: VariantHandle, + numThreads: Tensor, + outputTypes: [TensorDataType], + outputShapes: [TensorShape?] + ) -> VariantHandle { + _RawTFEager.privateThreadPoolDataset( + inputDataset: inputDataset, numThreads: numThreads, outputTypes: outputTypes, + outputShapes: outputShapes) + } + + /// Computes the product of elements across dimensions of a tensor. + /// + /// Reduces `input` along the dimensions given in `axis`. Unless + /// `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in + /// `axis`. If `keep_dims` is true, the reduced dimensions are + /// retained with length 1. + /// + /// - Parameters: + /// - input: The tensor to reduce. + /// - reduction_indices: The dimensions to reduce. Must be in the range + /// `[-rank(input), rank(input))`. + /// + /// - Attr keep_dims: If true, retain reduced dimensions with length 1. + /// + /// - Output output: The reduced tensor. + @inlinable @inline(__always) + public static func prod< + T: TensorFlowNumeric, + Tidx: TensorFlowIndex + >( + _ input: Tensor, + reductionIndices: Tensor, + keepDims: Bool = false + ) -> Tensor { + switch commonBackend(input.handle.backend, reductionIndices.handle.backend) { + case .XLA: + return _RawXLA.prod(input, reductionIndices: reductionIndices, keepDims: keepDims) + case .TF_EAGER: + return _RawTFEager.prod(input, reductionIndices: reductionIndices, keepDims: keepDims) + } + + } + + /// Invokes a python function to compute func(input)->output. + /// + /// This operation is considered stateful. For a stateless version, see + /// PyFuncStateless. + /// + /// - Parameter input: List of Tensors that will provide input to the Op. + /// + /// - Attrs: + /// - token: A token representing a registered python function in this address space. + /// - Tin: Data types of the inputs to the op. + /// - Tout: Data types of the outputs from the op. + /// The length of the list specifies the number of outputs. + /// + /// - Output output: The outputs from the Op. + @inlinable @inline(__always) + public static func pyFunc< + Tin: TensorArrayProtocol, + Tout: TensorGroup + >( + _ input: Tin, + token: String + ) -> Tout { + _RawTFEager.pyFunc(input, token: token) + } + + /// A stateless version of PyFunc. + @inlinable @inline(__always) + public static func pyFuncStateless< + Tin: TensorArrayProtocol, + Tout: TensorGroup + >( + _ input: Tin, + token: String + ) -> Tout { + _RawTFEager.pyFuncStateless(input, token: token) + } + + /// Computes the QR decompositions of one or more matrices. + /// + /// Computes the QR decomposition of each inner matrix in `tensor` such that + /// `tensor[..., :, :] = q[..., :, :] * r[..., :,:])` + /// + /// ```python + /// # a is a tensor. + /// # q is a tensor of orthonormal matrices. + /// # r is a tensor of upper triangular matrices. 
+ /// q, r = qr(a) + /// q_full, r_full = qr(a, full_matrices=True) + /// ``` + /// + /// - Parameter input: A tensor of shape `[..., M, N]` whose inner-most 2 dimensions + /// form matrices of size `[M, N]`. Let `P` be the minimum of `M` and `N`. + /// + /// - Attr full_matrices: If true, compute full-sized `q` and `r`. If false + /// (the default), compute only the leading `P` columns of `q`. + /// + /// - Outputs: + /// - q: Orthonormal basis for range of `a`. If `full_matrices` is `False` then + /// shape is `[..., M, P]`; if `full_matrices` is `True` then shape is + /// `[..., M, M]`. + /// - r: Triangular factor. If `full_matrices` is `False` then shape is + /// `[..., P, N]`. If `full_matrices` is `True` then shape is `[..., M, N]`. + @inlinable @inline(__always) + public static func qr( + _ input: Tensor, + fullMatrices: Bool = false + ) -> (q: Tensor, r: Tensor) { + switch input.handle.backend { + case .XLA: + return _RawXLA.qr(input, fullMatrices: fullMatrices) + case .TF_EAGER: + return _RawTFEager.qr(input, fullMatrices: fullMatrices) + } + + } + + /// Use QuantizeAndDequantizeV2 instead. + @inlinable @inline(__always) + public static func quantizeAndDequantize( + _ input: Tensor, + signedInput: Bool = true, + numBits: Int64 = 8, + rangeGiven: Bool = false, + inputMin: Double = 0, + inputMax: Double = 0 + ) -> Tensor { + switch input.handle.backend { + case .XLA: + let output_device = input.device + let input = Tensor(copying: input, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.quantizeAndDequantize( + input, signedInput: signedInput, numBits: numBits, rangeGiven: rangeGiven, + inputMin: inputMin, inputMax: inputMax), to: output_device) + case .TF_EAGER: + return _RawTFEager.quantizeAndDequantize( + input, signedInput: signedInput, numBits: numBits, rangeGiven: rangeGiven, + inputMin: inputMin, inputMax: inputMax) + } + + } + + /// Quantizes then dequantizes a tensor. + /// + /// This op simulates the precision loss from the quantized forward pass by: + /// + /// 1. Quantizing the tensor to fixed point numbers, which should match the target + /// quantization method when it is used in inference. + /// 2. Dequantizing it back to floating point numbers for the following ops, most + /// likely matmul. + /// + /// There are different ways to quantize. This version uses only scaling, so 0.0 + /// maps to 0. + /// + /// From the specified 'num_bits' in the quantized output type, it determines + /// minimum and maximum representable quantized values. + /// + /// e.g. + /// + /// * [-128, 127] for signed, num_bits = 8, or + /// * [0, 255] for unsigned, num_bits = 8. + /// + /// If range_given == False, the initial input_min, input_max will be determined + /// automatically as the minimum and maximum values in the input tensor, otherwise + /// the specified values of input_min, input_max are used. + /// + /// Note: If the input_min, input_max are specified, they do not need to equal the + /// actual minimum and maximum values in the tensor. e.g. in some cases it may be + /// beneficial to specify these values such that the low probability extremes of the + /// input distribution are clipped. + /// + /// This op determines the maximum scale_factor that would map the initial + /// [input_min, input_max] range to a range that lies within the representable + /// quantized range. + /// + /// It determines the scale from one of input_min and input_max, then updates the + /// other one to maximize the representable range. + /// + /// e.g. 
+  ///
+  /// *   if the output is signed, num_bits = 8, [input_min, input_max] = [-10.0,
+  ///     5.0]: it would use a scale_factor of -128 / -10.0 = 12.8. In this case, it
+  ///     would update input_max to be 127 / 12.8 = 9.921875.
+  /// *   if the output is signed, num_bits = 8, [input_min, input_max] = [-10.0,
+  ///     10.0]: it would use a scale_factor of 127 / 10.0 = 12.7. In this case, it
+  ///     would update input_min to be -128.0 / 12.7 = -10.07874.
+  /// *   if the output is unsigned, input_min is forced to be 0, and only the
+  ///     specified input_max is used.
+  ///
+  /// After determining the scale_factor and updating the input range, it applies the
+  /// following to each value in the 'input' tensor.
+  ///
+  /// output = round(clamp(value, input_min, input_max) * scale_factor) / scale_factor.
+  ///
+  /// The above round function rounds the value based on the given round_mode.
+  ///
+  /// - Parameters:
+  ///   - input: Tensor to quantize and then dequantize.
+  ///   - input_min: If `range_given == True`, this specifies the minimum input value that needs to
+  ///     be represented, otherwise it is determined from the min value of the `input`
+  ///     tensor.
+  ///   - input_max: If `range_given == True`, this specifies the maximum input value that needs to
+  ///     be represented, otherwise it is determined from the max value of the `input`
+  ///     tensor.
+  ///
+  /// - Attrs:
+  ///   - signed_input: Whether the quantization is signed or unsigned. (Actually this parameter should
+  ///     have been called `signed_output`.)
+  ///   - num_bits: The bitwidth of the quantization.
+  ///   - range_given: Whether the range is given or should be determined from the `input` tensor.
+  ///   - round_mode: The 'round_mode' attribute controls which rounding tie-breaking algorithm is
+  ///     used when rounding float values to their quantized equivalents. The following
+  ///     rounding modes are currently supported:
+  ///
+  ///     *   HALF_TO_EVEN: this is the default round_mode.
+  ///     *   HALF_UP: round towards positive. In this mode 7.5 rounds up to 8 and -7.5
+  ///         rounds up to -7.
+  ///
+  ///   - narrow_range: If True, then the absolute value of the quantized minimum value is the same as
+  ///     the quantized maximum value, instead of 1 greater.
+  ///     i.e. for 8 bit quantization, the minimum value is -127 instead of -128.
+  ///   - axis: If specified, this axis is treated as a channel or slice axis, and a separate
+  ///     quantization range is used for each channel or slice along this axis.
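+  ///
+  /// The scale-factor arithmetic above can be checked with a few lines of plain
+  /// Swift (an illustrative sketch, not an op call; the constant names are ours):
+  ///
+  /// ```swift
+  /// let (inputMin, inputMax) = (-10.0, 10.0)
+  /// let numBits = 8
+  /// let maxQ = Double((1 << (numBits - 1)) - 1)  // 127 for signed 8-bit
+  /// let minQ = -Double(1 << (numBits - 1))       // -128 for signed 8-bit
+  /// // The smaller candidate wins, so neither endpoint overflows the range.
+  /// let scaleFactor = min(maxQ / inputMax, minQ / inputMin)  // min(12.7, 12.8)
+  /// let adjustedInputMin = minQ / scaleFactor                // -10.07874...
+  /// ```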
+  @inlinable @inline(__always)
+  public static func quantizeAndDequantizeV2<T: FloatingPoint & TensorFlowScalar>(
+    _ input: Tensor<T>,
+    inputMin: Tensor<T>,
+    inputMax: Tensor<T>,
+    signedInput: Bool = true,
+    numBits: Int64 = 8,
+    rangeGiven: Bool = false,
+    roundMode: RoundMode = .halfToEven,
+    narrowRange: Bool = false,
+    axis: Int64 = -1
+  ) -> Tensor<T> {
+    switch commonBackend(
+      commonBackend(input.handle.backend, inputMin.handle.backend), inputMax.handle.backend)
+    {
+    case .XLA:
+      let output_device = inputMax.device
+      let input = Tensor(copying: input, to: .defaultTFEager)
+      let inputMin = Tensor(copying: inputMin, to: .defaultTFEager)
+      let inputMax = Tensor(copying: inputMax, to: .defaultTFEager)
+      return Tensor(
+        copying: _RawTFEager.quantizeAndDequantizeV2(
+          input, inputMin: inputMin, inputMax: inputMax, signedInput: signedInput,
+          numBits: numBits, rangeGiven: rangeGiven, roundMode: roundMode,
+          narrowRange: narrowRange, axis: axis), to: output_device)
+    case .TF_EAGER:
+      return _RawTFEager.quantizeAndDequantizeV2(
+        input, inputMin: inputMin, inputMax: inputMax, signedInput: signedInput, numBits: numBits,
+        rangeGiven: rangeGiven, roundMode: roundMode, narrowRange: narrowRange, axis: axis)
+    }
+
+  }
+
+  /// Quantizes then dequantizes a tensor.
+  ///
+  /// This is almost identical to QuantizeAndDequantizeV2, except that num_bits is a
+  /// tensor, so its value can change during training.
+  @inlinable @inline(__always)
+  public static func quantizeAndDequantizeV3<T: FloatingPoint & TensorFlowScalar>(
+    _ input: Tensor<T>,
+    inputMin: Tensor<T>,
+    inputMax: Tensor<T>,
+    numBits: Tensor<Int32>,
+    signedInput: Bool = true,
+    rangeGiven: Bool = true,
+    narrowRange: Bool = false,
+    axis: Int64 = -1
+  ) -> Tensor<T> {
+    switch commonBackend(
+      commonBackend(
+        commonBackend(input.handle.backend, inputMin.handle.backend), inputMax.handle.backend),
+      numBits.handle.backend)
+    {
+    case .XLA:
+      let output_device = numBits.device
+      let input = Tensor(copying: input, to: .defaultTFEager)
+      let inputMin = Tensor(copying: inputMin, to: .defaultTFEager)
+      let inputMax = Tensor(copying: inputMax, to: .defaultTFEager)
+      let numBits = Tensor(copying: numBits, to: .defaultTFEager)
+      return Tensor(
+        copying: _RawTFEager.quantizeAndDequantizeV3(
+          input, inputMin: inputMin, inputMax: inputMax, numBits: numBits,
+          signedInput: signedInput, rangeGiven: rangeGiven, narrowRange: narrowRange, axis: axis),
+        to: output_device)
+    case .TF_EAGER:
+      return _RawTFEager.quantizeAndDequantizeV3(
+        input, inputMin: inputMin, inputMax: inputMax, numBits: numBits, signedInput: signedInput,
+        rangeGiven: rangeGiven, narrowRange: narrowRange, axis: axis)
+    }
+
+  }
+
+  /// Converts the quantized 'input' tensor into a lower-precision 'output'.
+  ///
+  /// It uses the actual distribution of the values to maximize the usage of the
+  /// lower bit depth and adjusts the output min and max ranges accordingly.
+  ///
+  /// [input_min, input_max] are scalar floats that specify the range for the float
+  /// interpretation of the 'input' data. For example, if input_min is -1.0f and
+  /// input_max is 1.0f, and we are dealing with quint16 quantized data, then a 0
+  /// value in the 16-bit data should be interpreted as -1.0f, and a 65535 means 1.0f.
+  ///
+  /// This operator tries to squeeze as much precision as possible into an output with
+  /// a lower bit depth by calculating the actual min and max values found in the
+  /// data. For example, maybe that quint16 input has no values lower than 16,384 and
+  /// none higher than 49,152. That means only half the range is actually needed, all
+  /// the float interpretations are between -0.5f and 0.5f, so if we want to compress
+  /// the data into a quint8 output, we can use that range rather than the theoretical
+  /// -1.0f to 1.0f that is suggested by the input min and max.
+  ///
+  /// In practice, this is most useful for taking output from operations like
+  /// QuantizedMatMul that can produce higher bit-depth outputs than their inputs and
+  /// may have large potential output ranges, but in practice have a distribution of
+  /// input values that only uses a small fraction of the possible range. By feeding
+  /// that output into this operator, we can reduce it from 32 bits down to 8 with
+  /// minimal loss of accuracy.
+  ///
+  /// - Parameters:
+  ///   - input_min: The float value that the minimum quantized input value represents.
+  ///   - input_max: The float value that the maximum quantized input value represents.
+  ///
+  /// - Attrs:
+  ///   - Tinput: The type of the input.
+  ///   - out_type: The type of the output. Should be a lower bit depth than Tinput.
+  ///
+  /// - Outputs:
+  ///   - output_min: The float value that the minimum quantized output value represents.
+  ///   - output_max: The float value that the maximum quantized output value represents.
+  @inlinable @inline(__always)
+  public static func quantizeDownAndShrinkRange<
+    Tinput: TensorFlowScalar,
+    OutType: TensorFlowScalar
+  >(
+    _ input: Tensor<Tinput>,
+    inputMin: Tensor<Float>,
+    inputMax: Tensor<Float>
+  ) -> (output: Tensor<OutType>, outputMin: Tensor<Float>, outputMax: Tensor<Float>) {
+    _RawTFEager.quantizeDownAndShrinkRange(input, inputMin: inputMin, inputMax: inputMax)
+  }
+
+  /// Quantize the 'input' tensor of type float to 'output' tensor of type 'T'.
+  ///
+  /// [min_range, max_range] are scalar floats that specify the range for
+  /// the 'input' data. The 'mode' attribute controls exactly which calculations are
+  /// used to convert the float values to their quantized equivalents. The
+  /// 'round_mode' attribute controls which rounding tie-breaking algorithm is used
+  /// when rounding float values to their quantized equivalents.
+  ///
+  /// In 'MIN_COMBINED' mode, each value of the tensor will undergo the following:
+  ///
+  /// ```
+  /// out[i] = (in[i] - min_range) * range(T) / (max_range - min_range)
+  /// if T == qint8: out[i] -= (range(T) + 1) / 2.0
+  /// ```
+  ///
+  /// here `range(T) = numeric_limits<T>::max() - numeric_limits<T>::min()`
+  ///
+  /// *MIN_COMBINED Mode Example*
+  ///
+  /// Assume the input is type float and has a possible range of [0.0, 6.0] and the
+  /// output type is quint8 ([0, 255]). The min_range and max_range values should be
+  /// specified as 0.0 and 6.0. Quantizing from float to quint8 will multiply each
+  /// value of the input by 255/6 and cast to quint8.
+  ///
+  /// If the output type was qint8 ([-128, 127]), the operation will additionally
+  /// subtract 128 from each value prior to casting, so that the range of values aligns
+  /// with the range of qint8.
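+  ///
+  /// The quint8 example above can be spelled out in plain Swift (an illustrative
+  /// sketch of the stated formula, not the kernel itself):
+  ///
+  /// ```swift
+  /// let (minRange, maxRange) = (Float(0.0), Float(6.0))
+  /// let rangeT: Float = 255  // numeric_limits<quint8>::max() - min()
+  /// let scale = rangeT / (maxRange - minRange)  // 42.5
+  /// let quantized = (3.0 - minRange) * scale    // 3.0 maps to 127.5
+  /// ```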
+  ///
+  /// If the mode is 'MIN_FIRST', then this approach is used:
+  ///
+  /// ```
+  /// num_discrete_values = 1 << (# of bits in T)
+  /// range_adjust = num_discrete_values / (num_discrete_values - 1)
+  /// range = (range_max - range_min) * range_adjust
+  /// range_scale = num_discrete_values / range
+  /// quantized = round(input * range_scale) - round(range_min * range_scale) +
+  ///   numeric_limits<T>::min()
+  /// quantized = max(quantized, numeric_limits<T>::min())
+  /// quantized = min(quantized, numeric_limits<T>::max())
+  /// ```
+  ///
+  /// The biggest difference between this and MIN_COMBINED is that the minimum range
+  /// is rounded first, before it's subtracted from the rounded value. With
+  /// MIN_COMBINED, a small bias is introduced where repeated iterations of quantizing
+  /// and dequantizing will introduce a larger and larger error.
+  ///
+  /// *SCALED mode Example*
+  ///
+  /// `SCALED` mode matches the quantization approach used in
+  /// `QuantizeAndDequantize{V2|V3}`.
+  ///
+  /// If the mode is `SCALED`, the quantization is performed by multiplying each
+  /// input value by a scaling_factor.
+  /// The scaling_factor is determined from `min_range` and `max_range` to be as large
+  /// as possible such that the range from `min_range` to `max_range` is representable
+  /// within values of type T.
+  ///
+  /// ```c++
+  ///
+  /// const int min_T = std::numeric_limits<T>::min();
+  /// const int max_T = std::numeric_limits<T>::max();
+  /// const float max_float = std::numeric_limits<float>::max();
+  ///
+  /// const float scale_factor_from_min_side =
+  ///   (min_T * min_range > 0) ? min_T / min_range : max_float;
+  /// const float scale_factor_from_max_side =
+  ///   (max_T * max_range > 0) ? max_T / max_range : max_float;
+  ///
+  /// const float scale_factor = std::min(scale_factor_from_min_side,
+  ///                                     scale_factor_from_max_side);
+  /// ```
+  ///
+  /// We next use the scale_factor to adjust min_range and max_range as follows:
+  ///
+  /// ```c++
+  /// min_range = min_T / scale_factor;
+  /// max_range = max_T / scale_factor;
+  /// ```
+  ///
+  /// e.g. if T = qint8, and initially min_range = -10, and max_range = 9, we would
+  /// compare -128/-10.0 = 12.8 to 127/9.0 = 14.11, and set scaling_factor = 12.8.
+  /// In this case, min_range would remain -10, but max_range would be adjusted to
+  /// 127 / 12.8 = 9.921875.
+  ///
+  /// So we will quantize input values in the range (-10, 9.921875) to (-128, 127).
+  ///
+  /// The input tensor can now be quantized by clipping values to the range
+  /// `min_range` to `max_range`, then multiplying by scale_factor as follows:
+  ///
+  /// ```c++
+  /// result = round(min(max_range, max(min_range, input)) * scale_factor)
+  /// ```
+  ///
+  /// The adjusted `min_range` and `max_range` are returned as outputs 2 and 3 of
+  /// this operation. These outputs should be used as the range for any further
+  /// calculations.
+  ///
+  /// *narrow_range (bool) attribute*
+  ///
+  /// If true, we do not use the minimum quantized value.
+  /// i.e. for int8 quantized output, it would be restricted to the range
+  /// -127..127 instead of the full -128..127 range.
+  /// This is provided for compatibility with certain inference backends.
+  /// (Only applies to SCALED mode.)
+  ///
+  /// *axis (int) attribute*
+  ///
+  /// An optional `axis` attribute can specify a dimension index of the input tensor,
+  /// such that quantization ranges will be calculated and applied separately for each
+  /// slice of the tensor along that dimension. This is useful for per-channel
+  /// quantization.
+  ///
+  /// If axis is specified, min_range and max_range must be 1-D tensors whose size
+  /// matches the `axis` dimension of the input; if `axis`=None, per-tensor
+  /// quantization is performed as normal.
+  ///
+  /// *ensure_minimum_range (float) attribute*
+  ///
+  /// Ensures the minimum quantization range is at least this value.
+  /// The legacy default value for this is 0.01, but it is strongly suggested to
+  /// set it to 0 for new uses.
+  ///
+  /// - Parameters:
+  ///   - min_range: The minimum value of the quantization range. This value may be adjusted by the
+  ///     op depending on other parameters. The adjusted value is written to `output_min`.
+  ///     If the `axis` attribute is specified, this must be a 1-D tensor whose size
+  ///     matches the `axis` dimension of the input and output tensors.
+  ///   - max_range: The maximum value of the quantization range. This value may be adjusted by the
+  ///     op depending on other parameters. The adjusted value is written to `output_max`.
+  ///     If the `axis` attribute is specified, this must be a 1-D tensor whose size
+  ///     matches the `axis` dimension of the input and output tensors.
+  ///
+  /// - Outputs:
+  ///   - output: The quantized data produced from the float input.
+  ///   - output_min: The final quantization range minimum, used to clip input values before scaling
+  ///     and rounding them to quantized values.
+  ///     If the `axis` attribute is specified, this will be a 1-D tensor whose size
+  ///     matches the `axis` dimension of the input and output tensors.
+  ///   - output_max: The final quantization range maximum, used to clip input values before scaling
+  ///     and rounding them to quantized values.
+  ///     If the `axis` attribute is specified, this will be a 1-D tensor whose size
+  ///     matches the `axis` dimension of the input and output tensors.
+  @inlinable @inline(__always)
+  public static func quantizeV2<T: TensorFlowScalar>(
+    _ input: Tensor<Float>,
+    minRange: Tensor<Float>,
+    maxRange: Tensor<Float>,
+    mode: Mode = .minCombined,
+    roundMode: RoundMode1 = .halfAwayFromZero,
+    narrowRange: Bool = false,
+    axis: Int64 = -1,
+    ensureMinimumRange: Double = 0.01
+  ) -> (output: Tensor<T>, outputMin: Tensor<Float>, outputMax: Tensor<Float>) {
+    _RawTFEager.quantizeV2(
+      input, minRange: minRange, maxRange: maxRange, mode: mode, roundMode: roundMode,
+      narrowRange: narrowRange, axis: axis, ensureMinimumRange: ensureMinimumRange)
+  }
+
+  /// Returns x + y element-wise, working on quantized buffers.
+  ///
+  /// - Parameters:
+  ///   - min_x: The float value that the lowest quantized `x` value represents.
+  ///   - max_x: The float value that the highest quantized `x` value represents.
+  ///   - min_y: The float value that the lowest quantized `y` value represents.
+  ///   - max_y: The float value that the highest quantized `y` value represents.
+  ///
+  /// - Outputs:
+  ///   - min_z: The float value that the lowest quantized output value represents.
+  ///   - max_z: The float value that the highest quantized output value represents.
+  ///
+  /// *NOTE*: `QuantizedAdd` supports limited forms of broadcasting. More about
+  /// broadcasting [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
+  @inlinable @inline(__always)
+  public static func quantizedAdd<
+    T1: TensorFlowScalar,
+    T2: TensorFlowScalar,
+    Toutput: TensorFlowScalar
+  >(
+    _ x: Tensor<T1>,
+    _ y: Tensor<T2>,
+    minX: Tensor<Float>,
+    maxX: Tensor<Float>,
+    minY: Tensor<Float>,
+    maxY: Tensor<Float>
+  ) -> (z: Tensor<Toutput>, minZ: Tensor<Float>, maxZ: Tensor<Float>) {
+    _RawTFEager.quantizedAdd(x, y, minX: minX, maxX: maxX, minY: minY, maxY: maxY)
+  }
+
+  /// Produces the average pool of the input tensor for quantized types.
+  ///
+  /// - Parameters:
+  ///   - input: 4-D with shape `[batch, height, width, channels]`.
+  ///   - min_input: The float value that the lowest quantized input value represents.
+  ///   - max_input: The float value that the highest quantized input value represents.
+  ///
+  /// - Attrs:
+  ///   - ksize: The size of the window for each dimension of the input tensor.
+  ///     The length must be 4 to match the number of dimensions of the input.
+  ///   - strides: The stride of the sliding window for each dimension of the input
+  ///     tensor. The length must be 4 to match the number of dimensions of the input.
+  ///   - padding: The type of padding algorithm to use.
+  ///
+  /// - Outputs:
+  ///   - min_output: The float value that the lowest quantized output value represents.
+  ///   - max_output: The float value that the highest quantized output value represents.
+  @inlinable @inline(__always)
+  public static func quantizedAvgPool<T: TensorFlowScalar>(
+    _ input: Tensor<T>,
+    minInput: Tensor<Float>,
+    maxInput: Tensor<Float>,
+    ksize: [Int32],
+    strides: [Int32],
+    padding: Padding
+  ) -> (output: Tensor<T>, minOutput: Tensor<Float>, maxOutput: Tensor<Float>) {
+    _RawTFEager.quantizedAvgPool(
+      input, minInput: minInput, maxInput: maxInput, ksize: ksize, strides: strides,
+      padding: padding)
+  }
+
+  /// Quantized Batch normalization.
+  ///
+  /// This op is deprecated and will be removed in the future. Prefer
+  /// `tf.nn.batch_normalization`.
+  ///
+  /// - Parameters:
+  ///   - t: A 4D input Tensor.
+  ///   - t_min: The value represented by the lowest quantized input.
+  ///   - t_max: The value represented by the highest quantized input.
+  ///   - m: A 1D mean Tensor with size matching the last dimension of t.
+  ///     This is the first output from tf.nn.moments,
+  ///     or a saved moving average thereof.
+  ///   - m_min: The value represented by the lowest quantized mean.
+  ///   - m_max: The value represented by the highest quantized mean.
+  ///   - v: A 1D variance Tensor with size matching the last dimension of t.
+  ///     This is the second output from tf.nn.moments,
+  ///     or a saved moving average thereof.
+  ///   - v_min: The value represented by the lowest quantized variance.
+  ///   - v_max: The value represented by the highest quantized variance.
+  ///   - beta: A 1D beta Tensor with size matching the last dimension of t.
+  ///     An offset to be added to the normalized tensor.
+  ///   - beta_min: The value represented by the lowest quantized offset.
+  ///   - beta_max: The value represented by the highest quantized offset.
+  ///   - gamma: A 1D gamma Tensor with size matching the last dimension of t.
+  ///     If "scale_after_normalization" is true, the normalized tensor will be
+  ///     multiplied by this tensor.
+  ///   - gamma_min: The value represented by the lowest quantized gamma.
+  ///   - gamma_max: The value represented by the highest quantized gamma.
+  ///
+  /// - Attrs:
+  ///   - variance_epsilon: A small float number to avoid dividing by 0.
+  ///   - scale_after_normalization: A bool indicating whether the resulting tensor
+  ///     needs to be multiplied by gamma.
+  @inlinable @inline(__always)
+  public static func quantizedBatchNormWithGlobalNormalization<
+    Tinput: TensorFlowScalar,
+    OutType: TensorFlowScalar
+  >(
+    t: Tensor<Tinput>,
+    tMin: Tensor<Float>,
+    tMax: Tensor<Float>,
+    m: Tensor<Tinput>,
+    mMin: Tensor<Float>,
+    mMax: Tensor<Float>,
+    v: Tensor<Tinput>,
+    vMin: Tensor<Float>,
+    vMax: Tensor<Float>,
+    beta: Tensor<Tinput>,
+    betaMin: Tensor<Float>,
+    betaMax: Tensor<Float>,
+    gamma: Tensor<Tinput>,
+    gammaMin: Tensor<Float>,
+    gammaMax: Tensor<Float>,
+    varianceEpsilon: Double,
+    scaleAfterNormalization: Bool
+  ) -> (result: Tensor<OutType>, resultMin: Tensor<Float>, resultMax: Tensor<Float>) {
+    _RawTFEager.quantizedBatchNormWithGlobalNormalization(
+      t: t, tMin: tMin, tMax: tMax, m: m, mMin: mMin, mMax: mMax, v: v, vMin: vMin, vMax: vMax,
+      beta: beta, betaMin: betaMin, betaMax: betaMax, gamma: gamma, gammaMin: gammaMin,
+      gammaMax: gammaMax, varianceEpsilon: varianceEpsilon,
+      scaleAfterNormalization: scaleAfterNormalization)
+  }
+
+  /// Adds Tensor 'bias' to Tensor 'input' for Quantized types.
+  ///
+  /// Broadcasts the values of bias on dimensions 0..N-2 of 'input'.
+  ///
+  /// - Parameters:
+  ///   - bias: A 1D bias Tensor with size matching the last dimension of 'input'.
+  ///   - min_input: The float value that the lowest quantized input value represents.
+  ///   - max_input: The float value that the highest quantized input value represents.
+  ///   - min_bias: The float value that the lowest quantized bias value represents.
+  ///   - max_bias: The float value that the highest quantized bias value represents.
+  ///
+  /// - Outputs:
+  ///   - min_out: The float value that the lowest quantized output value represents.
+  ///   - max_out: The float value that the highest quantized output value represents.
+  @inlinable @inline(__always)
+  public static func quantizedBiasAdd<
+    T1: TensorFlowScalar,
+    T2: TensorFlowScalar,
+    OutType: TensorFlowScalar
+  >(
+    _ input: Tensor<T1>,
+    bias: Tensor<T2>,
+    minInput: Tensor<Float>,
+    maxInput: Tensor<Float>,
+    minBias: Tensor<Float>,
+    maxBias: Tensor<Float>
+  ) -> (output: Tensor<OutType>, minOut: Tensor<Float>, maxOut: Tensor<Float>) {
+    _RawTFEager.quantizedBiasAdd(
+      input, bias: bias, minInput: minInput, maxInput: maxInput, minBias: minBias,
+      maxBias: maxBias)
+  }
+
+  /// Concatenates quantized tensors along one dimension.
+  ///
+  /// - Parameters:
+  ///   - concat_dim: 0-D. The dimension along which to concatenate. Must be in the
+  ///     range [0, rank(values)).
+  ///   - values: The `N` Tensors to concatenate. Their ranks and types must match,
+  ///     and their sizes must match in all dimensions except `concat_dim`.
+  ///   - input_mins: The minimum scalar values for each of the input tensors.
+  ///   - input_maxes: The maximum scalar values for each of the input tensors.
+  ///
+  /// - Outputs:
+  ///   - output: A `Tensor` with the concatenation of values stacked along the
+  ///     `concat_dim` dimension. This tensor's shape matches that of `values` except
+  ///     in `concat_dim` where it has the sum of the sizes.
+  ///   - output_min: The float value that the minimum quantized output value represents.
+  ///   - output_max: The float value that the maximum quantized output value represents.
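+  ///
+  /// Illustratively, the output range must cover every input's range (a sketch of
+  /// the idea in plain Swift, not necessarily the kernel's exact rule):
+  ///
+  /// ```swift
+  /// let inputMins: [Float] = [0.0, -1.0]
+  /// let inputMaxes: [Float] = [6.0, 1.0]
+  /// let outputMin = inputMins.min()!   // -1.0
+  /// let outputMax = inputMaxes.max()!  //  6.0
+  /// ```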
+  @inlinable @inline(__always)
+  public static func quantizedConcat<T: TensorFlowScalar>(
+    concatDim: Tensor<Int32>,
+    _ values: [Tensor<T>],
+    inputMins: [Tensor<Float>],
+    inputMaxes: [Tensor<Float>]
+  ) -> (output: Tensor<T>, outputMin: Tensor<Float>, outputMax: Tensor<Float>) {
+    _RawTFEager.quantizedConcat(
+      concatDim: concatDim, values, inputMins: inputMins, inputMaxes: inputMaxes)
+  }
+
+  /// Computes a 2D convolution given quantized 4D input and filter tensors.
+  ///
+  /// The inputs are quantized tensors where the lowest value represents the real
+  /// number of the associated minimum, and the highest represents the maximum.
+  /// This means that you can only interpret the quantized output in the same way, by
+  /// taking the returned minimum and maximum values into account.
+  ///
+  /// - Parameters:
+  ///   - filter: filter's input_depth dimension must match input's depth dimensions.
+  ///   - min_input: The float value that the lowest quantized input value represents.
+  ///   - max_input: The float value that the highest quantized input value represents.
+  ///   - min_filter: The float value that the lowest quantized filter value represents.
+  ///   - max_filter: The float value that the highest quantized filter value represents.
+  ///
+  /// - Attrs:
+  ///   - strides: The stride of the sliding window for each dimension of the input
+  ///     tensor.
+  ///   - padding: The type of padding algorithm to use.
+  ///   - dilations: 1-D tensor of length 4. The dilation factor for each dimension of
+  ///     `input`. If set to k > 1, there will be k-1 skipped cells between each
+  ///     filter element on that dimension. The dimension order is determined by the
+  ///     value of `data_format`, see above for details. Dilations in the batch and
+  ///     depth dimensions must be 1.
+  ///
+  /// - Outputs:
+  ///   - min_output: The float value that the lowest quantized output value represents.
+  ///   - max_output: The float value that the highest quantized output value represents.
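+  ///
+  /// The min/max convention above, written as a plain function (an illustrative
+  /// sketch; assumes quint8 values, i.e. 255 quantized steps):
+  ///
+  /// ```swift
+  /// func realValue(_ q: UInt8, min: Float, max: Float) -> Float {
+  ///   min + Float(q) * (max - min) / 255.0
+  /// }
+  /// // realValue(0, min: -1, max: 1) == -1.0; realValue(255, min: -1, max: 1) == 1.0
+  /// ```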
+ @inlinable @inline(__always) + public static func quantizedConv2D< + Tinput: TensorFlowScalar, + Tfilter: TensorFlowScalar, + OutType: TensorFlowScalar + >( + _ input: Tensor, + filter: Tensor, + minInput: Tensor, + maxInput: Tensor, + minFilter: Tensor, + maxFilter: Tensor, + strides: [Int32], + padding: Padding, + dilations: [Int32] = [1, 1, 1, 1] + ) -> (output: Tensor, minOutput: Tensor, maxOutput: Tensor) { + _RawTFEager.quantizedConv2D( + input, filter: filter, minInput: minInput, maxInput: maxInput, minFilter: minFilter, + maxFilter: maxFilter, strides: strides, padding: padding, dilations: dilations) + } + + @inlinable @inline(__always) + public static func quantizedConv2DAndRelu< + Tinput: TensorFlowScalar, + Tfilter: TensorFlowScalar, + OutType: TensorFlowScalar + >( + _ input: Tensor, + filter: Tensor, + minInput: Tensor, + maxInput: Tensor, + minFilter: Tensor, + maxFilter: Tensor, + strides: [Int32], + padding: Padding, + dilations: [Int32] = [1, 1, 1, 1], + paddingList: [Int32] + ) -> (output: Tensor, minOutput: Tensor, maxOutput: Tensor) { + _RawTFEager.quantizedConv2DAndRelu( + input, filter: filter, minInput: minInput, maxInput: maxInput, minFilter: minFilter, + maxFilter: maxFilter, strides: strides, padding: padding, dilations: dilations, + paddingList: paddingList) + } + + @inlinable @inline(__always) + public static func quantizedConv2DAndReluAndRequantize< + Tinput: TensorFlowScalar, + Tfilter: TensorFlowScalar, + OutType: TensorFlowScalar + >( + _ input: Tensor, + filter: Tensor, + minInput: Tensor, + maxInput: Tensor, + minFilter: Tensor, + maxFilter: Tensor, + minFreezedOutput: Tensor, + maxFreezedOutput: Tensor, + strides: [Int32], + padding: Padding, + dilations: [Int32] = [1, 1, 1, 1], + paddingList: [Int32] + ) -> (output: Tensor, minOutput: Tensor, maxOutput: Tensor) { + _RawTFEager.quantizedConv2DAndReluAndRequantize( + input, filter: filter, minInput: minInput, maxInput: maxInput, minFilter: minFilter, + maxFilter: maxFilter, minFreezedOutput: minFreezedOutput, + maxFreezedOutput: maxFreezedOutput, strides: strides, padding: padding, + dilations: dilations, paddingList: paddingList) + } + + @inlinable @inline(__always) + public static func quantizedConv2DAndRequantize< + Tinput: TensorFlowScalar, + Tfilter: TensorFlowScalar, + OutType: TensorFlowScalar + >( + _ input: Tensor, + filter: Tensor, + minInput: Tensor, + maxInput: Tensor, + minFilter: Tensor, + maxFilter: Tensor, + minFreezedOutput: Tensor, + maxFreezedOutput: Tensor, + strides: [Int32], + padding: Padding, + dilations: [Int32] = [1, 1, 1, 1], + paddingList: [Int32] + ) -> (output: Tensor, minOutput: Tensor, maxOutput: Tensor) { + _RawTFEager.quantizedConv2DAndRequantize( + input, filter: filter, minInput: minInput, maxInput: maxInput, minFilter: minFilter, + maxFilter: maxFilter, minFreezedOutput: minFreezedOutput, + maxFreezedOutput: maxFreezedOutput, strides: strides, padding: padding, + dilations: dilations, paddingList: paddingList) + } + + /// Computes QuantizedConv2D per channel. + /// + /// - Parameters: + /// - input: The original input tensor. + /// - filter: The original filter tensor. + /// - min_input: The minimum value of the input tensor + /// - max_input: The maximum value of the input tensor. + /// - min_filter: The minimum value of the filter tensor. + /// - max_filter: The maximum value of the filter tensor. + /// + /// - Attrs: + /// - Tinput: The quantized type of input tensor that needs to be converted. 
+ /// - Tfilter: The quantized type of filter tensor that needs to be converted. + /// - out_type: The quantized type of output tensor that needs to be converted. + /// - strides: list of stride values. + /// - dilations: list of dilation values. + /// + /// - Outputs: + /// - output: The output tensor. + /// - min_output: The minimum value of the final output tensor. + /// - max_output: The maximum value of the final output tensor. + @inlinable @inline(__always) + public static func quantizedConv2DPerChannel< + Tinput: TensorFlowScalar, + Tfilter: TensorFlowScalar, + OutType: TensorFlowScalar + >( + _ input: Tensor, + filter: Tensor, + minInput: Tensor, + maxInput: Tensor, + minFilter: Tensor, + maxFilter: Tensor, + strides: [Int32], + padding: Padding, + dilations: [Int32] = [1, 1, 1, 1] + ) -> (output: Tensor, minOutput: Tensor, maxOutput: Tensor) { + _RawTFEager.quantizedConv2DPerChannel( + input, filter: filter, minInput: minInput, maxInput: maxInput, minFilter: minFilter, + maxFilter: maxFilter, strides: strides, padding: padding, dilations: dilations) + } + + @inlinable @inline(__always) + public static func quantizedConv2DWithBias< + Tinput: TensorFlowScalar, + Tfilter: TensorFlowScalar, + OutType: TensorFlowScalar + >( + _ input: Tensor, + filter: Tensor, + bias: Tensor, + minInput: Tensor, + maxInput: Tensor, + minFilter: Tensor, + maxFilter: Tensor, + strides: [Int32], + padding: Padding, + dilations: [Int32] = [1, 1, 1, 1], + paddingList: [Int32] + ) -> (output: Tensor, minOutput: Tensor, maxOutput: Tensor) { + _RawTFEager.quantizedConv2DWithBias( + input, filter: filter, bias: bias, minInput: minInput, maxInput: maxInput, + minFilter: minFilter, maxFilter: maxFilter, strides: strides, padding: padding, + dilations: dilations, paddingList: paddingList) + } + + @inlinable @inline(__always) + public static func quantizedConv2DWithBiasAndRelu< + Tinput: TensorFlowScalar, + Tfilter: TensorFlowScalar, + OutType: TensorFlowScalar + >( + _ input: Tensor, + filter: Tensor, + bias: Tensor, + minInput: Tensor, + maxInput: Tensor, + minFilter: Tensor, + maxFilter: Tensor, + strides: [Int32], + padding: Padding, + dilations: [Int32] = [1, 1, 1, 1], + paddingList: [Int32] + ) -> (output: Tensor, minOutput: Tensor, maxOutput: Tensor) { + _RawTFEager.quantizedConv2DWithBiasAndRelu( + input, filter: filter, bias: bias, minInput: minInput, maxInput: maxInput, + minFilter: minFilter, maxFilter: maxFilter, strides: strides, padding: padding, + dilations: dilations, paddingList: paddingList) + } + + @inlinable @inline(__always) + public static func quantizedConv2DWithBiasAndReluAndRequantize< + Tinput: TensorFlowScalar, + Tfilter: TensorFlowScalar, + Tbias: FloatingPoint & TensorFlowScalar, + OutType: TensorFlowScalar + >( + _ input: Tensor, + filter: Tensor, + bias: Tensor, + minInput: Tensor, + maxInput: Tensor, + minFilter: Tensor, + maxFilter: Tensor, + minFreezedOutput: Tensor, + maxFreezedOutput: Tensor, + strides: [Int32], + padding: Padding, + dilations: [Int32] = [1, 1, 1, 1], + paddingList: [Int32] + ) -> (output: Tensor, minOutput: Tensor, maxOutput: Tensor) { + _RawTFEager.quantizedConv2DWithBiasAndReluAndRequantize( + input, filter: filter, bias: bias, minInput: minInput, maxInput: maxInput, + minFilter: minFilter, maxFilter: maxFilter, minFreezedOutput: minFreezedOutput, + maxFreezedOutput: maxFreezedOutput, strides: strides, padding: padding, + dilations: dilations, paddingList: paddingList) + } + + @inlinable @inline(__always) + public static func 
quantizedConv2DWithBiasAndRequantize< + Tinput: TensorFlowScalar, + Tfilter: TensorFlowScalar, + Tbias: FloatingPoint & TensorFlowScalar, + OutType: TensorFlowScalar + >( + _ input: Tensor, + filter: Tensor, + bias: Tensor, + minInput: Tensor, + maxInput: Tensor, + minFilter: Tensor, + maxFilter: Tensor, + minFreezedOutput: Tensor, + maxFreezedOutput: Tensor, + strides: [Int32], + padding: Padding, + dilations: [Int32] = [1, 1, 1, 1], + paddingList: [Int32] + ) -> (output: Tensor, minOutput: Tensor, maxOutput: Tensor) { + _RawTFEager.quantizedConv2DWithBiasAndRequantize( + input, filter: filter, bias: bias, minInput: minInput, maxInput: maxInput, + minFilter: minFilter, maxFilter: maxFilter, minFreezedOutput: minFreezedOutput, + maxFreezedOutput: maxFreezedOutput, strides: strides, padding: padding, + dilations: dilations, paddingList: paddingList) + } + + @inlinable @inline(__always) + public static func quantizedConv2DWithBiasSignedSumAndReluAndRequantize< + Tinput: TensorFlowScalar, + Tfilter: TensorFlowScalar, + Tbias: FloatingPoint & TensorFlowScalar, + Tsummand: TensorFlowScalar, + OutType: TensorFlowScalar + >( + _ input: Tensor, + filter: Tensor, + bias: Tensor, + minInput: Tensor, + maxInput: Tensor, + minFilter: Tensor, + maxFilter: Tensor, + minFreezedOutput: Tensor, + maxFreezedOutput: Tensor, + summand: Tensor, + minSummand: Tensor, + maxSummand: Tensor, + strides: [Int32], + padding: Padding, + dilations: [Int32] = [1, 1, 1, 1], + paddingList: [Int32] + ) -> (output: Tensor, minOutput: Tensor, maxOutput: Tensor) { + _RawTFEager.quantizedConv2DWithBiasSignedSumAndReluAndRequantize( + input, filter: filter, bias: bias, minInput: minInput, maxInput: maxInput, + minFilter: minFilter, maxFilter: maxFilter, minFreezedOutput: minFreezedOutput, + maxFreezedOutput: maxFreezedOutput, summand: summand, minSummand: minSummand, + maxSummand: maxSummand, strides: strides, padding: padding, dilations: dilations, + paddingList: paddingList) + } + + @inlinable @inline(__always) + public static func quantizedConv2DWithBiasSumAndRelu< + Tinput: TensorFlowScalar, + Tfilter: TensorFlowScalar, + OutType: TensorFlowScalar + >( + _ input: Tensor, + filter: Tensor, + bias: Tensor, + minInput: Tensor, + maxInput: Tensor, + minFilter: Tensor, + maxFilter: Tensor, + summand: Tensor, + strides: [Int32], + padding: Padding, + dilations: [Int32] = [1, 1, 1, 1], + paddingList: [Int32] + ) -> (output: Tensor, minOutput: Tensor, maxOutput: Tensor) { + _RawTFEager.quantizedConv2DWithBiasSumAndRelu( + input, filter: filter, bias: bias, minInput: minInput, maxInput: maxInput, + minFilter: minFilter, maxFilter: maxFilter, summand: summand, strides: strides, + padding: padding, dilations: dilations, paddingList: paddingList) + } + + @inlinable @inline(__always) + public static func quantizedConv2DWithBiasSumAndReluAndRequantize< + Tinput: TensorFlowScalar, + Tfilter: TensorFlowScalar, + Tbias: FloatingPoint & TensorFlowScalar, + Tsummand: TensorFlowScalar, + OutType: TensorFlowScalar + >( + _ input: Tensor, + filter: Tensor, + bias: Tensor, + minInput: Tensor, + maxInput: Tensor, + minFilter: Tensor, + maxFilter: Tensor, + minFreezedOutput: Tensor, + maxFreezedOutput: Tensor, + summand: Tensor, + minSummand: Tensor, + maxSummand: Tensor, + strides: [Int32], + padding: Padding, + dilations: [Int32] = [1, 1, 1, 1], + paddingList: [Int32] + ) -> (output: Tensor, minOutput: Tensor, maxOutput: Tensor) { + _RawTFEager.quantizedConv2DWithBiasSumAndReluAndRequantize( + input, filter: filter, bias: bias, minInput: 
minInput, maxInput: maxInput, + minFilter: minFilter, maxFilter: maxFilter, minFreezedOutput: minFreezedOutput, + maxFreezedOutput: maxFreezedOutput, summand: summand, minSummand: minSummand, + maxSummand: maxSummand, strides: strides, padding: padding, dilations: dilations, + paddingList: paddingList) + } + + /// Computes quantized depthwise Conv2D. + /// + /// - Parameters: + /// - input: The original input tensor. + /// - filter: The original filter tensor. + /// - min_input: The float value that the minimum quantized input value represents. + /// - max_input: The float value that the maximum quantized input value represents. + /// - min_filter: The float value that the minimum quantized filter value represents. + /// - max_filter: The float value that the maximum quantized filter value represents. + /// + /// - Attrs: + /// - Tinput: The type of the input. + /// - Tfilter: The type of the filter. + /// - out_type: The type of the output. + /// - strides: List of stride values. + /// - dilations: List of dilation values. + /// + /// - Outputs: + /// - output: The output tensor. + /// - min_output: The float value that the minimum quantized output value represents. + /// - max_output: The float value that the maximum quantized output value represents. + @inlinable @inline(__always) + public static func quantizedDepthwiseConv2D< + Tinput: TensorFlowScalar, + Tfilter: TensorFlowScalar, + OutType: TensorFlowScalar + >( + _ input: Tensor, + filter: Tensor, + minInput: Tensor, + maxInput: Tensor, + minFilter: Tensor, + maxFilter: Tensor, + strides: [Int32], + padding: Padding, + dilations: [Int32] = [1, 1, 1, 1] + ) -> (output: Tensor, minOutput: Tensor, maxOutput: Tensor) { + _RawTFEager.quantizedDepthwiseConv2D( + input, filter: filter, minInput: minInput, maxInput: maxInput, minFilter: minFilter, + maxFilter: maxFilter, strides: strides, padding: padding, dilations: dilations) + } + + /// Computes quantized depthwise Conv2D with Bias. + /// + /// - Parameters: + /// - input: The original input tensor. + /// - filter: The original filter tensor. + /// - bias: The original bias tensor. + /// - min_input: The float value that the minimum quantized input value represents. + /// - max_input: The float value that the maximum quantized input value represents. + /// - min_filter: The float value that the minimum quantized filter value represents. + /// - max_filter: The float value that the maximum quantized filter value represents. + /// + /// - Attrs: + /// - Tinput: The type of the input. + /// - Tfilter: The type of the filter. + /// - out_type: The type of the output. + /// - strides: List of stride values. + /// - dilations: List of dilation values. + /// + /// - Outputs: + /// - output: The output tensor. + /// - min_output: The float value that the minimum quantized output value represents. + /// - max_output: The float value that the maximum quantized output value represents. 
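+  ///
+  /// In float terms the fused op is a depthwise convolution followed by a bias
+  /// add; a scalar-level sketch of the bias step (illustrative only, not the
+  /// quantized kernel):
+  ///
+  /// ```swift
+  /// // Dequantized conv results for one output channel, plus that channel's bias.
+  /// let convOut: [Float] = [0.5, -0.25, 1.0]
+  /// let bias: Float = 0.1
+  /// let biased = convOut.map { $0 + bias }  // [0.6, -0.15, 1.1]
+  /// ```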
+ @inlinable @inline(__always) + public static func quantizedDepthwiseConv2DWithBias< + Tinput: TensorFlowScalar, + Tfilter: TensorFlowScalar, + OutType: TensorFlowScalar + >( + _ input: Tensor, + filter: Tensor, + bias: Tensor, + minInput: Tensor, + maxInput: Tensor, + minFilter: Tensor, + maxFilter: Tensor, + strides: [Int32], + padding: Padding, + dilations: [Int32] = [1, 1, 1, 1] + ) -> (output: Tensor, minOutput: Tensor, maxOutput: Tensor) { + _RawTFEager.quantizedDepthwiseConv2DWithBias( + input, filter: filter, bias: bias, minInput: minInput, maxInput: maxInput, + minFilter: minFilter, maxFilter: maxFilter, strides: strides, padding: padding, + dilations: dilations) + } + + /// Computes quantized depthwise Conv2D with Bias and Relu. + /// + /// - Parameters: + /// - input: The original input tensor. + /// - filter: The original filter tensor. + /// - bias: The original bias tensor. + /// - min_input: The float value that the minimum quantized input value represents. + /// - max_input: The float value that the maximum quantized input value represents. + /// - min_filter: The float value that the minimum quantized filter value represents. + /// - max_filter: The float value that the maximum quantized filter value represents. + /// + /// - Attrs: + /// - Tinput: The type of the input. + /// - Tfilter: The type of the filter. + /// - out_type: The type of the output. + /// - strides: List of stride values. + /// - dilations: List of dilation values. + /// + /// - Outputs: + /// - output: The output tensor. + /// - min_output: The float value that the minimum quantized output value represents. + /// - max_output: The float value that the maximum quantized output value represents. + @inlinable @inline(__always) + public static func quantizedDepthwiseConv2DWithBiasAndRelu< + Tinput: TensorFlowScalar, + Tfilter: TensorFlowScalar, + OutType: TensorFlowScalar + >( + _ input: Tensor, + filter: Tensor, + bias: Tensor, + minInput: Tensor, + maxInput: Tensor, + minFilter: Tensor, + maxFilter: Tensor, + strides: [Int32], + padding: Padding, + dilations: [Int32] = [1, 1, 1, 1] + ) -> (output: Tensor, minOutput: Tensor, maxOutput: Tensor) { + _RawTFEager.quantizedDepthwiseConv2DWithBiasAndRelu( + input, filter: filter, bias: bias, minInput: minInput, maxInput: maxInput, + minFilter: minFilter, maxFilter: maxFilter, strides: strides, padding: padding, + dilations: dilations) + } + + /// Computes quantized depthwise Conv2D with Bias, Relu and Requantize. + /// + /// - Parameters: + /// - input: The original input tensor. + /// - filter: The original filter tensor. + /// - bias: The original bias tensor. + /// - min_input: The float value that the minimum quantized input value represents. + /// - max_input: The float value that the maximum quantized input value represents. + /// - min_filter: The float value that the minimum quantized filter value represents. + /// - max_filter: The float value that the maximum quantized filter value represents. + /// - min_freezed_output: The minimum float value of the output tensor. + /// - max_freezed_output: The maximum float value of the output tensor. + /// + /// - Attrs: + /// - Tinput: The type of the input. + /// - Tfilter: The type of the filter. + /// - Tbias: The type of the bias. + /// - out_type: The type of the output. + /// - strides: List of stride values. + /// - dilations: List of dilation values. + /// + /// - Outputs: + /// - output: The output tensor. + /// - min_output: The float value that the minimum quantized output value represents. 
+  ///   - max_output: The float value that the maximum quantized output value represents.
+  @inlinable @inline(__always)
+  public static func quantizedDepthwiseConv2DWithBiasAndReluAndRequantize<
+    Tinput: TensorFlowScalar,
+    Tfilter: TensorFlowScalar,
+    Tbias: FloatingPoint & TensorFlowScalar,
+    OutType: TensorFlowScalar
+  >(
+    _ input: Tensor<Tinput>,
+    filter: Tensor<Tfilter>,
+    bias: Tensor<Tbias>,
+    minInput: Tensor<Float>,
+    maxInput: Tensor<Float>,
+    minFilter: Tensor<Float>,
+    maxFilter: Tensor<Float>,
+    minFreezedOutput: Tensor<Float>,
+    maxFreezedOutput: Tensor<Float>,
+    strides: [Int32],
+    padding: Padding,
+    dilations: [Int32] = [1, 1, 1, 1]
+  ) -> (output: Tensor<OutType>, minOutput: Tensor<Float>, maxOutput: Tensor<Float>) {
+    _RawTFEager.quantizedDepthwiseConv2DWithBiasAndReluAndRequantize(
+      input, filter: filter, bias: bias, minInput: minInput, maxInput: maxInput,
+      minFilter: minFilter, maxFilter: maxFilter, minFreezedOutput: minFreezedOutput,
+      maxFreezedOutput: maxFreezedOutput, strides: strides, padding: padding, dilations: dilations
+    )
+  }
+
+  /// Quantized Instance normalization.
+  ///
+  /// - Parameters:
+  ///   - x: A 4D input Tensor.
+  ///   - x_min: The value represented by the lowest quantized input.
+  ///   - x_max: The value represented by the highest quantized input.
+  ///
+  /// - Attrs:
+  ///   - output_range_given: If True, `given_y_min` and `given_y_max` are used as
+  ///     the output range. Otherwise, the implementation computes the output range.
+  ///   - given_y_min: Output in `y_min` if `output_range_given` is True.
+  ///   - given_y_max: Output in `y_max` if `output_range_given` is True.
+  ///   - variance_epsilon: A small float number to avoid dividing by 0.
+  ///   - min_separation: Minimum value of `y_max - y_min`
+  ///
+  /// - Outputs:
+  ///   - y: A 4D Tensor.
+  ///   - y_min: The value represented by the lowest quantized output.
+  ///   - y_max: The value represented by the highest quantized output.
+  @inlinable @inline(__always)
+  public static func quantizedInstanceNorm<T: TensorFlowScalar>(
+    _ x: Tensor<T>,
+    xMin: Tensor<Float>,
+    xMax: Tensor<Float>,
+    outputRangeGiven: Bool = false,
+    givenYMin: Double = 0,
+    givenYMax: Double = 0,
+    varianceEpsilon: Double = 1e-05,
+    minSeparation: Double = 0.001
+  ) -> (y: Tensor<T>, yMin: Tensor<Float>, yMax: Tensor<Float>) {
+    _RawTFEager.quantizedInstanceNorm(
+      x, xMin: xMin, xMax: xMax, outputRangeGiven: outputRangeGiven, givenYMin: givenYMin,
+      givenYMax: givenYMax, varianceEpsilon: varianceEpsilon, minSeparation: minSeparation)
+  }
+
+  /// Performs a quantized matrix multiplication of `a` by the matrix `b`.
+  ///
+  /// The inputs must be two-dimensional matrices and the inner dimension of
+  /// `a` (after being transposed if `transpose_a` is non-zero) must match the
+  /// outer dimension of `b` (after being transposed if `transpose_b` is
+  /// non-zero).
+  ///
+  /// - Parameters:
+  ///   - a: Must be a two-dimensional tensor.
+  ///   - b: Must be a two-dimensional tensor.
+  ///   - min_a: The float value that the lowest quantized `a` value represents.
+  ///   - max_a: The float value that the highest quantized `a` value represents.
+  ///   - min_b: The float value that the lowest quantized `b` value represents.
+  ///   - max_b: The float value that the highest quantized `b` value represents.
+  ///
+  /// - Attrs:
+  ///   - transpose_a: If true, `a` is transposed before multiplication.
+  ///   - transpose_b: If true, `b` is transposed before multiplication.
+  ///   - Tactivation: The type of output produced by activation function
+  ///     following this operation.
+  ///
+  /// - Outputs:
+  ///   - min_out: The float value that the lowest quantized output value represents.
+  ///   - max_out: The float value that the highest quantized output value represents.
+  @inlinable @inline(__always)
+  public static func quantizedMatMul<
+    T1: TensorFlowScalar,
+    T2: TensorFlowScalar,
+    Toutput: TensorFlowScalar
+  >(
+    _ a: Tensor<T1>,
+    _ b: Tensor<T2>,
+    minA: Tensor<Float>,
+    maxA: Tensor<Float>,
+    minB: Tensor<Float>,
+    maxB: Tensor<Float>,
+    transposeA: Bool = false,
+    transposeB: Bool = false,
+    tactivation: TensorDataType
+  ) -> (out: Tensor<Toutput>, minOut: Tensor<Float>, maxOut: Tensor<Float>) {
+    _RawTFEager.quantizedMatMul(
+      a, b, minA: minA, maxA: maxA, minB: minB, maxB: maxB, transposeA: transposeA,
+      transposeB: transposeB, tactivation: tactivation)
+  }
+
+  /// Performs a quantized matrix multiplication of `a` by the matrix `b` with bias
+  /// add.
+  ///
+  /// The inputs must be two-dimensional matrices and a 1D bias vector. The inner
+  /// dimension of `a` (after being transposed if `transpose_a` is non-zero) must
+  /// match the outer dimension of `b` (after being transposed if `transpose_b` is
+  /// non-zero). A broadcast add is then performed with the bias values on the matrix
+  /// multiplication result. The bias size must match the inner dimension of `b`.
+  ///
+  /// - Parameters:
+  ///   - a: A matrix to be multiplied. Must be a two-dimensional tensor of type `quint8`.
+  ///   - b: A matrix to be multiplied and must be a two-dimensional tensor of type `qint8`.
+  ///   - bias: A 1D bias tensor with size matching the inner dimension of `b` (after being
+  ///     transposed if `transpose_b` is non-zero).
+  ///   - min_a: The float value that the lowest quantized `a` value represents.
+  ///   - max_a: The float value that the highest quantized `a` value represents.
+  ///   - min_b: The float value that the lowest quantized `b` value represents.
+  ///   - max_b: The float value that the highest quantized `b` value represents.
+  ///
+  /// - Attrs:
+  ///   - transpose_a: If true, `a` is transposed before multiplication.
+  ///   - transpose_b: If true, `b` is transposed before multiplication.
+  ///   - input_quant_mode: Input data quantization mode. Either MIN_FIRST(default) or SCALED.
+  ///
+  /// - Outputs:
+  ///   - min_out: The float value that the lowest quantized output value represents.
+  ///   - max_out: The float value that the highest quantized output value represents.
+  @inlinable @inline(__always)
+  public static func quantizedMatMulWithBias<
+    T1: TensorFlowScalar,
+    T2: TensorFlowScalar,
+    Tbias: FloatingPoint & TensorFlowScalar,
+    Toutput: TensorFlowScalar
+  >(
+    _ a: Tensor<T1>,
+    _ b: Tensor<T2>,
+    bias: Tensor<Tbias>,
+    minA: Tensor<Float>,
+    maxA: Tensor<Float>,
+    minB: Tensor<Float>,
+    maxB: Tensor<Float>,
+    transposeA: Bool = false,
+    transposeB: Bool = false,
+    inputQuantMode: InputQuantMode = .minFirst
+  ) -> (out: Tensor<Toutput>, minOut: Tensor<Float>, maxOut: Tensor<Float>) {
+    _RawTFEager.quantizedMatMulWithBias(
+      a, b, bias: bias, minA: minA, maxA: maxA, minB: minB, maxB: maxB, transposeA: transposeA,
+      transposeB: transposeB, inputQuantMode: inputQuantMode)
+  }
+
+  /// Performs a quantized matrix multiplication of `a` by the matrix `b` with bias
+  /// add and relu fusion.
+  ///
+  /// The inputs must be two-dimensional matrices and a 1D bias vector. The inner
+  /// dimension of `a` (after being transposed if `transpose_a` is non-zero) must
+  /// match the outer dimension of `b` (after being transposed if `transpose_b` is
+  /// non-zero). A broadcast add is then performed with the bias values on the matrix
+  /// multiplication result. The bias size must match the inner dimension of `b`. A
+  /// relu activation is then applied to produce a non-negative result.
+  ///
+  /// - Parameters:
+  ///   - a: A matrix to be multiplied. Must be a two-dimensional tensor of type `quint8`.
+  ///   - b: A matrix to be multiplied and must be a two-dimensional tensor of type `qint8`.
+  ///   - bias: A 1D bias tensor with size matching the inner dimension of `b` (after being
+  ///     transposed if `transpose_b` is non-zero).
+  ///   - min_a: The float value that the lowest quantized `a` value represents.
+  ///   - max_a: The float value that the highest quantized `a` value represents.
+  ///   - min_b: The float value that the lowest quantized `b` value represents.
+  ///   - max_b: The float value that the highest quantized `b` value represents.
+  ///
+  /// - Attrs:
+  ///   - transpose_a: If true, `a` is transposed before multiplication.
+  ///   - transpose_b: If true, `b` is transposed before multiplication.
+  ///   - input_quant_mode: Input data quantization mode. Either MIN_FIRST(default) or SCALED.
+  ///
+  /// - Outputs:
+  ///   - min_out: The float value that the lowest quantized output value represents.
+  ///   - max_out: The float value that the highest quantized output value represents.
+  @inlinable @inline(__always)
+  public static func quantizedMatMulWithBiasAndRelu<
+    T1: TensorFlowScalar,
+    T2: TensorFlowScalar,
+    Toutput: TensorFlowScalar
+  >(
+    _ a: Tensor<T1>,
+    _ b: Tensor<T2>,
+    bias: Tensor<Float>,
+    minA: Tensor<Float>,
+    maxA: Tensor<Float>,
+    minB: Tensor<Float>,
+    maxB: Tensor<Float>,
+    transposeA: Bool = false,
+    transposeB: Bool = false,
+    inputQuantMode: InputQuantMode = .minFirst
+  ) -> (out: Tensor<Toutput>, minOut: Tensor<Float>, maxOut: Tensor<Float>) {
+    _RawTFEager.quantizedMatMulWithBiasAndRelu(
+      a, b, bias: bias, minA: minA, maxA: maxA, minB: minB, maxB: maxB, transposeA: transposeA,
+      transposeB: transposeB, inputQuantMode: inputQuantMode)
+  }
+
+  /// Performs a quantized matrix multiplication of `a` by the matrix `b` with bias
+  /// add and relu and requantize fusion.
+  ///
+  /// The inputs must be two-dimensional matrices and a 1D bias vector. The inner
+  /// dimension of `a` (after being transposed if `transpose_a` is non-zero) must
+  /// match the outer dimension of `b` (after being transposed if `transpose_b` is
+  /// non-zero). A broadcast add is then performed with the bias values on the matrix
+  /// multiplication result. The bias size must match the inner dimension of `b`. A
+  /// relu activation is then applied to produce a non-negative result, followed by a
+  /// requantize operation to produce the final uint8 result.
+  ///
+  /// - Parameters:
+  ///   - a: A matrix to be multiplied. Must be a two-dimensional tensor of type `quint8`.
+  ///   - b: A matrix to be multiplied and must be a two-dimensional tensor of type `qint8`.
+  ///   - bias: A 1D bias tensor with size matching the inner dimension of `b` (after being
+  ///     transposed if `transpose_b` is non-zero).
+  ///   - min_a: The float value that the lowest quantized `a` value represents.
+  ///   - max_a: The float value that the highest quantized `a` value represents.
+  ///   - min_b: The float value that the lowest quantized `b` value represents.
+  ///   - max_b: The float value that the highest quantized `b` value represents.
+  ///   - min_freezed_output: The float value that the lowest quantized output value after
+  ///     requantize represents.
+  ///
+  /// - Attrs:
+  ///   - transpose_a: If true, `a` is transposed before multiplication.
+  ///   - transpose_b: If true, `b` is transposed before multiplication.
+  ///   - input_quant_mode: Input data quantization mode. Either MIN_FIRST(default) or SCALED.
+ /// + /// - Outputs: + /// - min_out: The float value that the lowest quantized output value represents. + /// - max_out: The float value that the highest quantized output value represents. + @inlinable @inline(__always) + public static func quantizedMatMulWithBiasAndReluAndRequantize< + T1: TensorFlowScalar, + T2: TensorFlowScalar, + Tbias: FloatingPoint & TensorFlowScalar, + Toutput: TensorFlowScalar + >( + _ a: Tensor, + _ b: Tensor, + bias: Tensor, + minA: Tensor, + maxA: Tensor, + minB: Tensor, + maxB: Tensor, + minFreezedOutput: Tensor, + maxFreezedOutput: Tensor, + transposeA: Bool = false, + transposeB: Bool = false, + inputQuantMode: InputQuantMode = .minFirst + ) -> (out: Tensor, minOut: Tensor, maxOut: Tensor) { + _RawTFEager.quantizedMatMulWithBiasAndReluAndRequantize( + a, b, bias: bias, minA: minA, maxA: maxA, minB: minB, maxB: maxB, + minFreezedOutput: minFreezedOutput, maxFreezedOutput: maxFreezedOutput, + transposeA: transposeA, transposeB: transposeB, inputQuantMode: inputQuantMode) + } + + /// Produces the max pool of the input tensor for quantized types. + /// + /// - Parameters: + /// - input: The 4D (batch x rows x cols x depth) Tensor to MaxReduce over. + /// - min_input: The float value that the lowest quantized input value represents. + /// - max_input: The float value that the highest quantized input value represents. + /// + /// - Attrs: + /// - ksize: The size of the window for each dimension of the input tensor. + /// The length must be 4 to match the number of dimensions of the input. + /// - strides: The stride of the sliding window for each dimension of the input + /// tensor. The length must be 4 to match the number of dimensions of the input. + /// - padding: The type of padding algorithm to use. + /// + /// - Outputs: + /// - min_output: The float value that the lowest quantized output value represents. + /// - max_output: The float value that the highest quantized output value represents. + @inlinable @inline(__always) + public static func quantizedMaxPool( + _ input: Tensor, + minInput: Tensor, + maxInput: Tensor, + ksize: [Int32], + strides: [Int32], + padding: Padding + ) -> (output: Tensor, minOutput: Tensor, maxOutput: Tensor) { + _RawTFEager.quantizedMaxPool( + input, minInput: minInput, maxInput: maxInput, ksize: ksize, strides: strides, + padding: padding) + } + + /// Returns x * y element-wise, working on quantized buffers. + /// + /// - Parameters: + /// - min_x: The float value that the lowest quantized `x` value represents. + /// - max_x: The float value that the highest quantized `x` value represents. + /// - min_y: The float value that the lowest quantized `y` value represents. + /// - max_y: The float value that the highest quantized `y` value represents. + /// + /// - Outputs: + /// - min_z: The float value that the lowest quantized output value represents. + /// - max_z: The float value that the highest quantized output value represents. + /// + /// *NOTE*: `QuantizedMul` supports limited forms of broadcasting. 
More about + /// broadcasting [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + @inlinable @inline(__always) + public static func quantizedMul< + T1: TensorFlowScalar, + T2: TensorFlowScalar, + Toutput: TensorFlowScalar + >( + _ x: Tensor, + _ y: Tensor, + minX: Tensor, + maxX: Tensor, + minY: Tensor, + maxY: Tensor + ) -> (z: Tensor, minZ: Tensor, maxZ: Tensor) { + _RawTFEager.quantizedMul(x, y, minX: minX, maxX: maxX, minY: minY, maxY: maxY) + } + + /// Computes Quantized Rectified Linear: `max(features, 0)` + /// + /// - Parameters: + /// - min_features: The float value that the lowest quantized value represents. + /// - max_features: The float value that the highest quantized value represents. + /// + /// - Outputs: + /// - activations: Has the same output shape as "features". + /// - min_activations: The float value that the lowest quantized value represents. + /// - max_activations: The float value that the highest quantized value represents. + @inlinable @inline(__always) + public static func quantizedRelu< + Tinput: TensorFlowScalar, + OutType: TensorFlowScalar + >( + features: Tensor, + minFeatures: Tensor, + maxFeatures: Tensor + ) -> ( + activations: Tensor, minActivations: Tensor, maxActivations: Tensor + ) { + _RawTFEager.quantizedRelu( + features: features, minFeatures: minFeatures, maxFeatures: maxFeatures) + } + + /// Computes Quantized Rectified Linear 6: `min(max(features, 0), 6)` + /// + /// - Parameters: + /// - min_features: The float value that the lowest quantized value represents. + /// - max_features: The float value that the highest quantized value represents. + /// + /// - Outputs: + /// - activations: Has the same output shape as "features". + /// - min_activations: The float value that the lowest quantized value represents. + /// - max_activations: The float value that the highest quantized value represents. + @inlinable @inline(__always) + public static func quantizedRelu6< + Tinput: TensorFlowScalar, + OutType: TensorFlowScalar + >( + features: Tensor, + minFeatures: Tensor, + maxFeatures: Tensor + ) -> ( + activations: Tensor, minActivations: Tensor, maxActivations: Tensor + ) { + _RawTFEager.quantizedRelu6( + features: features, minFeatures: minFeatures, maxFeatures: maxFeatures) + } + + /// Computes Quantized Rectified Linear X: `min(max(features, 0), max_value)` + /// + /// - Parameters: + /// - min_features: The float value that the lowest quantized value represents. + /// - max_features: The float value that the highest quantized value represents. + /// + /// - Outputs: + /// - activations: Has the same output shape as "features". + /// - min_activations: The float value that the lowest quantized value represents. + /// - max_activations: The float value that the highest quantized value represents. + @inlinable @inline(__always) + public static func quantizedReluX< + Tinput: TensorFlowScalar, + OutType: TensorFlowScalar + >( + features: Tensor, + maxValue: Tensor, + minFeatures: Tensor, + maxFeatures: Tensor + ) -> ( + activations: Tensor, minActivations: Tensor, maxActivations: Tensor + ) { + _RawTFEager.quantizedReluX( + features: features, maxValue: maxValue, minFeatures: minFeatures, maxFeatures: maxFeatures) + } + + /// Reshapes a quantized tensor as per the Reshape op. + /// + /// ``` + /// + /// - Parameters: + /// - shape: Defines the shape of the output tensor. + /// - input_min: The minimum value of the input. + /// - input_max: The maximum value of the input. 
+  ///
+  /// - Outputs:
+  ///   - output_min: This value is copied from input_min.
+  ///   - output_max: This value is copied from input_max.
+  @inlinable @inline(__always)
+  public static func quantizedReshape<
+    T: TensorFlowScalar,
+    Tshape: TensorFlowIndex
+  >(
+    _ tensor: Tensor<T>,
+    shape: Tensor<Tshape>,
+    inputMin: Tensor<Float>,
+    inputMax: Tensor<Float>
+  ) -> (output: Tensor<T>, outputMin: Tensor<Float>, outputMax: Tensor<Float>) {
+    _RawTFEager.quantizedReshape(tensor, shape: shape, inputMin: inputMin, inputMax: inputMax)
+  }
+
+  /// Resize quantized `images` to `size` using quantized bilinear interpolation.
+  ///
+  /// Input images and output images must be quantized types.
+  ///
+  /// - Parameters:
+  ///   - images: 4-D with shape `[batch, height, width, channels]`.
+  ///   - size: A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The
+  ///     new size for the images.
+  ///
+  /// - Attr align_corners: If true, the centers of the 4 corner pixels of the input and output tensors are
+  ///   aligned, preserving the values at the corner pixels. Defaults to false.
+  ///
+  /// - Output resized_images: 4-D with shape
+  ///   `[batch, new_height, new_width, channels]`.
+  @inlinable @inline(__always)
+  public static func quantizedResizeBilinear<T: TensorFlowScalar>(
+    images: Tensor<T>,
+    size: Tensor<Int32>,
+    min: Tensor<Float>,
+    max: Tensor<Float>,
+    alignCorners: Bool = false,
+    halfPixelCenters: Bool = false
+  ) -> (resizedImages: Tensor<T>, outMin: Tensor<Float>, outMax: Tensor<Float>) {
+    _RawTFEager.quantizedResizeBilinear(
+      images: images, size: size, min: min, max: max, alignCorners: alignCorners,
+      halfPixelCenters: halfPixelCenters)
+  }
+
+  /// Closes the given queue.
+  ///
+  /// This operation signals that no more elements will be enqueued in the
+  /// given queue. Subsequent Enqueue(Many) operations will fail.
+  /// Subsequent Dequeue(Many) operations will continue to succeed if
+  /// sufficient elements remain in the queue. Subsequent Dequeue(Many)
+  /// operations that would block will fail immediately.
+  ///
+  /// - Parameter handle: The handle to a queue.
+  ///
+  /// - Attr cancel_pending_enqueues: If true, all pending enqueue requests that are
+  ///   blocked on the given queue will be canceled.
+  @inlinable @inline(__always)
+  public static func queueCloseV2(
+    handle: ResourceHandle,
+    cancelPendingEnqueues: Bool = false
+  ) {
+    _RawTFEager.queueCloseV2(handle: handle, cancelPendingEnqueues: cancelPendingEnqueues)
+  }
+
+  /// Dequeues `n` tuples of one or more tensors from the given queue.
+  ///
+  /// If the queue is closed and there are fewer than `n` elements, then an
+  /// OutOfRange error is returned.
+  ///
+  /// This operation concatenates queue-element component tensors along the
+  /// 0th dimension to make a single component tensor.  All of the components
+  /// in the dequeued tuple will have size `n` in the 0th dimension.
+  ///
+  /// This operation has `k` outputs, where `k` is the number of components in
+  /// the tuples stored in the given queue, and output `i` is the ith
+  /// component of the dequeued tuple.
+  ///
+  /// N.B. If the queue is empty, this operation will block until `n` elements
+  /// have been dequeued (or 'timeout_ms' elapses, if specified).
+  ///
+  /// - Parameters:
+  ///   - handle: The handle to a queue.
+  ///   - n: The number of tuples to dequeue.
+  ///
+  /// - Attrs:
+  ///   - component_types: The type of each component in a tuple.
+  ///   - timeout_ms: If the queue has fewer than n elements, this operation
+  ///     will block for up to timeout_ms milliseconds.
+  ///     Note: This option is not supported yet.
+ /// + /// - Output components: One or more tensors that were dequeued as a tuple. + @inlinable @inline(__always) + public static func queueDequeueManyV2( + handle: ResourceHandle, + n: Tensor, + timeoutMs: Int64 = -1 + ) -> ComponentTypes { + _RawTFEager.queueDequeueManyV2(handle: handle, n: n, timeoutMs: timeoutMs) + } + + /// Dequeues `n` tuples of one or more tensors from the given queue. + /// + /// This operation is not supported by all queues. If a queue does not support + /// DequeueUpTo, then an Unimplemented error is returned. + /// + /// If the queue is closed and there are more than 0 but less than `n` + /// elements remaining, then instead of returning an OutOfRange error like + /// QueueDequeueMany, less than `n` elements are returned immediately. If + /// the queue is closed and there are 0 elements left in the queue, then + /// an OutOfRange error is returned just like in QueueDequeueMany. + /// Otherwise the behavior is identical to QueueDequeueMany: + /// + /// This operation concatenates queue-element component tensors along the + /// 0th dimension to make a single component tensor. All of the components + /// in the dequeued tuple will have size n in the 0th dimension. + /// + /// This operation has `k` outputs, where `k` is the number of components in + /// the tuples stored in the given queue, and output `i` is the ith + /// component of the dequeued tuple. + /// + /// - Parameters: + /// - handle: The handle to a queue. + /// - n: The number of tuples to dequeue. + /// + /// - Attrs: + /// - component_types: The type of each component in a tuple. + /// - timeout_ms: If the queue has fewer than n elements, this operation + /// will block for up to timeout_ms milliseconds. + /// Note: This option is not supported yet. + /// + /// - Output components: One or more tensors that were dequeued as a tuple. + @inlinable @inline(__always) + public static func queueDequeueUpToV2( + handle: ResourceHandle, + n: Tensor, + timeoutMs: Int64 = -1 + ) -> ComponentTypes { + _RawTFEager.queueDequeueUpToV2(handle: handle, n: n, timeoutMs: timeoutMs) + } + + /// Dequeues a tuple of one or more tensors from the given queue. + /// + /// This operation has k outputs, where k is the number of components + /// in the tuples stored in the given queue, and output i is the ith + /// component of the dequeued tuple. + /// + /// N.B. If the queue is empty, this operation will block until an element + /// has been dequeued (or 'timeout_ms' elapses, if specified). + /// + /// - Parameter handle: The handle to a queue. + /// + /// - Attrs: + /// - component_types: The type of each component in a tuple. + /// - timeout_ms: If the queue is empty, this operation will block for up to + /// timeout_ms milliseconds. + /// Note: This option is not supported yet. + /// + /// - Output components: One or more tensors that were dequeued as a tuple. + @inlinable @inline(__always) + public static func queueDequeueV2( + handle: ResourceHandle, + timeoutMs: Int64 = -1 + ) -> ComponentTypes { + _RawTFEager.queueDequeueV2(handle: handle, timeoutMs: timeoutMs) + } + + /// Enqueues zero or more tuples of one or more tensors in the given queue. + /// + /// This operation slices each component tensor along the 0th dimension to + /// make multiple queue elements. All of the tuple components must have the + /// same size in the 0th dimension. + /// + /// The components input has k elements, which correspond to the components of + /// tuples stored in the given queue. + /// + /// N.B. 
If the queue is full, this operation will block until the given + /// elements have been enqueued (or 'timeout_ms' elapses, if specified). + /// + /// - Parameters: + /// - handle: The handle to a queue. + /// - components: One or more tensors from which the enqueued tensors should + /// be taken. + /// + /// - Attr timeout_ms: If the queue is too full, this operation will block for up + /// to timeout_ms milliseconds. + /// Note: This option is not supported yet. + @inlinable @inline(__always) + public static func queueEnqueueManyV2( + handle: ResourceHandle, + components: Tcomponents, + timeoutMs: Int64 = -1 + ) { + _RawTFEager.queueEnqueueManyV2(handle: handle, components: components, timeoutMs: timeoutMs) + } + + /// Enqueues a tuple of one or more tensors in the given queue. + /// + /// The components input has k elements, which correspond to the components of + /// tuples stored in the given queue. + /// + /// N.B. If the queue is full, this operation will block until the given + /// element has been enqueued (or 'timeout_ms' elapses, if specified). + /// + /// - Parameters: + /// - handle: The handle to a queue. + /// - components: One or more tensors from which the enqueued tensors should be taken. + /// + /// - Attr timeout_ms: If the queue is full, this operation will block for up to + /// timeout_ms milliseconds. + /// Note: This option is not supported yet. + @inlinable @inline(__always) + public static func queueEnqueueV2( + handle: ResourceHandle, + components: Tcomponents, + timeoutMs: Int64 = -1 + ) { + _RawTFEager.queueEnqueueV2(handle: handle, components: components, timeoutMs: timeoutMs) + } + + /// Returns true if queue is closed. + /// + /// This operation returns true if the queue is closed and false if the queue + /// is open. + /// + /// - Parameter handle: The handle to a queue. + @inlinable @inline(__always) + public static func queueIsClosedV2( + handle: ResourceHandle + ) -> Tensor { + _RawTFEager.queueIsClosedV2(handle: handle) + } + + /// Computes the number of elements in the given queue. + /// + /// - Parameter handle: The handle to a queue. + /// + /// - Output size: The number of elements in the given queue. + @inlinable @inline(__always) + public static func queueSizeV2( + handle: ResourceHandle + ) -> Tensor { + _RawTFEager.queueSizeV2(handle: handle) + } + + /// Real-valued fast Fourier transform. + /// + /// Computes the 1-dimensional discrete Fourier transform of a real-valued signal + /// over the inner-most dimension of `input`. + /// + /// Since the DFT of a real signal is Hermitian-symmetric, `RFFT` only returns the + /// `fft_length / 2 + 1` unique components of the FFT: the zero-frequency term, + /// followed by the `fft_length / 2` positive-frequency terms. + /// + /// Along the axis `RFFT` is computed on, if `fft_length` is smaller than the + /// corresponding dimension of `input`, the dimension is cropped. If it is larger, + /// the dimension is padded with zeros. + /// + /// - Parameters: + /// - input: A float32 tensor. + /// - fft_length: An int32 tensor of shape [1]. The FFT length. + /// + /// - Output output: A complex64 tensor of the same rank as `input`. The inner-most + /// dimension of `input` is replaced with the `fft_length / 2 + 1` unique + /// frequency components of its 1D Fourier transform. 
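+  ///
+  /// A minimal usage sketch (example values assumed, and this assumes
+  /// `Complex<Float>` is available as the complex scalar type):
+  ///
+  /// ```swift
+  /// // 1D real FFT of a length-4 signal; the output holds
+  /// // 4 / 2 + 1 == 3 complex frequency components.
+  /// let signal = Tensor<Float>([0, 1, 2, 3])
+  /// let spectrum: Tensor<Complex<Float>> = _Raw.rFFT(
+  ///   signal, fftLength: Tensor<Int32>([4]))
+  /// ```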
+  ///
+  /// @compatibility(numpy)
+  /// Equivalent to np.fft.rfft
+  /// @end_compatibility
+  @inlinable @inline(__always)
+  public static func rFFT<
+    Treal: FloatingPoint & TensorFlowScalar,
+    Tcomplex: TensorFlowScalar
+  >(
+    _ input: Tensor<Treal>,
+    fftLength: Tensor<Int32>
+  ) -> Tensor<Tcomplex> {
+    switch commonBackend(input.handle.backend, fftLength.handle.backend) {
+    case .XLA:
+      let output_device = fftLength.device
+      let input = Tensor(copying: input, to: .defaultTFEager)
+      let fftLength = Tensor(copying: fftLength, to: .defaultTFEager)
+      return Tensor(
+        copying: _RawTFEager.rFFT(input, fftLength: fftLength), to: output_device)
+    case .TF_EAGER:
+      return _RawTFEager.rFFT(input, fftLength: fftLength)
+    }
+
+  }
+
+  /// 2D real-valued fast Fourier transform.
+  ///
+  /// Computes the 2-dimensional discrete Fourier transform of a real-valued signal
+  /// over the inner-most 2 dimensions of `input`.
+  ///
+  /// Since the DFT of a real signal is Hermitian-symmetric, `RFFT2D` only returns the
+  /// `fft_length / 2 + 1` unique components of the FFT for the inner-most dimension
+  /// of `output`: the zero-frequency term, followed by the `fft_length / 2`
+  /// positive-frequency terms.
+  ///
+  /// Along each axis `RFFT2D` is computed on, if `fft_length` is smaller than the
+  /// corresponding dimension of `input`, the dimension is cropped. If it is larger,
+  /// the dimension is padded with zeros.
+  ///
+  /// - Parameters:
+  ///   - input: A float32 tensor.
+  ///   - fft_length: An int32 tensor of shape [2]. The FFT length for each dimension.
+  ///
+  /// - Output output: A complex64 tensor of the same rank as `input`. The inner-most 2
+  ///   dimensions of `input` are replaced with their 2D Fourier transform. The
+  ///   inner-most dimension contains `fft_length / 2 + 1` unique frequency
+  ///   components.
+  ///
+  /// @compatibility(numpy)
+  /// Equivalent to np.fft.rfft2
+  /// @end_compatibility
+  @inlinable @inline(__always)
+  public static func rFFT2D<
+    Treal: FloatingPoint & TensorFlowScalar,
+    Tcomplex: TensorFlowScalar
+  >(
+    _ input: Tensor<Treal>,
+    fftLength: Tensor<Int32>
+  ) -> Tensor<Tcomplex> {
+    switch commonBackend(input.handle.backend, fftLength.handle.backend) {
+    case .XLA:
+      let output_device = fftLength.device
+      let input = Tensor(copying: input, to: .defaultTFEager)
+      let fftLength = Tensor(copying: fftLength, to: .defaultTFEager)
+      return Tensor(
+        copying: _RawTFEager.rFFT2D(input, fftLength: fftLength), to: output_device)
+    case .TF_EAGER:
+      return _RawTFEager.rFFT2D(input, fftLength: fftLength)
+    }
+
+  }
+
+  /// 3D real-valued fast Fourier transform.
+  ///
+  /// Computes the 3-dimensional discrete Fourier transform of a real-valued signal
+  /// over the inner-most 3 dimensions of `input`.
+  ///
+  /// Since the DFT of a real signal is Hermitian-symmetric, `RFFT3D` only returns the
+  /// `fft_length / 2 + 1` unique components of the FFT for the inner-most dimension
+  /// of `output`: the zero-frequency term, followed by the `fft_length / 2`
+  /// positive-frequency terms.
+  ///
+  /// Along each axis `RFFT3D` is computed on, if `fft_length` is smaller than the
+  /// corresponding dimension of `input`, the dimension is cropped. If it is larger,
+  /// the dimension is padded with zeros.
+  ///
+  /// - Parameters:
+  ///   - input: A float32 tensor.
+  ///   - fft_length: An int32 tensor of shape [3]. The FFT length for each dimension.
+  ///
+  /// - Output output: A complex64 tensor of the same rank as `input`. The inner-most 3
+  ///   dimensions of `input` are replaced with their 3D Fourier transform.
The
+  ///   inner-most dimension contains `fft_length / 2 + 1` unique frequency
+  ///   components.
+  ///
+  /// @compatibility(numpy)
+  /// Equivalent to np.fft.rfftn with 3 dimensions.
+  /// @end_compatibility
+  @inlinable @inline(__always)
+  public static func rFFT3D<
+    Treal: FloatingPoint & TensorFlowScalar,
+    Tcomplex: TensorFlowScalar
+  >(
+    _ input: Tensor<Treal>,
+    fftLength: Tensor<Int32>
+  ) -> Tensor<Tcomplex> {
+    switch commonBackend(input.handle.backend, fftLength.handle.backend) {
+    case .XLA:
+      let output_device = fftLength.device
+      let input = Tensor(copying: input, to: .defaultTFEager)
+      let fftLength = Tensor(copying: fftLength, to: .defaultTFEager)
+      return Tensor(
+        copying: _RawTFEager.rFFT3D(input, fftLength: fftLength), to: output_device)
+    case .TF_EAGER:
+      return _RawTFEager.rFFT3D(input, fftLength: fftLength)
+    }
+
+  }
+
+  /// Converts one or more images from RGB to HSV.
+  ///
+  /// Outputs a tensor of the same shape as the `images` tensor, containing the HSV
+  /// value of the pixels. The output is only well defined if the values in `images`
+  /// are in `[0,1]`.
+  ///
+  /// `output[..., 0]` contains hue, `output[..., 1]` contains saturation, and
+  /// `output[..., 2]` contains value. All HSV values are in `[0,1]`. A hue of 0
+  /// corresponds to pure red, hue 1/3 is pure green, and 2/3 is pure blue.
+  ///
+  /// Usage Example:
+  ///
+  /// >>> blue_image = tf.stack([
+  /// ...    tf.zeros([5,5]),
+  /// ...    tf.zeros([5,5]),
+  /// ...    tf.ones([5,5])],
+  /// ...    axis=-1)
+  /// >>> blue_hsv_image = tf.image.rgb_to_hsv(blue_image)
+  /// >>> blue_hsv_image[0,0].numpy()
+  /// array([0.6666667, 1. , 1. ], dtype=float32)
+  ///
+  ///
+  /// - Parameter images: 1-D or higher rank. RGB data to convert. Last dimension must be size 3.
+  ///
+  /// - Output output: `images` converted to HSV.
+  @inlinable @inline(__always)
+  public static func rGBToHSV<T: FloatingPoint & TensorFlowScalar>(
+    images: Tensor<T>
+  ) -> Tensor<T> {
+    switch images.handle.backend {
+    case .XLA:
+      let output_device = images.device
+      let images = Tensor(copying: images, to: .defaultTFEager)
+      return Tensor(copying: _RawTFEager.rGBToHSV(images: images), to: output_device)
+    case .TF_EAGER:
+      return _RawTFEager.rGBToHSV(images: images)
+    }
+
+  }
+
+  /// Gather ragged slices from `params` axis `0` according to `indices`.
+  ///
+  /// Outputs a `RaggedTensor` output composed from `output_dense_values` and
+  /// `output_nested_splits`, such that:
+  ///
+  /// ```python
+  /// output.shape = indices.shape + params.shape[1:]
+  /// output.ragged_rank = indices.shape.ndims + params.ragged_rank
+  /// output[i...j, d0...dn] = params[indices[i...j], d0...dn]
+  /// ```
+  ///
+  /// where
+  ///
+  /// * `params =
+  ///   ragged.from_nested_row_splits(params_dense_values, params_nested_splits)`
+  ///   provides the values that should be gathered.
+  /// * `indices` is a dense tensor with dtype `int32` or `int64`, indicating which
+  ///   values should be gathered.
+  /// * `output =
+  ///   ragged.from_nested_row_splits(output_dense_values, output_nested_splits)`
+  ///   is the output tensor.
+  ///
+  /// (Note: This c++ op is used to implement the higher-level python
+  /// `tf.ragged.gather` op, which also supports ragged indices.)
+  ///
+  ///
+  /// - Parameters:
+  ///   - params_nested_splits: The `nested_row_splits` tensors that define the row-partitioning for the
+  ///     `params` RaggedTensor input.
+  ///   - params_dense_values: The `flat_values` for the `params` RaggedTensor. There was a terminology change
+  ///     at the python level from dense_values to flat_values, so dense_values is the
+  ///     deprecated name.
+ /// - indices: Indices in the outermost dimension of `params` of the values that should be + /// gathered. + /// + /// - Attrs: + /// - PARAMS_RAGGED_RANK: The ragged rank of the `params` RaggedTensor. `params_nested_splits` should + /// contain this number of `row_splits` tensors. This value should equal + /// `params.ragged_rank`. + /// - OUTPUT_RAGGED_RANK: The ragged rank of the output RaggedTensor. `output_nested_splits` will contain + /// this number of `row_splits` tensors. This value should equal + /// `indices.shape.ndims + params.ragged_rank - 1`. + /// + /// - Outputs: + /// - output_nested_splits: The `nested_row_splits` tensors that define the row-partitioning for the + /// returned RaggedTensor. + /// - output_dense_values: The `flat_values` for the returned RaggedTensor. + @inlinable @inline(__always) + public static func raggedGather< + Tvalues: TensorFlowScalar, + Tindices: TensorFlowIndex, + Tsplits: TensorFlowIndex + >( + paramsNestedSplits: [Tensor], + paramsDenseValues: Tensor, + indices: Tensor, + oUTPUTRAGGEDRANK: Int64 + ) -> (outputNestedSplits: [Tensor], outputDenseValues: Tensor) { + _RawTFEager.raggedGather( + paramsNestedSplits: paramsNestedSplits, paramsDenseValues: paramsDenseValues, + indices: indices, oUTPUTRAGGEDRANK: oUTPUTRAGGEDRANK) + } + + /// Returns a `RaggedTensor` containing the specified sequences of numbers. + /// + /// + /// Returns a `RaggedTensor` `result` composed from `rt_dense_values` and + /// `rt_nested_splits`, such that + /// `result[i] = range(starts[i], limits[i], deltas[i])`. + /// + /// ```python + /// (rt_nested_splits, rt_dense_values) = ragged_range( + /// starts=[2, 5, 8], limits=[3, 5, 12], deltas=1) + /// result = tf.ragged.from_row_splits(rt_dense_values, rt_nested_splits) + /// print(result) + /// + /// ``` + /// + /// The input tensors `starts`, `limits`, and `deltas` may be scalars or vectors. + /// The vector inputs must all have the same size. Scalar inputs are broadcast + /// to match the size of the vector inputs. + /// + /// - Parameters: + /// - starts: The starts of each range. + /// - limits: The limits of each range. + /// - deltas: The deltas of each range. + /// + /// - Outputs: + /// - rt_nested_splits: The `row_splits` for the returned `RaggedTensor`. + /// - rt_dense_values: The `flat_values` for the returned `RaggedTensor`. + @inlinable @inline(__always) + public static func raggedRange< + T: TensorFlowNumeric, + Tsplits: TensorFlowIndex + >( + starts: Tensor, + limits: Tensor, + deltas: Tensor + ) -> (rtNestedSplits: Tensor, rtDenseValues: Tensor) { + _RawTFEager.raggedRange(starts: starts, limits: limits, deltas: deltas) + } + + /// Decodes a `variant` Tensor into a `RaggedTensor`. + /// + /// Decodes the given `variant` Tensor and returns a `RaggedTensor`. The input + /// could be a scalar, meaning it encodes a single `RaggedTensor` with ragged_rank + /// `output_ragged_rank`. It could also have an arbitrary rank, in which case each + /// element is decoded into a `RaggedTensor` with ragged_rank `input_ragged_rank` + /// and these are then stacked according to the input shape to output a single + /// `RaggedTensor` with ragged_rank `output_ragged_rank`. Each `variant` element in + /// the input Tensor is decoded by retrieving from the element a 1-D `variant` + /// Tensor with `input_ragged_rank + 1` Tensors, corresponding to the splits and + /// values of the decoded `RaggedTensor`. If `input_ragged_rank` is -1, then it is + /// inferred as `output_ragged_rank` - `rank(encoded_ragged)`. 
See
+  /// `RaggedTensorToVariant` for the corresponding encoding logic.
+  ///
+  ///
+  /// - Parameter encoded_ragged: A `variant` Tensor containing encoded `RaggedTensor`s.
+  ///
+  /// - Attrs:
+  ///   - input_ragged_rank: The ragged rank of each encoded `RaggedTensor` component in the input. If set to
+  ///     -1, this is inferred as `output_ragged_rank` - `rank(encoded_ragged)`
+  ///   - output_ragged_rank: The expected ragged rank of the output `RaggedTensor`. The following must hold:
+  ///     `output_ragged_rank = rank(encoded_ragged) + input_ragged_rank`.
+  ///
+  /// - Outputs:
+  ///   - output_nested_splits: A list of one or more Tensors representing the splits of the output
+  ///     `RaggedTensor`.
+  ///   - output_dense_values: A Tensor representing the values of the output `RaggedTensor`.
+  @inlinable @inline(__always)
+  public static func raggedTensorFromVariant<
+    Tvalues: TensorFlowScalar,
+    Tsplits: TensorFlowIndex
+  >(
+    encodedRagged: VariantHandle,
+    inputRaggedRank: Int64,
+    outputRaggedRank: Int64
+  ) -> (outputNestedSplits: [Tensor<Tsplits>], outputDenseValues: Tensor<Tvalues>) {
+    _RawTFEager.raggedTensorFromVariant(
+      encodedRagged: encodedRagged, inputRaggedRank: inputRaggedRank,
+      outputRaggedRank: outputRaggedRank)
+  }
+
+  /// Converts a `RaggedTensor` into a `SparseTensor` with the same values.
+  ///
+  /// input=ragged.from_nested_row_splits(rt_dense_values, rt_nested_splits)
+  /// output=SparseTensor(indices=sparse_indices, values=sparse_values,
+  ///                     dense_shape=sparse_dense_shape)
+  ///
+  /// - Parameters:
+  ///   - rt_nested_splits: The `row_splits` for the `RaggedTensor`.
+  ///   - rt_dense_values: The `flat_values` for the `RaggedTensor`.
+  ///
+  /// - Attr RAGGED_RANK: The ragged rank of the input RaggedTensor. `rt_nested_splits` should contain
+  ///   this number of ragged-splits tensors. This value should equal
+  ///   `input.ragged_rank`.
+  ///
+  /// - Outputs:
+  ///   - sparse_indices: The indices for the `SparseTensor`.
+  ///   - sparse_values: The values of the `SparseTensor`.
+  ///   - sparse_dense_shape: `sparse_dense_shape` is a tight bounding box of the input `RaggedTensor`.
+  @inlinable @inline(__always)
+  public static func raggedTensorToSparse<
+    T: TensorFlowScalar,
+    Tsplits: TensorFlowIndex
+  >(
+    rtNestedSplits: [Tensor<Tsplits>],
+    rtDenseValues: Tensor<T>
+  ) -> (sparseIndices: Tensor<Int64>, sparseValues: Tensor<T>, sparseDenseShape: Tensor<Int64>) {
+    _RawTFEager.raggedTensorToSparse(rtNestedSplits: rtNestedSplits, rtDenseValues: rtDenseValues)
+  }
+
+  /// Create a dense tensor from a ragged tensor, possibly altering its shape.
+  ///
+  /// The `ragged_to_dense` op creates a dense tensor from a list of row partition
+  /// tensors, a value vector, and default values. If the shape is unspecified, the
+  /// minimal shape required to contain all the elements in the ragged tensor (the
+  /// natural shape) will be used. If some dimensions are left unspecified, then the
+  /// size of the natural shape is used in that dimension.
+  ///
+  /// The default_value will be broadcast to the output shape. After that, the values
+  /// from the ragged tensor overwrite the default values. Note that the default_value
+  /// must have fewer dimensions than the value.
+  ///
+  /// The row partition tensors are in the order of the dimensions.
+  /// At present, the types can be:
+  /// * "ROW_SPLITS": the row_splits tensor from the ragged tensor.
+  /// * "VALUE_ROWIDS": the value_rowids tensor from the ragged tensor.
+  /// * "FIRST_DIM_SIZE": if value_rowids is used for the first dimension, then it
+  ///   is preceded by "FIRST_DIM_SIZE".
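+  ///
+  /// For example, with "ROW_SPLITS" row partitions (a minimal sketch; the
+  /// example values are assumed, not taken from the op documentation):
+  ///
+  /// ```swift
+  /// // Ragged [[1, 2], [3]] densified to [[1, 2, 0], [3, 0, 0]].
+  /// let dense: Tensor<Int32> = _Raw.raggedTensorToTensor(
+  ///   shape: Tensor<Int64>([2, 3]),
+  ///   Tensor<Int32>([1, 2, 3]),
+  ///   defaultValue: Tensor<Int32>(0),
+  ///   rowPartitionTensors: [Tensor<Int64>([0, 2, 3])],
+  ///   rowPartitionTypes: ["ROW_SPLITS"])
+  /// ```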
+  ///
+  /// - Parameters:
+  ///   - shape: The desired shape of the output tensor. If left unspecified (empty),
+  ///     the minimal shape required to contain all the elements in the ragged tensor
+  ///     (the natural shape) will be used. If some dimensions are left unspecified, then
+  ///     the size of the natural shape is used in that dimension.
+  ///
+  ///     Note that dense dimensions cannot be modified by the shape argument. Trying to
+  ///     change the size of a dense dimension will cause the op to fail.
+  ///     Examples:
+  ///     natural shape: [4, 5, 6]
+  ///     shape: -1
+  ///     output shape: [4, 5, 6]
+  ///
+  ///     natural shape: [4, 5, 6]
+  ///     shape: [3, -1, 2]
+  ///     output shape: [3, 5, 2]
+  ///
+  ///     natural shape: [4, 5, 6]
+  ///     shape: [3, 7, 2]
+  ///     output shape: [3, 7, 2]
+  ///
+  ///   - values: A 1D tensor representing the values of the ragged tensor.
+  ///   - default_value: The default_value when the shape is larger than the ragged tensor. The
+  ///     default_value is broadcast until it is the shape of the output tensor, and
+  ///     then overwritten by values in the ragged tensor. The default value must be
+  ///     compatible with this broadcast operation, and must have fewer dimensions than
+  ///     the value tensor.
+  ///
+  /// - Attr row_partition_types: The types of the row partition tensors. At present, these can be:
+  ///   * "ROW_SPLITS": the row_splits tensor from the ragged tensor.
+  ///   * "VALUE_ROWIDS": the value_rowids tensor from the ragged tensor.
+  ///   * "FIRST_DIM_SIZE": if value_rowids is used for the first dimension, then it
+  ///     is preceded by "FIRST_DIM_SIZE".
+  ///   The tensors are in the order of the dimensions.
+  ///
+  /// - Output result: The resulting dense tensor.
+  @inlinable @inline(__always)
+  public static func raggedTensorToTensor<
+    T: TensorFlowScalar,
+    Tindex: TensorFlowIndex,
+    Tshape: TensorFlowIndex
+  >(
+    shape: Tensor<Tshape>,
+    _ values: Tensor<T>,
+    defaultValue: Tensor<T>,
+    rowPartitionTensors: [Tensor<Tindex>],
+    rowPartitionTypes: [String]
+  ) -> Tensor<T> {
+    switch commonBackend(
+      commonBackend(
+        commonBackend(shape.handle.backend, values.handle.backend), defaultValue.handle.backend),
+      commonBackend(rowPartitionTensors))
+    {
+    case .XLA:
+      let output_device = defaultValue.device
+      let shape = Tensor(copying: shape, to: .defaultTFEager)
+      let values = Tensor(copying: values, to: .defaultTFEager)
+      let defaultValue = Tensor(copying: defaultValue, to: .defaultTFEager)
+      let rowPartitionTensors = [Tensor<Tindex>](
+        copying: rowPartitionTensors, to: .defaultTFEager)
+      return Tensor(
+        copying: _RawTFEager.raggedTensorToTensor(
+          shape: shape, values, defaultValue: defaultValue,
+          rowPartitionTensors: rowPartitionTensors, rowPartitionTypes: rowPartitionTypes),
+        to: output_device)
+    case .TF_EAGER:
+      return _RawTFEager.raggedTensorToTensor(
+        shape: shape, values, defaultValue: defaultValue,
+        rowPartitionTensors: rowPartitionTensors, rowPartitionTypes: rowPartitionTypes)
+    }
+
+  }
+
+  /// Encodes a `RaggedTensor` into a `variant` Tensor.
+  ///
+  ///
+  /// Encodes the given `RaggedTensor` and returns a `variant` Tensor. If
+  /// `batched_input` is True, then input `RaggedTensor` is unbatched along the
+  /// zero-th dimension, each component `RaggedTensor` is encoded into a scalar
+  /// `variant` Tensor, and these are stacked to return a 1-D `variant` Tensor.
+  /// If `batched_input` is False, then the input `RaggedTensor` is encoded as is and
+  /// a scalar `variant` Tensor is returned.
A `RaggedTensor` is encoded by first
+  /// creating a 1-D `variant` Tensor with `ragged_rank + 1` elements, containing the
+  /// splits and values Tensors of the `RaggedTensor`. Then the 1-D `variant` Tensor
+  /// is wrapped in a scalar `variant` Tensor. See `RaggedTensorFromVariant` for the
+  /// corresponding decoding logic.
+  ///
+  ///
+  /// - Parameters:
+  ///   - rt_nested_splits: A list of one or more Tensors representing the splits of the input
+  ///     `RaggedTensor`.
+  ///   - rt_dense_values: A Tensor representing the values of the input `RaggedTensor`.
+  ///
+  /// - Attr batched_input: A `bool` denoting whether the input is a batched `RaggedTensor`.
+  ///
+  /// - Output encoded_ragged: A `variant` Tensor containing the encoded `RaggedTensor`.
+  @inlinable @inline(__always)
+  public static func raggedTensorToVariant<
+    Tvalues: TensorFlowScalar,
+    Tsplits: TensorFlowIndex
+  >(
+    rtNestedSplits: [Tensor<Tsplits>],
+    rtDenseValues: Tensor<Tvalues>,
+    batchedInput: Bool
+  ) -> VariantHandle {
+    _RawTFEager.raggedTensorToVariant(
+      rtNestedSplits: rtNestedSplits, rtDenseValues: rtDenseValues, batchedInput: batchedInput)
+  }
+
+  /// Randomly crop `image`.
+  ///
+  /// `size` is a 1-D int64 tensor with 2 elements representing the crop height and
+  /// width. The values must be non-negative.
+  ///
+  /// This Op picks a random location in `image` and crops a `height` by `width`
+  /// rectangle from that location. The random location is picked so the cropped
+  /// area will fit inside the original image.
+  ///
+  /// - Parameters:
+  ///   - image: 3-D of shape `[height, width, channels]`.
+  ///   - size: 1-D of length 2 containing: `crop_height`, `crop_width`.
+  ///
+  /// - Attrs:
+  ///   - seed: If either seed or seed2 are set to be non-zero, the random number
+  ///     generator is seeded by the given seed. Otherwise, it is seeded by a
+  ///     random seed.
+  ///   - seed2: A second seed to avoid seed collision.
+  ///
+  /// - Output output: 3-D of shape `[crop_height, crop_width, channels]`.
+  @inlinable @inline(__always)
+  public static func randomCrop<T: TensorFlowNumeric>(
+    image: Tensor<T>,
+    size: Tensor<Int64>,
+    seed: Int64 = 0,
+    seed2: Int64 = 0
+  ) -> Tensor<T> {
+    switch commonBackend(image.handle.backend, size.handle.backend) {
+    case .XLA:
+      let output_device = size.device
+      let image = Tensor(copying: image, to: .defaultTFEager)
+      let size = Tensor(copying: size, to: .defaultTFEager)
+      return Tensor(
+        copying: _RawTFEager.randomCrop(image: image, size: size, seed: seed, seed2: seed2),
+        to: output_device)
+    case .TF_EAGER:
+      return _RawTFEager.randomCrop(image: image, size: size, seed: seed, seed2: seed2)
+    }
+
+  }
+
+  /// Creates a Dataset that returns pseudorandom numbers.
+  ///
+  /// Creates a Dataset that returns a stream of uniformly distributed
+  /// pseudorandom 64-bit signed integers.
+  ///
+  /// In the TensorFlow Python API, you can instantiate this dataset via the
+  /// class `tf.data.experimental.RandomDataset`.
+  ///
+  /// Instances of this dataset are also created as a result of the
+  /// `hoist_random_uniform` static optimization. Whether this optimization is
+  /// performed is determined by the `experimental_optimization.hoist_random_uniform`
+  /// option of `tf.data.Options`.
+  ///
+  /// - Parameters:
+  ///   - seed: A scalar seed for the random number generator. If either seed or
+  ///     seed2 is set to be non-zero, the random number generator is seeded
+  ///     by the given seed. Otherwise, a random seed is used.
+  ///   - seed2: A second scalar seed to avoid seed collision.
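+  ///
+  /// A minimal construction sketch (the seed values and the single-`Int64`
+  /// output signature are assumed for illustration):
+  ///
+  /// ```swift
+  /// // An infinite stream of scalar pseudorandom Int64 values.
+  /// let dataset = _Raw.randomDataset(
+  ///   seed: Tensor<Int64>(42),
+  ///   seed2: Tensor<Int64>(7),
+  ///   outputTypes: [Int64.tensorFlowDataType],
+  ///   outputShapes: [TensorShape([])])
+  /// ```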
+ @inlinable @inline(__always) + public static func randomDataset( + seed: Tensor, + seed2: Tensor, + outputTypes: [TensorDataType], + outputShapes: [TensorShape?] + ) -> VariantHandle { + _RawTFEager.randomDataset( + seed: seed, seed2: seed2, outputTypes: outputTypes, outputShapes: outputShapes) + } + + /// Outputs random values from the Gamma distribution(s) described by alpha. + /// + /// This op uses the algorithm by Marsaglia et al. to acquire samples via + /// transformation-rejection from pairs of uniform and normal random variables. + /// See http://dl.acm.org/citation.cfm?id=358414 + /// + /// - Parameters: + /// - shape: 1-D integer tensor. Shape of independent samples to draw from each + /// distribution described by the shape parameters given in alpha. + /// - alpha: A tensor in which each scalar is a "shape" parameter describing the + /// associated gamma distribution. + /// + /// - Attrs: + /// - seed: If either `seed` or `seed2` are set to be non-zero, the random number + /// generator is seeded by the given seed. Otherwise, it is seeded by a + /// random seed. + /// - seed2: A second seed to avoid seed collision. + /// + /// - Output output: A tensor with shape `shape + shape(alpha)`. Each slice + /// `[:, ..., :, i0, i1, ...iN]` contains the samples drawn for + /// `alpha[i0, i1, ...iN]`. The dtype of the output matches the dtype of alpha. + @inlinable @inline(__always) + public static func randomGamma< + S: TensorFlowIndex, + T: FloatingPoint & TensorFlowScalar + >( + shape: Tensor, + alpha: Tensor, + seed: Int64 = 0, + seed2: Int64 = 0 + ) -> Tensor { + switch commonBackend(shape.handle.backend, alpha.handle.backend) { + case .XLA: + let output_device = alpha.device + let shape = Tensor(copying: shape, to: .defaultTFEager) + let alpha = Tensor(copying: alpha, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.randomGamma(shape: shape, alpha: alpha, seed: seed, seed2: seed2), + to: output_device) + case .TF_EAGER: + return _RawTFEager.randomGamma(shape: shape, alpha: alpha, seed: seed, seed2: seed2) + } + + } + + /// Computes the derivative of a Gamma random sample w.r.t. `alpha`. + @inlinable @inline(__always) + public static func randomGammaGrad( + alpha: Tensor, + sample: Tensor + ) -> Tensor { + switch commonBackend(alpha.handle.backend, sample.handle.backend) { + case .XLA: + let output_device = sample.device + let alpha = Tensor(copying: alpha, to: .defaultTFEager) + let sample = Tensor(copying: sample, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.randomGammaGrad(alpha: alpha, sample: sample), to: output_device) + case .TF_EAGER: + return _RawTFEager.randomGammaGrad(alpha: alpha, sample: sample) + } + + } + + /// Use RandomPoissonV2 instead. + @inlinable @inline(__always) + public static func randomPoisson< + S: TensorFlowIndex, + Dtype: FloatingPoint & TensorFlowScalar + >( + shape: Tensor, + rate: Tensor, + seed: Int64 = 0, + seed2: Int64 = 0 + ) -> Tensor { + switch commonBackend(shape.handle.backend, rate.handle.backend) { + case .XLA: + let output_device = rate.device + let shape = Tensor(copying: shape, to: .defaultTFEager) + let rate = Tensor(copying: rate, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.randomPoisson(shape: shape, rate: rate, seed: seed, seed2: seed2), + to: output_device) + case .TF_EAGER: + return _RawTFEager.randomPoisson(shape: shape, rate: rate, seed: seed, seed2: seed2) + } + + } + + /// Outputs random values from the Poisson distribution(s) described by rate. 
+ /// + /// This op uses two algorithms, depending on rate. If rate >= 10, then + /// the algorithm by Hormann is used to acquire samples via + /// transformation-rejection. + /// See http://www.sciencedirect.com/science/article/pii/0167668793909974. + /// + /// Otherwise, Knuth's algorithm is used to acquire samples via multiplying uniform + /// random variables. + /// See Donald E. Knuth (1969). Seminumerical Algorithms. The Art of Computer + /// Programming, Volume 2. Addison Wesley + /// + /// - Parameters: + /// - shape: 1-D integer tensor. Shape of independent samples to draw from each + /// distribution described by the shape parameters given in rate. + /// - rate: A tensor in which each scalar is a "rate" parameter describing the + /// associated poisson distribution. + /// + /// - Attrs: + /// - seed: If either `seed` or `seed2` are set to be non-zero, the random number + /// generator is seeded by the given seed. Otherwise, it is seeded by a + /// random seed. + /// - seed2: A second seed to avoid seed collision. + /// + /// - Output output: A tensor with shape `shape + shape(rate)`. Each slice + /// `[:, ..., :, i0, i1, ...iN]` contains the samples drawn for + /// `rate[i0, i1, ...iN]`. + @inlinable @inline(__always) + public static func randomPoissonV2< + S: TensorFlowIndex, + R: TensorFlowNumeric, + Dtype: TensorFlowNumeric + >( + shape: Tensor, + rate: Tensor, + seed: Int64 = 0, + seed2: Int64 = 0 + ) -> Tensor { + switch commonBackend(shape.handle.backend, rate.handle.backend) { + case .XLA: + let output_device = rate.device + let shape = Tensor(copying: shape, to: .defaultTFEager) + let rate = Tensor(copying: rate, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.randomPoissonV2(shape: shape, rate: rate, seed: seed, seed2: seed2), + to: output_device) + case .TF_EAGER: + return _RawTFEager.randomPoissonV2(shape: shape, rate: rate, seed: seed, seed2: seed2) + } + + } + + /// Randomly shuffles a tensor along its first dimension. + /// + /// The tensor is shuffled along dimension 0, such that each `value[j]` is mapped + /// to one and only one `output[i]`. For example, a mapping that might occur for a + /// 3x2 tensor is: + /// + /// ``` + /// [[1, 2], [[5, 6], + /// [3, 4], ==> [1, 2], + /// [5, 6]] [3, 4]] + /// ``` + /// + /// - Parameter value: The tensor to be shuffled. + /// + /// - Attrs: + /// - seed: If either `seed` or `seed2` are set to be non-zero, the random number + /// generator is seeded by the given seed. Otherwise, it is seeded by a + /// random seed. + /// - seed2: A second seed to avoid seed collision. + /// + /// - Output output: A tensor of same shape and type as `value`, shuffled along its first + /// dimension. + @inlinable @inline(__always) + public static func randomShuffle( + value: Tensor, + seed: Int64 = 0, + seed2: Int64 = 0 + ) -> Tensor { + switch value.handle.backend { + case .XLA: + let output_device = value.device + let value = Tensor(copying: value, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.randomShuffle(value: value, seed: seed, seed2: seed2), + to: output_device) + case .TF_EAGER: + return _RawTFEager.randomShuffle(value: value, seed: seed, seed2: seed2) + } + + } + + /// A queue that randomizes the order of elements. + /// + /// - Attrs: + /// - component_types: The type of each component in a value. + /// - shapes: The shape of each component in a value. The length of this attr must + /// be either 0 or the same as the length of component_types. 
If the length of + /// this attr is 0, the shapes of queue elements are not constrained, and + /// only one element may be dequeued at a time. + /// - capacity: The upper bound on the number of elements in this queue. + /// Negative numbers mean no limit. + /// - min_after_dequeue: Dequeue will block unless there would be this + /// many elements after the dequeue or the queue is closed. This + /// ensures a minimum level of mixing of elements. + /// - seed: If either seed or seed2 is set to be non-zero, the random number + /// generator is seeded by the given seed. Otherwise, a random seed is used. + /// - seed2: A second seed to avoid seed collision. + /// - container: If non-empty, this queue is placed in the given container. + /// Otherwise, a default container is used. + /// - shared_name: If non-empty, this queue will be shared under the given name + /// across multiple sessions. + /// + /// - Output handle: The handle to the queue. + @inlinable @inline(__always) + public static func randomShuffleQueueV2( + componentTypes: [TensorDataType], + shapes: [TensorShape?], + capacity: Int64 = -1, + minAfterDequeue: Int64 = 0, + seed: Int64 = 0, + seed2: Int64 = 0, + container: String, + sharedName: String + ) -> ResourceHandle { + _RawTFEager.randomShuffleQueueV2( + componentTypes: componentTypes, shapes: shapes, capacity: capacity, + minAfterDequeue: minAfterDequeue, seed: seed, seed2: seed2, container: container, + sharedName: sharedName) + } + + /// Outputs random values from a normal distribution. + /// + /// The generated values will have mean 0 and standard deviation 1. + /// + /// - Parameter shape: The shape of the output tensor. + /// + /// - Attrs: + /// - seed: If either `seed` or `seed2` are set to be non-zero, the random number + /// generator is seeded by the given seed. Otherwise, it is seeded by a + /// random seed. + /// - seed2: A second seed to avoid seed collision. + /// - dtype: The type of the output. + /// + /// - Output output: A tensor of the specified shape filled with random normal values. + @inlinable @inline(__always) + public static func randomStandardNormal< + Dtype: FloatingPoint & TensorFlowScalar, + T: TensorFlowIndex + >( + shape: Tensor, + seed: Int64 = 0, + seed2: Int64 = 0 + ) -> Tensor { + switch shape.handle.backend { + case .XLA: + let output_device = shape.device + let shape = Tensor(copying: shape, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.randomStandardNormal(shape: shape, seed: seed, seed2: seed2), + to: output_device) + case .TF_EAGER: + return _RawTFEager.randomStandardNormal(shape: shape, seed: seed, seed2: seed2) + } + + } + + /// Outputs random values from a uniform distribution. + /// + /// The generated values follow a uniform distribution in the range `[0, 1)`. The + /// lower bound 0 is included in the range, while the upper bound 1 is excluded. + /// + /// - Parameter shape: The shape of the output tensor. + /// + /// - Attrs: + /// - seed: If either `seed` or `seed2` are set to be non-zero, the random number + /// generator is seeded by the given seed. Otherwise, it is seeded by a + /// random seed. + /// - seed2: A second seed to avoid seed collision. + /// - dtype: The type of the output. + /// + /// - Output output: A tensor of the specified shape filled with uniform random values. 
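+  ///
+  /// For instance (a minimal sketch; the shape and seed values are assumed):
+  ///
+  /// ```swift
+  /// // A 2x3 matrix of uniform values in [0, 1), deterministically seeded.
+  /// let u: Tensor<Float> = _Raw.randomUniform(shape: Tensor<Int32>([2, 3]), seed: 42)
+  /// ```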
+ @inlinable @inline(__always) + public static func randomUniform< + Dtype: FloatingPoint & TensorFlowScalar, + T: TensorFlowIndex + >( + shape: Tensor, + seed: Int64 = 0, + seed2: Int64 = 0 + ) -> Tensor { + switch shape.handle.backend { + case .XLA: + let output_device = shape.device + let shape = Tensor(copying: shape, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.randomUniform(shape: shape, seed: seed, seed2: seed2), + to: output_device) + case .TF_EAGER: + return _RawTFEager.randomUniform(shape: shape, seed: seed, seed2: seed2) + } + + } + + /// Outputs random integers from a uniform distribution. + /// + /// The generated values are uniform integers in the range `[minval, maxval)`. + /// The lower bound `minval` is included in the range, while the upper bound + /// `maxval` is excluded. + /// + /// The random integers are slightly biased unless `maxval - minval` is an exact + /// power of two. The bias is small for values of `maxval - minval` significantly + /// smaller than the range of the output (either `2^32` or `2^64`). + /// + /// - Parameters: + /// - shape: The shape of the output tensor. + /// - minval: 0-D. Inclusive lower bound on the generated integers. + /// - maxval: 0-D. Exclusive upper bound on the generated integers. + /// + /// - Attrs: + /// - seed: If either `seed` or `seed2` are set to be non-zero, the random number + /// generator is seeded by the given seed. Otherwise, it is seeded by a + /// random seed. + /// - seed2: A second seed to avoid seed collision. + /// + /// - Output output: A tensor of the specified shape filled with uniform random integers. + @inlinable @inline(__always) + public static func randomUniformInt< + Tout: TensorFlowIndex, + T: TensorFlowIndex + >( + shape: Tensor, + minval: Tensor, + maxval: Tensor, + seed: Int64 = 0, + seed2: Int64 = 0 + ) -> Tensor { + switch commonBackend( + commonBackend(shape.handle.backend, minval.handle.backend), maxval.handle.backend) + { + case .XLA: + let output_device = maxval.device + let shape = Tensor(copying: shape, to: .defaultTFEager) + let minval = Tensor(copying: minval, to: .defaultTFEager) + let maxval = Tensor(copying: maxval, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.randomUniformInt( + shape: shape, minval: minval, maxval: maxval, seed: seed, seed2: seed2), + to: output_device) + case .TF_EAGER: + return _RawTFEager.randomUniformInt( + shape: shape, minval: minval, maxval: maxval, seed: seed, seed2: seed2) + } + + } + + /// Creates a sequence of numbers. + /// + /// This operation creates a sequence of numbers that begins at `start` and + /// extends by increments of `delta` up to but not including `limit`. + /// + /// For example: + /// + /// ``` + /// # 'start' is 3 + /// # 'limit' is 18 + /// # 'delta' is 3 + /// tf.range(start, limit, delta) ==> [3, 6, 9, 12, 15] + /// ``` + /// + /// - Parameters: + /// - start: 0-D (scalar). First entry in the sequence. + /// - limit: 0-D (scalar). Upper limit of sequence, exclusive. + /// - delta: 0-D (scalar). Optional. Default is 1. Number that increments `start`. + /// + /// - Output output: 1-D. 
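+  ///
+  /// The example above, as a minimal Swift sketch:
+  ///
+  /// ```swift
+  /// let sequence: Tensor<Int32> = _Raw.range(
+  ///   start: Tensor<Int32>(3), limit: Tensor<Int32>(18), delta: Tensor<Int32>(3))
+  /// // sequence == [3, 6, 9, 12, 15]
+  /// ```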
+ @inlinable @inline(__always) + public static func range( + start: Tensor, + limit: Tensor, + delta: Tensor + ) -> Tensor { + switch commonBackend( + commonBackend(start.handle.backend, limit.handle.backend), delta.handle.backend) + { + case .XLA: + return _RawXLA.range(start: start, limit: limit, delta: delta) + case .TF_EAGER: + return _RawTFEager.range(start: start, limit: limit, delta: delta) + } + + } + + /// Creates a dataset with a range of values. Corresponds to python's xrange. + /// + /// - Parameters: + /// - start: corresponds to start in python's xrange(). + /// - stop: corresponds to stop in python's xrange(). + /// - step: corresponds to step in python's xrange(). + @inlinable @inline(__always) + public static func rangeDataset( + start: Tensor, + stop: Tensor, + step: Tensor, + outputTypes: [TensorDataType], + outputShapes: [TensorShape?] + ) -> VariantHandle { + _RawTFEager.rangeDataset( + start: start, stop: stop, step: step, outputTypes: outputTypes, outputShapes: outputShapes) + } + + /// Returns the rank of a tensor. + /// + /// This operation returns an integer representing the rank of `input`. + /// + /// For example: + /// + /// ``` + /// # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]] + /// # shape of tensor 't' is [2, 2, 3] + /// rank(t) ==> 3 + /// ``` + /// + /// **Note**: The rank of a tensor is not the same as the rank of a matrix. The rank + /// of a tensor is the number of indices required to uniquely select each element + /// of the tensor. Rank is also known as "order", "degree", or "ndims." + @inlinable @inline(__always) + public static func rank( + _ input: Tensor + ) -> Tensor { + switch input.handle.backend { + case .XLA: + return _RawXLA.rank(input) + case .TF_EAGER: + return _RawTFEager.rank(input) + } + + } + + /// Reads and outputs the entire contents of the input filename. + @inlinable @inline(__always) + public static func readFile( + filename: StringTensor + ) -> StringTensor { + _RawTFEager.readFile(filename: filename) + } + + /// Reads the value of a variable. + /// + /// The tensor returned by this operation is immutable. + /// + /// The value returned by this operation is guaranteed to be influenced by all the + /// writes on which this operation depends directly or indirectly, and to not be + /// influenced by any of the writes which depend directly or indirectly on this + /// operation. + /// + /// - Parameter resource: handle to the resource in which to store the variable. + /// + /// - Attr dtype: the dtype of the value. + @inlinable @inline(__always) + public static func readVariableOp( + resource: ResourceHandle + ) -> Tensor { + _RawTFEager.readVariableOp(resource: resource) + } + + /// Returns the number of records this Reader has produced. + /// + /// This is the same as the number of ReaderRead executions that have + /// succeeded. + /// + /// - Parameter reader_handle: Handle to a Reader. + @inlinable @inline(__always) + public static func readerNumRecordsProducedV2( + readerHandle: ResourceHandle + ) -> Tensor { + _RawTFEager.readerNumRecordsProducedV2(readerHandle: readerHandle) + } + + /// Returns the number of work units this Reader has finished processing. + /// + /// - Parameter reader_handle: Handle to a Reader. + @inlinable @inline(__always) + public static func readerNumWorkUnitsCompletedV2( + readerHandle: ResourceHandle + ) -> Tensor { + _RawTFEager.readerNumWorkUnitsCompletedV2(readerHandle: readerHandle) + } + + /// Returns up to `num_records` (key, value) pairs produced by a Reader. 
+ /// + /// Will dequeue from the input queue if necessary (e.g. when the + /// Reader needs to start reading from a new file since it has finished + /// with the previous file). + /// It may return less than `num_records` even before the last batch. + /// + /// - Parameters: + /// - reader_handle: Handle to a `Reader`. + /// - queue_handle: Handle to a `Queue`, with string work items. + /// - num_records: number of records to read from `Reader`. + /// + /// - Outputs: + /// - keys: A 1-D tensor. + /// - values: A 1-D tensor. + @inlinable @inline(__always) + public static func readerReadUpToV2( + readerHandle: ResourceHandle, + queueHandle: ResourceHandle, + numRecords: Tensor + ) -> (keys: StringTensor, values: StringTensor) { + _RawTFEager.readerReadUpToV2( + readerHandle: readerHandle, queueHandle: queueHandle, numRecords: numRecords) + } + + /// Returns the next record (key, value pair) produced by a Reader. + /// + /// Will dequeue from the input queue if necessary (e.g. when the + /// Reader needs to start reading from a new file since it has finished + /// with the previous file). + /// + /// - Parameters: + /// - reader_handle: Handle to a Reader. + /// - queue_handle: Handle to a Queue, with string work items. + /// + /// - Outputs: + /// - key: A scalar. + /// - value: A scalar. + @inlinable @inline(__always) + public static func readerReadV2( + readerHandle: ResourceHandle, + queueHandle: ResourceHandle + ) -> (key: StringTensor, value: StringTensor) { + _RawTFEager.readerReadV2(readerHandle: readerHandle, queueHandle: queueHandle) + } + + /// Restore a Reader to its initial clean state. + /// + /// - Parameter reader_handle: Handle to a Reader. + @inlinable @inline(__always) + public static func readerResetV2( + readerHandle: ResourceHandle + ) { + _RawTFEager.readerResetV2(readerHandle: readerHandle) + } + + /// Restore a reader to a previously saved state. + /// + /// Not all Readers support being restored, so this can produce an + /// Unimplemented error. + /// + /// - Parameters: + /// - reader_handle: Handle to a Reader. + /// - state: Result of a ReaderSerializeState of a Reader with type + /// matching reader_handle. + @inlinable @inline(__always) + public static func readerRestoreStateV2( + readerHandle: ResourceHandle, + state: StringTensor + ) { + _RawTFEager.readerRestoreStateV2(readerHandle: readerHandle, state: state) + } + + /// Produce a string tensor that encodes the state of a Reader. + /// + /// Not all Readers support being serialized, so this can produce an + /// Unimplemented error. + /// + /// - Parameter reader_handle: Handle to a Reader. + @inlinable @inline(__always) + public static func readerSerializeStateV2( + readerHandle: ResourceHandle + ) -> StringTensor { + _RawTFEager.readerSerializeStateV2(readerHandle: readerHandle) + } + + /// Returns the real part of a complex number. + /// + /// Given a tensor `input` of complex numbers, this operation returns a tensor of + /// type `float` that is the real part of each element in `input`. All elements in + /// `input` must be complex numbers of the form \\(a + bj\\), where *a* is the real + /// part returned by this operation and *b* is the imaginary part. 
+ /// + /// For example: + /// + /// ``` + /// # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] + /// tf.real(input) ==> [-2.25, 3.25] + /// ``` + @inlinable @inline(__always) + public static func real< + T: TensorFlowScalar, + Tout: FloatingPoint & TensorFlowScalar + >( + _ input: Tensor + ) -> Tensor { + switch input.handle.backend { + case .XLA: + let output_device = input.device + let input = Tensor(copying: input, to: .defaultTFEager) + return Tensor(copying: _RawTFEager.real(input), to: output_device) + case .TF_EAGER: + return _RawTFEager.real(input) + } + + } + + /// Returns x / y element-wise for real types. + /// + /// If `x` and `y` are reals, this will return the floating-point division. + /// + /// *NOTE*: `Div` supports broadcasting. More about broadcasting + /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + @inlinable @inline(__always) + public static func realDiv( + _ x: Tensor, + _ y: Tensor + ) -> Tensor { + switch commonBackend(x.handle.backend, y.handle.backend) { + case .XLA: + let output_device = y.device + let x = Tensor(copying: x, to: .defaultTFEager) + let y = Tensor(copying: y, to: .defaultTFEager) + return Tensor(copying: _RawTFEager.realDiv(x, y), to: output_device) + case .TF_EAGER: + return _RawTFEager.realDiv(x, y) + } + + } + + /// Creates a dataset that changes the batch size. + /// + /// Creates a dataset that changes the batch size of the dataset to current batch + /// size // num_workers. + /// + /// - Parameters: + /// - input_dataset: A variant tensor representing the input dataset. + /// - num_replicas: A scalar representing the number of replicas to distribute this batch across. As + /// a result of this transformation the current batch size would end up being + /// divided by this parameter. + @inlinable @inline(__always) + public static func rebatchDataset( + inputDataset: VariantHandle, + numReplicas: Tensor, + outputTypes: [TensorDataType], + outputShapes: [TensorShape?], + useFallback: Bool = true + ) -> VariantHandle { + _RawTFEager.rebatchDataset( + inputDataset: inputDataset, numReplicas: numReplicas, outputTypes: outputTypes, + outputShapes: outputShapes, useFallback: useFallback) + } + + /// Computes the reciprocal of x element-wise. + /// + /// I.e., \\(y = 1 / x\\). + @inlinable @inline(__always) + public static func reciprocal( + _ x: Tensor + ) -> Tensor { + switch x.handle.backend { + case .XLA: + let output_device = x.device + let x = Tensor(copying: x, to: .defaultTFEager) + return Tensor(copying: _RawTFEager.reciprocal(x), to: output_device) + case .TF_EAGER: + return _RawTFEager.reciprocal(x) + } + + } + + /// Computes the gradient for the inverse of `x` wrt its input. + /// + /// Specifically, `grad = -dy * y*y`, where `y = 1/x`, and `dy` + /// is the corresponding input gradient. + @inlinable @inline(__always) + public static func reciprocalGrad( + _ y: Tensor, + dy: Tensor + ) -> Tensor { + switch commonBackend(y.handle.backend, dy.handle.backend) { + case .XLA: + let output_device = dy.device + let y = Tensor(copying: y, to: .defaultTFEager) + let dy = Tensor(copying: dy, to: .defaultTFEager) + return Tensor(copying: _RawTFEager.reciprocalGrad(y, dy: dy), to: output_device) + case .TF_EAGER: + return _RawTFEager.reciprocalGrad(y, dy: dy) + } + + } + + /// Emits randomized records. + /// + /// - Attrs: + /// - file_pattern: Glob pattern for the data files. + /// - file_random_seed: Random seeds used to produce randomized records. 
+ /// - file_shuffle_shift_ratio: Shifts the list of files after the list is randomly + /// shuffled. + /// - file_buffer_size: The randomization shuffling buffer. + /// - file_parallelism: How many sstables are opened and concurrently iterated over. + /// - batch_size: The batch size. + /// - compression_type: The type of compression for the file. Currently ZLIB and + /// GZIP are supported. Defaults to none. + /// + /// - Output records: A tensor of shape [batch_size]. + @inlinable @inline(__always) + public static func recordInput( + filePattern: String, + fileRandomSeed: Int64 = 301, + fileShuffleShiftRatio: Double = 0, + fileBufferSize: Int64 = 10000, + fileParallelism: Int64 = 16, + batchSize: Int64 = 32, + compressionType: String + ) -> StringTensor { + _RawTFEager.recordInput( + filePattern: filePattern, fileRandomSeed: fileRandomSeed, + fileShuffleShiftRatio: fileShuffleShiftRatio, fileBufferSize: fileBufferSize, + fileParallelism: fileParallelism, batchSize: batchSize, compressionType: compressionType) + } + + /// Receives the named tensor from send_device on recv_device. + /// + /// - Attrs: + /// - tensor_name: The name of the tensor to receive. + /// - send_device: The name of the device sending the tensor. + /// - send_device_incarnation: The current incarnation of send_device. + /// - recv_device: The name of the device receiving the tensor. + /// - client_terminated: If set to true, this indicates that the node was added + /// to the graph as a result of a client-side feed or fetch of Tensor data, + /// in which case the corresponding send or recv is expected to be managed + /// locally by the caller. + /// + /// - Output tensor: The tensor to receive. + @inlinable @inline(__always) + public static func recv( + tensorName: String, + sendDevice: String, + sendDeviceIncarnation: Int64, + recvDevice: String, + clientTerminated: Bool = false + ) -> Tensor { + _RawTFEager.recv( + tensorName: tensorName, sendDevice: sendDevice, + sendDeviceIncarnation: sendDeviceIncarnation, recvDevice: recvDevice, + clientTerminated: clientTerminated) + } + + /// An op that receives embedding activations on the TPU. + /// + /// The TPU system performs the embedding lookups and aggregations specified by + /// the arguments to TPUEmbeddingEnqueue(Integer/Sparse/SparseTensor)Batch. The + /// results of these aggregations are visible to the Tensorflow Graph as the + /// outputs of a RecvTPUEmbeddingActivations op. This op returns a list containing + /// one Tensor of activations per table specified in the model. There can be at + /// most one RecvTPUEmbeddingActivations op in the TPU graph. + /// + /// - Attrs: + /// - num_outputs: The number of output activation tensors, equal to the number of + /// embedding tables in the model. + /// - config: Serialized TPUEmbeddingConfiguration proto. + /// + /// - Output outputs: A TensorList of embedding activations containing one Tensor per + /// embedding table in the model. + @inlinable @inline(__always) + public static func recvTPUEmbeddingActivations( + numOutputs: Int64, + config: String + ) -> [Tensor] { + _RawTFEager.recvTPUEmbeddingActivations(numOutputs: numOutputs, config: config) + } + + /// Reduces the input dataset to a singleton using a reduce function. + /// + /// - Parameters: + /// - input_dataset: A variant tensor representing the input dataset. + /// - initial_state: A nested structure of tensors, representing the initial state of the + /// transformation. 
+ /// + /// - Attr f: A function that maps `(old_state, input_element)` to `new_state`. It must take + /// two arguments and return a nested structures of tensors. The structure of + /// `new_state` must match the structure of `initial_state`. + @inlinable @inline(__always) + public static func reduceDataset< + FIn: TensorGroup, + FOut: TensorGroup, + Tstate: TensorArrayProtocol, + Targuments: TensorArrayProtocol, + OutputTypes: TensorGroup + >( + inputDataset: VariantHandle, + initialState: Tstate, + otherArguments: Targuments, + f: (FIn) -> FOut, + outputShapes: [TensorShape?], + useInterOpParallelism: Bool = true + ) -> OutputTypes { + _RawTFEager.reduceDataset( + inputDataset: inputDataset, initialState: initialState, otherArguments: otherArguments, + f: f, outputShapes: outputShapes, useInterOpParallelism: useInterOpParallelism) + } + + /// Joins a string Tensor across the given dimensions. + /// + /// Computes the string join across dimensions in the given string Tensor of shape + /// `[\\(d_0, d_1, ..., d_{n-1}\\)]`. Returns a new Tensor created by joining the input + /// strings with the given separator (default: empty string). Negative indices are + /// counted backwards from the end, with `-1` being equivalent to `n - 1`. If + /// indices are not specified, joins across all dimensions beginning from `n - 1` + /// through `0`. + /// + /// For example: + /// + /// ```python + /// # tensor `a` is [["a", "b"], ["c", "d"]] + /// tf.reduce_join(a, 0) ==> ["ac", "bd"] + /// tf.reduce_join(a, 1) ==> ["ab", "cd"] + /// tf.reduce_join(a, -2) = tf.reduce_join(a, 0) ==> ["ac", "bd"] + /// tf.reduce_join(a, -1) = tf.reduce_join(a, 1) ==> ["ab", "cd"] + /// tf.reduce_join(a, 0, keep_dims=True) ==> [["ac", "bd"]] + /// tf.reduce_join(a, 1, keep_dims=True) ==> [["ab"], ["cd"]] + /// tf.reduce_join(a, 0, separator=".") ==> ["a.c", "b.d"] + /// tf.reduce_join(a, [0, 1]) ==> "acbd" + /// tf.reduce_join(a, [1, 0]) ==> "abcd" + /// tf.reduce_join(a, []) ==> [["a", "b"], ["c", "d"]] + /// tf.reduce_join(a) = tf.reduce_join(a, [1, 0]) ==> "abcd" + /// ``` + /// + /// - Parameters: + /// - inputs: The input to be joined. All reduced indices must have non-zero size. + /// - reduction_indices: The dimensions to reduce over. Dimensions are reduced in the + /// order specified. Omitting `reduction_indices` is equivalent to passing + /// `[n-1, n-2, ..., 0]`. Negative indices from `-n` to `-1` are supported. + /// + /// - Attrs: + /// - keep_dims: If `True`, retain reduced dimensions with length `1`. + /// - separator: The separator to use when joining. + /// + /// - Output output: Has shape equal to that of the input with reduced dimensions removed or + /// set to `1` depending on `keep_dims`. + @inlinable @inline(__always) + public static func reduceJoin( + inputs: StringTensor, + reductionIndices: Tensor, + keepDims: Bool = false, + separator: String + ) -> StringTensor { + _RawTFEager.reduceJoin( + inputs: inputs, reductionIndices: reductionIndices, keepDims: keepDims, separator: separator + ) + } + + /// Check if the input matches the regex pattern. + /// + /// The input is a string tensor of any shape. The pattern is a scalar + /// string tensor which is applied to every element of the input tensor. + /// The boolean values (True or False) of the output tensor indicate + /// if the input matches the regex pattern provided. 
+ /// + /// The pattern follows the re2 syntax (https://github.com/google/re2/wiki/Syntax) + /// + /// Examples: + /// + /// >>> tf.strings.regex_full_match(["TF lib", "lib TF"], ".*lib$") + /// + /// >>> tf.strings.regex_full_match(["TF lib", "lib TF"], ".*TF$") + /// + /// + /// - Parameters: + /// - input: A string tensor of the text to be processed. + /// - pattern: A scalar string tensor containing the regular expression to match the input. + /// + /// - Output output: A bool tensor with the same shape as `input`. + @inlinable @inline(__always) + public static func regexFullMatch( + _ input: StringTensor, + pattern: StringTensor + ) -> Tensor { + _RawTFEager.regexFullMatch(input, pattern: pattern) + } + + /// Replaces matches of the `pattern` regular expression in `input` with the + /// replacement string provided in `rewrite`. + /// + /// It follows the re2 syntax (https://github.com/google/re2/wiki/Syntax) + /// + /// - Parameters: + /// - input: The text to be processed. + /// - pattern: The regular expression to be matched in the `input` strings. + /// - rewrite: The rewrite string to be substituted for the `pattern` expression where it is + /// matched in the `input` strings. + /// + /// - Attr replace_global: If True, the replacement is global (that is, all matches of the `pattern` regular + /// expression in each input string are rewritten), otherwise the `rewrite` + /// substitution is only made for the first `pattern` match. + /// + /// - Output output: The text after applying pattern match and rewrite substitution. + @inlinable @inline(__always) + public static func regexReplace( + _ input: StringTensor, + pattern: StringTensor, + rewrite: StringTensor, + replaceGlobal: Bool = true + ) -> StringTensor { + _RawTFEager.regexReplace( + input, pattern: pattern, rewrite: rewrite, replaceGlobal: replaceGlobal) + } + + /// Computes rectified linear: `max(features, 0)`. + /// + /// See: https://en.wikipedia.org/wiki/Rectifier_(neural_networks) + /// Example usage: + /// >>> tf.nn.relu([-2., 0., -0., 3.]).numpy() + /// array([ 0., 0., -0., 3.], dtype=float32) + @inlinable @inline(__always) + public static func relu( + features: Tensor + ) -> Tensor { + switch features.handle.backend { + case .XLA: + return _RawXLA.relu(features: features) + case .TF_EAGER: + return _RawTFEager.relu(features: features) + } + + } + + /// Computes rectified linear 6: `min(max(features, 0), 6)`. + @inlinable @inline(__always) + public static func relu6( + features: Tensor + ) -> Tensor { + switch features.handle.backend { + case .XLA: + return _RawXLA.relu6(features: features) + case .TF_EAGER: + return _RawTFEager.relu6(features: features) + } + + } + + /// Computes rectified linear 6 gradients for a Relu6 operation. + /// + /// - Parameters: + /// - gradients: The backpropagated gradients to the corresponding Relu6 operation. + /// - features: The features passed as input to the corresponding Relu6 operation, or + /// its output; using either one produces the same result. + /// + /// - Output backprops: The gradients: + /// `gradients * (features > 0) * (features < 6)`. 
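The backprop rule quoted above reduces to a one-line predicate on scalars. An illustrative sketch, not the TF_EAGER or XLA kernel:

```swift
// relu6 forward: clamp to [0, 6].
func relu6(_ x: Float) -> Float { min(max(x, 0), 6) }

// relu6 backward: the incoming gradient passes through only where the
// input lay strictly inside (0, 6); the op is flat everywhere else.
func relu6Grad(gradient dy: Float, feature x: Float) -> Float {
  (x > 0 && x < 6) ? dy : 0
}

print(relu6(-2), relu6(3), relu6(9))       // 0.0 3.0 6.0
print(relu6Grad(gradient: 1, feature: 3),
      relu6Grad(gradient: 1, feature: 9))  // 1.0 0.0
```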
+ @inlinable @inline(__always) + public static func relu6Grad( + gradients: Tensor, + features: Tensor + ) -> Tensor { + switch commonBackend(gradients.handle.backend, features.handle.backend) { + case .XLA: + return _RawXLA.relu6Grad(gradients: gradients, features: features) + case .TF_EAGER: + return _RawTFEager.relu6Grad(gradients: gradients, features: features) + } + + } + + /// Computes rectified linear gradients for a Relu operation. + /// + /// - Parameters: + /// - gradients: The backpropagated gradients to the corresponding Relu operation. + /// - features: The features passed as input to the corresponding Relu operation, OR + /// the outputs of that operation (both work equivalently). + /// + /// - Output backprops: `gradients * (features > 0)`. + @inlinable @inline(__always) + public static func reluGrad( + gradients: Tensor, + features: Tensor + ) -> Tensor { + switch commonBackend(gradients.handle.backend, features.handle.backend) { + case .XLA: + return _RawXLA.reluGrad(gradients: gradients, features: features) + case .TF_EAGER: + return _RawTFEager.reluGrad(gradients: gradients, features: features) + } + + } + + /// Runs function `f` on a remote device indicated by `target`. + /// + /// - Parameters: + /// - target: A fully specified device name where we want to run the function. + /// - args: A list of arguments for the function. + /// + /// - Attrs: + /// - Tin: The type list for the arguments. + /// - Tout: The type list for the return values. + /// - f: The function to run remotely. + /// + /// - Output output: A list of return values. + @inlinable @inline(__always) + public static func remoteCall< + Tin: TensorArrayProtocol, + Tout: TensorGroup, + FIn: TensorGroup, + FOut: TensorGroup + >( + target: StringTensor, + args: Tin, + f: (FIn) -> FOut + ) -> Tout { + _RawTFEager.remoteCall(target: target, args: args, f: f) + } + + /// Execute a sub graph on a remote processor. + /// + /// The graph specifications(such as graph itself, input tensors and output names) + /// are stored as a serialized protocol buffer of RemoteFusedGraphExecuteInfo + /// as serialized_remote_fused_graph_execute_info. + /// The specifications will be passed to a dedicated registered + /// remote fused graph executor. The executor will send the graph specifications + /// to a remote processor and execute that graph. The execution results + /// will be passed to consumer nodes as outputs of this node. + /// + /// - Parameter inputs: Arbitrary number of tensors with arbitrary data types + /// + /// - Attr serialized_remote_fused_graph_execute_info: Serialized protocol buffer + /// of RemoteFusedGraphExecuteInfo which contains graph specifications. + /// + /// - Output outputs: Arbitrary number of tensors with arbitrary data types + @inlinable @inline(__always) + public static func remoteFusedGraphExecute< + Tinputs: TensorArrayProtocol, + Toutputs: TensorGroup + >( + inputs: Tinputs, + serializedRemoteFusedGraphExecuteInfo: String + ) -> Toutputs { + _RawTFEager.remoteFusedGraphExecute( + inputs: inputs, serializedRemoteFusedGraphExecuteInfo: serializedRemoteFusedGraphExecuteInfo + ) + } + + /// Creates a dataset that emits the outputs of `input_dataset` `count` times. + /// + /// - Parameter count: A scalar representing the number of times that `input_dataset` should + /// be repeated. A value of `-1` indicates that it should be repeated infinitely. 
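As a rough model of the `count` semantics, including `-1` for infinite repetition, consider this lazy Swift sketch; `repeated` is a hypothetical helper, not part of the API:

```swift
// Emits `elements` in order `count` times; count == -1 repeats forever.
func repeated<T>(_ elements: [T], count: Int) -> AnySequence<T> {
  AnySequence {
    var pass = 0, index = 0
    return AnyIterator<T> {
      while true {
        if elements.isEmpty { return nil }
        if count >= 0 && pass >= count { return nil }
        if index < elements.count {
          defer { index += 1 }
          return elements[index]
        }
        index = 0      // finished one pass over the elements
        pass += 1
      }
    }
  }
}

print(Array(repeated([1, 2], count: 3)))  // [1, 2, 1, 2, 1, 2]
```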
+ @inlinable @inline(__always) + public static func repeatDataset( + inputDataset: VariantHandle, + count: Tensor, + outputTypes: [TensorDataType], + outputShapes: [TensorShape?] + ) -> VariantHandle { + _RawTFEager.repeatDataset( + inputDataset: inputDataset, count: count, outputTypes: outputTypes, + outputShapes: outputShapes) + } + + /// Computes a range that covers the actual values present in a quantized tensor. + /// + /// Given a quantized tensor described by `(input, input_min, input_max)`, outputs a + /// range that covers the actual values present in that tensor. This op is typically + /// used to produce the `requested_output_min` and `requested_output_max` for + /// `Requantize`. + /// + /// - Parameters: + /// - input_min: The float value that the minimum quantized input value represents. + /// - input_max: The float value that the maximum quantized input value represents. + /// + /// - Attr Tinput: The type of the input. + /// + /// - Outputs: + /// - output_min: The computed min output. + /// - output_max: the computed max output. + @inlinable @inline(__always) + public static func requantizationRange( + _ input: Tensor, + inputMin: Tensor, + inputMax: Tensor + ) -> (outputMin: Tensor, outputMax: Tensor) { + _RawTFEager.requantizationRange(input, inputMin: inputMin, inputMax: inputMax) + } + + /// Computes requantization range per channel. + /// + /// - Parameters: + /// - input: The original input tensor. + /// - input_min: The minimum value of the input tensor + /// - input_max: The maximum value of the input tensor. + /// + /// - Attrs: + /// - T: The quantized type of input tensor that needs to be converted. + /// - clip_value_max: The maximum value of the output that needs to be clipped. + /// Example: set this to 6 for Relu6. + /// + /// - Outputs: + /// - output_min: The minimum value of the final output tensor + /// - output_max: The maximum value of the final output tensor. + @inlinable @inline(__always) + public static func requantizationRangePerChannel( + _ input: Tensor, + inputMin: Tensor, + inputMax: Tensor, + clipValueMax: Double + ) -> (outputMin: Tensor, outputMax: Tensor) { + _RawTFEager.requantizationRangePerChannel( + input, inputMin: inputMin, inputMax: inputMax, clipValueMax: clipValueMax) + } + + /// Converts the quantized `input` tensor into a lower-precision `output`. + /// + /// Converts the quantized `input` tensor into a lower-precision `output`, using the + /// output range specified with `requested_output_min` and `requested_output_max`. + /// + /// `[input_min, input_max]` are scalar floats that specify the range for the float + /// interpretation of the `input` data. For example, if `input_min` is -1.0f and + /// `input_max` is 1.0f, and we are dealing with `quint16` quantized data, then a 0 + /// value in the 16-bit data should be interpreted as -1.0f, and a 65535 means 1.0f. + /// + /// - Parameters: + /// - input_min: The float value that the minimum quantized input value represents. + /// - input_max: The float value that the maximum quantized input value represents. + /// - requested_output_min: The float value that the minimum quantized output value represents. + /// - requested_output_max: The float value that the maximum quantized output value represents. + /// + /// - Attrs: + /// - Tinput: The type of the input. + /// - out_type: The type of the output. Should be a lower bit depth than Tinput. + /// + /// - Outputs: + /// - output_min: The requested_output_min value is copied into this output. 
+ /// - output_max: The requested_output_max value is copied into this output. + @inlinable @inline(__always) + public static func requantize< + Tinput: TensorFlowScalar, + OutType: TensorFlowScalar + >( + _ input: Tensor, + inputMin: Tensor, + inputMax: Tensor, + requestedOutputMin: Tensor, + requestedOutputMax: Tensor + ) -> (output: Tensor, outputMin: Tensor, outputMax: Tensor) { + _RawTFEager.requantize( + input, inputMin: inputMin, inputMax: inputMax, requestedOutputMin: requestedOutputMin, + requestedOutputMax: requestedOutputMax) + } + + /// Requantizes input with min and max values known per channel. + /// + /// - Parameters: + /// - input: The original input tensor. + /// - input_min: The minimum value of the input tensor + /// - input_max: The maximum value of the input tensor. + /// - requested_output_min: The minimum value of the output tensor requested. + /// - requested_output_max: The maximum value of the output tensor requested. + /// + /// - Attrs: + /// - T: The quantized type of input tensor that needs to be converted. + /// - out_type: The quantized type of output tensor that needs to be converted. + /// + /// - Outputs: + /// - output: Output tensor. + /// - output_min: The minimum value of the final output tensor + /// - output_max: The maximum value of the final output tensor. + @inlinable @inline(__always) + public static func requantizePerChannel< + T: TensorFlowScalar, + OutType: TensorFlowScalar + >( + _ input: Tensor, + inputMin: Tensor, + inputMax: Tensor, + requestedOutputMin: Tensor, + requestedOutputMax: Tensor + ) -> (output: Tensor, outputMin: Tensor, outputMax: Tensor) { + _RawTFEager.requantizePerChannel( + input, inputMin: inputMin, inputMax: inputMax, requestedOutputMin: requestedOutputMin, + requestedOutputMax: requestedOutputMax) + } + + @inlinable @inline(__always) + public static func requiresOlderGraphVersion() -> Tensor { + _RawTFEager.requiresOlderGraphVersion() + } + + @inlinable @inline(__always) + public static func reservedAttr( + range: Int64 + ) { + _RawTFEager.reservedAttr(range: range) + } + + @inlinable @inline(__always) + public static func reservedInput( + _ input: Tensor + ) { + _RawTFEager.reservedInput(input) + } + + /// Reshapes a tensor. + /// + /// Given `tensor`, this operation returns a tensor that has the same values + /// as `tensor` with shape `shape`. + /// + /// If one component of 1-D tensor `shape` is the special value -1, the size of that + /// dimension is computed so that the total size remains constant. In particular, a + /// `shape` of `[-1]` flattens into 1-D. At most one component of `shape` may be + /// unknown. + /// + /// The `shape` must be 1-D and the operation returns a tensor with shape + /// `shape` filled with the values of `tensor`. In this case, the number of elements + /// implied by `shape` must be the same as the number of elements in `tensor`. + /// + /// It is an error if `shape` is not 1-D. 
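The `-1` inference rule is plain index arithmetic: the unknown dimension is whatever makes the element counts agree. A sketch, with `inferShape` as a hypothetical helper:

```swift
// Resolve `shape`, in which at most one component may be -1, against the
// element count of the tensor being reshaped.
func inferShape(_ shape: [Int], elementCount: Int) -> [Int] {
  let unknowns = shape.filter { $0 == -1 }.count
  precondition(unknowns <= 1, "at most one component of shape may be -1")
  let known = shape.filter { $0 != -1 }.reduce(1, *)
  if unknowns == 0 {
    precondition(known == elementCount, "shape does not match element count")
    return shape
  }
  precondition(known > 0 && elementCount % known == 0,
               "cannot infer -1: \(elementCount) is not divisible by \(known)")
  return shape.map { $0 == -1 ? elementCount / known : $0 }
}

print(inferShape([2, -1], elementCount: 18))     // [2, 9]
print(inferShape([2, -1, 3], elementCount: 18))  // [2, 3, 3]
```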
+ /// + /// For example: + /// + /// ``` + /// # tensor 't' is [1, 2, 3, 4, 5, 6, 7, 8, 9] + /// # tensor 't' has shape [9] + /// reshape(t, [3, 3]) ==> [[1, 2, 3], + /// [4, 5, 6], + /// [7, 8, 9]] + /// + /// # tensor 't' is [[[1, 1], [2, 2]], + /// # [[3, 3], [4, 4]]] + /// # tensor 't' has shape [2, 2, 2] + /// reshape(t, [2, 4]) ==> [[1, 1, 2, 2], + /// [3, 3, 4, 4]] + /// + /// # tensor 't' is [[[1, 1, 1], + /// # [2, 2, 2]], + /// # [[3, 3, 3], + /// # [4, 4, 4]], + /// # [[5, 5, 5], + /// # [6, 6, 6]]] + /// # tensor 't' has shape [3, 2, 3] + /// # pass '[-1]' to flatten 't' + /// reshape(t, [-1]) ==> [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6] + /// + /// # -1 can also be used to infer the shape + /// + /// # -1 is inferred to be 9: + /// reshape(t, [2, -1]) ==> [[1, 1, 1, 2, 2, 2, 3, 3, 3], + /// [4, 4, 4, 5, 5, 5, 6, 6, 6]] + /// # -1 is inferred to be 2: + /// reshape(t, [-1, 9]) ==> [[1, 1, 1, 2, 2, 2, 3, 3, 3], + /// [4, 4, 4, 5, 5, 5, 6, 6, 6]] + /// # -1 is inferred to be 3: + /// reshape(t, [ 2, -1, 3]) ==> [[[1, 1, 1], + /// [2, 2, 2], + /// [3, 3, 3]], + /// [[4, 4, 4], + /// [5, 5, 5], + /// [6, 6, 6]]] + /// + /// # tensor 't' is [7] + /// # shape `[]` reshapes to a scalar + /// reshape(t, []) ==> 7 + /// ``` + /// + /// - Parameter shape: Defines the shape of the output tensor. + @inlinable @inline(__always) + public static func reshape< + T: TensorFlowScalar, + Tshape: TensorFlowIndex + >( + _ tensor: Tensor, + shape: Tensor + ) -> Tensor { + switch commonBackend(tensor.handle.backend, shape.handle.backend) { + case .XLA: + return _RawXLA.reshape(tensor, shape: shape) + case .TF_EAGER: + return _RawTFEager.reshape(tensor, shape: shape) + } + + } + + /// Resize `images` to `size` using area interpolation. + /// + /// Input images can be of different types but output images are always float. + /// + /// The range of pixel values for the output image might be slightly different + /// from the range for the input image because of limited numerical precision. + /// To guarantee an output range, for example `[0.0, 1.0]`, apply + /// `tf.clip_by_value` to the output. + /// + /// Each output pixel is computed by first transforming the pixel's footprint into + /// the input tensor and then averaging the pixels that intersect the footprint. An + /// input pixel's contribution to the average is weighted by the fraction of its + /// area that intersects the footprint. This is the same as OpenCV's INTER_AREA. + /// + /// - Parameters: + /// - images: 4-D with shape `[batch, height, width, channels]`. + /// - size: = A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The + /// new size for the images. + /// + /// - Attr align_corners: If true, the centers of the 4 corner pixels of the input and output tensors are + /// aligned, preserving the values at the corner pixels. Defaults to false. + /// + /// - Output resized_images: 4-D with shape + /// `[batch, new_height, new_width, channels]`. 
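The resize ops that follow all hinge on one question: which input coordinate does an output pixel sample from? A sketch of the conventional mapping selected by `alignCorners` and `halfPixelCenters` (an illustration of the standard convention, not the kernels' exact code):

```swift
// Map an output pixel index to a fractional input coordinate along one axis.
func sourceCoordinate(
  outIndex: Int, inSize: Int, outSize: Int,
  alignCorners: Bool, halfPixelCenters: Bool
) -> Float {
  if alignCorners && outSize > 1 {
    // Corner pixel centers of input and output coincide exactly.
    return Float(outIndex) * Float(inSize - 1) / Float(outSize - 1)
  }
  let scale = Float(inSize) / Float(outSize)
  if halfPixelCenters {
    // Treat pixels as unit squares and map their centers.
    return (Float(outIndex) + 0.5) * scale - 0.5
  }
  return Float(outIndex) * scale
}

// With alignCorners, the last output pixel lands on the last input pixel:
print(sourceCoordinate(outIndex: 3, inSize: 8, outSize: 4,
                       alignCorners: true, halfPixelCenters: false))  // 7.0
```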
+ @inlinable @inline(__always) + public static func resizeArea( + images: Tensor, + size: Tensor, + alignCorners: Bool = false + ) -> Tensor { + switch commonBackend(images.handle.backend, size.handle.backend) { + case .XLA: + let output_device = size.device + let images = Tensor(copying: images, to: .defaultTFEager) + let size = Tensor(copying: size, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.resizeArea(images: images, size: size, alignCorners: alignCorners), + to: output_device) + case .TF_EAGER: + return _RawTFEager.resizeArea(images: images, size: size, alignCorners: alignCorners) + } + + } + + /// Resize `images` to `size` using bicubic interpolation. + /// + /// Input images can be of different types but output images are always float. + /// + /// - Parameters: + /// - images: 4-D with shape `[batch, height, width, channels]`. + /// - size: = A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The + /// new size for the images. + /// + /// - Attr align_corners: If true, the centers of the 4 corner pixels of the input and output tensors are + /// aligned, preserving the values at the corner pixels. Defaults to false. + /// + /// - Output resized_images: 4-D with shape + /// `[batch, new_height, new_width, channels]`. + @inlinable @inline(__always) + public static func resizeBicubic( + images: Tensor, + size: Tensor, + alignCorners: Bool = false, + halfPixelCenters: Bool = false + ) -> Tensor { + switch commonBackend(images.handle.backend, size.handle.backend) { + case .XLA: + let output_device = size.device + let images = Tensor(copying: images, to: .defaultTFEager) + let size = Tensor(copying: size, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.resizeBicubic( + images: images, size: size, alignCorners: alignCorners, + halfPixelCenters: halfPixelCenters), to: output_device) + case .TF_EAGER: + return _RawTFEager.resizeBicubic( + images: images, size: size, alignCorners: alignCorners, halfPixelCenters: halfPixelCenters + ) + } + + } + + /// Computes the gradient of bicubic interpolation. + /// + /// - Parameters: + /// - grads: 4-D with shape `[batch, height, width, channels]`. + /// - original_image: 4-D with shape `[batch, orig_height, orig_width, channels]`, + /// The image tensor that was resized. + /// + /// - Attr align_corners: If true, the centers of the 4 corner pixels of the input and grad tensors are + /// aligned. Defaults to false. + /// + /// - Output output: 4-D with shape `[batch, orig_height, orig_width, channels]`. + /// Gradients with respect to the input image. Input image must have been + /// float or double. + @inlinable @inline(__always) + public static func resizeBicubicGrad( + grads: Tensor, + originalImage: Tensor, + alignCorners: Bool = false, + halfPixelCenters: Bool = false + ) -> Tensor { + switch commonBackend(grads.handle.backend, originalImage.handle.backend) { + case .XLA: + let output_device = originalImage.device + let grads = Tensor(copying: grads, to: .defaultTFEager) + let originalImage = Tensor(copying: originalImage, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.resizeBicubicGrad( + grads: grads, originalImage: originalImage, alignCorners: alignCorners, + halfPixelCenters: halfPixelCenters), to: output_device) + case .TF_EAGER: + return _RawTFEager.resizeBicubicGrad( + grads: grads, originalImage: originalImage, alignCorners: alignCorners, + halfPixelCenters: halfPixelCenters) + } + + } + + /// Resize `images` to `size` using bilinear interpolation. 
+ /// + /// Input images can be of different types but output images are always float. + /// + /// - Parameters: + /// - images: 4-D with shape `[batch, height, width, channels]`. + /// - size: = A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The + /// new size for the images. + /// + /// - Attr align_corners: If true, the centers of the 4 corner pixels of the input and output tensors are + /// aligned, preserving the values at the corner pixels. Defaults to false. + /// + /// - Output resized_images: 4-D with shape + /// `[batch, new_height, new_width, channels]`. + @inlinable @inline(__always) + public static func resizeBilinear( + images: Tensor, + size: Tensor, + alignCorners: Bool = false, + halfPixelCenters: Bool = false + ) -> Tensor { + switch commonBackend(images.handle.backend, size.handle.backend) { + case .XLA: + let output_device = size.device + let images = Tensor(copying: images, to: .defaultTFEager) + let size = Tensor(copying: size, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.resizeBilinear( + images: images, size: size, alignCorners: alignCorners, + halfPixelCenters: halfPixelCenters), to: output_device) + case .TF_EAGER: + return _RawTFEager.resizeBilinear( + images: images, size: size, alignCorners: alignCorners, halfPixelCenters: halfPixelCenters + ) + } + + } + + /// Computes the gradient of bilinear interpolation. + /// + /// - Parameters: + /// - grads: 4-D with shape `[batch, height, width, channels]`. + /// - original_image: 4-D with shape `[batch, orig_height, orig_width, channels]`, + /// The image tensor that was resized. + /// + /// - Attr align_corners: If true, the centers of the 4 corner pixels of the input and grad tensors are + /// aligned. Defaults to false. + /// + /// - Output output: 4-D with shape `[batch, orig_height, orig_width, channels]`. + /// Gradients with respect to the input image. Input image must have been + /// float or double. + @inlinable @inline(__always) + public static func resizeBilinearGrad( + grads: Tensor, + originalImage: Tensor, + alignCorners: Bool = false, + halfPixelCenters: Bool = false + ) -> Tensor { + switch commonBackend(grads.handle.backend, originalImage.handle.backend) { + case .XLA: + let output_device = originalImage.device + let grads = Tensor(copying: grads, to: .defaultTFEager) + let originalImage = Tensor(copying: originalImage, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.resizeBilinearGrad( + grads: grads, originalImage: originalImage, alignCorners: alignCorners, + halfPixelCenters: halfPixelCenters), to: output_device) + case .TF_EAGER: + return _RawTFEager.resizeBilinearGrad( + grads: grads, originalImage: originalImage, alignCorners: alignCorners, + halfPixelCenters: halfPixelCenters) + } + + } + + /// Resize `images` to `size` using nearest neighbor interpolation. + /// + /// - Parameters: + /// - images: 4-D with shape `[batch, height, width, channels]`. + /// - size: = A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The + /// new size for the images. + /// + /// - Attr align_corners: If true, the centers of the 4 corner pixels of the input and output tensors are + /// aligned, preserving the values at the corner pixels. Defaults to false. + /// + /// - Output resized_images: 4-D with shape + /// `[batch, new_height, new_width, channels]`. 
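Nearest-neighbor resizing then simply copies the input pixel closest to each mapped coordinate. A 1-D sketch under the default flags, assuming the truncating mapping:

```swift
// 1-D nearest-neighbor resize with the default (truncating) mapping.
func resizeNearest(_ input: [Float], to outSize: Int) -> [Float] {
  let scale = Float(input.count) / Float(outSize)
  return (0..<outSize).map { i in
    let src = min(Int(Float(i) * scale), input.count - 1)
    return input[src]
  }
}

print(resizeNearest([10, 20, 30, 40], to: 8))
// [10.0, 10.0, 20.0, 20.0, 30.0, 30.0, 40.0, 40.0]
```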
+ @inlinable @inline(__always) + public static func resizeNearestNeighbor( + images: Tensor, + size: Tensor, + alignCorners: Bool = false, + halfPixelCenters: Bool = false + ) -> Tensor { + switch commonBackend(images.handle.backend, size.handle.backend) { + case .XLA: + let output_device = size.device + let images = Tensor(copying: images, to: .defaultTFEager) + let size = Tensor(copying: size, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.resizeNearestNeighbor( + images: images, size: size, alignCorners: alignCorners, + halfPixelCenters: halfPixelCenters), to: output_device) + case .TF_EAGER: + return _RawTFEager.resizeNearestNeighbor( + images: images, size: size, alignCorners: alignCorners, halfPixelCenters: halfPixelCenters + ) + } + + } + + /// Computes the gradient of nearest neighbor interpolation. + /// + /// - Parameters: + /// - grads: 4-D with shape `[batch, height, width, channels]`. + /// - size: = A 1-D int32 Tensor of 2 elements: `orig_height, orig_width`. The + /// original input size. + /// + /// - Attr align_corners: If true, the centers of the 4 corner pixels of the input and grad tensors are + /// aligned. Defaults to false. + /// + /// - Output output: 4-D with shape `[batch, orig_height, orig_width, channels]`. Gradients + /// with respect to the input image. + @inlinable @inline(__always) + public static func resizeNearestNeighborGrad( + grads: Tensor, + size: Tensor, + alignCorners: Bool = false, + halfPixelCenters: Bool = false + ) -> Tensor { + switch commonBackend(grads.handle.backend, size.handle.backend) { + case .XLA: + let output_device = size.device + let grads = Tensor(copying: grads, to: .defaultTFEager) + let size = Tensor(copying: size, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.resizeNearestNeighborGrad( + grads: grads, size: size, alignCorners: alignCorners, halfPixelCenters: halfPixelCenters + ), to: output_device) + case .TF_EAGER: + return _RawTFEager.resizeNearestNeighborGrad( + grads: grads, size: size, alignCorners: alignCorners, halfPixelCenters: halfPixelCenters) + } + + } + + /// Applies a gradient to a given accumulator. + /// + /// Does not add if local_step is lesser than the accumulator's global_step. + /// + /// - Parameters: + /// - handle: The handle to a accumulator. + /// - local_step: The local_step value at which the gradient was computed. + /// - gradient: A tensor of the gradient to be accumulated. + /// + /// - Attr dtype: The data type of accumulated gradients. Needs to correspond to the type + /// of the accumulator. + @inlinable @inline(__always) + public static func resourceAccumulatorApplyGradient( + handle: ResourceHandle, + localStep: Tensor, + gradient: Tensor + ) { + _RawTFEager.resourceAccumulatorApplyGradient( + handle: handle, localStep: localStep, gradient: gradient) + } + + /// Returns the number of gradients aggregated in the given accumulators. + /// + /// - Parameter handle: The handle to an accumulator. + /// + /// - Output num_accumulated: The number of gradients aggregated in the given accumulator. + @inlinable @inline(__always) + public static func resourceAccumulatorNumAccumulated( + handle: ResourceHandle + ) -> Tensor { + _RawTFEager.resourceAccumulatorNumAccumulated(handle: handle) + } + + /// Updates the accumulator with a new value for global_step. + /// + /// Logs warning if the accumulator's value is already higher than + /// new_global_step. + /// + /// - Parameters: + /// - handle: The handle to an accumulator. 
+ /// - new_global_step: The new global_step value to set. + @inlinable @inline(__always) + public static func resourceAccumulatorSetGlobalStep( + handle: ResourceHandle, + newGlobalStep: Tensor + ) { + _RawTFEager.resourceAccumulatorSetGlobalStep(handle: handle, newGlobalStep: newGlobalStep) + } + + /// Extracts the average gradient in the given ConditionalAccumulator. + /// + /// The op blocks until sufficient (i.e., more than num_required) + /// gradients have been accumulated. If the accumulator has already + /// aggregated more than num_required gradients, it returns the average of + /// the accumulated gradients. Also automatically increments the recorded + /// global_step in the accumulator by 1, and resets the aggregate to 0. + /// + /// - Parameters: + /// - handle: The handle to an accumulator. + /// - num_required: Number of gradients required before we return an aggregate. + /// + /// - Attr dtype: The data type of accumulated gradients. Needs to correspond to the type + /// of the accumulator. + /// + /// - Output average: The average of the accumulated gradients. + @inlinable @inline(__always) + public static func resourceAccumulatorTakeGradient( + handle: ResourceHandle, + numRequired: Tensor + ) -> Tensor { + switch numRequired.handle.backend { + case .XLA: + let output_device = numRequired.device + let numRequired = Tensor(copying: numRequired, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.resourceAccumulatorTakeGradient( + handle: handle, numRequired: numRequired), to: output_device) + case .TF_EAGER: + return _RawTFEager.resourceAccumulatorTakeGradient(handle: handle, numRequired: numRequired) + } + + } + + /// Update '*var' according to the AdaMax algorithm. + /// + /// m_t <- beta1 * m_{t-1} + (1 - beta1) * g + /// v_t <- max(beta2 * v_{t-1}, abs(g)) + /// variable <- variable - learning_rate / (1 - beta1^t) * m_t / (v_t + epsilon) + /// + /// - Parameters: + /// - var: Should be from a Variable(). + /// - m: Should be from a Variable(). + /// - v: Should be from a Variable(). + /// - beta1_power: Must be a scalar. + /// - lr: Scaling factor. Must be a scalar. + /// - beta1: Momentum factor. Must be a scalar. + /// - beta2: Momentum factor. Must be a scalar. + /// - epsilon: Ridge term. Must be a scalar. + /// - grad: The gradient. + /// + /// - Attr use_locking: If `True`, updating of the var, m, and v tensors will be protected + /// by a lock; otherwise the behavior is undefined, but may exhibit less + /// contention. + @inlinable @inline(__always) + public static func resourceApplyAdaMax( + var_: ResourceHandle, + m: ResourceHandle, + v: ResourceHandle, + beta1Power: Tensor, + lr: Tensor, + beta1: Tensor, + beta2: Tensor, + epsilon: Tensor, + grad: Tensor, + useLocking: Bool = false + ) { + _RawTFEager.resourceApplyAdaMax( + var_: var_, m: m, v: v, beta1Power: beta1Power, lr: lr, beta1: beta1, beta2: beta2, + epsilon: epsilon, grad: grad, useLocking: useLocking) + } + + /// Update '*var' according to the adadelta scheme. + /// + /// accum = rho() * accum + (1 - rho()) * grad.square(); + /// update = (update_accum + epsilon).sqrt() * (accum + epsilon()).rsqrt() * grad; + /// update_accum = rho() * update_accum + (1 - rho()) * update.square(); + /// var -= update; + /// + /// - Parameters: + /// - var: Should be from a Variable(). + /// - accum: Should be from a Variable(). + /// - accum_update: Should be from a Variable(). + /// - lr: Scaling factor. Must be a scalar. + /// - rho: Decay factor. Must be a scalar. + /// - epsilon: Constant factor. 
Must be a scalar. + /// - grad: The gradient. + /// + /// - Attr use_locking: If True, updating of the var, accum and update_accum tensors will be protected by + /// a lock; otherwise the behavior is undefined, but may exhibit less contention. + @inlinable @inline(__always) + public static func resourceApplyAdadelta( + var_: ResourceHandle, + accum: ResourceHandle, + accumUpdate: ResourceHandle, + lr: Tensor, + rho: Tensor, + epsilon: Tensor, + grad: Tensor, + useLocking: Bool = false + ) { + _RawTFEager.resourceApplyAdadelta( + var_: var_, accum: accum, accumUpdate: accumUpdate, lr: lr, rho: rho, epsilon: epsilon, + grad: grad, useLocking: useLocking) + } + + /// Update '*var' according to the adagrad scheme. + /// + /// accum += grad * grad + /// var -= lr * grad * (1 / sqrt(accum)) + /// + /// - Parameters: + /// - var: Should be from a Variable(). + /// - accum: Should be from a Variable(). + /// - lr: Scaling factor. Must be a scalar. + /// - grad: The gradient. + /// + /// - Attr use_locking: If `True`, updating of the var and accum tensors will be protected + /// by a lock; otherwise the behavior is undefined, but may exhibit less + /// contention. + @inlinable @inline(__always) + public static func resourceApplyAdagrad( + var_: ResourceHandle, + accum: ResourceHandle, + lr: Tensor, + grad: Tensor, + useLocking: Bool = false, + updateSlots: Bool = true + ) { + _RawTFEager.resourceApplyAdagrad( + var_: var_, accum: accum, lr: lr, grad: grad, useLocking: useLocking, + updateSlots: updateSlots) + } + + /// Update '*var' according to the proximal adagrad scheme. + /// + /// - Parameters: + /// - var: Should be from a Variable(). + /// - gradient_accumulator: Should be from a Variable(). + /// - gradient_squared_accumulator: Should be from a Variable(). + /// - grad: The gradient. + /// - lr: Scaling factor. Must be a scalar. + /// - l1: L1 regularization. Must be a scalar. + /// - l2: L2 regularization. Must be a scalar. + /// - global_step: Training step number. Must be a scalar. + /// + /// - Attr use_locking: If True, updating of the var and accum tensors will be protected by + /// a lock; otherwise the behavior is undefined, but may exhibit less contention. + @inlinable @inline(__always) + public static func resourceApplyAdagradDA( + var_: ResourceHandle, + gradientAccumulator: ResourceHandle, + gradientSquaredAccumulator: ResourceHandle, + grad: Tensor, + lr: Tensor, + l1: Tensor, + l2: Tensor, + globalStep: Tensor, + useLocking: Bool = false + ) { + _RawTFEager.resourceApplyAdagradDA( + var_: var_, gradientAccumulator: gradientAccumulator, + gradientSquaredAccumulator: gradientSquaredAccumulator, grad: grad, lr: lr, l1: l1, l2: l2, + globalStep: globalStep, useLocking: useLocking) + } + + /// Update '*var' according to the adagrad scheme. + /// + /// accum += grad * grad + /// var -= lr * grad * (1 / sqrt(accum)) + /// + /// - Parameters: + /// - var: Should be from a Variable(). + /// - accum: Should be from a Variable(). + /// - lr: Scaling factor. Must be a scalar. + /// - epsilon: Constant factor. Must be a scalar. + /// - grad: The gradient. + /// + /// - Attr use_locking: If `True`, updating of the var and accum tensors will be protected + /// by a lock; otherwise the behavior is undefined, but may exhibit less + /// contention. 
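Written out on scalars, the update above is two lines. The V2 variant is generally described as adding `epsilon` inside the denominator, so treat that placement as an assumption of this sketch:

```swift
// One Adagrad(V2) step on scalar state, per the formulas above.
func adagradV2Step(
  var_: inout Float, accum: inout Float,
  lr: Float, epsilon: Float, grad: Float
) {
  accum += grad * grad
  var_ -= lr * grad / (accum.squareRoot() + epsilon)  // epsilon placement assumed
}

var w: Float = 1.0
var acc: Float = 0.1
adagradV2Step(var_: &w, accum: &acc, lr: 0.1, epsilon: 1e-7, grad: 0.5)
print(w, acc)  // ≈ 0.9155 0.35
```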
+ @inlinable @inline(__always) + public static func resourceApplyAdagradV2( + var_: ResourceHandle, + accum: ResourceHandle, + lr: Tensor, + epsilon: Tensor, + grad: Tensor, + useLocking: Bool = false, + updateSlots: Bool = true + ) { + _RawTFEager.resourceApplyAdagradV2( + var_: var_, accum: accum, lr: lr, epsilon: epsilon, grad: grad, useLocking: useLocking, + updateSlots: updateSlots) + } + + /// Update '*var' according to the Adam algorithm. + /// + /// $$\text{lr}_t := \mathrm{learning_rate} * \sqrt{1 - \beta_2^t} / (1 - \beta_1^t)$$ + /// $$m_t := \beta_1 * m_{t-1} + (1 - \beta_1) * g$$ + /// $$v_t := \beta_2 * v_{t-1} + (1 - \beta_2) * g * g$$ + /// $$\text{variable} := \text{variable} - \text{lr}_t * m_t / (\sqrt{v_t} + \epsilon)$$ + /// + /// - Parameters: + /// - var: Should be from a Variable(). + /// - m: Should be from a Variable(). + /// - v: Should be from a Variable(). + /// - beta1_power: Must be a scalar. + /// - beta2_power: Must be a scalar. + /// - lr: Scaling factor. Must be a scalar. + /// - beta1: Momentum factor. Must be a scalar. + /// - beta2: Momentum factor. Must be a scalar. + /// - epsilon: Ridge term. Must be a scalar. + /// - grad: The gradient. + /// + /// - Attrs: + /// - use_locking: If `True`, updating of the var, m, and v tensors will be protected + /// by a lock; otherwise the behavior is undefined, but may exhibit less + /// contention. + /// - use_nesterov: If `True`, uses the nesterov update. + @inlinable @inline(__always) + public static func resourceApplyAdam( + var_: ResourceHandle, + m: ResourceHandle, + v: ResourceHandle, + beta1Power: Tensor, + beta2Power: Tensor, + lr: Tensor, + beta1: Tensor, + beta2: Tensor, + epsilon: Tensor, + grad: Tensor, + useLocking: Bool = false, + useNesterov: Bool = false + ) { + _RawTFEager.resourceApplyAdam( + var_: var_, m: m, v: v, beta1Power: beta1Power, beta2Power: beta2Power, lr: lr, + beta1: beta1, beta2: beta2, epsilon: epsilon, grad: grad, useLocking: useLocking, + useNesterov: useNesterov) + } + + /// Update '*var' according to the Adam algorithm. + /// + /// $$\text{lr}_t := \mathrm{learning_rate} * \sqrt{1 - \beta_2^t} / (1 - \beta_1^t)$$ + /// $$m_t := \beta_1 * m_{t-1} + (1 - \beta_1) * g$$ + /// $$v_t := \beta_2 * v_{t-1} + (1 - \beta_2) * g * g$$ + /// $$\hat{v}_t := max{\hat{v}_{t-1}, v_t}$$ + /// $$\text{variable} := \text{variable} - \text{lr}_t * m_t / (\sqrt{\hat{v}_t} + \epsilon)$$ + /// + /// - Parameters: + /// - var: Should be from a Variable(). + /// - m: Should be from a Variable(). + /// - v: Should be from a Variable(). + /// - vhat: Should be from a Variable(). + /// - beta1_power: Must be a scalar. + /// - beta2_power: Must be a scalar. + /// - lr: Scaling factor. Must be a scalar. + /// - beta1: Momentum factor. Must be a scalar. + /// - beta2: Momentum factor. Must be a scalar. + /// - epsilon: Ridge term. Must be a scalar. + /// - grad: The gradient. + /// + /// - Attr use_locking: If `True`, updating of the var, m, and v tensors will be protected + /// by a lock; otherwise the behavior is undefined, but may exhibit less + /// contention. 
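The formulas above translate line for line into a scalar sketch; `inout` floats stand in for the variables behind the resource handles:

```swift
// One Adam-with-AMSGrad step on scalar state, per the formulas above.
func amsgradStep(
  var_: inout Float, m: inout Float, v: inout Float, vhat: inout Float,
  beta1Power: Float, beta2Power: Float,
  lr: Float, beta1: Float, beta2: Float, epsilon: Float, grad g: Float
) {
  let lrT = lr * (1 - beta2Power).squareRoot() / (1 - beta1Power)
  m = beta1 * m + (1 - beta1) * g
  v = beta2 * v + (1 - beta2) * g * g
  vhat = max(vhat, v)  // the AMSGrad correction: v-hat never shrinks
  var_ -= lrT * m / (vhat.squareRoot() + epsilon)
}

var (w, m, v, vh): (Float, Float, Float, Float) = (1, 0, 0, 0)
amsgradStep(var_: &w, m: &m, v: &v, vhat: &vh,
            beta1Power: 0.9, beta2Power: 0.999,
            lr: 0.001, beta1: 0.9, beta2: 0.999, epsilon: 1e-8, grad: 0.5)
print(w, m, v, vh)
```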
+ @inlinable @inline(__always) + public static func resourceApplyAdamWithAmsgrad( + var_: ResourceHandle, + m: ResourceHandle, + v: ResourceHandle, + vhat: ResourceHandle, + beta1Power: Tensor, + beta2Power: Tensor, + lr: Tensor, + beta1: Tensor, + beta2: Tensor, + epsilon: Tensor, + grad: Tensor, + useLocking: Bool = false + ) { + _RawTFEager.resourceApplyAdamWithAmsgrad( + var_: var_, m: m, v: v, vhat: vhat, beta1Power: beta1Power, beta2Power: beta2Power, lr: lr, + beta1: beta1, beta2: beta2, epsilon: epsilon, grad: grad, useLocking: useLocking) + } + + /// Update '*var' according to the AddSign update. + /// + /// m_t <- beta1 * m_{t-1} + (1 - beta1) * g + /// update <- (alpha + sign_decay * sign(g) *sign(m)) * g + /// variable <- variable - lr_t * update + /// + /// - Parameters: + /// - var: Should be from a Variable(). + /// - m: Should be from a Variable(). + /// - lr: Scaling factor. Must be a scalar. + /// - alpha: Must be a scalar. + /// - sign_decay: Must be a scalar. + /// - beta: Must be a scalar. + /// - grad: The gradient. + /// + /// - Attr use_locking: If `True`, updating of the var and m tensors is + /// protected by a lock; otherwise the behavior is undefined, but may exhibit less + /// contention. + @inlinable @inline(__always) + public static func resourceApplyAddSign( + var_: ResourceHandle, + m: ResourceHandle, + lr: Tensor, + alpha: Tensor, + signDecay: Tensor, + beta: Tensor, + grad: Tensor, + useLocking: Bool = false + ) { + _RawTFEager.resourceApplyAddSign( + var_: var_, m: m, lr: lr, alpha: alpha, signDecay: signDecay, beta: beta, grad: grad, + useLocking: useLocking) + } + + /// Update '*var' according to the centered RMSProp algorithm. + /// + /// The centered RMSProp algorithm uses an estimate of the centered second moment + /// (i.e., the variance) for normalization, as opposed to regular RMSProp, which + /// uses the (uncentered) second moment. This often helps with training, but is + /// slightly more expensive in terms of computation and memory. + /// + /// Note that in dense implementation of this algorithm, mg, ms, and mom will + /// update even if the grad is zero, but in this sparse implementation, mg, ms, + /// and mom will not update in iterations during which the grad is zero. + /// + /// mean_square = decay * mean_square + (1-decay) * gradient ** 2 + /// mean_grad = decay * mean_grad + (1-decay) * gradient + /// + /// Delta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2) + /// + /// mg <- rho * mg_{t-1} + (1-rho) * grad + /// ms <- rho * ms_{t-1} + (1-rho) * grad * grad + /// mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms - mg * mg + epsilon) + /// var <- var - mom + /// + /// - Parameters: + /// - var: Should be from a Variable(). + /// - mg: Should be from a Variable(). + /// - ms: Should be from a Variable(). + /// - mom: Should be from a Variable(). + /// - lr: Scaling factor. Must be a scalar. + /// - rho: Decay rate. Must be a scalar. + /// - epsilon: Ridge term. Must be a scalar. + /// - grad: The gradient. + /// + /// - Attr use_locking: If `True`, updating of the var, mg, ms, and mom tensors is + /// protected by a lock; otherwise the behavior is undefined, but may exhibit less + /// contention. 
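And the centered variant on scalars, where subtracting `mg * mg` (the squared mean) from `ms` is the only change from plain RMSProp. Again an illustrative sketch:

```swift
// One centered-RMSProp step on scalar state, per the formulas above.
func centeredRMSPropStep(
  var_: inout Float, mg: inout Float, ms: inout Float, mom: inout Float,
  lr: Float, rho: Float, momentum: Float, epsilon: Float, grad g: Float
) {
  mg = rho * mg + (1 - rho) * g        // running mean of gradients
  ms = rho * ms + (1 - rho) * g * g    // running mean of squared gradients
  // Normalize by the centered second moment, i.e. the variance estimate.
  mom = momentum * mom + lr * g / (ms - mg * mg + epsilon).squareRoot()
  var_ -= mom
}
```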
+ @inlinable @inline(__always) + public static func resourceApplyCenteredRMSProp( + var_: ResourceHandle, + mg: ResourceHandle, + ms: ResourceHandle, + mom: ResourceHandle, + lr: Tensor, + rho: Tensor, + momentum: Tensor, + epsilon: Tensor, + grad: Tensor, + useLocking: Bool = false + ) { + _RawTFEager.resourceApplyCenteredRMSProp( + var_: var_, mg: mg, ms: ms, mom: mom, lr: lr, rho: rho, momentum: momentum, + epsilon: epsilon, grad: grad, useLocking: useLocking) + } + + /// Update '*var' according to the Ftrl-proximal scheme. + /// + /// accum_new = accum + grad * grad + /// linear += grad - (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var + /// quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2 + /// var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0 + /// accum = accum_new + /// + /// - Parameters: + /// - var: Should be from a Variable(). + /// - accum: Should be from a Variable(). + /// - linear: Should be from a Variable(). + /// - grad: The gradient. + /// - lr: Scaling factor. Must be a scalar. + /// - l1: L1 regularization. Must be a scalar. + /// - l2: L2 regularization. Must be a scalar. + /// - lr_power: Scaling factor. Must be a scalar. + /// + /// - Attr use_locking: If `True`, updating of the var and accum tensors will be protected + /// by a lock; otherwise the behavior is undefined, but may exhibit less + /// contention. + @inlinable @inline(__always) + public static func resourceApplyFtrl( + var_: ResourceHandle, + accum: ResourceHandle, + linear: ResourceHandle, + grad: Tensor, + lr: Tensor, + l1: Tensor, + l2: Tensor, + lrPower: Tensor, + useLocking: Bool = false + ) { + _RawTFEager.resourceApplyFtrl( + var_: var_, accum: accum, linear: linear, grad: grad, lr: lr, l1: l1, l2: l2, + lrPower: lrPower, useLocking: useLocking) + } + + /// Update '*var' according to the Ftrl-proximal scheme. + /// + /// grad_with_shrinkage = grad + 2 * l2_shrinkage * var + /// accum_new = accum + grad_with_shrinkage * grad_with_shrinkage + /// linear += grad_with_shrinkage + + /// (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var + /// quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2 + /// var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0 + /// accum = accum_new + /// + /// - Parameters: + /// - var: Should be from a Variable(). + /// - accum: Should be from a Variable(). + /// - linear: Should be from a Variable(). + /// - grad: The gradient. + /// - lr: Scaling factor. Must be a scalar. + /// - l1: L1 regularization. Must be a scalar. + /// - l2: L2 shrinkage regularization. Must be a scalar. + /// - lr_power: Scaling factor. Must be a scalar. + /// + /// - Attr use_locking: If `True`, updating of the var and accum tensors will be protected + /// by a lock; otherwise the behavior is undefined, but may exhibit less + /// contention. + @inlinable @inline(__always) + public static func resourceApplyFtrlV2( + var_: ResourceHandle, + accum: ResourceHandle, + linear: ResourceHandle, + grad: Tensor, + lr: Tensor, + l1: Tensor, + l2: Tensor, + l2Shrinkage: Tensor, + lrPower: Tensor, + useLocking: Bool = false + ) { + _RawTFEager.resourceApplyFtrlV2( + var_: var_, accum: accum, linear: linear, grad: grad, lr: lr, l1: l1, l2: l2, + l2Shrinkage: l2Shrinkage, lrPower: lrPower, useLocking: useLocking) + } + + /// Update '*var' by subtracting 'alpha' * 'delta' from it. + /// + /// - Parameters: + /// - var: Should be from a Variable(). + /// - alpha: Scaling factor. Must be a scalar. + /// - delta: The change. 
+ /// + /// - Attr use_locking: If `True`, the subtraction will be protected by a lock; + /// otherwise the behavior is undefined, but may exhibit less contention. + @inlinable @inline(__always) + public static func resourceApplyGradientDescent( + var_: ResourceHandle, + alpha: Tensor, + delta: Tensor, + useLocking: Bool = false + ) { + _RawTFEager.resourceApplyGradientDescent( + var_: var_, alpha: alpha, delta: delta, useLocking: useLocking) + } + + /// Update '*var' according to the momentum scheme. + /// + /// Set use_nesterov = True if you want to use Nesterov momentum. + /// + /// accum = accum * momentum - lr * grad + /// var += accum + /// + /// - Parameters: + /// - var: Should be from a Variable(). + /// - accum: Should be from a Variable(). + /// - lr: Scaling factor. Must be a scalar. + /// - grad: The gradient. + /// - momentum: Momentum. Must be a scalar. + /// + /// - Attrs: + /// - use_locking: If `True`, updating of the var and accum tensors will be protected + /// by a lock; otherwise the behavior is undefined, but may exhibit less + /// contention. + /// - use_nesterov: If `True`, the tensor passed to compute grad will be + /// var + momentum * accum, so in the end, the var you get is actually + /// var + momentum * accum. + @inlinable @inline(__always) + public static func resourceApplyKerasMomentum( + var_: ResourceHandle, + accum: ResourceHandle, + lr: Tensor, + grad: Tensor, + momentum: Tensor, + useLocking: Bool = false, + useNesterov: Bool = false + ) { + _RawTFEager.resourceApplyKerasMomentum( + var_: var_, accum: accum, lr: lr, grad: grad, momentum: momentum, useLocking: useLocking, + useNesterov: useNesterov) + } + + /// Update '*var' according to the momentum scheme. Set use_nesterov = True if you + /// + /// want to use Nesterov momentum. + /// + /// accum = accum * momentum + grad + /// var -= lr * accum + /// + /// - Parameters: + /// - var: Should be from a Variable(). + /// - accum: Should be from a Variable(). + /// - lr: Scaling factor. Must be a scalar. + /// - grad: The gradient. + /// - momentum: Momentum. Must be a scalar. + /// + /// - Attrs: + /// - use_locking: If `True`, updating of the var and accum tensors will be protected + /// by a lock; otherwise the behavior is undefined, but may exhibit less + /// contention. + /// - use_nesterov: If `True`, the tensor passed to compute grad will be + /// var - lr * momentum * accum, so in the end, the var you get is actually + /// var - lr * momentum * accum. + @inlinable @inline(__always) + public static func resourceApplyMomentum( + var_: ResourceHandle, + accum: ResourceHandle, + lr: Tensor, + grad: Tensor, + momentum: Tensor, + useLocking: Bool = false, + useNesterov: Bool = false + ) { + _RawTFEager.resourceApplyMomentum( + var_: var_, accum: accum, lr: lr, grad: grad, momentum: momentum, useLocking: useLocking, + useNesterov: useNesterov) + } + + /// Update '*var' according to the AddSign update. + /// + /// m_t <- beta1 * m_{t-1} + (1 - beta1) * g + /// update <- exp(logbase * sign_decay * sign(g) * sign(m_t)) * g + /// variable <- variable - lr_t * update + /// + /// - Parameters: + /// - var: Should be from a Variable(). + /// - m: Should be from a Variable(). + /// - lr: Scaling factor. Must be a scalar. + /// - logbase: Must be a scalar. + /// - sign_decay: Must be a scalar. + /// - beta: Must be a scalar. + /// - grad: The gradient. 
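Despite the summary line inherited from upstream (it says "AddSign"), the exponential rule above is the PowerSign update. On scalars it reads as follows; `sgn` is a small helper because Swift's `FloatingPoint.sign` is an enum, not a number:

```swift
import Foundation  // for exp

// Numeric sign as a Float (0 for 0).
func sgn(_ x: Float) -> Float { x > 0 ? 1 : (x < 0 ? -1 : 0) }

// One PowerSign step on scalar state, per the formulas above.
func powerSignStep(
  var_: inout Float, m: inout Float,
  lr: Float, logbase: Float, signDecay: Float, beta: Float, grad g: Float
) {
  m = beta * m + (1 - beta) * g  // m_t
  let update = Float(exp(Double(logbase * signDecay * sgn(g) * sgn(m)))) * g
  var_ -= lr * update
}
```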
+ /// + /// - Attr use_locking: If `True`, updating of the var and m tensors is + /// protected by a lock; otherwise the behavior is undefined, but may exhibit less + /// contention. + @inlinable @inline(__always) + public static func resourceApplyPowerSign( + var_: ResourceHandle, + m: ResourceHandle, + lr: Tensor, + logbase: Tensor, + signDecay: Tensor, + beta: Tensor, + grad: Tensor, + useLocking: Bool = false + ) { + _RawTFEager.resourceApplyPowerSign( + var_: var_, m: m, lr: lr, logbase: logbase, signDecay: signDecay, beta: beta, grad: grad, + useLocking: useLocking) + } + + /// Update '*var' and '*accum' according to FOBOS with Adagrad learning rate. + /// + /// accum += grad * grad + /// prox_v = var - lr * grad * (1 / sqrt(accum)) + /// var = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1,0} + /// + /// - Parameters: + /// - var: Should be from a Variable(). + /// - accum: Should be from a Variable(). + /// - lr: Scaling factor. Must be a scalar. + /// - l1: L1 regularization. Must be a scalar. + /// - l2: L2 regularization. Must be a scalar. + /// - grad: The gradient. + /// + /// - Attr use_locking: If True, updating of the var and accum tensors will be protected by + /// a lock; otherwise the behavior is undefined, but may exhibit less contention. + @inlinable @inline(__always) + public static func resourceApplyProximalAdagrad( + var_: ResourceHandle, + accum: ResourceHandle, + lr: Tensor, + l1: Tensor, + l2: Tensor, + grad: Tensor, + useLocking: Bool = false + ) { + _RawTFEager.resourceApplyProximalAdagrad( + var_: var_, accum: accum, lr: lr, l1: l1, l2: l2, grad: grad, useLocking: useLocking) + } + + /// Update '*var' as FOBOS algorithm with fixed learning rate. + /// + /// prox_v = var - alpha * delta + /// var = sign(prox_v)/(1+alpha*l2) * max{|prox_v|-alpha*l1,0} + /// + /// - Parameters: + /// - var: Should be from a Variable(). + /// - alpha: Scaling factor. Must be a scalar. + /// - l1: L1 regularization. Must be a scalar. + /// - l2: L2 regularization. Must be a scalar. + /// - delta: The change. + /// + /// - Attr use_locking: If True, the subtraction will be protected by a lock; + /// otherwise the behavior is undefined, but may exhibit less contention. + @inlinable @inline(__always) + public static func resourceApplyProximalGradientDescent( + var_: ResourceHandle, + alpha: Tensor, + l1: Tensor, + l2: Tensor, + delta: Tensor, + useLocking: Bool = false + ) { + _RawTFEager.resourceApplyProximalGradientDescent( + var_: var_, alpha: alpha, l1: l1, l2: l2, delta: delta, useLocking: useLocking) + } + + /// Update '*var' according to the RMSProp algorithm. + /// + /// Note that in dense implementation of this algorithm, ms and mom will + /// update even if the grad is zero, but in this sparse implementation, ms + /// and mom will not update in iterations during which the grad is zero. + /// + /// mean_square = decay * mean_square + (1-decay) * gradient ** 2 + /// Delta = learning_rate * gradient / sqrt(mean_square + epsilon) + /// + /// ms <- rho * ms_{t-1} + (1-rho) * grad * grad + /// mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon) + /// var <- var - mom + /// + /// - Parameters: + /// - var: Should be from a Variable(). + /// - ms: Should be from a Variable(). + /// - mom: Should be from a Variable(). + /// - lr: Scaling factor. Must be a scalar. + /// - rho: Decay rate. Must be a scalar. + /// - epsilon: Ridge term. Must be a scalar. + /// - grad: The gradient. 
+ /// + /// - Attr use_locking: If `True`, updating of the var, ms, and mom tensors is protected + /// by a lock; otherwise the behavior is undefined, but may exhibit less + /// contention. + @inlinable @inline(__always) + public static func resourceApplyRMSProp( + var_: ResourceHandle, + ms: ResourceHandle, + mom: ResourceHandle, + lr: Tensor, + rho: Tensor, + momentum: Tensor, + epsilon: Tensor, + grad: Tensor, + useLocking: Bool = false + ) { + _RawTFEager.resourceApplyRMSProp( + var_: var_, ms: ms, mom: mom, lr: lr, rho: rho, momentum: momentum, epsilon: epsilon, + grad: grad, useLocking: useLocking) + } + + /// A conditional accumulator for aggregating gradients. + /// + /// The accumulator accepts gradients marked with local_step greater or + /// equal to the most recent global_step known to the accumulator. The + /// average can be extracted from the accumulator, provided sufficient + /// gradients have been accumulated. Extracting the average automatically + /// resets the aggregate to 0, and increments the global_step recorded by + /// the accumulator. + /// This is a resource version of ConditionalAccumulator that will work in TF2.0 + /// with tf.cond version 2. + /// + /// - Attrs: + /// - dtype: The type of the value being accumulated. + /// - shape: The shape of the values, can be [], in which case shape is unknown. + /// - container: If non-empty, this accumulator is placed in the given container. + /// Otherwise, a default container is used. + /// - shared_name: If non-empty, this accumulator will be shared under the + /// given name across multiple sessions. + /// + /// - Output handle: The handle to the accumulator. + @inlinable @inline(__always) + public static func resourceConditionalAccumulator( + dtype: TensorDataType, + shape: TensorShape?, + container: String, + sharedName: String, + reductionType: ReductionType = .mean + ) -> ResourceHandle { + _RawTFEager.resourceConditionalAccumulator( + dtype: dtype, shape: shape, container: container, sharedName: sharedName, + reductionType: reductionType) + } + + /// Increments variable pointed to by 'resource' until it reaches 'limit'. + /// + /// - Parameter resource: Should be from a scalar `Variable` node. + /// + /// - Attr limit: If incrementing ref would bring it above limit, instead generates an + /// 'OutOfRange' error. + /// + /// - Output output: A copy of the input before increment. If nothing else modifies the + /// input, the values produced will all be distinct. + @inlinable @inline(__always) + public static func resourceCountUpTo( + resource: ResourceHandle, + limit: Int64 + ) -> Tensor { + _RawTFEager.resourceCountUpTo(resource: resource, limit: limit) + } + + @inlinable @inline(__always) + public static func resourceCreateOp( + resource: ResourceHandle + ) { + _RawTFEager.resourceCreateOp(resource: resource) + } + + /// Gather slices from the variable pointed to by `resource` according to `indices`. + /// + /// `indices` must be an integer tensor of any dimension (usually 0-D or 1-D). + /// Produces an output tensor with shape `indices.shape + params.shape[1:]` where: + /// + /// ```python + /// # Scalar indices + /// output[:, ..., :] = params[indices, :, ... :] + /// + /// # Vector indices + /// output[i, :, ..., :] = params[indices[i], :, ... :] + /// + /// # Higher rank indices + /// output[i, ..., j, :, ... 
:] = params[indices[i, ..., j], :, ..., :] + /// ``` + @inlinable @inline(__always) + public static func resourceGather< + Dtype: TensorFlowScalar, + Tindices: TensorFlowIndex + >( + resource: ResourceHandle, + indices: Tensor, + batchDims: Int64 = 0, + validateIndices: Bool = true + ) -> Tensor { + switch indices.handle.backend { + case .XLA: + let output_device = indices.device + let indices = Tensor(copying: indices, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.resourceGather( + resource: resource, indices: indices, batchDims: batchDims, + validateIndices: validateIndices), to: output_device) + case .TF_EAGER: + return _RawTFEager.resourceGather( + resource: resource, indices: indices, batchDims: batchDims, + validateIndices: validateIndices) + } + + } + + @inlinable @inline(__always) + public static func resourceGatherNd< + Dtype: TensorFlowScalar, + Tindices: TensorFlowIndex + >( + resource: ResourceHandle, + indices: Tensor + ) -> Tensor { + switch indices.handle.backend { + case .XLA: + let output_device = indices.device + let indices = Tensor(copying: indices, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.resourceGatherNd(resource: resource, indices: indices), + to: output_device) + case .TF_EAGER: + return _RawTFEager.resourceGatherNd(resource: resource, indices: indices) + } + + } + + @inlinable @inline(__always) + public static func resourceInitializedOp( + resource: ResourceHandle + ) -> Tensor { + _RawTFEager.resourceInitializedOp(resource: resource) + } + + /// Adds sparse updates to the variable referenced by `resource`. + /// + /// This operation computes + /// + /// # Scalar indices + /// ref[indices, ...] += updates[...] + /// + /// # Vector indices (for each i) + /// ref[indices[i], ...] += updates[i, ...] + /// + /// # High rank indices (for each i, ..., j) + /// ref[indices[i, ..., j], ...] += updates[i, ..., j, ...] + /// + /// Duplicate entries are handled correctly: if multiple `indices` reference + /// the same location, their contributions add. + /// + /// Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. + /// + ///
+ /// + /// - Parameters: + /// - resource: Should be from a `Variable` node. + /// - indices: A tensor of indices into the first dimension of `ref`. + /// - updates: A tensor of updated values to add to `ref`. + @inlinable @inline(__always) + public static func resourceScatterAdd< + Dtype: TensorFlowNumeric, + Tindices: TensorFlowIndex + >( + resource: ResourceHandle, + indices: Tensor, + updates: Tensor + ) { + _RawTFEager.resourceScatterAdd(resource: resource, indices: indices, updates: updates) + } + + /// Divides sparse updates into the variable referenced by `resource`. + /// + /// This operation computes + /// + /// # Scalar indices + /// ref[indices, ...] /= updates[...] + /// + /// # Vector indices (for each i) + /// ref[indices[i], ...] /= updates[i, ...] + /// + /// # High rank indices (for each i, ..., j) + /// ref[indices[i, ..., j], ...] /= updates[i, ..., j, ...] + /// + /// Duplicate entries are handled correctly: if multiple `indices` reference + /// the same location, their contributions multiply. + /// + /// Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. + /// + ///
+ /// + /// - Parameters: + /// - resource: Should be from a `Variable` node. + /// - indices: A tensor of indices into the first dimension of `ref`. + /// - updates: A tensor of updated values to add to `ref`. + @inlinable @inline(__always) + public static func resourceScatterDiv< + Dtype: TensorFlowNumeric, + Tindices: TensorFlowIndex + >( + resource: ResourceHandle, + indices: Tensor, + updates: Tensor + ) { + _RawTFEager.resourceScatterDiv(resource: resource, indices: indices, updates: updates) + } + + /// Reduces sparse updates into the variable referenced by `resource` using the `max` operation. + /// + /// This operation computes + /// + /// # Scalar indices + /// ref[indices, ...] = max(ref[indices, ...], updates[...]) + /// + /// # Vector indices (for each i) + /// ref[indices[i], ...] = max(ref[indices[i], ...], updates[i, ...]) + /// + /// # High rank indices (for each i, ..., j) + /// ref[indices[i, ..., j], ...] = max(ref[indices[i, ..., j], ...], updates[i, ..., j, ...]) + /// + /// Duplicate entries are handled correctly: if multiple `indices` reference + /// the same location, their contributions are combined. + /// + /// Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. + /// + ///
+ /// + /// - Parameters: + /// - resource: Should be from a `Variable` node. + /// - indices: A tensor of indices into the first dimension of `ref`. + /// - updates: A tensor of updated values to add to `ref`. + @inlinable @inline(__always) + public static func resourceScatterMax< + Dtype: TensorFlowNumeric, + Tindices: TensorFlowIndex + >( + resource: ResourceHandle, + indices: Tensor, + updates: Tensor + ) { + _RawTFEager.resourceScatterMax(resource: resource, indices: indices, updates: updates) + } + + /// Reduces sparse updates into the variable referenced by `resource` using the `min` operation. + /// + /// This operation computes + /// + /// # Scalar indices + /// ref[indices, ...] = min(ref[indices, ...], updates[...]) + /// + /// # Vector indices (for each i) + /// ref[indices[i], ...] = min(ref[indices[i], ...], updates[i, ...]) + /// + /// # High rank indices (for each i, ..., j) + /// ref[indices[i, ..., j], ...] = min(ref[indices[i, ..., j], ...], updates[i, ..., j, ...]) + /// + /// Duplicate entries are handled correctly: if multiple `indices` reference + /// the same location, their contributions are combined. + /// + /// Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. + /// + ///
+ /// + /// - Parameters: + /// - resource: Should be from a `Variable` node. + /// - indices: A tensor of indices into the first dimension of `ref`. + /// - updates: A tensor of updated values to add to `ref`. + @inlinable @inline(__always) + public static func resourceScatterMin< + Dtype: TensorFlowNumeric, + Tindices: TensorFlowIndex + >( + resource: ResourceHandle, + indices: Tensor, + updates: Tensor + ) { + _RawTFEager.resourceScatterMin(resource: resource, indices: indices, updates: updates) + } + + /// Multiplies sparse updates into the variable referenced by `resource`. + /// + /// This operation computes + /// + /// # Scalar indices + /// ref[indices, ...] *= updates[...] + /// + /// # Vector indices (for each i) + /// ref[indices[i], ...] *= updates[i, ...] + /// + /// # High rank indices (for each i, ..., j) + /// ref[indices[i, ..., j], ...] *= updates[i, ..., j, ...] + /// + /// Duplicate entries are handled correctly: if multiple `indices` reference + /// the same location, their contributions multiply. + /// + /// Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. + /// + ///
+ /// + /// - Parameters: + /// - resource: Should be from a `Variable` node. + /// - indices: A tensor of indices into the first dimension of `ref`. + /// - updates: A tensor of updated values to add to `ref`. + @inlinable @inline(__always) + public static func resourceScatterMul< + Dtype: TensorFlowNumeric, + Tindices: TensorFlowIndex + >( + resource: ResourceHandle, + indices: Tensor, + updates: Tensor + ) { + _RawTFEager.resourceScatterMul(resource: resource, indices: indices, updates: updates) + } + + /// Applies sparse addition to individual values or slices in a Variable. + /// + /// `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`. + /// + /// `indices` must be integer tensor, containing indices into `ref`. + /// It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`. + /// + /// The innermost dimension of `indices` (with length `K`) corresponds to + /// indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th + /// dimension of `ref`. + /// + /// `updates` is `Tensor` of rank `Q-1+P-K` with shape: + /// + /// ``` + /// [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]] + /// ``` + /// + /// For example, say we want to add 4 scattered elements to a rank-1 tensor to + /// 8 elements. In Python, that addition would look like this: + /// + /// ```python + /// ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8], use_resource=True) + /// indices = tf.constant([[4], [3], [1], [7]]) + /// updates = tf.constant([9, 10, 11, 12]) + /// add = tf.scatter_nd_add(ref, indices, updates) + /// with tf.Session() as sess: + /// print sess.run(add) + /// ``` + /// + /// The resulting update to ref would look like this: + /// + /// [1, 13, 3, 14, 14, 6, 7, 20] + /// + /// See `tf.scatter_nd` for more details about how to make updates to + /// slices. + /// + /// - Parameters: + /// - ref: A resource handle. Must be from a VarHandleOp. + /// - indices: A Tensor. Must be one of the following types: int32, int64. + /// A tensor of indices into ref. + /// - updates: A Tensor. Must have the same type as ref. A tensor of + /// values to add to ref. + /// + /// - Attr use_locking: An optional bool. Defaults to True. If True, the assignment will + /// be protected by a lock; otherwise the behavior is undefined, + /// but may exhibit less contention. + @inlinable @inline(__always) + public static func resourceScatterNdAdd< + T: TensorFlowScalar, + Tindices: TensorFlowIndex + >( + ref: ResourceHandle, + indices: Tensor, + updates: Tensor, + useLocking: Bool = true + ) { + _RawTFEager.resourceScatterNdAdd( + ref: ref, indices: indices, updates: updates, useLocking: useLocking) + } + + /// Applies sparse subtraction to individual values or slices in a Variable. + /// + /// `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`. + /// + /// `indices` must be integer tensor, containing indices into `ref`. + /// It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`. + /// + /// The innermost dimension of `indices` (with length `K`) corresponds to + /// indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th + /// dimension of `ref`. + /// + /// `updates` is `Tensor` of rank `Q-1+P-K` with shape: + /// + /// ``` + /// [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]] + /// ``` + /// + /// For example, say we want to subtract 4 scattered elements from a rank-1 tensor + /// with 8 elements. 
In Python, that subtraction would look like this: + /// + /// ```python + /// ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8], use_resource=True) + /// indices = tf.constant([[4], [3], [1], [7]]) + /// updates = tf.constant([9, 10, 11, 12]) + /// sub = tf.scatter_nd_sub(ref, indices, updates) + /// with tf.Session() as sess: + /// print sess.run(sub) + /// ``` + /// + /// The resulting update to ref would look like this: + /// + /// [1, -9, 3, -6, -4, 6, 7, -4] + /// + /// See `tf.scatter_nd` for more details about how to make updates to + /// slices. + /// + /// - Parameters: + /// - ref: A resource handle. Must be from a VarHandleOp. + /// - indices: A Tensor. Must be one of the following types: int32, int64. + /// A tensor of indices into ref. + /// - updates: A Tensor. Must have the same type as ref. A tensor of + /// values to add to ref. + /// + /// - Attr use_locking: An optional bool. Defaults to True. If True, the assignment will + /// be protected by a lock; otherwise the behavior is undefined, + /// but may exhibit less contention. + @inlinable @inline(__always) + public static func resourceScatterNdSub< + T: TensorFlowScalar, + Tindices: TensorFlowIndex + >( + ref: ResourceHandle, + indices: Tensor, + updates: Tensor, + useLocking: Bool = true + ) { + _RawTFEager.resourceScatterNdSub( + ref: ref, indices: indices, updates: updates, useLocking: useLocking) + } + + /// Applies sparse `updates` to individual values or slices within a given + /// + /// variable according to `indices`. + /// + /// `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`. + /// + /// `indices` must be integer tensor, containing indices into `ref`. + /// It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`. + /// + /// The innermost dimension of `indices` (with length `K`) corresponds to + /// indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th + /// dimension of `ref`. + /// + /// `updates` is `Tensor` of rank `Q-1+P-K` with shape: + /// + /// ``` + /// [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]]. + /// ``` + /// + /// For example, say we want to update 4 scattered elements to a rank-1 tensor to + /// 8 elements. In Python, that update would look like this: + /// + /// ```python + /// ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8]) + /// indices = tf.constant([[4], [3], [1] ,[7]]) + /// updates = tf.constant([9, 10, 11, 12]) + /// update = tf.scatter_nd_update(ref, indices, updates) + /// with tf.Session() as sess: + /// print sess.run(update) + /// ``` + /// + /// The resulting update to ref would look like this: + /// + /// [1, 11, 3, 10, 9, 6, 7, 12] + /// + /// See `tf.scatter_nd` for more details about how to make updates to + /// slices. + /// + /// - Parameters: + /// - ref: A resource handle. Must be from a VarHandleOp. + /// - indices: A Tensor. Must be one of the following types: int32, int64. + /// A tensor of indices into ref. + /// - updates: A Tensor. Must have the same type as ref. A tensor of updated + /// values to add to ref. + /// + /// - Attr use_locking: An optional bool. Defaults to True. If True, the assignment will + /// be protected by a lock; otherwise the behavior is undefined, + /// but may exhibit less contention. 
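The scatter-nd ops above all share the same indexing rule, so a concrete case is worth spelling out. The following is a plain-Swift sketch of the `K = 1` element-update case that `ResourceScatterNdUpdate` documents; an ordinary array stands in for the resource variable, so this is an illustration of the semantics rather than part of the generated bindings:

```swift
// Illustration only: the documented ScatterNdUpdate semantics, replayed on a
// plain Swift array instead of a resource variable.
var ref = [1, 2, 3, 4, 5, 6, 7, 8]
let indices = [[4], [3], [1], [7]]  // innermost dimension K = 1: element updates
let updates = [9, 10, 11, 12]

for (index, update) in zip(indices, updates) {
  ref[index[0]] = update
}
print(ref)  // [1, 11, 3, 10, 9, 6, 7, 12], matching the documented result
```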
+ @inlinable @inline(__always) + public static func resourceScatterNdUpdate< + T: TensorFlowScalar, + Tindices: TensorFlowIndex + >( + ref: ResourceHandle, + indices: Tensor, + updates: Tensor, + useLocking: Bool = true + ) { + _RawTFEager.resourceScatterNdUpdate( + ref: ref, indices: indices, updates: updates, useLocking: useLocking) + } + + /// Subtracts sparse updates from the variable referenced by `resource`. + /// + /// This operation computes + /// + /// # Scalar indices + /// ref[indices, ...] -= updates[...] + /// + /// # Vector indices (for each i) + /// ref[indices[i], ...] -= updates[i, ...] + /// + /// # High rank indices (for each i, ..., j) + /// ref[indices[i, ..., j], ...] -= updates[i, ..., j, ...] + /// + /// Duplicate entries are handled correctly: if multiple `indices` reference + /// the same location, their contributions add. + /// + /// Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. + /// + ///
+ /// + /// - Parameters: + /// - resource: Should be from a `Variable` node. + /// - indices: A tensor of indices into the first dimension of `ref`. + /// - updates: A tensor of updated values to add to `ref`. + @inlinable @inline(__always) + public static func resourceScatterSub< + Dtype: TensorFlowNumeric, + Tindices: TensorFlowIndex + >( + resource: ResourceHandle, + indices: Tensor, + updates: Tensor + ) { + _RawTFEager.resourceScatterSub(resource: resource, indices: indices, updates: updates) + } + + /// Assigns sparse updates to the variable referenced by `resource`. + /// + /// This operation computes + /// + /// # Scalar indices + /// ref[indices, ...] = updates[...] + /// + /// # Vector indices (for each i) + /// ref[indices[i], ...] = updates[i, ...] + /// + /// # High rank indices (for each i, ..., j) + /// ref[indices[i, ..., j], ...] = updates[i, ..., j, ...] + /// + /// - Parameters: + /// - resource: Should be from a `Variable` node. + /// - indices: A tensor of indices into the first dimension of `ref`. + /// - updates: A tensor of updated values to add to `ref`. + @inlinable @inline(__always) + public static func resourceScatterUpdate< + Dtype: TensorFlowScalar, + Tindices: TensorFlowIndex + >( + resource: ResourceHandle, + indices: Tensor, + updates: Tensor + ) { + _RawTFEager.resourceScatterUpdate(resource: resource, indices: indices, updates: updates) + } + + /// var: Should be from a Variable(). + /// + /// - Parameters: + /// - accum: Should be from a Variable(). + /// - accum_update: : Should be from a Variable(). + /// - lr: Learning rate. Must be a scalar. + /// - rho: Decay factor. Must be a scalar. + /// - epsilon: Constant factor. Must be a scalar. + /// - grad: The gradient. + /// - indices: A vector of indices into the first dimension of var and accum. + /// + /// - Attr use_locking: If True, updating of the var and accum tensors will be protected by + /// a lock; otherwise the behavior is undefined, but may exhibit less contention. + @inlinable @inline(__always) + public static func resourceSparseApplyAdadelta< + T: TensorFlowNumeric, + Tindices: TensorFlowIndex + >( + var_: ResourceHandle, + accum: ResourceHandle, + accumUpdate: ResourceHandle, + lr: Tensor, + rho: Tensor, + epsilon: Tensor, + grad: Tensor, + indices: Tensor, + useLocking: Bool = false + ) { + _RawTFEager.resourceSparseApplyAdadelta( + var_: var_, accum: accum, accumUpdate: accumUpdate, lr: lr, rho: rho, epsilon: epsilon, + grad: grad, indices: indices, useLocking: useLocking) + } + + /// Update relevant entries in '*var' and '*accum' according to the adagrad scheme. + /// + /// That is for rows we have grad for, we update var and accum as follows: + /// accum += grad * grad + /// var -= lr * grad * (1 / sqrt(accum)) + /// + /// - Parameters: + /// - var: Should be from a Variable(). + /// - accum: Should be from a Variable(). + /// - lr: Learning rate. Must be a scalar. + /// - grad: The gradient. + /// - indices: A vector of indices into the first dimension of var and accum. + /// + /// - Attr use_locking: If `True`, updating of the var and accum tensors will be protected + /// by a lock; otherwise the behavior is undefined, but may exhibit less + /// contention. 
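The two-line Adagrad rule in the comment above translates directly into code. Here is a minimal plain-Swift sketch (made-up values, with ordinary arrays in place of the `var` and `accum` resource variables) of the sparse step applied only to the rows named in `indices`:

```swift
// Illustration only: the sparse Adagrad rule from the comment above, applied
// to just the rows that have gradients.
var variable = [1.0, 1.0, 1.0, 1.0]
var accum = [0.1, 0.1, 0.1, 0.1]
let lr = 0.01
let indices = [0, 2]      // rows with gradients
let grad = [0.5, -0.25]   // one gradient row per index

for (row, g) in zip(indices, grad) {
  accum[row] += g * g                                // accum += grad * grad
  variable[row] -= lr * g / accum[row].squareRoot()  // var -= lr * grad * (1 / sqrt(accum))
}
```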
+ @inlinable @inline(__always) + public static func resourceSparseApplyAdagrad< + T: TensorFlowNumeric, + Tindices: TensorFlowIndex + >( + var_: ResourceHandle, + accum: ResourceHandle, + lr: Tensor, + grad: Tensor, + indices: Tensor, + useLocking: Bool = false, + updateSlots: Bool = true + ) { + _RawTFEager.resourceSparseApplyAdagrad( + var_: var_, accum: accum, lr: lr, grad: grad, indices: indices, useLocking: useLocking, + updateSlots: updateSlots) + } + + /// Update entries in '*var' and '*accum' according to the proximal adagrad scheme. + /// + /// - Parameters: + /// - var: Should be from a Variable(). + /// - gradient_accumulator: Should be from a Variable(). + /// - gradient_squared_accumulator: Should be from a Variable(). + /// - grad: The gradient. + /// - indices: A vector of indices into the first dimension of var and accum. + /// - lr: Learning rate. Must be a scalar. + /// - l1: L1 regularization. Must be a scalar. + /// - l2: L2 regularization. Must be a scalar. + /// - global_step: Training step number. Must be a scalar. + /// + /// - Attr use_locking: If True, updating of the var and accum tensors will be protected by + /// a lock; otherwise the behavior is undefined, but may exhibit less contention. + @inlinable @inline(__always) + public static func resourceSparseApplyAdagradDA< + T: TensorFlowNumeric, + Tindices: TensorFlowIndex + >( + var_: ResourceHandle, + gradientAccumulator: ResourceHandle, + gradientSquaredAccumulator: ResourceHandle, + grad: Tensor, + indices: Tensor, + lr: Tensor, + l1: Tensor, + l2: Tensor, + globalStep: Tensor, + useLocking: Bool = false + ) { + _RawTFEager.resourceSparseApplyAdagradDA( + var_: var_, gradientAccumulator: gradientAccumulator, + gradientSquaredAccumulator: gradientSquaredAccumulator, grad: grad, indices: indices, + lr: lr, l1: l1, l2: l2, globalStep: globalStep, useLocking: useLocking) + } + + /// Update relevant entries in '*var' and '*accum' according to the adagrad scheme. + /// + /// That is for rows we have grad for, we update var and accum as follows: + /// accum += grad * grad + /// var -= lr * grad * (1 / sqrt(accum)) + /// + /// - Parameters: + /// - var: Should be from a Variable(). + /// - accum: Should be from a Variable(). + /// - lr: Learning rate. Must be a scalar. + /// - epsilon: Constant factor. Must be a scalar. + /// - grad: The gradient. + /// - indices: A vector of indices into the first dimension of var and accum. + /// + /// - Attr use_locking: If `True`, updating of the var and accum tensors will be protected + /// by a lock; otherwise the behavior is undefined, but may exhibit less + /// contention. + @inlinable @inline(__always) + public static func resourceSparseApplyAdagradV2< + T: TensorFlowNumeric, + Tindices: TensorFlowIndex + >( + var_: ResourceHandle, + accum: ResourceHandle, + lr: Tensor, + epsilon: Tensor, + grad: Tensor, + indices: Tensor, + useLocking: Bool = false, + updateSlots: Bool = true + ) { + _RawTFEager.resourceSparseApplyAdagradV2( + var_: var_, accum: accum, lr: lr, epsilon: epsilon, grad: grad, indices: indices, + useLocking: useLocking, updateSlots: updateSlots) + } + + /// Update '*var' according to the centered RMSProp algorithm. + /// + /// The centered RMSProp algorithm uses an estimate of the centered second moment + /// (i.e., the variance) for normalization, as opposed to regular RMSProp, which + /// uses the (uncentered) second moment. This often helps with training, but is + /// slightly more expensive in terms of computation and memory. 
+ /// + /// Note that in dense implementation of this algorithm, mg, ms, and mom will + /// update even if the grad is zero, but in this sparse implementation, mg, ms, + /// and mom will not update in iterations during which the grad is zero. + /// + /// mean_square = decay * mean_square + (1-decay) * gradient ** 2 + /// mean_grad = decay * mean_grad + (1-decay) * gradient + /// Delta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2) + /// + /// ms <- rho * ms_{t-1} + (1-rho) * grad * grad + /// mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon) + /// var <- var - mom + /// + /// - Parameters: + /// - var: Should be from a Variable(). + /// - mg: Should be from a Variable(). + /// - ms: Should be from a Variable(). + /// - mom: Should be from a Variable(). + /// - lr: Scaling factor. Must be a scalar. + /// - rho: Decay rate. Must be a scalar. + /// - epsilon: Ridge term. Must be a scalar. + /// - grad: The gradient. + /// - indices: A vector of indices into the first dimension of var, ms and mom. + /// + /// - Attr use_locking: If `True`, updating of the var, mg, ms, and mom tensors is + /// protected by a lock; otherwise the behavior is undefined, but may exhibit less + /// contention. + @inlinable @inline(__always) + public static func resourceSparseApplyCenteredRMSProp< + T: TensorFlowNumeric, + Tindices: TensorFlowIndex + >( + var_: ResourceHandle, + mg: ResourceHandle, + ms: ResourceHandle, + mom: ResourceHandle, + lr: Tensor, + rho: Tensor, + momentum: Tensor, + epsilon: Tensor, + grad: Tensor, + indices: Tensor, + useLocking: Bool = false + ) { + _RawTFEager.resourceSparseApplyCenteredRMSProp( + var_: var_, mg: mg, ms: ms, mom: mom, lr: lr, rho: rho, momentum: momentum, + epsilon: epsilon, grad: grad, indices: indices, useLocking: useLocking) + } + + /// Update relevant entries in '*var' according to the Ftrl-proximal scheme. + /// + /// That is for rows we have grad for, we update var, accum and linear as follows: + /// accum_new = accum + grad * grad + /// linear += grad - (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var + /// quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2 + /// var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0 + /// accum = accum_new + /// + /// - Parameters: + /// - var: Should be from a Variable(). + /// - accum: Should be from a Variable(). + /// - linear: Should be from a Variable(). + /// - grad: The gradient. + /// - indices: A vector of indices into the first dimension of var and accum. + /// - lr: Scaling factor. Must be a scalar. + /// - l1: L1 regularization. Must be a scalar. + /// - l2: L2 regularization. Must be a scalar. + /// - lr_power: Scaling factor. Must be a scalar. + /// + /// - Attr use_locking: If `True`, updating of the var and accum tensors will be protected + /// by a lock; otherwise the behavior is undefined, but may exhibit less + /// contention. + @inlinable @inline(__always) + public static func resourceSparseApplyFtrl< + T: TensorFlowNumeric, + Tindices: TensorFlowIndex + >( + var_: ResourceHandle, + accum: ResourceHandle, + linear: ResourceHandle, + grad: Tensor, + indices: Tensor, + lr: Tensor, + l1: Tensor, + l2: Tensor, + lrPower: Tensor, + useLocking: Bool = false + ) { + _RawTFEager.resourceSparseApplyFtrl( + var_: var_, accum: accum, linear: linear, grad: grad, indices: indices, lr: lr, l1: l1, + l2: l2, lrPower: lrPower, useLocking: useLocking) + } + + /// Update relevant entries in '*var' according to the Ftrl-proximal scheme. 
+ /// + /// That is for rows we have grad for, we update var, accum and linear as follows: + /// grad_with_shrinkage = grad + 2 * l2_shrinkage * var + /// accum_new = accum + grad_with_shrinkage * grad_with_shrinkage + /// linear += grad_with_shrinkage + + /// (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var + /// quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2 + /// var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0 + /// accum = accum_new + /// + /// - Parameters: + /// - var: Should be from a Variable(). + /// - accum: Should be from a Variable(). + /// - linear: Should be from a Variable(). + /// - grad: The gradient. + /// - indices: A vector of indices into the first dimension of var and accum. + /// - lr: Scaling factor. Must be a scalar. + /// - l1: L1 regularization. Must be a scalar. + /// - l2: L2 shrinkage regularization. Must be a scalar. + /// - lr_power: Scaling factor. Must be a scalar. + /// + /// - Attr use_locking: If `True`, updating of the var and accum tensors will be protected + /// by a lock; otherwise the behavior is undefined, but may exhibit less + /// contention. + @inlinable @inline(__always) + public static func resourceSparseApplyFtrlV2< + T: TensorFlowNumeric, + Tindices: TensorFlowIndex + >( + var_: ResourceHandle, + accum: ResourceHandle, + linear: ResourceHandle, + grad: Tensor, + indices: Tensor, + lr: Tensor, + l1: Tensor, + l2: Tensor, + l2Shrinkage: Tensor, + lrPower: Tensor, + useLocking: Bool = false + ) { + _RawTFEager.resourceSparseApplyFtrlV2( + var_: var_, accum: accum, linear: linear, grad: grad, indices: indices, lr: lr, l1: l1, + l2: l2, l2Shrinkage: l2Shrinkage, lrPower: lrPower, useLocking: useLocking) + } + + /// Update relevant entries in '*var' and '*accum' according to the momentum scheme. + /// + /// Set use_nesterov = True if you want to use Nesterov momentum. + /// + /// That is for rows we have grad for, we update var and accum as follows: + /// + /// accum = accum * momentum - lr * grad + /// var += accum + /// + /// - Parameters: + /// - var: Should be from a Variable(). + /// - accum: Should be from a Variable(). + /// - lr: Learning rate. Must be a scalar. + /// - grad: The gradient. + /// - indices: A vector of indices into the first dimension of var and accum. + /// - momentum: Momentum. Must be a scalar. + /// + /// - Attrs: + /// - use_locking: If `True`, updating of the var and accum tensors will be protected + /// by a lock; otherwise the behavior is undefined, but may exhibit less + /// contention. + /// - use_nesterov: If `True`, the tensor passed to compute grad will be + /// var + momentum * accum, so in the end, the var you get is actually + /// var + momentum * accum. + @inlinable @inline(__always) + public static func resourceSparseApplyKerasMomentum< + T: TensorFlowNumeric, + Tindices: TensorFlowIndex + >( + var_: ResourceHandle, + accum: ResourceHandle, + lr: Tensor, + grad: Tensor, + indices: Tensor, + momentum: Tensor, + useLocking: Bool = false, + useNesterov: Bool = false + ) { + _RawTFEager.resourceSparseApplyKerasMomentum( + var_: var_, accum: accum, lr: lr, grad: grad, indices: indices, momentum: momentum, + useLocking: useLocking, useNesterov: useNesterov) + } + + /// Update relevant entries in '*var' and '*accum' according to the momentum scheme. + /// + /// Set use_nesterov = True if you want to use Nesterov momentum. 
+ /// + /// That is for rows we have grad for, we update var and accum as follows: + /// + /// accum = accum * momentum + grad + /// var -= lr * accum + /// + /// - Parameters: + /// - var: Should be from a Variable(). + /// - accum: Should be from a Variable(). + /// - lr: Learning rate. Must be a scalar. + /// - grad: The gradient. + /// - indices: A vector of indices into the first dimension of var and accum. + /// - momentum: Momentum. Must be a scalar. + /// + /// - Attrs: + /// - use_locking: If `True`, updating of the var and accum tensors will be protected + /// by a lock; otherwise the behavior is undefined, but may exhibit less + /// contention. + /// - use_nesterov: If `True`, the tensor passed to compute grad will be + /// var - lr * momentum * accum, so in the end, the var you get is actually + /// var - lr * momentum * accum. + @inlinable @inline(__always) + public static func resourceSparseApplyMomentum< + T: TensorFlowNumeric, + Tindices: TensorFlowIndex + >( + var_: ResourceHandle, + accum: ResourceHandle, + lr: Tensor, + grad: Tensor, + indices: Tensor, + momentum: Tensor, + useLocking: Bool = false, + useNesterov: Bool = false + ) { + _RawTFEager.resourceSparseApplyMomentum( + var_: var_, accum: accum, lr: lr, grad: grad, indices: indices, momentum: momentum, + useLocking: useLocking, useNesterov: useNesterov) + } + + /// Sparse update entries in '*var' and '*accum' according to FOBOS algorithm. + /// + /// That is for rows we have grad for, we update var and accum as follows: + /// accum += grad * grad + /// prox_v = var + /// prox_v -= lr * grad * (1 / sqrt(accum)) + /// var = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1,0} + /// + /// - Parameters: + /// - var: Should be from a Variable(). + /// - accum: Should be from a Variable(). + /// - lr: Learning rate. Must be a scalar. + /// - l1: L1 regularization. Must be a scalar. + /// - l2: L2 regularization. Must be a scalar. + /// - grad: The gradient. + /// - indices: A vector of indices into the first dimension of var and accum. + /// + /// - Attr use_locking: If True, updating of the var and accum tensors will be protected by + /// a lock; otherwise the behavior is undefined, but may exhibit less contention. + @inlinable @inline(__always) + public static func resourceSparseApplyProximalAdagrad< + T: TensorFlowNumeric, + Tindices: TensorFlowIndex + >( + var_: ResourceHandle, + accum: ResourceHandle, + lr: Tensor, + l1: Tensor, + l2: Tensor, + grad: Tensor, + indices: Tensor, + useLocking: Bool = false + ) { + _RawTFEager.resourceSparseApplyProximalAdagrad( + var_: var_, accum: accum, lr: lr, l1: l1, l2: l2, grad: grad, indices: indices, + useLocking: useLocking) + } + + /// Sparse update '*var' as FOBOS algorithm with fixed learning rate. + /// + /// That is for rows we have grad for, we update var as follows: + /// prox_v = var - alpha * grad + /// var = sign(prox_v)/(1+alpha*l2) * max{|prox_v|-alpha*l1,0} + /// + /// - Parameters: + /// - var: Should be from a Variable(). + /// - alpha: Scaling factor. Must be a scalar. + /// - l1: L1 regularization. Must be a scalar. + /// - l2: L2 regularization. Must be a scalar. + /// - grad: The gradient. + /// - indices: A vector of indices into the first dimension of var and accum. + /// + /// - Attr use_locking: If True, the subtraction will be protected by a lock; + /// otherwise the behavior is undefined, but may exhibit less contention. 
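The FOBOS update above packs a soft-thresholding step into one line, so a scalar sketch may help. `proximalStep` is a hypothetical helper written purely for illustration, following the formulas in the comment:

```swift
// Illustration only: one fixed-learning-rate proximal (FOBOS) step for a
// single element, following the formulas in the comment above.
func proximalStep(_ v: Double, grad: Double, alpha: Double, l1: Double, l2: Double) -> Double {
  let proxV = v - alpha * grad                  // prox_v = var - alpha * grad
  let sign: Double = proxV < 0 ? -1 : 1
  let shrunk = max(abs(proxV) - alpha * l1, 0)  // soft-threshold by alpha * l1
  return sign / (1 + alpha * l2) * shrunk       // sign(prox_v)/(1+alpha*l2) * max{|prox_v|-alpha*l1, 0}
}

print(proximalStep(0.5, grad: 2.0, alpha: 0.1, l1: 0.5, l2: 0.01))  // ≈ 0.24975
```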
+ @inlinable @inline(__always) + public static func resourceSparseApplyProximalGradientDescent< + T: TensorFlowNumeric, + Tindices: TensorFlowIndex + >( + var_: ResourceHandle, + alpha: Tensor, + l1: Tensor, + l2: Tensor, + grad: Tensor, + indices: Tensor, + useLocking: Bool = false + ) { + _RawTFEager.resourceSparseApplyProximalGradientDescent( + var_: var_, alpha: alpha, l1: l1, l2: l2, grad: grad, indices: indices, + useLocking: useLocking) + } + + /// Update '*var' according to the RMSProp algorithm. + /// + /// Note that in dense implementation of this algorithm, ms and mom will + /// update even if the grad is zero, but in this sparse implementation, ms + /// and mom will not update in iterations during which the grad is zero. + /// + /// mean_square = decay * mean_square + (1-decay) * gradient ** 2 + /// Delta = learning_rate * gradient / sqrt(mean_square + epsilon) + /// + /// ms <- rho * ms_{t-1} + (1-rho) * grad * grad + /// mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon) + /// var <- var - mom + /// + /// - Parameters: + /// - var: Should be from a Variable(). + /// - ms: Should be from a Variable(). + /// - mom: Should be from a Variable(). + /// - lr: Scaling factor. Must be a scalar. + /// - rho: Decay rate. Must be a scalar. + /// - epsilon: Ridge term. Must be a scalar. + /// - grad: The gradient. + /// - indices: A vector of indices into the first dimension of var, ms and mom. + /// + /// - Attr use_locking: If `True`, updating of the var, ms, and mom tensors is protected + /// by a lock; otherwise the behavior is undefined, but may exhibit less + /// contention. + @inlinable @inline(__always) + public static func resourceSparseApplyRMSProp< + T: TensorFlowNumeric, + Tindices: TensorFlowIndex + >( + var_: ResourceHandle, + ms: ResourceHandle, + mom: ResourceHandle, + lr: Tensor, + rho: Tensor, + momentum: Tensor, + epsilon: Tensor, + grad: Tensor, + indices: Tensor, + useLocking: Bool = false + ) { + _RawTFEager.resourceSparseApplyRMSProp( + var_: var_, ms: ms, mom: mom, lr: lr, rho: rho, momentum: momentum, epsilon: epsilon, + grad: grad, indices: indices, useLocking: useLocking) + } + + /// Assign `value` to the sliced l-value reference of `ref`. + /// + /// The values of `value` are assigned to the positions in the variable + /// `ref` that are selected by the slice parameters. The slice parameters + /// `begin, `end`, `strides`, etc. work exactly as in `StridedSlice`. + /// + /// NOTE this op currently does not support broadcasting and so `value`'s + /// shape must be exactly the shape produced by the slice of `ref`. + @inlinable @inline(__always) + public static func resourceStridedSliceAssign< + T: TensorFlowScalar, + Index: TensorFlowIndex + >( + ref: ResourceHandle, + begin: Tensor, + end: Tensor, + strides: Tensor, + value: Tensor, + beginMask: Int64 = 0, + endMask: Int64 = 0, + ellipsisMask: Int64 = 0, + newAxisMask: Int64 = 0, + shrinkAxisMask: Int64 = 0 + ) { + _RawTFEager.resourceStridedSliceAssign( + ref: ref, begin: begin, end: end, strides: strides, value: value, beginMask: beginMask, + endMask: endMask, ellipsisMask: ellipsisMask, newAxisMask: newAxisMask, + shrinkAxisMask: shrinkAxisMask) + } + + @inlinable @inline(__always) + public static func resourceUsingOp( + resource: ResourceHandle + ) { + _RawTFEager.resourceUsingOp(resource: resource) + } + + /// Restores a tensor from checkpoint files. + /// + /// Reads a tensor stored in one or several files. 
If there are several files (for + /// instance because a tensor was saved as slices), `file_pattern` may contain + /// wildcard symbols (`*` and `?`) in the filename portion only, not in the + /// directory portion. + /// + /// If a `file_pattern` matches several files, `preferred_shard` can be used to hint + /// in which file the requested tensor is likely to be found. This op will first + /// open the file at index `preferred_shard` in the list of matching files and try + /// to restore tensors from that file. Only if some tensors or tensor slices are + /// not found in that first file, then the Op opens all the files. Setting + /// `preferred_shard` to match the value passed as the `shard` input + /// of a matching `Save` Op may speed up Restore. This attribute only affects + /// performance, not correctness. The default value -1 means files are processed in + /// order. + /// + /// See also `RestoreSlice`. + /// + /// - Parameters: + /// - file_pattern: Must have a single element. The pattern of the files from + /// which we read the tensor. + /// - tensor_name: Must have a single element. The name of the tensor to be + /// restored. + /// + /// - Attrs: + /// - dt: The type of the tensor to be restored. + /// - preferred_shard: Index of file to open first if multiple files match + /// `file_pattern`. + /// + /// - Output tensor: The restored tensor. + @inlinable @inline(__always) + public static func restore( + filePattern: StringTensor, + tensorName: StringTensor, + preferredShard: Int64 = -1 + ) -> Tensor
{ + _RawTFEager.restore( + filePattern: filePattern, tensorName: tensorName, preferredShard: preferredShard) + } + + /// Restores a tensor from checkpoint files. + /// + /// This is like `Restore` except that restored tensor can be listed as filling + /// only a slice of a larger tensor. `shape_and_slice` specifies the shape of the + /// larger tensor and the slice that the restored tensor covers. + /// + /// The `shape_and_slice` input has the same format as the + /// elements of the `shapes_and_slices` input of the `SaveSlices` op. + /// + /// - Parameters: + /// - file_pattern: Must have a single element. The pattern of the files from + /// which we read the tensor. + /// - tensor_name: Must have a single element. The name of the tensor to be + /// restored. + /// - shape_and_slice: Scalar. The shapes and slice specifications to use when + /// restoring a tensors. + /// + /// - Attrs: + /// - dt: The type of the tensor to be restored. + /// - preferred_shard: Index of file to open first if multiple files match + /// `file_pattern`. See the documentation for `Restore`. + /// + /// - Output tensor: The restored tensor. + @inlinable @inline(__always) + public static func restoreSlice( + filePattern: StringTensor, + tensorName: StringTensor, + shapeAndSlice: StringTensor, + preferredShard: Int64 = -1 + ) -> Tensor
{ + _RawTFEager.restoreSlice( + filePattern: filePattern, tensorName: tensorName, shapeAndSlice: shapeAndSlice, + preferredShard: preferredShard) + } + + /// Restores tensors from a V2 checkpoint. + /// + /// For backward compatibility with the V1 format, this Op currently allows + /// restoring from a V1 checkpoint as well: + /// - This Op first attempts to find the V2 index file pointed to by "prefix", and + /// if found proceed to read it as a V2 checkpoint; + /// - Otherwise the V1 read path is invoked. + /// Relying on this behavior is not recommended, as the ability to fall back to read + /// V1 might be deprecated and eventually removed. + /// + /// By default, restores the named tensors in full. If the caller wishes to restore + /// specific slices of stored tensors, "shape_and_slices" should be non-empty + /// strings and correspondingly well-formed. + /// + /// Callers must ensure all the named tensors are indeed stored in the checkpoint. + /// + /// - Parameters: + /// - prefix: Must have a single element. The prefix of a V2 checkpoint. + /// - tensor_names: shape {N}. The names of the tensors to be restored. + /// - shape_and_slices: shape {N}. The slice specs of the tensors to be restored. + /// Empty strings indicate that they are non-partitioned tensors. + /// + /// - Attr dtypes: shape {N}. The list of expected dtype for the tensors. Must match + /// those stored in the checkpoint. + /// + /// - Output tensors: shape {N}. The restored tensors, whose shapes are read from the + /// checkpoint directly. + @inlinable @inline(__always) + public static func restoreV2( + prefix: StringTensor, + tensorNames: StringTensor, + shapeAndSlices: StringTensor + ) -> Dtypes { + _RawTFEager.restoreV2( + prefix: prefix, tensorNames: tensorNames, shapeAndSlices: shapeAndSlices) + } + + @inlinable @inline(__always) + public static func restrict( + _ a: Tensor + ) -> Tensor { + switch a.handle.backend { + case .XLA: + let output_device = a.device + let a = Tensor(copying: a, to: .defaultTFEager) + return Tensor(copying: _RawTFEager.restrict(a), to: output_device) + case .TF_EAGER: + return _RawTFEager.restrict(a) + } + + } + + @inlinable @inline(__always) + public static func restrict( + _ a: StringTensor + ) -> StringTensor { + _RawTFEager.restrict(a) + } + + /// Retrieve ADAM embedding parameters. + /// + /// An op that retrieves optimization parameters from embedding to host + /// memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up + /// the correct embedding table configuration. For example, this op is + /// used to retrieve updated parameters before saving a checkpoint. + /// + /// - Outputs: + /// - parameters: Parameter parameters updated by the ADAM optimization algorithm. + /// - momenta: Parameter momenta updated by the ADAM optimization algorithm. + /// - velocities: Parameter velocities updated by the ADAM optimization algorithm. + @inlinable @inline(__always) + public static func retrieveTPUEmbeddingADAMParameters( + tableId: Int64 = -1, + tableName: String, + numShards: Int64, + shardId: Int64, + config: String + ) -> (parameters: Tensor, momenta: Tensor, velocities: Tensor) { + _RawTFEager.retrieveTPUEmbeddingADAMParameters( + tableId: tableId, tableName: tableName, numShards: numShards, shardId: shardId, + config: config) + } + + /// Retrieve ADAM embedding parameters with debug support. + /// + /// An op that retrieves optimization parameters from embedding to host + /// memory. 
Must be preceded by a ConfigureTPUEmbeddingHost op that sets up + /// the correct embedding table configuration. For example, this op is + /// used to retrieve updated parameters before saving a checkpoint. + /// + /// - Outputs: + /// - parameters: Parameter parameters updated by the ADAM optimization algorithm. + /// - momenta: Parameter momenta updated by the ADAM optimization algorithm. + /// - velocities: Parameter velocities updated by the ADAM optimization algorithm. + /// - gradient_accumulators: Parameter gradient_accumulators updated by the ADAM optimization algorithm. + @inlinable @inline(__always) + public static func retrieveTPUEmbeddingADAMParametersGradAccumDebug( + tableId: Int64 = -1, + tableName: String, + numShards: Int64, + shardId: Int64, + config: String + ) -> ( + parameters: Tensor, momenta: Tensor, velocities: Tensor, + gradientAccumulators: Tensor + ) { + _RawTFEager.retrieveTPUEmbeddingADAMParametersGradAccumDebug( + tableId: tableId, tableName: tableName, numShards: numShards, shardId: shardId, + config: config) + } + + /// Retrieve Adadelta embedding parameters. + /// + /// An op that retrieves optimization parameters from embedding to host + /// memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up + /// the correct embedding table configuration. For example, this op is + /// used to retrieve updated parameters before saving a checkpoint. + /// + /// - Outputs: + /// - parameters: Parameter parameters updated by the Adadelta optimization algorithm. + /// - accumulators: Parameter accumulators updated by the Adadelta optimization algorithm. + /// - updates: Parameter updates updated by the Adadelta optimization algorithm. + @inlinable @inline(__always) + public static func retrieveTPUEmbeddingAdadeltaParameters( + tableId: Int64 = -1, + tableName: String, + numShards: Int64, + shardId: Int64, + config: String + ) -> (parameters: Tensor, accumulators: Tensor, updates: Tensor) { + _RawTFEager.retrieveTPUEmbeddingAdadeltaParameters( + tableId: tableId, tableName: tableName, numShards: numShards, shardId: shardId, + config: config) + } + + /// Retrieve Adadelta embedding parameters with debug support. + /// + /// An op that retrieves optimization parameters from embedding to host + /// memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up + /// the correct embedding table configuration. For example, this op is + /// used to retrieve updated parameters before saving a checkpoint. + /// + /// - Outputs: + /// - parameters: Parameter parameters updated by the Adadelta optimization algorithm. + /// - accumulators: Parameter accumulators updated by the Adadelta optimization algorithm. + /// - updates: Parameter updates updated by the Adadelta optimization algorithm. + /// - gradient_accumulators: Parameter gradient_accumulators updated by the Adadelta optimization algorithm. + @inlinable @inline(__always) + public static func retrieveTPUEmbeddingAdadeltaParametersGradAccumDebug( + tableId: Int64 = -1, + tableName: String, + numShards: Int64, + shardId: Int64, + config: String + ) -> ( + parameters: Tensor, accumulators: Tensor, updates: Tensor, + gradientAccumulators: Tensor + ) { + _RawTFEager.retrieveTPUEmbeddingAdadeltaParametersGradAccumDebug( + tableId: tableId, tableName: tableName, numShards: numShards, shardId: shardId, + config: config) + } + + /// Retrieve Adagrad embedding parameters. + /// + /// An op that retrieves optimization parameters from embedding to host + /// memory. 
Must be preceded by a ConfigureTPUEmbeddingHost op that sets up + /// the correct embedding table configuration. For example, this op is + /// used to retrieve updated parameters before saving a checkpoint. + /// + /// - Outputs: + /// - parameters: Parameter parameters updated by the Adagrad optimization algorithm. + /// - accumulators: Parameter accumulators updated by the Adagrad optimization algorithm. + @inlinable @inline(__always) + public static func retrieveTPUEmbeddingAdagradParameters( + tableId: Int64 = -1, + tableName: String, + numShards: Int64, + shardId: Int64, + config: String + ) -> (parameters: Tensor, accumulators: Tensor) { + _RawTFEager.retrieveTPUEmbeddingAdagradParameters( + tableId: tableId, tableName: tableName, numShards: numShards, shardId: shardId, + config: config) + } + + /// Retrieve Adagrad embedding parameters with debug support. + /// + /// An op that retrieves optimization parameters from embedding to host + /// memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up + /// the correct embedding table configuration. For example, this op is + /// used to retrieve updated parameters before saving a checkpoint. + /// + /// - Outputs: + /// - parameters: Parameter parameters updated by the Adagrad optimization algorithm. + /// - accumulators: Parameter accumulators updated by the Adagrad optimization algorithm. + /// - gradient_accumulators: Parameter gradient_accumulators updated by the Adagrad optimization algorithm. + @inlinable @inline(__always) + public static func retrieveTPUEmbeddingAdagradParametersGradAccumDebug( + tableId: Int64 = -1, + tableName: String, + numShards: Int64, + shardId: Int64, + config: String + ) -> ( + parameters: Tensor, accumulators: Tensor, gradientAccumulators: Tensor + ) { + _RawTFEager.retrieveTPUEmbeddingAdagradParametersGradAccumDebug( + tableId: tableId, tableName: tableName, numShards: numShards, shardId: shardId, + config: config) + } + + /// Retrieve centered RMSProp embedding parameters. + /// + /// An op that retrieves optimization parameters from embedding to host + /// memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up + /// the correct embedding table configuration. For example, this op is + /// used to retrieve updated parameters before saving a checkpoint. + /// + /// - Outputs: + /// - parameters: Parameter parameters updated by the centered RMSProp optimization algorithm. + /// - ms: Parameter ms updated by the centered RMSProp optimization algorithm. + /// - mom: Parameter mom updated by the centered RMSProp optimization algorithm. + /// - mg: Parameter mg updated by the centered RMSProp optimization algorithm. + @inlinable @inline(__always) + public static func retrieveTPUEmbeddingCenteredRMSPropParameters( + tableId: Int64 = -1, + tableName: String, + numShards: Int64, + shardId: Int64, + config: String + ) -> (parameters: Tensor, ms: Tensor, mom: Tensor, mg: Tensor) { + _RawTFEager.retrieveTPUEmbeddingCenteredRMSPropParameters( + tableId: tableId, tableName: tableName, numShards: numShards, shardId: shardId, + config: config) + } + + /// Retrieve FTRL embedding parameters. + /// + /// An op that retrieves optimization parameters from embedding to host + /// memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up + /// the correct embedding table configuration. For example, this op is + /// used to retrieve updated parameters before saving a checkpoint. 
+ /// + /// - Outputs: + /// - parameters: Parameter parameters updated by the FTRL optimization algorithm. + /// - accumulators: Parameter accumulators updated by the FTRL optimization algorithm. + /// - linears: Parameter linears updated by the FTRL optimization algorithm. + @inlinable @inline(__always) + public static func retrieveTPUEmbeddingFTRLParameters( + tableId: Int64 = -1, + tableName: String, + numShards: Int64, + shardId: Int64, + config: String + ) -> (parameters: Tensor, accumulators: Tensor, linears: Tensor) { + _RawTFEager.retrieveTPUEmbeddingFTRLParameters( + tableId: tableId, tableName: tableName, numShards: numShards, shardId: shardId, + config: config) + } + + /// Retrieve FTRL embedding parameters with debug support. + /// + /// An op that retrieves optimization parameters from embedding to host + /// memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up + /// the correct embedding table configuration. For example, this op is + /// used to retrieve updated parameters before saving a checkpoint. + /// + /// - Outputs: + /// - parameters: Parameter parameters updated by the FTRL optimization algorithm. + /// - accumulators: Parameter accumulators updated by the FTRL optimization algorithm. + /// - linears: Parameter linears updated by the FTRL optimization algorithm. + /// - gradient_accumulators: Parameter gradient_accumulators updated by the FTRL optimization algorithm. + @inlinable @inline(__always) + public static func retrieveTPUEmbeddingFTRLParametersGradAccumDebug( + tableId: Int64 = -1, + tableName: String, + numShards: Int64, + shardId: Int64, + config: String + ) -> ( + parameters: Tensor, accumulators: Tensor, linears: Tensor, + gradientAccumulators: Tensor + ) { + _RawTFEager.retrieveTPUEmbeddingFTRLParametersGradAccumDebug( + tableId: tableId, tableName: tableName, numShards: numShards, shardId: shardId, + config: config) + } + + /// Retrieve MDL Adagrad Light embedding parameters. + /// + /// An op that retrieves optimization parameters from embedding to host + /// memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up + /// the correct embedding table configuration. For example, this op is + /// used to retrieve updated parameters before saving a checkpoint. + /// + /// - Outputs: + /// - parameters: Parameter parameters updated by the MDL Adagrad Light optimization algorithm. + /// - accumulators: Parameter accumulators updated by the MDL Adagrad Light optimization algorithm. + /// - weights: Parameter weights updated by the MDL Adagrad Light optimization algorithm. + /// - benefits: Parameter benefits updated by the MDL Adagrad Light optimization algorithm. + @inlinable @inline(__always) + public static func retrieveTPUEmbeddingMDLAdagradLightParameters( + tableId: Int64 = -1, + tableName: String, + numShards: Int64, + shardId: Int64, + config: String + ) -> ( + parameters: Tensor, accumulators: Tensor, weights: Tensor, + benefits: Tensor + ) { + _RawTFEager.retrieveTPUEmbeddingMDLAdagradLightParameters( + tableId: tableId, tableName: tableName, numShards: numShards, shardId: shardId, + config: config) + } + + /// Retrieve Momentum embedding parameters. + /// + /// An op that retrieves optimization parameters from embedding to host + /// memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up + /// the correct embedding table configuration. For example, this op is + /// used to retrieve updated parameters before saving a checkpoint. 
+ /// + /// - Outputs: + /// - parameters: Parameter parameters updated by the Momentum optimization algorithm. + /// - momenta: Parameter momenta updated by the Momentum optimization algorithm. + @inlinable @inline(__always) + public static func retrieveTPUEmbeddingMomentumParameters( + tableId: Int64 = -1, + tableName: String, + numShards: Int64, + shardId: Int64, + config: String + ) -> (parameters: Tensor, momenta: Tensor) { + _RawTFEager.retrieveTPUEmbeddingMomentumParameters( + tableId: tableId, tableName: tableName, numShards: numShards, shardId: shardId, + config: config) + } + + /// Retrieve Momentum embedding parameters with debug support. + /// + /// An op that retrieves optimization parameters from embedding to host + /// memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up + /// the correct embedding table configuration. For example, this op is + /// used to retrieve updated parameters before saving a checkpoint. + /// + /// - Outputs: + /// - parameters: Parameter parameters updated by the Momentum optimization algorithm. + /// - momenta: Parameter momenta updated by the Momentum optimization algorithm. + /// - gradient_accumulators: Parameter gradient_accumulators updated by the Momentum optimization algorithm. + @inlinable @inline(__always) + public static func retrieveTPUEmbeddingMomentumParametersGradAccumDebug( + tableId: Int64 = -1, + tableName: String, + numShards: Int64, + shardId: Int64, + config: String + ) -> (parameters: Tensor, momenta: Tensor, gradientAccumulators: Tensor) { + _RawTFEager.retrieveTPUEmbeddingMomentumParametersGradAccumDebug( + tableId: tableId, tableName: tableName, numShards: numShards, shardId: shardId, + config: config) + } + + /// Retrieve proximal Adagrad embedding parameters. + /// + /// An op that retrieves optimization parameters from embedding to host + /// memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up + /// the correct embedding table configuration. For example, this op is + /// used to retrieve updated parameters before saving a checkpoint. + /// + /// - Outputs: + /// - parameters: Parameter parameters updated by the proximal Adagrad optimization algorithm. + /// - accumulators: Parameter accumulators updated by the proximal Adagrad optimization algorithm. + @inlinable @inline(__always) + public static func retrieveTPUEmbeddingProximalAdagradParameters( + tableId: Int64 = -1, + tableName: String, + numShards: Int64, + shardId: Int64, + config: String + ) -> (parameters: Tensor, accumulators: Tensor) { + _RawTFEager.retrieveTPUEmbeddingProximalAdagradParameters( + tableId: tableId, tableName: tableName, numShards: numShards, shardId: shardId, + config: config) + } + + /// Retrieve proximal Adagrad embedding parameters with debug support. + /// + /// An op that retrieves optimization parameters from embedding to host + /// memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up + /// the correct embedding table configuration. For example, this op is + /// used to retrieve updated parameters before saving a checkpoint. + /// + /// - Outputs: + /// - parameters: Parameter parameters updated by the proximal Adagrad optimization algorithm. + /// - accumulators: Parameter accumulators updated by the proximal Adagrad optimization algorithm. + /// - gradient_accumulators: Parameter gradient_accumulators updated by the proximal Adagrad optimization algorithm. 
+ @inlinable @inline(__always) + public static func retrieveTPUEmbeddingProximalAdagradParametersGradAccumDebug( + tableId: Int64 = -1, + tableName: String, + numShards: Int64, + shardId: Int64, + config: String + ) -> ( + parameters: Tensor, accumulators: Tensor, gradientAccumulators: Tensor + ) { + _RawTFEager.retrieveTPUEmbeddingProximalAdagradParametersGradAccumDebug( + tableId: tableId, tableName: tableName, numShards: numShards, shardId: shardId, + config: config) + } + + /// Retrieve RMSProp embedding parameters. + /// + /// An op that retrieves optimization parameters from embedding to host + /// memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up + /// the correct embedding table configuration. For example, this op is + /// used to retrieve updated parameters before saving a checkpoint. + /// + /// - Outputs: + /// - parameters: Parameter parameters updated by the RMSProp optimization algorithm. + /// - ms: Parameter ms updated by the RMSProp optimization algorithm. + /// - mom: Parameter mom updated by the RMSProp optimization algorithm. + @inlinable @inline(__always) + public static func retrieveTPUEmbeddingRMSPropParameters( + tableId: Int64 = -1, + tableName: String, + numShards: Int64, + shardId: Int64, + config: String + ) -> (parameters: Tensor, ms: Tensor, mom: Tensor) { + _RawTFEager.retrieveTPUEmbeddingRMSPropParameters( + tableId: tableId, tableName: tableName, numShards: numShards, shardId: shardId, + config: config) + } + + /// Retrieve RMSProp embedding parameters with debug support. + /// + /// An op that retrieves optimization parameters from embedding to host + /// memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up + /// the correct embedding table configuration. For example, this op is + /// used to retrieve updated parameters before saving a checkpoint. + /// + /// - Outputs: + /// - parameters: Parameter parameters updated by the RMSProp optimization algorithm. + /// - ms: Parameter ms updated by the RMSProp optimization algorithm. + /// - mom: Parameter mom updated by the RMSProp optimization algorithm. + /// - gradient_accumulators: Parameter gradient_accumulators updated by the RMSProp optimization algorithm. + @inlinable @inline(__always) + public static func retrieveTPUEmbeddingRMSPropParametersGradAccumDebug( + tableId: Int64 = -1, + tableName: String, + numShards: Int64, + shardId: Int64, + config: String + ) -> ( + parameters: Tensor, ms: Tensor, mom: Tensor, + gradientAccumulators: Tensor + ) { + _RawTFEager.retrieveTPUEmbeddingRMSPropParametersGradAccumDebug( + tableId: tableId, tableName: tableName, numShards: numShards, shardId: shardId, + config: config) + } + + /// Retrieve SGD embedding parameters. + /// + /// An op that retrieves optimization parameters from embedding to host + /// memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up + /// the correct embedding table configuration. For example, this op is + /// used to retrieve updated parameters before saving a checkpoint. + /// + /// - Output parameters: Parameter parameters updated by the stochastic gradient descent optimization algorithm. 
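All of these retrieval wrappers have the same shape: attribute-only inputs, a tensor (or tuple of tensors) out, and eager-only dispatch. A hypothetical call site for the SGD variant follows; the table name and shard layout are invented, and a TPU embedding system already configured by ConfigureTPUEmbeddingHost is assumed, so treat this as a sketch rather than runnable code:

```swift
import TensorFlow

// Hypothetical usage sketch: assumes an already-configured TPU embedding
// system; "embedding/table0" and the shard layout are invented.
let parameters: Tensor<Float> = _Raw.retrieveTPUEmbeddingStochasticGradientDescentParameters(
  tableName: "embedding/table0",
  numShards: 4,
  shardId: 0,
  config: "")
```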
+ @inlinable @inline(__always) + public static func retrieveTPUEmbeddingStochasticGradientDescentParameters( + tableId: Int64 = -1, + tableName: String, + numShards: Int64, + shardId: Int64, + config: String + ) -> Tensor { + _RawTFEager.retrieveTPUEmbeddingStochasticGradientDescentParameters( + tableId: tableId, tableName: tableName, numShards: numShards, shardId: shardId, + config: config) + } + + /// Reverses specific dimensions of a tensor. + /// + /// Given a `tensor`, and a `bool` tensor `dims` representing the dimensions + /// of `tensor`, this operation reverses each dimension i of `tensor` where + /// `dims[i]` is `True`. + /// + /// `tensor` can have up to 8 dimensions. The number of dimensions + /// of `tensor` must equal the number of elements in `dims`. In other words: + /// + /// `rank(tensor) = size(dims)` + /// + /// For example: + /// + /// ``` + /// # tensor 't' is [[[[ 0, 1, 2, 3], + /// # [ 4, 5, 6, 7], + /// # [ 8, 9, 10, 11]], + /// # [[12, 13, 14, 15], + /// # [16, 17, 18, 19], + /// # [20, 21, 22, 23]]]] + /// # tensor 't' shape is [1, 2, 3, 4] + /// + /// # 'dims' is [False, False, False, True] + /// reverse(t, dims) ==> [[[[ 3, 2, 1, 0], + /// [ 7, 6, 5, 4], + /// [ 11, 10, 9, 8]], + /// [[15, 14, 13, 12], + /// [19, 18, 17, 16], + /// [23, 22, 21, 20]]]] + /// + /// # 'dims' is [False, True, False, False] + /// reverse(t, dims) ==> [[[[12, 13, 14, 15], + /// [16, 17, 18, 19], + /// [20, 21, 22, 23] + /// [[ 0, 1, 2, 3], + /// [ 4, 5, 6, 7], + /// [ 8, 9, 10, 11]]]] + /// + /// # 'dims' is [False, False, True, False] + /// reverse(t, dims) ==> [[[[8, 9, 10, 11], + /// [4, 5, 6, 7], + /// [0, 1, 2, 3]] + /// [[20, 21, 22, 23], + /// [16, 17, 18, 19], + /// [12, 13, 14, 15]]]] + /// ``` + /// + /// - Parameters: + /// - tensor: Up to 8-D. + /// - dims: 1-D. The dimensions to reverse. + /// + /// - Output output: The same shape as `tensor`. + @inlinable @inline(__always) + public static func reverse( + _ tensor: Tensor, + dims: Tensor + ) -> Tensor { + switch commonBackend(tensor.handle.backend, dims.handle.backend) { + case .XLA: + let output_device = dims.device + let tensor = Tensor(copying: tensor, to: .defaultTFEager) + let dims = Tensor(copying: dims, to: .defaultTFEager) + return Tensor(copying: _RawTFEager.reverse(tensor, dims: dims), to: output_device) + case .TF_EAGER: + return _RawTFEager.reverse(tensor, dims: dims) + } + + } + + /// Reverses specific dimensions of a tensor. + /// + /// Given a `tensor`, and a `bool` tensor `dims` representing the dimensions + /// of `tensor`, this operation reverses each dimension i of `tensor` where + /// `dims[i]` is `True`. + /// + /// `tensor` can have up to 8 dimensions. The number of dimensions + /// of `tensor` must equal the number of elements in `dims`. 
In other words: + /// + /// `rank(tensor) = size(dims)` + /// + /// For example: + /// + /// ``` + /// # tensor 't' is [[[[ 0, 1, 2, 3], + /// # [ 4, 5, 6, 7], + /// # [ 8, 9, 10, 11]], + /// # [[12, 13, 14, 15], + /// # [16, 17, 18, 19], + /// # [20, 21, 22, 23]]]] + /// # tensor 't' shape is [1, 2, 3, 4] + /// + /// # 'dims' is [False, False, False, True] + /// reverse(t, dims) ==> [[[[ 3, 2, 1, 0], + /// [ 7, 6, 5, 4], + /// [ 11, 10, 9, 8]], + /// [[15, 14, 13, 12], + /// [19, 18, 17, 16], + /// [23, 22, 21, 20]]]] + /// + /// # 'dims' is [False, True, False, False] + /// reverse(t, dims) ==> [[[[12, 13, 14, 15], + /// [16, 17, 18, 19], + /// [20, 21, 22, 23] + /// [[ 0, 1, 2, 3], + /// [ 4, 5, 6, 7], + /// [ 8, 9, 10, 11]]]] + /// + /// # 'dims' is [False, False, True, False] + /// reverse(t, dims) ==> [[[[8, 9, 10, 11], + /// [4, 5, 6, 7], + /// [0, 1, 2, 3]] + /// [[20, 21, 22, 23], + /// [16, 17, 18, 19], + /// [12, 13, 14, 15]]]] + /// ``` + /// + /// - Parameters: + /// - tensor: Up to 8-D. + /// - dims: 1-D. The dimensions to reverse. + /// + /// - Output output: The same shape as `tensor`. + @inlinable @inline(__always) + public static func reverse( + _ tensor: StringTensor, + dims: Tensor + ) -> StringTensor { + _RawTFEager.reverse(tensor, dims: dims) + } + + /// Reverses variable length slices. + /// + /// This op first slices `input` along the dimension `batch_dim`, and for each + /// slice `i`, reverses the first `seq_lengths[i]` elements along + /// the dimension `seq_dim`. + /// + /// The elements of `seq_lengths` must obey `seq_lengths[i] <= input.dims[seq_dim]`, + /// and `seq_lengths` must be a vector of length `input.dims[batch_dim]`. + /// + /// The output slice `i` along dimension `batch_dim` is then given by input + /// slice `i`, with the first `seq_lengths[i]` slices along dimension + /// `seq_dim` reversed. + /// + /// For example: + /// + /// ``` + /// # Given this: + /// batch_dim = 0 + /// seq_dim = 1 + /// input.dims = (4, 8, ...) + /// seq_lengths = [7, 2, 3, 5] + /// + /// # then slices of input are reversed on seq_dim, but only up to seq_lengths: + /// output[0, 0:7, :, ...] = input[0, 7:0:-1, :, ...] + /// output[1, 0:2, :, ...] = input[1, 2:0:-1, :, ...] + /// output[2, 0:3, :, ...] = input[2, 3:0:-1, :, ...] + /// output[3, 0:5, :, ...] = input[3, 5:0:-1, :, ...] + /// + /// # while entries past seq_lens are copied through: + /// output[0, 7:, :, ...] = input[0, 7:, :, ...] + /// output[1, 2:, :, ...] = input[1, 2:, :, ...] + /// output[2, 3:, :, ...] = input[2, 3:, :, ...] + /// output[3, 2:, :, ...] = input[3, 2:, :, ...] + /// ``` + /// + /// In contrast, if: + /// + /// ``` + /// # Given this: + /// batch_dim = 2 + /// seq_dim = 0 + /// input.dims = (8, ?, 4, ...) + /// seq_lengths = [7, 2, 3, 5] + /// + /// # then slices of input are reversed on seq_dim, but only up to seq_lengths: + /// output[0:7, :, 0, :, ...] = input[7:0:-1, :, 0, :, ...] + /// output[0:2, :, 1, :, ...] = input[2:0:-1, :, 1, :, ...] + /// output[0:3, :, 2, :, ...] = input[3:0:-1, :, 2, :, ...] + /// output[0:5, :, 3, :, ...] = input[5:0:-1, :, 3, :, ...] + /// + /// # while entries past seq_lens are copied through: + /// output[7:, :, 0, :, ...] = input[7:, :, 0, :, ...] + /// output[2:, :, 1, :, ...] = input[2:, :, 1, :, ...] + /// output[3:, :, 2, :, ...] = input[3:, :, 2, :, ...] + /// output[2:, :, 3, :, ...] = input[2:, :, 3, :, ...] + /// ``` + /// + /// - Parameters: + /// - input: The input to reverse. 
+ /// - seq_lengths: 1-D with length `input.dims(batch_dim)` and + /// `max(seq_lengths) <= input.dims(seq_dim)` + /// + /// - Attrs: + /// - seq_dim: The dimension which is partially reversed. + /// - batch_dim: The dimension along which reversal is performed. + /// + /// - Output output: The partially reversed input. It has the same shape as `input`. + @inlinable @inline(__always) + public static func reverseSequence< + T: TensorFlowScalar, + Tlen: TensorFlowIndex + >( + _ input: Tensor, + seqLengths: Tensor, + seqDim: Int64, + batchDim: Int64 = 0 + ) -> Tensor { + switch commonBackend(input.handle.backend, seqLengths.handle.backend) { + case .XLA: + let output_device = seqLengths.device + let input = Tensor(copying: input, to: .defaultTFEager) + let seqLengths = Tensor(copying: seqLengths, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.reverseSequence( + input, seqLengths: seqLengths, seqDim: seqDim, batchDim: batchDim), to: output_device) + case .TF_EAGER: + return _RawTFEager.reverseSequence( + input, seqLengths: seqLengths, seqDim: seqDim, batchDim: batchDim) + } + + } + + /// Reverses specific dimensions of a tensor. + /// + /// NOTE `tf.reverse` has now changed behavior in preparation for 1.0. + /// `tf.reverse_v2` is currently an alias that will be deprecated before TF 1.0. + /// + /// Given a `tensor`, and a `int32` tensor `axis` representing the set of + /// dimensions of `tensor` to reverse. This operation reverses each dimension + /// `i` for which there exists `j` s.t. `axis[j] == i`. + /// + /// `tensor` can have up to 8 dimensions. The number of dimensions specified + /// in `axis` may be 0 or more entries. If an index is specified more than + /// once, a InvalidArgument error is raised. + /// + /// For example: + /// + /// ``` + /// # tensor 't' is [[[[ 0, 1, 2, 3], + /// # [ 4, 5, 6, 7], + /// # [ 8, 9, 10, 11]], + /// # [[12, 13, 14, 15], + /// # [16, 17, 18, 19], + /// # [20, 21, 22, 23]]]] + /// # tensor 't' shape is [1, 2, 3, 4] + /// + /// # 'dims' is [3] or 'dims' is [-1] + /// reverse(t, dims) ==> [[[[ 3, 2, 1, 0], + /// [ 7, 6, 5, 4], + /// [ 11, 10, 9, 8]], + /// [[15, 14, 13, 12], + /// [19, 18, 17, 16], + /// [23, 22, 21, 20]]]] + /// + /// # 'dims' is '[1]' (or 'dims' is '[-3]') + /// reverse(t, dims) ==> [[[[12, 13, 14, 15], + /// [16, 17, 18, 19], + /// [20, 21, 22, 23] + /// [[ 0, 1, 2, 3], + /// [ 4, 5, 6, 7], + /// [ 8, 9, 10, 11]]]] + /// + /// # 'dims' is '[2]' (or 'dims' is '[-2]') + /// reverse(t, dims) ==> [[[[8, 9, 10, 11], + /// [4, 5, 6, 7], + /// [0, 1, 2, 3]] + /// [[20, 21, 22, 23], + /// [16, 17, 18, 19], + /// [12, 13, 14, 15]]]] + /// ``` + /// + /// - Parameters: + /// - tensor: Up to 8-D. + /// - axis: 1-D. The indices of the dimensions to reverse. Must be in the range + /// `[-rank(tensor), rank(tensor))`. + /// + /// - Output output: The same shape as `tensor`. + @inlinable @inline(__always) + public static func reverseV2< + Tidx: TensorFlowIndex, + T: TensorFlowScalar + >( + _ tensor: Tensor, + axis: Tensor + ) -> Tensor { + switch commonBackend(tensor.handle.backend, axis.handle.backend) { + case .XLA: + return _RawXLA.reverseV2(tensor, axis: axis) + case .TF_EAGER: + return _RawTFEager.reverseV2(tensor, axis: axis) + } + + } + + /// Reverses specific dimensions of a tensor. + /// + /// NOTE `tf.reverse` has now changed behavior in preparation for 1.0. + /// `tf.reverse_v2` is currently an alias that will be deprecated before TF 1.0. 
+  ///
+  /// Given a `tensor` and an `int32` tensor `axis` representing the set of
+  /// dimensions of `tensor` to reverse, this operation reverses each dimension
+  /// `i` for which there exists `j` s.t. `axis[j] == i`.
+  ///
+  /// `tensor` can have up to 8 dimensions. `axis` may contain 0 or more
+  /// entries. If an index is specified more than once, an InvalidArgument
+  /// error is raised.
+  ///
+  /// For example:
+  ///
+  /// ```
+  /// # tensor 't' is [[[[ 0, 1, 2, 3],
+  /// #                  [ 4, 5, 6, 7],
+  /// #                  [ 8, 9, 10, 11]],
+  /// #                 [[12, 13, 14, 15],
+  /// #                  [16, 17, 18, 19],
+  /// #                  [20, 21, 22, 23]]]]
+  /// # tensor 't' shape is [1, 2, 3, 4]
+  ///
+  /// # 'dims' is [3] or 'dims' is [-1]
+  /// reverse(t, dims) ==> [[[[ 3, 2, 1, 0],
+  ///                         [ 7, 6, 5, 4],
+  ///                         [11, 10, 9, 8]],
+  ///                        [[15, 14, 13, 12],
+  ///                         [19, 18, 17, 16],
+  ///                         [23, 22, 21, 20]]]]
+  ///
+  /// # 'dims' is '[1]' (or 'dims' is '[-3]')
+  /// reverse(t, dims) ==> [[[[12, 13, 14, 15],
+  ///                         [16, 17, 18, 19],
+  ///                         [20, 21, 22, 23]],
+  ///                        [[ 0, 1, 2, 3],
+  ///                         [ 4, 5, 6, 7],
+  ///                         [ 8, 9, 10, 11]]]]
+  ///
+  /// # 'dims' is '[2]' (or 'dims' is '[-2]')
+  /// reverse(t, dims) ==> [[[[8, 9, 10, 11],
+  ///                         [4, 5, 6, 7],
+  ///                         [0, 1, 2, 3]],
+  ///                        [[20, 21, 22, 23],
+  ///                         [16, 17, 18, 19],
+  ///                         [12, 13, 14, 15]]]]
+  /// ```
+  ///
+  /// - Parameters:
+  ///   - tensor: Up to 8-D.
+  ///   - axis: 1-D. The indices of the dimensions to reverse. Must be in the range
+  ///     `[-rank(tensor), rank(tensor))`.
+  ///
+  /// - Output output: The same shape as `tensor`.
+  @inlinable @inline(__always)
+  public static func reverseV2<Tidx: TensorFlowIndex>(
+    _ tensor: StringTensor,
+    axis: Tensor<Tidx>
+  ) -> StringTensor {
+    _RawTFEager.reverseV2(tensor, axis: axis)
+  }
+
+  /// Elementwise computes the bitwise right-shift of `x` and `y`.
+  ///
+  /// Performs a logical shift for unsigned integer types, and an arithmetic shift
+  /// for signed integer types.
+  ///
+  /// If `y` is negative, or greater than or equal to the width of `x` in bits,
+  /// the result is implementation defined.
+  ///
+  /// Example:
+  ///
+  /// ```python
+  /// import tensorflow as tf
+  /// from tensorflow.python.ops import bitwise_ops
+  /// import numpy as np
+  /// dtype_list = [tf.int8, tf.int16, tf.int32, tf.int64]
+  ///
+  /// for dtype in dtype_list:
+  ///   lhs = tf.constant([-1, -5, -3, -14], dtype=dtype)
+  ///   rhs = tf.constant([5, 0, 7, 11], dtype=dtype)
+  ///
+  ///   right_shift_result = bitwise_ops.right_shift(lhs, rhs)
+  ///
+  ///   print(right_shift_result)
+  ///
+  /// # This will print:
+  /// # tf.Tensor([-1 -5 -1 -1], shape=(4,), dtype=int8)
+  /// # tf.Tensor([-1 -5 -1 -1], shape=(4,), dtype=int16)
+  /// # tf.Tensor([-1 -5 -1 -1], shape=(4,), dtype=int32)
+  /// # tf.Tensor([-1 -5 -1 -1], shape=(4,), dtype=int64)
+  ///
+  /// lhs = np.array([-2, 64, 101, 32], dtype=np.int8)
+  /// rhs = np.array([-1, -5, -3, -14], dtype=np.int8)
+  /// bitwise_ops.right_shift(lhs, rhs)
+  /// # <tf.Tensor: shape=(4,), dtype=int8, numpy=array([-2, 64, 101, 32], dtype=int8)>
+  /// ```
+  ///
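+  /// A minimal Swift sketch of the wrapper below (an illustrative addition,
+  /// not part of the upstream op documentation), mirroring the tensors in
+  /// the Python example above:
+  ///
+  /// ```swift
+  /// let lhs = Tensor<Int32>([-1, -5, -3, -14])
+  /// let rhs = Tensor<Int32>([5, 0, 7, 11])
+  /// let shifted = _Raw.rightShift(lhs, rhs)
+  /// // shifted == [-1, -5, -1, -1]  (arithmetic shift for signed types)
+  /// ```
+  ///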
+  @inlinable @inline(__always)
+  public static func rightShift<T: TensorFlowInteger>(
+    _ x: Tensor<T>,
+    _ y: Tensor<T>
+  ) -> Tensor<T> {
+    switch commonBackend(x.handle.backend, y.handle.backend) {
+    case .XLA:
+      let output_device = y.device
+      let x = Tensor(copying: x, to: .defaultTFEager)
+      let y = Tensor(copying: y, to: .defaultTFEager)
+      return Tensor(copying: _RawTFEager.rightShift(x, y), to: output_device)
+    case .TF_EAGER:
+      return _RawTFEager.rightShift(x, y)
+    }
+
+  }
+
+  /// Returns element-wise integer closest to x.
+  ///
+  /// If the result is midway between two representable values,
+  /// the even representable value is chosen.
+  /// For example:
+  ///
+  /// ```
+  /// rint(-1.5) ==> -2.0
+  /// rint(0.5000001) ==> 1.0
+  /// rint([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0]) ==> [-2., -2., -0., 0., 2., 2., 2.]
+  /// ```
+  @inlinable @inline(__always)
+  public static func rint<T: FloatingPoint & TensorFlowScalar>(
+    _ x: Tensor<T>
+  ) -> Tensor<T> {
+    switch x.handle.backend {
+    case .XLA:
+      let output_device = x.device
+      let x = Tensor(copying: x, to: .defaultTFEager)
+      return Tensor(copying: _RawTFEager.rint(x), to: output_device)
+    case .TF_EAGER:
+      return _RawTFEager.rint(x)
+    }
+
+  }
+
+  /// Advance the counter of a counter-based RNG.
+  ///
+  /// The state of the RNG after
+  /// `rng_skip(n)` will be the same as that after `stateful_uniform([n])`
+  /// (or any other distribution). The actual increment added to the
+  /// counter is an unspecified implementation detail.
+  ///
+  /// - Parameters:
+  ///   - resource: The handle of the resource variable that stores the state of the RNG.
+  ///   - algorithm: The RNG algorithm.
+  ///   - delta: The amount of advancement.
+  @inlinable @inline(__always)
+  public static func rngSkip(
+    resource: ResourceHandle,
+    algorithm: Tensor<Int64>,
+    delta: Tensor<Int64>
+  ) {
+    _RawTFEager.rngSkip(resource: resource, algorithm: algorithm, delta: delta)
+  }
+
+  /// Rolls the elements of a tensor along an axis.
+  ///
+  /// The elements are shifted positively (towards larger indices) by the offset of
+  /// `shift` along the dimension of `axis`. Negative `shift` values will shift
+  /// elements in the opposite direction. Elements that roll past the last position
+  /// will wrap around to the first and vice versa. Multiple shifts along multiple
+  /// axes may be specified.
+  ///
+  /// For example:
+  ///
+  /// ```
+  /// # 't' is [0, 1, 2, 3, 4]
+  /// roll(t, shift=2, axis=0) ==> [3, 4, 0, 1, 2]
+  ///
+  /// # shifting along multiple dimensions
+  /// # 't' is [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]
+  /// roll(t, shift=[1, -2], axis=[0, 1]) ==> [[7, 8, 9, 5, 6], [2, 3, 4, 0, 1]]
+  ///
+  /// # shifting along the same axis multiple times
+  /// # 't' is [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]
+  /// roll(t, shift=[2, -3], axis=[1, 1]) ==> [[1, 2, 3, 4, 0], [6, 7, 8, 9, 5]]
+  /// ```
+  ///
+  /// - Parameters:
+  ///   - shift: Dimension must be 0-D or 1-D. `shift[i]` specifies the number of places by which
+  ///     elements are shifted positively (towards larger indices) along the dimension
+  ///     specified by `axis[i]`. Negative shifts will roll the elements in the opposite
+  ///     direction.
+  ///   - axis: Dimension must be 0-D or 1-D. `axis[i]` specifies the dimension in which the shift
+  ///     `shift[i]` should occur. If the same axis is referenced more than once, the
+  ///     total shift for that axis will be the sum of all the shifts that belong to that
+  ///     axis.
+  ///
+  /// - Output output: Has the same shape and size as the input. The elements are shifted
+  ///   positively (towards larger indices) by the offsets of `shift` along the
+  ///   dimensions of `axis`.
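+  ///
+  /// A minimal Swift usage sketch of the wrapper below (an illustrative
+  /// addition, not part of the upstream op documentation), mirroring the
+  /// first example above:
+  ///
+  /// ```swift
+  /// let t = Tensor<Int32>([0, 1, 2, 3, 4])
+  /// let rolled = _Raw.roll(t, shift: Tensor<Int32>(2), axis: Tensor<Int32>(0))
+  /// // rolled == [3, 4, 0, 1, 2]
+  /// ```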
+  @inlinable @inline(__always)
+  public static func roll<
+    T: TensorFlowScalar,
+    Tshift: TensorFlowIndex,
+    Taxis: TensorFlowIndex
+  >(
+    _ input: Tensor<T>,
+    shift: Tensor<Tshift>,
+    axis: Tensor<Taxis>
+  ) -> Tensor<T> {
+    switch commonBackend(
+      commonBackend(input.handle.backend, shift.handle.backend), axis.handle.backend)
+    {
+    case .XLA:
+      let output_device = axis.device
+      let input = Tensor(copying: input, to: .defaultTFEager)
+      let shift = Tensor(copying: shift, to: .defaultTFEager)
+      let axis = Tensor(copying: axis, to: .defaultTFEager)
+      return Tensor(
+        copying: _RawTFEager.roll(input, shift: shift, axis: axis), to: output_device)
+    case .TF_EAGER:
+      return _RawTFEager.roll(input, shift: shift, axis: axis)
+    }
+
+  }
+
+  /// Rounds the values of a tensor to the nearest integer, element-wise.
+  ///
+  /// Rounds half to even. Also known as banker's rounding. If you want to round
+  /// according to the current system rounding mode use std::rint.
+  @inlinable @inline(__always)
+  public static func round<T: TensorFlowNumeric>(
+    _ x: Tensor<T>
+  ) -> Tensor<T> {
+    switch x.handle.backend {
+    case .XLA:
+      return _RawXLA.round(x)
+    case .TF_EAGER:
+      return _RawTFEager.round(x)
+    }
+
+  }
+
+  /// Perform batches of RPC requests.
+  ///
+  /// This op asynchronously performs either a single RPC request, or a batch
+  /// of requests. RPC requests are defined by three main parameters:
+  ///
+  ///   - `address` (the host+port or BNS address of the request)
+  ///   - `method` (the RPC method name for the request)
+  ///   - `request` (the serialized proto string, or vector of strings,
+  ///     of the RPC request argument).
+  ///
+  /// For example, if you have an RPC service running on port localhost:2345,
+  /// and its interface is configured with the following proto declaration:
+  ///
+  /// ```
+  /// service MyService {
+  ///   rpc MyMethod(MyRequestProto) returns (MyResponseProto) {
+  ///   }
+  /// };
+  /// ```
+  ///
+  /// then call this op with arguments:
+  ///
+  /// ```
+  /// address = "localhost:2345"
+  /// method = "MyService/MyMethod"
+  /// ```
+  ///
+  /// The `request` tensor is a string tensor representing serialized `MyRequestProto`
+  /// strings; and the output string tensor `response` will have the same shape
+  /// and contain (upon successful completion) corresponding serialized
+  /// `MyResponseProto` strings.
+  ///
+  /// For example, to send a single, empty, `MyRequestProto`, call
+  /// this op with `request = ""`. To send 5 **parallel** empty requests,
+  /// call this op with `request = ["", "", "", "", ""]`.
+  ///
+  /// More generally, one can create a batch of `MyRequestProto` serialized protos
+  /// from regular batched tensors using the `encode_proto` op, and convert
+  /// the response `MyResponseProto` serialized protos to batched tensors
+  /// using the `decode_proto` op.
+  ///
+  /// **NOTE** Working with serialized proto strings is faster than instantiating
+  /// actual proto objects in memory, so no performance degradation is expected
+  /// compared to writing custom kernels for this workflow.
+  ///
+  /// If the connection fails or the remote worker returns an error
+  /// status, the op reraises this exception locally.
+  ///
+  /// See the `TryRpc` op if you prefer to handle RPC failures manually in the graph.
+  ///
+  /// - Parameters:
+  ///   - address: `0-D` or `1-D`. The address (i.e. host_name:port) of the RPC server.
+  ///     If this tensor has more than 1 element, then multiple parallel rpc requests
+  ///     are sent. This argument broadcasts with `method` and `request`.
+  ///   - method: `0-D` or `1-D`.
The method address on the RPC server. + /// If this tensor has more than 1 element, then multiple parallel rpc requests + /// are sent. This argument broadcasts with `address` and `request`. + /// - request: `0-D` or `1-D`. Serialized proto strings: the rpc request argument. + /// If this tensor has more than 1 element, then multiple parallel rpc requests + /// are sent. This argument broadcasts with `address` and `method`. + /// + /// - Attrs: + /// - protocol: RPC protocol to use. Empty string means use the default protocol. + /// Options include 'grpc'. + /// - fail_fast: `boolean`. If `true` (default), then failures to connect + /// (i.e., the server does not immediately respond) cause an RPC failure. + /// - timeout_in_ms: `int`. If `0` (default), then the kernel will run the RPC + /// request and only time out if the RPC deadline passes or the session times out. + /// If this value is greater than `0`, then the op will raise an exception if + /// the RPC takes longer than `timeout_in_ms`. + /// + /// - Output response: Same shape as `request`. Serialized proto strings: the rpc responses. + @inlinable @inline(__always) + public static func rpc( + address: StringTensor, + method: StringTensor, + request: StringTensor, + protocol_: String, + failFast: Bool = true, + timeoutInMs: Int64 = 0 + ) -> StringTensor { + _RawTFEager.rpc( + address: address, method: method, request: request, protocol_: protocol_, + failFast: failFast, timeoutInMs: timeoutInMs) + } + + /// Computes reciprocal of square root of x element-wise. + /// + /// I.e., \\(y = 1 / \sqrt{x}\\). + @inlinable @inline(__always) + public static func rsqrt( + _ x: Tensor + ) -> Tensor { + switch x.handle.backend { + case .XLA: + return _RawXLA.rsqrt(x) + case .TF_EAGER: + return _RawTFEager.rsqrt(x) + } + + } + + /// Computes the gradient for the rsqrt of `x` wrt its input. + /// + /// Specifically, `grad = dy * -0.5 * y^3`, where `y = rsqrt(x)`, and `dy` + /// is the corresponding input gradient. + @inlinable @inline(__always) + public static func rsqrtGrad( + _ y: Tensor, + dy: Tensor + ) -> Tensor { + switch commonBackend(y.handle.backend, dy.handle.backend) { + case .XLA: + return _RawXLA.rsqrtGrad(y, dy: dy) + case .TF_EAGER: + return _RawTFEager.rsqrtGrad(y, dy: dy) + } + + } + + /// Generate a single randomly distorted bounding box for an image. + /// + /// Bounding box annotations are often supplied in addition to ground-truth labels + /// in image recognition or object localization tasks. A common technique for + /// training such a system is to randomly distort an image while preserving + /// its content, i.e. *data augmentation*. This Op outputs a randomly distorted + /// localization of an object, i.e. bounding box, given an `image_size`, + /// `bounding_boxes` and a series of constraints. + /// + /// The output of this Op is a single bounding box that may be used to crop the + /// original image. The output is returned as 3 tensors: `begin`, `size` and + /// `bboxes`. The first 2 tensors can be fed directly into `tf.slice` to crop the + /// image. The latter may be supplied to `tf.image.draw_bounding_boxes` to visualize + /// what the bounding box looks like. + /// + /// Bounding boxes are supplied and returned as `[y_min, x_min, y_max, x_max]`. The + /// bounding box coordinates are floats in `[0.0, 1.0]` relative to the width and + /// height of the underlying image. + /// + /// For example, + /// + /// ```python + /// # Generate a single distorted bounding box. 
+ /// begin, size, bbox_for_draw = tf.image.sample_distorted_bounding_box( + /// tf.shape(image), + /// bounding_boxes=bounding_boxes) + /// + /// # Draw the bounding box in an image summary. + /// image_with_box = tf.image.draw_bounding_boxes(tf.expand_dims(image, 0), + /// bbox_for_draw) + /// tf.summary.image('images_with_box', image_with_box) + /// + /// # Employ the bounding box to distort the image. + /// distorted_image = tf.slice(image, begin, size) + /// ``` + /// + /// Note that if no bounding box information is available, setting + /// `use_image_if_no_bounding_boxes = true` will assume there is a single implicit + /// bounding box covering the whole image. If `use_image_if_no_bounding_boxes` is + /// false and no bounding boxes are supplied, an error is raised. + /// + /// - Parameters: + /// - image_size: 1-D, containing `[height, width, channels]`. + /// - bounding_boxes: 3-D with shape `[batch, N, 4]` describing the N bounding boxes + /// associated with the image. + /// + /// - Attrs: + /// - seed: If either `seed` or `seed2` are set to non-zero, the random number + /// generator is seeded by the given `seed`. Otherwise, it is seeded by a random + /// seed. + /// - seed2: A second seed to avoid seed collision. + /// - min_object_covered: The cropped area of the image must contain at least this + /// fraction of any bounding box supplied. The value of this parameter should be + /// non-negative. In the case of 0, the cropped area does not need to overlap + /// any of the bounding boxes supplied. + /// - aspect_ratio_range: The cropped area of the image must have an aspect ratio = + /// width / height within this range. + /// - area_range: The cropped area of the image must contain a fraction of the + /// supplied image within this range. + /// - max_attempts: Number of attempts at generating a cropped region of the image + /// of the specified constraints. After `max_attempts` failures, return the entire + /// image. + /// - use_image_if_no_bounding_boxes: Controls behavior if no bounding boxes supplied. + /// If true, assume an implicit bounding box covering the whole input. If false, + /// raise an error. + /// + /// - Outputs: + /// - begin: 1-D, containing `[offset_height, offset_width, 0]`. Provide as input to + /// `tf.slice`. + /// - size: 1-D, containing `[target_height, target_width, -1]`. Provide as input to + /// `tf.slice`. + /// - bboxes: 3-D with shape `[1, 1, 4]` containing the distorted bounding box. + /// Provide as input to `tf.image.draw_bounding_boxes`. + @inlinable @inline(__always) + public static func sampleDistortedBoundingBox( + imageSize: Tensor, + boundingBoxes: Tensor, + seed: Int64 = 0, + seed2: Int64 = 0, + minObjectCovered: Double = 0.1, + aspectRatioRange: [Double] = [0.75, 1.33], + areaRange: [Double] = [0.05, 1], + maxAttempts: Int64 = 100, + useImageIfNoBoundingBoxes: Bool = false + ) -> (begin: Tensor, size: Tensor, bboxes: Tensor) { + _RawTFEager.sampleDistortedBoundingBox( + imageSize: imageSize, boundingBoxes: boundingBoxes, seed: seed, seed2: seed2, + minObjectCovered: minObjectCovered, aspectRatioRange: aspectRatioRange, + areaRange: areaRange, maxAttempts: maxAttempts, + useImageIfNoBoundingBoxes: useImageIfNoBoundingBoxes) + } + + /// Generate a single randomly distorted bounding box for an image. + /// + /// Bounding box annotations are often supplied in addition to ground-truth labels + /// in image recognition or object localization tasks. 
A common technique for + /// training such a system is to randomly distort an image while preserving + /// its content, i.e. *data augmentation*. This Op outputs a randomly distorted + /// localization of an object, i.e. bounding box, given an `image_size`, + /// `bounding_boxes` and a series of constraints. + /// + /// The output of this Op is a single bounding box that may be used to crop the + /// original image. The output is returned as 3 tensors: `begin`, `size` and + /// `bboxes`. The first 2 tensors can be fed directly into `tf.slice` to crop the + /// image. The latter may be supplied to `tf.image.draw_bounding_boxes` to visualize + /// what the bounding box looks like. + /// + /// Bounding boxes are supplied and returned as `[y_min, x_min, y_max, x_max]`. The + /// bounding box coordinates are floats in `[0.0, 1.0]` relative to the width and + /// height of the underlying image. + /// + /// For example, + /// + /// ```python + /// # Generate a single distorted bounding box. + /// begin, size, bbox_for_draw = tf.image.sample_distorted_bounding_box( + /// tf.shape(image), + /// bounding_boxes=bounding_boxes) + /// + /// # Draw the bounding box in an image summary. + /// image_with_box = tf.image.draw_bounding_boxes(tf.expand_dims(image, 0), + /// bbox_for_draw) + /// tf.summary.image('images_with_box', image_with_box) + /// + /// # Employ the bounding box to distort the image. + /// distorted_image = tf.slice(image, begin, size) + /// ``` + /// + /// Note that if no bounding box information is available, setting + /// `use_image_if_no_bounding_boxes = true` will assume there is a single implicit + /// bounding box covering the whole image. If `use_image_if_no_bounding_boxes` is + /// false and no bounding boxes are supplied, an error is raised. + /// + /// - Parameters: + /// - image_size: 1-D, containing `[height, width, channels]`. + /// - bounding_boxes: 3-D with shape `[batch, N, 4]` describing the N bounding boxes + /// associated with the image. + /// - min_object_covered: The cropped area of the image must contain at least this + /// fraction of any bounding box supplied. The value of this parameter should be + /// non-negative. In the case of 0, the cropped area does not need to overlap + /// any of the bounding boxes supplied. + /// + /// - Attrs: + /// - seed: If either `seed` or `seed2` are set to non-zero, the random number + /// generator is seeded by the given `seed`. Otherwise, it is seeded by a random + /// seed. + /// - seed2: A second seed to avoid seed collision. + /// - aspect_ratio_range: The cropped area of the image must have an aspect ratio = + /// width / height within this range. + /// - area_range: The cropped area of the image must contain a fraction of the + /// supplied image within this range. + /// - max_attempts: Number of attempts at generating a cropped region of the image + /// of the specified constraints. After `max_attempts` failures, return the entire + /// image. + /// - use_image_if_no_bounding_boxes: Controls behavior if no bounding boxes supplied. + /// If true, assume an implicit bounding box covering the whole input. If false, + /// raise an error. + /// + /// - Outputs: + /// - begin: 1-D, containing `[offset_height, offset_width, 0]`. Provide as input to + /// `tf.slice`. + /// - size: 1-D, containing `[target_height, target_width, -1]`. Provide as input to + /// `tf.slice`. + /// - bboxes: 3-D with shape `[1, 1, 4]` containing the distorted bounding box. + /// Provide as input to `tf.image.draw_bounding_boxes`. 
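+  ///
+  /// A minimal Swift call sketch of the wrapper below (an illustrative
+  /// addition; the image size and box values are made up):
+  ///
+  /// ```swift
+  /// let boxes: Tensor<Float> = [[[0.1, 0.1, 0.9, 0.9]]]
+  /// let (begin, size, bboxes) = _Raw.sampleDistortedBoundingBoxV2(
+  ///   imageSize: Tensor<Int32>([480, 640, 3]),
+  ///   boundingBoxes: boxes,
+  ///   minObjectCovered: Tensor<Float>(0.1))
+  /// // `begin` and `size` can be fed to a slice op to crop the image.
+  /// ```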
+ @inlinable @inline(__always) + public static func sampleDistortedBoundingBoxV2( + imageSize: Tensor, + boundingBoxes: Tensor, + minObjectCovered: Tensor, + seed: Int64 = 0, + seed2: Int64 = 0, + aspectRatioRange: [Double] = [0.75, 1.33], + areaRange: [Double] = [0.05, 1], + maxAttempts: Int64 = 100, + useImageIfNoBoundingBoxes: Bool = false + ) -> (begin: Tensor, size: Tensor, bboxes: Tensor) { + _RawTFEager.sampleDistortedBoundingBoxV2( + imageSize: imageSize, boundingBoxes: boundingBoxes, minObjectCovered: minObjectCovered, + seed: seed, seed2: seed2, aspectRatioRange: aspectRatioRange, areaRange: areaRange, + maxAttempts: maxAttempts, useImageIfNoBoundingBoxes: useImageIfNoBoundingBoxes) + } + + /// Creates a dataset that takes a Bernoulli sample of the contents of another dataset. + /// + /// There is no transformation in the `tf.data` Python API for creating this dataset. + /// Instead, it is created as a result of the `filter_with_random_uniform_fusion` + /// static optimization. Whether this optimization is performed is determined by the + /// `experimental_optimization.filter_with_random_uniform_fusion` option of + /// `tf.data.Options`. + /// + /// - Parameters: + /// - rate: A scalar representing the sample rate. Each element of `input_dataset` is + /// retained with this probability, independent of all other elements. + /// - seed: A scalar representing seed of random number generator. + /// - seed2: A scalar representing seed2 of random number generator. + @inlinable @inline(__always) + public static func samplingDataset( + inputDataset: VariantHandle, + rate: Tensor, + seed: Tensor, + seed2: Tensor, + outputTypes: [TensorDataType], + outputShapes: [TensorShape?] + ) -> VariantHandle { + _RawTFEager.samplingDataset( + inputDataset: inputDataset, rate: rate, seed: seed, seed2: seed2, outputTypes: outputTypes, + outputShapes: outputShapes) + } + + /// Saves the input tensors to disk. + /// + /// The size of `tensor_names` must match the number of tensors in `data`. `data[i]` + /// is written to `filename` with name `tensor_names[i]`. + /// + /// See also `SaveSlices`. + /// + /// - Parameters: + /// - filename: Must have a single element. The name of the file to which we write + /// the tensor. + /// - tensor_names: Shape `[N]`. The names of the tensors to be saved. + /// - data: `N` tensors to save. + @inlinable @inline(__always) + public static func save( + filename: StringTensor, + tensorNames: StringTensor, + data: T + ) { + _RawTFEager.save(filename: filename, tensorNames: tensorNames, data: data) + } + + /// Saves input tensors slices to disk. + /// + /// This is like `Save` except that tensors can be listed in the saved file as being + /// a slice of a larger tensor. `shapes_and_slices` specifies the shape of the + /// larger tensor and the slice that this tensor covers. `shapes_and_slices` must + /// have as many elements as `tensor_names`. + /// + /// Elements of the `shapes_and_slices` input must either be: + /// + /// * The empty string, in which case the corresponding tensor is + /// saved normally. + /// * A string of the form `dim0 dim1 ... dimN-1 slice-spec` where the + /// `dimI` are the dimensions of the larger tensor and `slice-spec` + /// specifies what part is covered by the tensor to save. 
+  ///
+  /// `slice-spec` itself is a `:`-separated list: `slice0:slice1:...:sliceN-1`
+  /// where each `sliceI` is either:
+  ///
+  /// *  The string `-` meaning that the slice covers all indices of this dimension
+  /// *  `start,length` where `start` and `length` are integers. In that
+  ///    case the slice covers `length` indices starting at `start`.
+  ///
+  /// See also `Save`.
+  ///
+  /// - Parameters:
+  ///   - filename: Must have a single element. The name of the file to which we write the
+  ///     tensor.
+  ///   - tensor_names: Shape `[N]`. The names of the tensors to be saved.
+  ///   - shapes_and_slices: Shape `[N]`. The shapes and slice specifications to use when
+  ///     saving the tensors.
+  ///   - data: `N` tensors to save.
+  @inlinable @inline(__always)
+  public static func saveSlices<T: TensorArrayProtocol>(
+    filename: StringTensor,
+    tensorNames: StringTensor,
+    shapesAndSlices: StringTensor,
+    data: T
+  ) {
+    _RawTFEager.saveSlices(
+      filename: filename, tensorNames: tensorNames, shapesAndSlices: shapesAndSlices, data: data)
+  }
+
+  /// Saves tensors in V2 checkpoint format.
+  ///
+  /// By default, saves the named tensors in full. If the caller wishes to save
+  /// specific slices of full tensors, "shape_and_slices" should be non-empty strings
+  /// and correspondingly well-formed.
+  ///
+  /// - Parameters:
+  ///   - prefix: Must have a single element. The prefix of the V2 checkpoint to which we
+  ///     write the tensors.
+  ///   - tensor_names: shape {N}. The names of the tensors to be saved.
+  ///   - shape_and_slices: shape {N}. The slice specs of the tensors to be saved.
+  ///     Empty strings indicate that they are non-partitioned tensors.
+  ///   - tensors: `N` tensors to save.
+  @inlinable @inline(__always)
+  public static func saveV2<Dtypes: TensorArrayProtocol>(
+    prefix: StringTensor,
+    tensorNames: StringTensor,
+    shapeAndSlices: StringTensor,
+    tensors: Dtypes
+  ) {
+    _RawTFEager.saveV2(
+      prefix: prefix, tensorNames: tensorNames, shapeAndSlices: shapeAndSlices, tensors: tensors)
+  }
+
+  /// Outputs a `Summary` protocol buffer with scalar values.
+  ///
+  /// The input `tags` and `values` must have the same shape. The generated summary
+  /// has a summary value for each tag-value pair in `tags` and `values`.
+  ///
+  /// - Parameters:
+  ///   - tags: Tags for the summary.
+  ///   - values: Same shape as `tags`. Values for the summary.
+  ///
+  /// - Output summary: Scalar. Serialized `Summary` protocol buffer.
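+  ///
+  /// A minimal Swift usage sketch of the wrapper below (an illustrative
+  /// addition; the tag names are made up):
+  ///
+  /// ```swift
+  /// let summary = _Raw.scalarSummary(
+  ///   tags: StringTensor(["loss", "accuracy"]),
+  ///   Tensor<Float>([0.25, 0.91]))
+  /// // `summary` is a string scalar holding a serialized `Summary` proto.
+  /// ```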
+ @inlinable @inline(__always) + public static func scalarSummary( + tags: StringTensor, + _ values: Tensor + ) -> StringTensor { + _RawTFEager.scalarSummary(tags: tags, values) + } + + @inlinable @inline(__always) + public static func scaleAndTranslate( + images: Tensor, + size: Tensor, + scale: Tensor, + translation: Tensor, + kernelType: String = "lanczos3", + antialias: Bool = true + ) -> Tensor { + switch commonBackend( + commonBackend( + commonBackend(images.handle.backend, size.handle.backend), scale.handle.backend), + translation.handle.backend) + { + case .XLA: + let output_device = translation.device + let images = Tensor(copying: images, to: .defaultTFEager) + let size = Tensor(copying: size, to: .defaultTFEager) + let scale = Tensor(copying: scale, to: .defaultTFEager) + let translation = Tensor(copying: translation, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.scaleAndTranslate( + images: images, size: size, scale: scale, translation: translation, + kernelType: kernelType, antialias: antialias), to: output_device) + case .TF_EAGER: + return _RawTFEager.scaleAndTranslate( + images: images, size: size, scale: scale, translation: translation, + kernelType: kernelType, antialias: antialias) + } + + } + + @inlinable @inline(__always) + public static func scaleAndTranslateGrad( + grads: Tensor, + originalImage: Tensor, + scale: Tensor, + translation: Tensor, + kernelType: String = "lanczos3", + antialias: Bool = true + ) -> Tensor { + switch commonBackend( + commonBackend( + commonBackend(grads.handle.backend, originalImage.handle.backend), scale.handle.backend), + translation.handle.backend) + { + case .XLA: + let output_device = translation.device + let grads = Tensor(copying: grads, to: .defaultTFEager) + let originalImage = Tensor(copying: originalImage, to: .defaultTFEager) + let scale = Tensor(copying: scale, to: .defaultTFEager) + let translation = Tensor(copying: translation, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.scaleAndTranslateGrad( + grads: grads, originalImage: originalImage, scale: scale, translation: translation, + kernelType: kernelType, antialias: antialias), to: output_device) + case .TF_EAGER: + return _RawTFEager.scaleAndTranslateGrad( + grads: grads, originalImage: originalImage, scale: scale, translation: translation, + kernelType: kernelType, antialias: antialias) + } + + } + + /// Creates a dataset successively reduces `f` over the elements of `input_dataset`. + @inlinable @inline(__always) + public static func scanDataset< + FIn: TensorGroup, + FOut: TensorGroup, + Tstate: TensorArrayProtocol, + Targuments: TensorArrayProtocol + >( + inputDataset: VariantHandle, + initialState: Tstate, + otherArguments: Targuments, + f: (FIn) -> FOut, + outputTypes: [TensorDataType], + outputShapes: [TensorShape?], + preserveCardinality: Bool = false, + useDefaultDevice: Bool = true + ) -> VariantHandle { + _RawTFEager.scanDataset( + inputDataset: inputDataset, initialState: initialState, otherArguments: otherArguments, + f: f, outputTypes: outputTypes, outputShapes: outputShapes, + preserveCardinality: preserveCardinality, useDefaultDevice: useDefaultDevice) + } + + /// Scatter `updates` into a new tensor according to `indices`. + /// + /// Creates a new tensor by applying sparse `updates` to individual values or + /// slices within a tensor (initially zero for numeric, empty for string) of + /// the given `shape` according to indices. 
This operator is the inverse of the + /// `tf.gather_nd` operator which extracts values or slices from a given tensor. + /// + /// This operation is similar to tensor_scatter_add, except that the tensor is + /// zero-initialized. Calling `tf.scatter_nd(indices, values, shape)` is identical + /// to `tensor_scatter_add(tf.zeros(shape, values.dtype), indices, values)` + /// + /// If `indices` contains duplicates, then their updates are accumulated (summed). + /// + /// **WARNING**: The order in which updates are applied is nondeterministic, so the + /// output will be nondeterministic if `indices` contains duplicates -- because + /// of some numerical approximation issues, numbers summed in different order + /// may yield different results. + /// + /// `indices` is an integer tensor containing indices into a new tensor of shape + /// `shape`. The last dimension of `indices` can be at most the rank of `shape`: + /// + /// indices.shape[-1] <= shape.rank + /// + /// The last dimension of `indices` corresponds to indices into elements + /// (if `indices.shape[-1] = shape.rank`) or slices + /// (if `indices.shape[-1] < shape.rank`) along dimension `indices.shape[-1]` of + /// `shape`. `updates` is a tensor with shape + /// + /// indices.shape[:-1] + shape[indices.shape[-1]:] + /// + /// The simplest form of scatter is to insert individual elements in a tensor by + /// index. For example, say we want to insert 4 scattered elements in a rank-1 + /// tensor with 8 elements. + /// + ///
+  /// (Figure: https://www.tensorflow.org/images/ScatterNd1.png)
+  ///
+  /// In Python, this scatter operation would look like this:
+  ///
+  /// ```python
+  ///     indices = tf.constant([[4], [3], [1], [7]])
+  ///     updates = tf.constant([9, 10, 11, 12])
+  ///     shape = tf.constant([8])
+  ///     scatter = tf.scatter_nd(indices, updates, shape)
+  ///     print(scatter)
+  /// ```
+  ///
+  /// The resulting tensor would look like this:
+  ///
+  ///     [0, 11, 0, 10, 9, 0, 0, 12]
+  ///
+  /// We can also insert entire slices of a higher-rank tensor all at once. For
+  /// example, we can insert two slices into the first dimension of a rank-3
+  /// tensor with two matrices of new values.
+  ///
+  /// (Figure: https://www.tensorflow.org/images/ScatterNd2.png)
+ /// + /// In Python, this scatter operation would look like this: + /// + /// ```python + /// indices = tf.constant([[0], [2]]) + /// updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6], + /// [7, 7, 7, 7], [8, 8, 8, 8]], + /// [[5, 5, 5, 5], [6, 6, 6, 6], + /// [7, 7, 7, 7], [8, 8, 8, 8]]]) + /// shape = tf.constant([4, 4, 4]) + /// scatter = tf.scatter_nd(indices, updates, shape) + /// print(scatter) + /// ``` + /// + /// The resulting tensor would look like this: + /// + /// [[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]], + /// [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], + /// [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]], + /// [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]] + /// + /// Note that on CPU, if an out of bound index is found, an error is returned. + /// On GPU, if an out of bound index is found, the index is ignored. + /// + /// - Parameters: + /// - indices: Index tensor. + /// - updates: Updates to scatter into output. + /// - shape: 1-D. The shape of the resulting tensor. + /// + /// - Output output: A new tensor with the given shape and updates applied according + /// to the indices. + @inlinable @inline(__always) + public static func scatterNd< + T: TensorFlowScalar, + Tindices: TensorFlowIndex + >( + indices: Tensor, + updates: Tensor, + shape: Tensor + ) -> Tensor { + switch commonBackend( + commonBackend(indices.handle.backend, updates.handle.backend), shape.handle.backend) + { + case .XLA: + let output_device = shape.device + let indices = Tensor(copying: indices, to: .defaultTFEager) + let updates = Tensor(copying: updates, to: .defaultTFEager) + let shape = Tensor(copying: shape, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.scatterNd(indices: indices, updates: updates, shape: shape), + to: output_device) + case .TF_EAGER: + return _RawTFEager.scatterNd(indices: indices, updates: updates, shape: shape) + } + + } + + /// Applies sparse addition to `input` using individual values or slices + /// + /// from `updates` according to indices `indices`. The updates are non-aliasing: + /// `input` is only modified in-place if no other operations will use it. + /// Otherwise, a copy of `input` is made. This operation has a gradient with + /// respect to both `input` and `updates`. + /// + /// `input` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`. + /// + /// `indices` must be integer tensor, containing indices into `input`. + /// It must be shape \\([d_0, ..., d_{Q-2}, K]\\) where `0 < K <= P`. + /// + /// The innermost dimension of `indices` (with length `K`) corresponds to + /// indices into elements (if `K = P`) or `(P-K)`-dimensional slices + /// (if `K < P`) along the `K`th dimension of `input`. + /// + /// `updates` is `Tensor` of rank `Q-1+P-K` with shape: + /// + /// $$[d_0, ..., d_{Q-2}, input.shape[K], ..., input.shape[P-1]].$$ + /// + /// For example, say we want to add 4 scattered elements to a rank-1 tensor to 8 + /// elements. In Python, that addition would look like this: + /// + /// input = tf.constant([1, 2, 3, 4, 5, 6, 7, 8]) + /// indices = tf.constant([[4], [3], [1], [7]]) + /// updates = tf.constant([9, 10, 11, 12]) + /// output = tf.scatter_nd_non_aliasing_add(input, indices, updates) + /// with tf.Session() as sess: + /// print(sess.run(output)) + /// + /// The resulting value `output` would look like this: + /// + /// [1, 13, 3, 14, 14, 6, 7, 20] + /// + /// See `tf.scatter_nd` for more details about how to make updates to slices. 
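+  ///
+  /// A minimal Swift sketch of the same update, using the wrapper declared
+  /// below (an illustrative addition, not part of the upstream op
+  /// documentation):
+  ///
+  /// ```swift
+  /// let input = Tensor<Int32>([1, 2, 3, 4, 5, 6, 7, 8])
+  /// let indices: Tensor<Int32> = [[4], [3], [1], [7]]
+  /// let updates = Tensor<Int32>([9, 10, 11, 12])
+  /// let output = _Raw.scatterNdNonAliasingAdd(input, indices: indices, updates: updates)
+  /// // output == [1, 13, 3, 14, 14, 6, 7, 20]
+  /// ```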
+ /// + /// - Parameters: + /// - input: A Tensor. + /// - indices: A Tensor. Must be one of the following types: `int32`, `int64`. + /// A tensor of indices into `input`. + /// - updates: A Tensor. Must have the same type as ref. A tensor of updated values + /// to add to `input`. + /// + /// - Output output: A `Tensor` with the same shape as `input`, containing values of `input` + /// updated with `updates`. + @inlinable @inline(__always) + public static func scatterNdNonAliasingAdd< + T: TensorFlowScalar, + Tindices: TensorFlowIndex + >( + _ input: Tensor, + indices: Tensor, + updates: Tensor + ) -> Tensor { + switch commonBackend( + commonBackend(input.handle.backend, indices.handle.backend), updates.handle.backend) + { + case .XLA: + let output_device = updates.device + let input = Tensor(copying: input, to: .defaultTFEager) + let indices = Tensor(copying: indices, to: .defaultTFEager) + let updates = Tensor(copying: updates, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.scatterNdNonAliasingAdd(input, indices: indices, updates: updates), + to: output_device) + case .TF_EAGER: + return _RawTFEager.scatterNdNonAliasingAdd(input, indices: indices, updates: updates) + } + + } + + /// Computes fingerprints of the input strings. + /// + /// - Parameter input: vector of strings to compute fingerprints on. + /// + /// - Output output: a (N,2) shaped matrix where N is the number of elements in the input + /// vector. Each row contains the low and high parts of the fingerprint. + @inlinable @inline(__always) + public static func sdcaFprint( + _ input: StringTensor + ) -> Tensor { + _RawTFEager.sdcaFprint(input) + } + + /// Distributed version of Stochastic Dual Coordinate Ascent (SDCA) optimizer for + /// + /// linear models with L1 + L2 regularization. As global optimization objective is + /// strongly-convex, the optimizer optimizes the dual objective at each step. The + /// optimizer applies each update one example at a time. Examples are sampled + /// uniformly, and the optimizer is learning rate free and enjoys linear convergence + /// rate. + /// + /// [Proximal Stochastic Dual Coordinate Ascent](http://arxiv.org/pdf/1211.2717v1.pdf).
+  /// Shai Shalev-Shwartz, Tong Zhang. 2012
+  ///
+  /// $$\text{Loss Objective} = \sum_{i} f_{i}(w^\top x_{i}) + \frac{l_2}{2} \|w\|_2^2 + l_1 \|w\|_1$$
+  ///
+  /// [Adding vs. Averaging in Distributed Primal-Dual Optimization](http://arxiv.org/abs/1502.03508).
+ /// Chenxin Ma, Virginia Smith, Martin Jaggi, Michael I. Jordan, + /// Peter Richtarik, Martin Takac. 2015 + /// + /// [Stochastic Dual Coordinate Ascent with Adaptive Probabilities](https://arxiv.org/abs/1502.08053).
+ /// Dominik Csiba, Zheng Qu, Peter Richtarik. 2015 + /// + /// - Parameters: + /// - sparse_example_indices: a list of vectors which contain example indices. + /// - sparse_feature_indices: a list of vectors which contain feature indices. + /// - sparse_feature_values: a list of vectors which contains feature value + /// associated with each feature group. + /// - dense_features: a list of matrices which contains the dense feature values. + /// - example_weights: a vector which contains the weight associated with each + /// example. + /// - example_labels: a vector which contains the label/target associated with each + /// example. + /// - sparse_indices: a list of vectors where each value is the indices which has + /// corresponding weights in sparse_weights. This field maybe omitted for the + /// dense approach. + /// - sparse_weights: a list of vectors where each value is the weight associated with + /// a sparse feature group. + /// - dense_weights: a list of vectors where the values are the weights associated + /// with a dense feature group. + /// - example_state_data: a list of vectors containing the example state data. + /// + /// - Attrs: + /// - loss_type: Type of the primal loss. Currently SdcaSolver supports logistic, + /// squared and hinge losses. + /// - adaptative: Whether to use Adaptive SDCA for the inner loop. + /// - num_sparse_features: Number of sparse feature groups to train on. + /// - num_sparse_features_with_values: Number of sparse feature groups with values + /// associated with it, otherwise implicitly treats values as 1.0. + /// - num_dense_features: Number of dense feature groups to train on. + /// - l1: Symmetric l1 regularization strength. + /// - l2: Symmetric l2 regularization strength. + /// - num_loss_partitions: Number of partitions of the global loss function. + /// - num_inner_iterations: Number of iterations per mini-batch. + /// + /// - Outputs: + /// - out_example_state_data: a list of vectors containing the updated example state + /// data. + /// - out_delta_sparse_weights: a list of vectors where each value is the delta + /// weights associated with a sparse feature group. + /// - out_delta_dense_weights: a list of vectors where the values are the delta + /// weights associated with a dense feature group. 
+ @inlinable @inline(__always) + public static func sdcaOptimizer( + sparseExampleIndices: [Tensor], + sparseFeatureIndices: [Tensor], + sparseFeatureValues: [Tensor], + denseFeatures: [Tensor], + exampleWeights: Tensor, + exampleLabels: Tensor, + sparseIndices: [Tensor], + sparseWeights: [Tensor], + denseWeights: [Tensor], + exampleStateData: Tensor, + lossType: LossType, + adaptative: Bool = false, + l1: Double, + l2: Double, + numLossPartitions: Int64, + numInnerIterations: Int64 + ) -> ( + outExampleStateData: Tensor, outDeltaSparseWeights: [Tensor], + outDeltaDenseWeights: [Tensor] + ) { + _RawTFEager.sdcaOptimizer( + sparseExampleIndices: sparseExampleIndices, sparseFeatureIndices: sparseFeatureIndices, + sparseFeatureValues: sparseFeatureValues, denseFeatures: denseFeatures, + exampleWeights: exampleWeights, exampleLabels: exampleLabels, sparseIndices: sparseIndices, + sparseWeights: sparseWeights, denseWeights: denseWeights, + exampleStateData: exampleStateData, lossType: lossType, adaptative: adaptative, l1: l1, + l2: l2, numLossPartitions: numLossPartitions, numInnerIterations: numInnerIterations) + } + + /// Distributed version of Stochastic Dual Coordinate Ascent (SDCA) optimizer for + /// + /// linear models with L1 + L2 regularization. As global optimization objective is + /// strongly-convex, the optimizer optimizes the dual objective at each step. The + /// optimizer applies each update one example at a time. Examples are sampled + /// uniformly, and the optimizer is learning rate free and enjoys linear convergence + /// rate. + /// + /// [Proximal Stochastic Dual Coordinate Ascent](http://arxiv.org/pdf/1211.2717v1.pdf).
+  /// Shai Shalev-Shwartz, Tong Zhang. 2012
+  ///
+  /// $$\text{Loss Objective} = \sum_{i} f_{i}(w^\top x_{i}) + \frac{l_2}{2} \|w\|_2^2 + l_1 \|w\|_1$$
+  ///
+  /// [Adding vs. Averaging in Distributed Primal-Dual Optimization](http://arxiv.org/abs/1502.03508).
+ /// Chenxin Ma, Virginia Smith, Martin Jaggi, Michael I. Jordan, + /// Peter Richtarik, Martin Takac. 2015 + /// + /// [Stochastic Dual Coordinate Ascent with Adaptive Probabilities](https://arxiv.org/abs/1502.08053).
+ /// Dominik Csiba, Zheng Qu, Peter Richtarik. 2015 + /// + /// - Parameters: + /// - sparse_example_indices: a list of vectors which contain example indices. + /// - sparse_feature_indices: a list of vectors which contain feature indices. + /// - sparse_feature_values: a list of vectors which contains feature value + /// associated with each feature group. + /// - dense_features: a list of matrices which contains the dense feature values. + /// - example_weights: a vector which contains the weight associated with each + /// example. + /// - example_labels: a vector which contains the label/target associated with each + /// example. + /// - sparse_indices: a list of vectors where each value is the indices which has + /// corresponding weights in sparse_weights. This field maybe omitted for the + /// dense approach. + /// - sparse_weights: a list of vectors where each value is the weight associated with + /// a sparse feature group. + /// - dense_weights: a list of vectors where the values are the weights associated + /// with a dense feature group. + /// - example_state_data: a list of vectors containing the example state data. + /// + /// - Attrs: + /// - loss_type: Type of the primal loss. Currently SdcaSolver supports logistic, + /// squared and hinge losses. + /// - adaptive: Whether to use Adaptive SDCA for the inner loop. + /// - num_sparse_features: Number of sparse feature groups to train on. + /// - num_sparse_features_with_values: Number of sparse feature groups with values + /// associated with it, otherwise implicitly treats values as 1.0. + /// - num_dense_features: Number of dense feature groups to train on. + /// - l1: Symmetric l1 regularization strength. + /// - l2: Symmetric l2 regularization strength. + /// - num_loss_partitions: Number of partitions of the global loss function. + /// - num_inner_iterations: Number of iterations per mini-batch. + /// + /// - Outputs: + /// - out_example_state_data: a list of vectors containing the updated example state + /// data. + /// - out_delta_sparse_weights: a list of vectors where each value is the delta + /// weights associated with a sparse feature group. + /// - out_delta_dense_weights: a list of vectors where the values are the delta + /// weights associated with a dense feature group. + @inlinable @inline(__always) + public static func sdcaOptimizerV2( + sparseExampleIndices: [Tensor], + sparseFeatureIndices: [Tensor], + sparseFeatureValues: [Tensor], + denseFeatures: [Tensor], + exampleWeights: Tensor, + exampleLabels: Tensor, + sparseIndices: [Tensor], + sparseWeights: [Tensor], + denseWeights: [Tensor], + exampleStateData: Tensor, + lossType: LossType, + adaptive: Bool = false, + l1: Double, + l2: Double, + numLossPartitions: Int64, + numInnerIterations: Int64 + ) -> ( + outExampleStateData: Tensor, outDeltaSparseWeights: [Tensor], + outDeltaDenseWeights: [Tensor] + ) { + _RawTFEager.sdcaOptimizerV2( + sparseExampleIndices: sparseExampleIndices, sparseFeatureIndices: sparseFeatureIndices, + sparseFeatureValues: sparseFeatureValues, denseFeatures: denseFeatures, + exampleWeights: exampleWeights, exampleLabels: exampleLabels, sparseIndices: sparseIndices, + sparseWeights: sparseWeights, denseWeights: denseWeights, + exampleStateData: exampleStateData, lossType: lossType, adaptive: adaptive, l1: l1, l2: l2, + numLossPartitions: numLossPartitions, numInnerIterations: numInnerIterations) + } + + /// Computes the maximum along segments of a tensor. 
+ /// + /// Read + /// [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) + /// for an explanation of segments. + /// + /// Computes a tensor such that + /// \\(output_i = \max_j(data_j)\\) where `max` is over `j` such + /// that `segment_ids[j] == i`. + /// + /// If the max is empty for a given segment ID `i`, `output[i] = 0`. + /// + ///
+  /// (Figure: https://www.tensorflow.org/images/SegmentMax.png)
+ /// + /// For example: + /// + /// ``` + /// c = tf.constant([[1,2,3,4], [4, 3, 2, 1], [5,6,7,8]]) + /// tf.segment_max(c, tf.constant([0, 0, 1])) + /// # ==> [[4, 3, 3, 4], + /// # [5, 6, 7, 8]] + /// ``` + /// + /// + /// - Parameter segment_ids: A 1-D tensor whose size is equal to the size of `data`'s + /// first dimension. Values should be sorted and can be repeated. + /// + /// - Output output: Has same shape as data, except for dimension 0 which + /// has size `k`, the number of segments. + @inlinable @inline(__always) + public static func segmentMax< + T: TensorFlowNumeric, + Tindices: TensorFlowIndex + >( + data: Tensor, + segmentIds: Tensor + ) -> Tensor { + switch commonBackend(data.handle.backend, segmentIds.handle.backend) { + case .XLA: + let output_device = segmentIds.device + let data = Tensor(copying: data, to: .defaultTFEager) + let segmentIds = Tensor(copying: segmentIds, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.segmentMax(data: data, segmentIds: segmentIds), to: output_device) + case .TF_EAGER: + return _RawTFEager.segmentMax(data: data, segmentIds: segmentIds) + } + + } + + /// Computes the mean along segments of a tensor. + /// + /// Read + /// [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) + /// for an explanation of segments. + /// + /// Computes a tensor such that + /// \\(output_i = \frac{\sum_j data_j}{N}\\) where `mean` is + /// over `j` such that `segment_ids[j] == i` and `N` is the total number of + /// values summed. + /// + /// If the mean is empty for a given segment ID `i`, `output[i] = 0`. + /// + ///
+  /// (Figure: https://www.tensorflow.org/images/SegmentMean.png)
+ /// + /// For example: + /// + /// ``` + /// c = tf.constant([[1.0,2,3,4], [4, 3, 2, 1], [5,6,7,8]]) + /// tf.segment_mean(c, tf.constant([0, 0, 1])) + /// # ==> [[2.5, 2.5, 2.5, 2.5], + /// # [5, 6, 7, 8]] + /// ``` + /// + /// + /// - Parameter segment_ids: A 1-D tensor whose size is equal to the size of `data`'s + /// first dimension. Values should be sorted and can be repeated. + /// + /// - Output output: Has same shape as data, except for dimension 0 which + /// has size `k`, the number of segments. + @inlinable @inline(__always) + public static func segmentMean< + T: TensorFlowNumeric, + Tindices: TensorFlowIndex + >( + data: Tensor, + segmentIds: Tensor + ) -> Tensor { + switch commonBackend(data.handle.backend, segmentIds.handle.backend) { + case .XLA: + let output_device = segmentIds.device + let data = Tensor(copying: data, to: .defaultTFEager) + let segmentIds = Tensor(copying: segmentIds, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.segmentMean(data: data, segmentIds: segmentIds), to: output_device) + case .TF_EAGER: + return _RawTFEager.segmentMean(data: data, segmentIds: segmentIds) + } + + } + + /// Computes the minimum along segments of a tensor. + /// + /// Read + /// [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) + /// for an explanation of segments. + /// + /// Computes a tensor such that + /// \\(output_i = \min_j(data_j)\\) where `min` is over `j` such + /// that `segment_ids[j] == i`. + /// + /// If the min is empty for a given segment ID `i`, `output[i] = 0`. + /// + ///
+  /// (Figure: https://www.tensorflow.org/images/SegmentMin.png)
+ /// + /// For example: + /// + /// ``` + /// c = tf.constant([[1,2,3,4], [4, 3, 2, 1], [5,6,7,8]]) + /// tf.segment_min(c, tf.constant([0, 0, 1])) + /// # ==> [[1, 2, 2, 1], + /// # [5, 6, 7, 8]] + /// ``` + /// + /// - Parameter segment_ids: A 1-D tensor whose size is equal to the size of `data`'s + /// first dimension. Values should be sorted and can be repeated. + /// + /// - Output output: Has same shape as data, except for dimension 0 which + /// has size `k`, the number of segments. + @inlinable @inline(__always) + public static func segmentMin< + T: TensorFlowNumeric, + Tindices: TensorFlowIndex + >( + data: Tensor, + segmentIds: Tensor + ) -> Tensor { + switch commonBackend(data.handle.backend, segmentIds.handle.backend) { + case .XLA: + let output_device = segmentIds.device + let data = Tensor(copying: data, to: .defaultTFEager) + let segmentIds = Tensor(copying: segmentIds, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.segmentMin(data: data, segmentIds: segmentIds), to: output_device) + case .TF_EAGER: + return _RawTFEager.segmentMin(data: data, segmentIds: segmentIds) + } + + } + + /// Computes the product along segments of a tensor. + /// + /// Read + /// [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) + /// for an explanation of segments. + /// + /// Computes a tensor such that + /// \\(output_i = \prod_j data_j\\) where the product is over `j` such + /// that `segment_ids[j] == i`. + /// + /// If the product is empty for a given segment ID `i`, `output[i] = 1`. + /// + ///
+ ///
+ /// + /// For example: + /// + /// ``` + /// c = tf.constant([[1,2,3,4], [4, 3, 2, 1], [5,6,7,8]]) + /// tf.segment_prod(c, tf.constant([0, 0, 1])) + /// # ==> [[4, 6, 6, 4], + /// # [5, 6, 7, 8]] + /// ``` + /// + /// + /// - Parameter segment_ids: A 1-D tensor whose size is equal to the size of `data`'s + /// first dimension. Values should be sorted and can be repeated. + /// + /// - Output output: Has same shape as data, except for dimension 0 which + /// has size `k`, the number of segments. + @inlinable @inline(__always) + public static func segmentProd< + T: TensorFlowNumeric, + Tindices: TensorFlowIndex + >( + data: Tensor, + segmentIds: Tensor + ) -> Tensor { + switch commonBackend(data.handle.backend, segmentIds.handle.backend) { + case .XLA: + let output_device = segmentIds.device + let data = Tensor(copying: data, to: .defaultTFEager) + let segmentIds = Tensor(copying: segmentIds, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.segmentProd(data: data, segmentIds: segmentIds), to: output_device) + case .TF_EAGER: + return _RawTFEager.segmentProd(data: data, segmentIds: segmentIds) + } + + } + + /// Computes the sum along segments of a tensor. + /// + /// Read + /// [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) + /// for an explanation of segments. + /// + /// Computes a tensor such that + /// \\(output_i = \sum_j data_j\\) where sum is over `j` such + /// that `segment_ids[j] == i`. + /// + /// If the sum is empty for a given segment ID `i`, `output[i] = 0`. + /// + ///
+ ///
+ /// + /// For example: + /// + /// ``` + /// c = tf.constant([[1,2,3,4], [4, 3, 2, 1], [5,6,7,8]]) + /// tf.segment_sum(c, tf.constant([0, 0, 1])) + /// # ==> [[5, 5, 5, 5], + /// # [5, 6, 7, 8]] + /// ``` + /// + /// + /// - Parameter segment_ids: A 1-D tensor whose size is equal to the size of `data`'s + /// first dimension. Values should be sorted and can be repeated. + /// + /// - Output output: Has same shape as data, except for dimension 0 which + /// has size `k`, the number of segments. + @inlinable @inline(__always) + public static func segmentSum< + T: TensorFlowNumeric, + Tindices: TensorFlowIndex + >( + data: Tensor, + segmentIds: Tensor + ) -> Tensor { + switch commonBackend(data.handle.backend, segmentIds.handle.backend) { + case .XLA: + let output_device = segmentIds.device + let data = Tensor(copying: data, to: .defaultTFEager) + let segmentIds = Tensor(copying: segmentIds, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.segmentSum(data: data, segmentIds: segmentIds), to: output_device) + case .TF_EAGER: + return _RawTFEager.segmentSum(data: data, segmentIds: segmentIds) + } + + } + + /// Selects elements from `x` or `y`, depending on `condition`. + /// + /// The `x`, and `y` tensors must all have the same shape, and the + /// output will also have that shape. + /// + /// The `condition` tensor must be a scalar if `x` and `y` are scalars. + /// If `x` and `y` are vectors or higher rank, then `condition` must be either a + /// scalar, a vector with size matching the first dimension of `x`, or must have + /// the same shape as `x`. + /// + /// The `condition` tensor acts as a mask that chooses, based on the value at each + /// element, whether the corresponding element / row in the output should be + /// taken from `x` (if true) or `y` (if false). + /// + /// If `condition` is a vector and `x` and `y` are higher rank matrices, then + /// it chooses which row (outer dimension) to copy from `x` and `y`. + /// If `condition` has the same shape as `x` and `y`, then it chooses which + /// element to copy from `x` and `y`. + /// + /// For example: + /// + /// ```python + /// # 'condition' tensor is [[True, False] + /// # [False, True]] + /// # 't' is [[1, 2], + /// # [3, 4]] + /// # 'e' is [[5, 6], + /// # [7, 8]] + /// select(condition, t, e) # => [[1, 6], [7, 4]] + /// + /// + /// # 'condition' tensor is [True, False] + /// # 't' is [[1, 2], + /// # [3, 4]] + /// # 'e' is [[5, 6], + /// # [7, 8]] + /// select(condition, t, e) ==> [[1, 2], + /// [7, 8]] + /// + /// ``` + /// + /// - Parameters: + /// - t: = A `Tensor` which may have the same shape as `condition`. + /// If `condition` is rank 1, `x` may have higher rank, + /// but its first dimension must match the size of `condition`. + /// - e: = A `Tensor` with the same type and shape as `x`. + /// + /// - Output output: = A `Tensor` with the same type and shape as `x` and `y`. 
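As a usage illustration (not part of the patch): a minimal sketch of calling the `select` wrapper defined just below, mirroring the first Python example in the doc comment above. It assumes a Swift for TensorFlow toolchain with this module available.

```swift
import TensorFlow

// A full-shape boolean mask: each element picks from `t` where true
// and from `e` where false.
let condition: Tensor<Bool> = [[true, false], [false, true]]
let t: Tensor<Int32> = [[1, 2], [3, 4]]
let e: Tensor<Int32> = [[5, 6], [7, 8]]

// The wrapper resolves the common backend of all three operands and
// forwards to _RawXLA.select or _RawTFEager.select accordingly.
let chosen = _Raw.select(condition: condition, t: t, e: e)
print(chosen)  // [[1, 6], [7, 4]]
```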
+ @inlinable @inline(__always) + public static func select( + condition: Tensor, + t: Tensor, + e: Tensor + ) -> Tensor { + switch commonBackend( + commonBackend(condition.handle.backend, t.handle.backend), e.handle.backend) + { + case .XLA: + return _RawXLA.select(condition: condition, t: t, e: e) + case .TF_EAGER: + return _RawTFEager.select(condition: condition, t: t, e: e) + } + + } + + @inlinable @inline(__always) + public static func selectV2( + condition: Tensor, + t: Tensor, + e: Tensor + ) -> Tensor { + switch commonBackend( + commonBackend(condition.handle.backend, t.handle.backend), e.handle.backend) + { + case .XLA: + let output_device = e.device + let condition = Tensor(copying: condition, to: .defaultTFEager) + let t = Tensor(copying: t, to: .defaultTFEager) + let e = Tensor(copying: e, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.selectV2(condition: condition, t: t, e: e), to: output_device) + case .TF_EAGER: + return _RawTFEager.selectV2(condition: condition, t: t, e: e) + } + + } + + /// Computes the Eigen Decomposition of a batch of square self-adjoint matrices. + /// + /// The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions + /// form square matrices, with the same constraints as the single matrix + /// SelfAdjointEig. + /// + /// The result is a [..., M+1, M] matrix with [..., 0,:] containing the + /// eigenvalues, and subsequent [...,1:, :] containing the eigenvectors. The eigenvalues + /// are sorted in non-decreasing order. + /// + /// - Parameter input: Shape is `[..., M, M]`. + /// + /// - Output output: Shape is `[..., M+1, M]`. + @inlinable @inline(__always) + public static func selfAdjointEig( + _ input: Tensor + ) -> Tensor { + switch input.handle.backend { + case .XLA: + let output_device = input.device + let input = Tensor(copying: input, to: .defaultTFEager) + return Tensor(copying: _RawTFEager.selfAdjointEig(input), to: output_device) + case .TF_EAGER: + return _RawTFEager.selfAdjointEig(input) + } + + } + + /// Computes the eigen decomposition of one or more square self-adjoint matrices. + /// + /// Computes the eigenvalues and (optionally) eigenvectors of each inner matrix in + /// `input` such that `input[..., :, :] = v[..., :, :] * diag(e[..., :])`. The eigenvalues + /// are sorted in non-decreasing order. + /// + /// ```python + /// # a is a tensor. + /// # e is a tensor of eigenvalues. + /// # v is a tensor of eigenvectors. + /// e, v = self_adjoint_eig(a) + /// e = self_adjoint_eig(a, compute_v=False) + /// ``` + /// + /// - Parameter input: `Tensor` input of shape `[N, N]`. + /// + /// - Attr compute_v: If `True` then eigenvectors will be computed and returned in `v`. + /// Otherwise, only the eigenvalues will be computed. + /// + /// - Outputs: + /// - e: Eigenvalues. Shape is `[N]`. + /// - v: Eigenvectors. Shape is `[N, N]`. + @inlinable @inline(__always) + public static func selfAdjointEigV2( + _ input: Tensor, + computeV: Bool = true + ) -> (e: Tensor, v: Tensor) { + _RawTFEager.selfAdjointEigV2(input, computeV: computeV) + } + + /// Computes scaled exponential linear: `scale * alpha * (exp(features) - 1)` + /// + /// if < 0, `scale * features` otherwise. + /// + /// To be used together with + /// `initializer = tf.variance_scaling_initializer(factor=1.0, mode='FAN_IN')`. + /// For correct dropout, use `tf.contrib.nn.alpha_dropout`. 
+ /// + /// See [Self-Normalizing Neural Networks](https://arxiv.org/abs/1706.02515) + @inlinable @inline(__always) + public static func selu( + features: Tensor + ) -> Tensor { + switch features.handle.backend { + case .XLA: + return _RawXLA.selu(features: features) + case .TF_EAGER: + return _RawTFEager.selu(features: features) + } + + } + + /// Computes gradients for the scaled exponential linear (Selu) operation. + /// + /// - Parameters: + /// - gradients: The backpropagated gradients to the corresponding Selu operation. + /// - outputs: The outputs of the corresponding Selu operation. + /// + /// - Output backprops: The gradients: `gradients * (outputs + scale * alpha)` + /// if outputs < 0, `scale * gradients` otherwise. + @inlinable @inline(__always) + public static func seluGrad( + gradients: Tensor, + outputs: Tensor + ) -> Tensor { + switch commonBackend(gradients.handle.backend, outputs.handle.backend) { + case .XLA: + return _RawXLA.seluGrad(gradients: gradients, outputs: outputs) + case .TF_EAGER: + return _RawTFEager.seluGrad(gradients: gradients, outputs: outputs) + } + + } + + /// Sends the named tensor from send_device to recv_device. + /// + /// - Parameter tensor: The tensor to send. + /// + /// - Attrs: + /// - tensor_name: The name of the tensor to send. + /// - send_device: The name of the device sending the tensor. + /// - send_device_incarnation: The current incarnation of send_device. + /// - recv_device: The name of the device receiving the tensor. + /// - client_terminated: If set to true, this indicates that the node was added + /// to the graph as a result of a client-side feed or fetch of Tensor data, + /// in which case the corresponding send or recv is expected to be managed + /// locally by the caller. + @inlinable @inline(__always) + public static func send( + _ tensor: Tensor, + tensorName: String, + sendDevice: String, + sendDeviceIncarnation: Int64, + recvDevice: String, + clientTerminated: Bool = false + ) { + _RawTFEager.send( + tensor, tensorName: tensorName, sendDevice: sendDevice, + sendDeviceIncarnation: sendDeviceIncarnation, recvDevice: recvDevice, + clientTerminated: clientTerminated) + } + + /// Performs gradient updates of embedding tables. + /// + /// - Parameters: + /// - inputs: A TensorList of gradients with which to update embedding tables. + /// This argument has the same length and shapes as the return value of + /// RecvTPUEmbeddingActivations, but contains gradients of the model's loss + /// with respect to the embedding activations. The embedding tables are updated + /// from these gradients via the optimizer specified in the TPU embedding + /// configuration given to tpu.initialize_system. + /// - learning_rates: A TensorList of float32 scalars, one for each dynamic learning + /// rate tag: see the comments in + /// //third_party/tensorflow/core/protobuf/tpu/optimization_parameters.proto. + /// Multiple tables can share the same dynamic learning rate tag as specified + /// in the configuration. If the learning rates for all tables are constant, + /// this list should be empty. + /// + /// - Attr config: Serialized TPUEmbeddingConfiguration proto. + @inlinable @inline(__always) + public static func sendTPUEmbeddingGradients( + inputs: [Tensor], + learningRates: [Tensor], + config: String + ) { + _RawTFEager.sendTPUEmbeddingGradients( + inputs: inputs, learningRates: learningRates, config: config) + } + + /// Converts the given `resource_handle` representing an iterator to a variant tensor. 
+ /// + /// - Parameter resource_handle: A handle to an iterator resource. + /// + /// - Output serialized: A variant tensor storing the state of the iterator contained in the + /// resource. + @inlinable @inline(__always) + public static func serializeIterator( + resourceHandle: ResourceHandle + ) -> VariantHandle { + _RawTFEager.serializeIterator(resourceHandle: resourceHandle) + } + + /// Serialize an `N`-minibatch `SparseTensor` into an `[N, 3]` `Tensor` object. + /// + /// The `SparseTensor` must have rank `R` greater than 1, and the first dimension + /// is treated as the minibatch dimension. Elements of the `SparseTensor` + /// must be sorted in increasing order of this first dimension. The serialized + /// `SparseTensor` objects going into each row of `serialized_sparse` will have + /// rank `R-1`. + /// + /// The minibatch size `N` is extracted from `sparse_shape[0]`. + /// + /// - Parameters: + /// - sparse_indices: 2-D. The `indices` of the minibatch `SparseTensor`. + /// - sparse_values: 1-D. The `values` of the minibatch `SparseTensor`. + /// - sparse_shape: 1-D. The `shape` of the minibatch `SparseTensor`. + /// + /// - Attr out_type: The `dtype` to use for serialization; the supported types are `string` + /// (default) and `variant`. + @inlinable @inline(__always) + public static func serializeManySparse< + T: TensorFlowScalar, + OutType: TensorFlowScalar + >( + sparseIndices: Tensor, + sparseValues: Tensor, + sparseShape: Tensor + ) -> Tensor { + switch commonBackend( + commonBackend(sparseIndices.handle.backend, sparseValues.handle.backend), + sparseShape.handle.backend) + { + case .XLA: + let output_device = sparseShape.device + let sparseIndices = Tensor(copying: sparseIndices, to: .defaultTFEager) + let sparseValues = Tensor(copying: sparseValues, to: .defaultTFEager) + let sparseShape = Tensor(copying: sparseShape, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.serializeManySparse( + sparseIndices: sparseIndices, sparseValues: sparseValues, sparseShape: sparseShape), + to: output_device) + case .TF_EAGER: + return _RawTFEager.serializeManySparse( + sparseIndices: sparseIndices, sparseValues: sparseValues, sparseShape: sparseShape) + } + + } + + /// Serialize an `N`-minibatch `SparseTensor` into an `[N, 3]` `Tensor` object. + /// + /// The `SparseTensor` must have rank `R` greater than 1, and the first dimension + /// is treated as the minibatch dimension. Elements of the `SparseTensor` + /// must be sorted in increasing order of this first dimension. The serialized + /// `SparseTensor` objects going into each row of `serialized_sparse` will have + /// rank `R-1`. + /// + /// The minibatch size `N` is extracted from `sparse_shape[0]`. + /// + /// - Parameters: + /// - sparse_indices: 2-D. The `indices` of the minibatch `SparseTensor`. + /// - sparse_values: 1-D. The `values` of the minibatch `SparseTensor`. + /// - sparse_shape: 1-D. The `shape` of the minibatch `SparseTensor`. + /// + /// - Attr out_type: The `dtype` to use for serialization; the supported types are `string` + /// (default) and `variant`. + @inlinable @inline(__always) + public static func serializeManySparse( + sparseIndices: Tensor, + sparseValues: Tensor, + sparseShape: Tensor + ) -> StringTensor { + _RawTFEager.serializeManySparse( + sparseIndices: sparseIndices, sparseValues: sparseValues, sparseShape: sparseShape) + } + + /// Serialize a `SparseTensor` into a `[3]` `Tensor` object. + /// + /// - Parameters: + /// - sparse_indices: 2-D. The `indices` of the `SparseTensor`. 
+ /// - sparse_values: 1-D. The `values` of the `SparseTensor`. + /// - sparse_shape: 1-D. The `shape` of the `SparseTensor`. + /// + /// - Attr out_type: The `dtype` to use for serialization; the supported types are `string` + /// (default) and `variant`. + @inlinable @inline(__always) + public static func serializeSparse< + T: TensorFlowScalar, + OutType: TensorFlowScalar + >( + sparseIndices: Tensor, + sparseValues: Tensor, + sparseShape: Tensor + ) -> Tensor { + switch commonBackend( + commonBackend(sparseIndices.handle.backend, sparseValues.handle.backend), + sparseShape.handle.backend) + { + case .XLA: + let output_device = sparseShape.device + let sparseIndices = Tensor(copying: sparseIndices, to: .defaultTFEager) + let sparseValues = Tensor(copying: sparseValues, to: .defaultTFEager) + let sparseShape = Tensor(copying: sparseShape, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.serializeSparse( + sparseIndices: sparseIndices, sparseValues: sparseValues, sparseShape: sparseShape), + to: output_device) + case .TF_EAGER: + return _RawTFEager.serializeSparse( + sparseIndices: sparseIndices, sparseValues: sparseValues, sparseShape: sparseShape) + } + + } + + /// Serialize a `SparseTensor` into a `[3]` `Tensor` object. + /// + /// - Parameters: + /// - sparse_indices: 2-D. The `indices` of the `SparseTensor`. + /// - sparse_values: 1-D. The `values` of the `SparseTensor`. + /// - sparse_shape: 1-D. The `shape` of the `SparseTensor`. + /// + /// - Attr out_type: The `dtype` to use for serialization; the supported types are `string` + /// (default) and `variant`. + @inlinable @inline(__always) + public static func serializeSparse( + sparseIndices: Tensor, + sparseValues: Tensor, + sparseShape: Tensor + ) -> StringTensor { + _RawTFEager.serializeSparse( + sparseIndices: sparseIndices, sparseValues: sparseValues, sparseShape: sparseShape) + } + + @inlinable @inline(__always) + public static func serializeTRTResource( + resourceName: StringTensor, + filename: StringTensor, + deleteResource: Bool = false + ) { + _RawTFEager.serializeTRTResource( + resourceName: resourceName, filename: filename, deleteResource: deleteResource) + } + + /// Transforms a Tensor into a serialized TensorProto proto. + /// + /// - Parameter tensor: A Tensor of type `T`. + /// + /// - Attr T: The type of the input tensor. + /// + /// - Output serialized: A serialized TensorProto proto of the input tensor. + @inlinable @inline(__always) + public static func serializeTensor( + _ tensor: Tensor + ) -> StringTensor { + _RawTFEager.serializeTensor(tensor) + } + + /// Number of unique elements along last dimension of input `set`. + /// + /// Input `set` is a `SparseTensor` represented by `set_indices`, `set_values`, + /// and `set_shape`. The last dimension contains values in a set, duplicates are + /// allowed but ignored. + /// + /// If `validate_indices` is `True`, this op validates the order and range of `set` + /// indices. + /// + /// - Parameters: + /// - set_indices: 2D `Tensor`, indices of a `SparseTensor`. + /// - set_values: 1D `Tensor`, values of a `SparseTensor`. + /// - set_shape: 1D `Tensor`, shape of a `SparseTensor`. + /// + /// - Output size: For `set` ranked `n`, this is a `Tensor` with rank `n-1`, and the same 1st + /// `n-1` dimensions as `set`. Each value is the number of unique elements in + /// the corresponding `[0...n-1]` dimension of `set`. 
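A hedged sketch of calling the `setSize` wrapper defined just below (illustrative only, not part of the patch; it assumes the usual SetSize dtypes, with `Int64` indices and shape and an `Int32` result):

```swift
import TensorFlow

// COO encoding of a rank-2 "set": row 0 holds {1, 9}, row 1 holds {9}.
// Indices are in the required row-major (lexicographic) order.
let setIndices: Tensor<Int64> = [[0, 0], [0, 1], [1, 0]]
let setValues: Tensor<Int32> = [1, 9, 9]
let setShape: Tensor<Int64> = [2, 2]

// For a rank-n set, the result has rank n-1: one unique-element count
// per row of the set. Duplicates within a row are ignored.
let sizes = _Raw.setSize(
  setIndices: setIndices, setValues: setValues, setShape: setShape)
print(sizes)  // [2, 1]
```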
+ @inlinable @inline(__always) + public static func setSize( + setIndices: Tensor, + setValues: Tensor, + setShape: Tensor, + validateIndices: Bool = true + ) -> Tensor { + switch commonBackend( + commonBackend(setIndices.handle.backend, setValues.handle.backend), setShape.handle.backend) + { + case .XLA: + let output_device = setShape.device + let setIndices = Tensor(copying: setIndices, to: .defaultTFEager) + let setValues = Tensor(copying: setValues, to: .defaultTFEager) + let setShape = Tensor(copying: setShape, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.setSize( + setIndices: setIndices, setValues: setValues, setShape: setShape, + validateIndices: validateIndices), to: output_device) + case .TF_EAGER: + return _RawTFEager.setSize( + setIndices: setIndices, setValues: setValues, setShape: setShape, + validateIndices: validateIndices) + } + + } + + /// Number of unique elements along last dimension of input `set`. + /// + /// Input `set` is a `SparseTensor` represented by `set_indices`, `set_values`, + /// and `set_shape`. The last dimension contains values in a set, duplicates are + /// allowed but ignored. + /// + /// If `validate_indices` is `True`, this op validates the order and range of `set` + /// indices. + /// + /// - Parameters: + /// - set_indices: 2D `Tensor`, indices of a `SparseTensor`. + /// - set_values: 1D `Tensor`, values of a `SparseTensor`. + /// - set_shape: 1D `Tensor`, shape of a `SparseTensor`. + /// + /// - Output size: For `set` ranked `n`, this is a `Tensor` with rank `n-1`, and the same 1st + /// `n-1` dimensions as `set`. Each value is the number of unique elements in + /// the corresponding `[0...n-1]` dimension of `set`. + @inlinable @inline(__always) + public static func setSize( + setIndices: Tensor, + setValues: StringTensor, + setShape: Tensor, + validateIndices: Bool = true + ) -> Tensor { + switch commonBackend(setIndices.handle.backend, setShape.handle.backend) { + case .XLA: + let output_device = setShape.device + let setIndices = Tensor(copying: setIndices, to: .defaultTFEager) + let setShape = Tensor(copying: setShape, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.setSize( + setIndices: setIndices, setValues: setValues, setShape: setShape, + validateIndices: validateIndices), to: output_device) + case .TF_EAGER: + return _RawTFEager.setSize( + setIndices: setIndices, setValues: setValues, setShape: setShape, + validateIndices: validateIndices) + } + + } + + @inlinable @inline(__always) + public static func setStatsAggregatorDataset( + inputDataset: VariantHandle, + statsAggregator: ResourceHandle, + tag: StringTensor, + counterPrefix: StringTensor, + outputTypes: [TensorDataType], + outputShapes: [TensorShape?] + ) -> VariantHandle { + _RawTFEager.setStatsAggregatorDataset( + inputDataset: inputDataset, statsAggregator: statsAggregator, tag: tag, + counterPrefix: counterPrefix, outputTypes: outputTypes, outputShapes: outputShapes) + } + + /// Returns the shape of a tensor. + /// + /// This operation returns a 1-D integer tensor representing the shape of `input`. 
+ /// + /// For example: + /// + /// ``` + /// # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]] + /// shape(t) ==> [2, 2, 3] + /// ``` + @inlinable @inline(__always) + public static func shape< + T: TensorFlowScalar, + OutType: TensorFlowIndex + >( + _ input: Tensor + ) -> Tensor { + switch input.handle.backend { + case .XLA: + return _RawXLA.shape(input) + case .TF_EAGER: + return _RawTFEager.shape(input) + } + + } + + /// Returns shape of tensors. + /// + /// This operation returns N 1-D integer tensors representing shape of `input[i]s`. + @inlinable @inline(__always) + public static func shapeN< + T: TensorFlowScalar, + OutType: TensorFlowIndex + >( + _ input: [Tensor] + ) -> [Tensor] { + _RawTFEager.shapeN(input) + } + + /// Creates a `Dataset` that includes only 1/`num_shards` of this dataset. + /// + /// - Parameters: + /// - num_shards: An integer representing the number of shards operating in parallel. + /// - index: An integer representing the current worker index. + @inlinable @inline(__always) + public static func shardDataset( + inputDataset: VariantHandle, + numShards: Tensor, + index: Tensor, + requireNonEmpty: Bool = false, + outputTypes: [TensorDataType], + outputShapes: [TensorShape?] + ) -> VariantHandle { + _RawTFEager.shardDataset( + inputDataset: inputDataset, numShards: numShards, index: index, + requireNonEmpty: requireNonEmpty, outputTypes: outputTypes, outputShapes: outputShapes) + } + + /// Generate a sharded filename. The filename is printf formatted as + /// + /// %s-%05d-of-%05d, basename, shard, num_shards. + @inlinable @inline(__always) + public static func shardedFilename( + basename: StringTensor, + shard: Tensor, + numShards: Tensor + ) -> StringTensor { + _RawTFEager.shardedFilename(basename: basename, shard: shard, numShards: numShards) + } + + /// Generate a glob pattern matching all sharded file names. + @inlinable @inline(__always) + public static func shardedFilespec( + basename: StringTensor, + numShards: Tensor + ) -> StringTensor { + _RawTFEager.shardedFilespec(basename: basename, numShards: numShards) + } + + /// Creates a dataset that shuffles and repeats elements from `input_dataset` + /// + /// pseudorandomly. + /// + /// - Parameters: + /// - buffer_size: The number of output elements to buffer in an iterator over + /// this dataset. Compare with the `min_after_dequeue` attr when creating a + /// `RandomShuffleQueue`. + /// - seed: A scalar seed for the random number generator. If either `seed` or + /// `seed2` is set to be non-zero, the random number generator is seeded + /// by the given seed. Otherwise, a random seed is used. + /// - seed2: A second scalar seed to avoid seed collision. + /// - count: A scalar representing the number of times the underlying dataset + /// should be repeated. The default is `-1`, which results in infinite repetition. + @inlinable @inline(__always) + public static func shuffleAndRepeatDataset( + inputDataset: VariantHandle, + bufferSize: Tensor, + seed: Tensor, + seed2: Tensor, + count: Tensor, + outputTypes: [TensorDataType], + outputShapes: [TensorShape?] + ) -> VariantHandle { + _RawTFEager.shuffleAndRepeatDataset( + inputDataset: inputDataset, bufferSize: bufferSize, seed: seed, seed2: seed2, count: count, + outputTypes: outputTypes, outputShapes: outputShapes) + } + + /// Creates a dataset that shuffles elements from `input_dataset` pseudorandomly. + /// + /// - Parameters: + /// - buffer_size: The number of output elements to buffer in an iterator over + /// this dataset. 
Compare with the `min_after_dequeue` attr when creating a + /// `RandomShuffleQueue`. + /// - seed: A scalar seed for the random number generator. If either `seed` or + /// `seed2` is set to be non-zero, the random number generator is seeded + /// by the given seed. Otherwise, a random seed is used. + /// - seed2: A second scalar seed to avoid seed collision. + /// + /// - Attr reshuffle_each_iteration: If true, each iterator over this dataset will be given + /// a different pseudorandomly generated seed, based on a sequence seeded by the + /// `seed` and `seed2` inputs. If false, each iterator will be given the same + /// seed, and repeated iteration over this dataset will yield the exact same + /// sequence of results. + @inlinable @inline(__always) + public static func shuffleDataset( + inputDataset: VariantHandle, + bufferSize: Tensor, + seed: Tensor, + seed2: Tensor, + reshuffleEachIteration: Bool = true, + outputTypes: [TensorDataType], + outputShapes: [TensorShape?] + ) -> VariantHandle { + _RawTFEager.shuffleDataset( + inputDataset: inputDataset, bufferSize: bufferSize, seed: seed, seed2: seed2, + reshuffleEachIteration: reshuffleEachIteration, outputTypes: outputTypes, + outputShapes: outputShapes) + } + + @inlinable @inline(__always) + public static func shuffleDatasetV2( + inputDataset: VariantHandle, + bufferSize: Tensor, + seedGenerator: ResourceHandle, + outputTypes: [TensorDataType], + outputShapes: [TensorShape?] + ) -> VariantHandle { + _RawTFEager.shuffleDatasetV2( + inputDataset: inputDataset, bufferSize: bufferSize, seedGenerator: seedGenerator, + outputTypes: outputTypes, outputShapes: outputShapes) + } + + /// Shuts down a running distributed TPU system. + /// + /// The op returns an error if no system is running. + @inlinable @inline(__always) + public static func shutdownDistributedTPU() { + _RawTFEager.shutdownDistributedTPU() + } + + /// Computes sigmoid of `x` element-wise. + /// + /// Specifically, `y = 1 / (1 + exp(-x))`. + @inlinable @inline(__always) + public static func sigmoid( + _ x: Tensor + ) -> Tensor { + switch x.handle.backend { + case .XLA: + return _RawXLA.sigmoid(x) + case .TF_EAGER: + return _RawTFEager.sigmoid(x) + } + + } + + /// Computes the gradient of the sigmoid of `x` wrt its input. + /// + /// Specifically, `grad = dy * y * (1 - y)`, where `y = sigmoid(x)`, and + /// `dy` is the corresponding input gradient. + @inlinable @inline(__always) + public static func sigmoidGrad( + _ y: Tensor, + dy: Tensor + ) -> Tensor { + switch commonBackend(y.handle.backend, dy.handle.backend) { + case .XLA: + return _RawXLA.sigmoidGrad(y, dy: dy) + case .TF_EAGER: + return _RawTFEager.sigmoidGrad(y, dy: dy) + } + + } + + /// Returns an element-wise indication of the sign of a number. + /// + /// `y = sign(x) = -1` if `x < 0`; 0 if `x == 0`; 1 if `x > 0`. + /// + /// For complex numbers, `y = sign(x) = x / |x|` if `x != 0`, otherwise `y = 0`. 
+ /// + /// Example usage: + /// >>> tf.math.sign([0., 2., -3.]) + /// + @inlinable @inline(__always) + public static func sign( + _ x: Tensor + ) -> Tensor { + switch x.handle.backend { + case .XLA: + return _RawXLA.sign(x) + case .TF_EAGER: + return _RawTFEager.sign(x) + } + + } + + @inlinable @inline(__always) + public static func simple( + _ a: Tensor + ) -> Tensor { + switch a.handle.backend { + case .XLA: + let output_device = a.device + let a = Tensor(copying: a, to: .defaultTFEager) + return Tensor(copying: _RawTFEager.simple(a), to: output_device) + case .TF_EAGER: + return _RawTFEager.simple(a) + } + + } + + @inlinable @inline(__always) + public static func simpleStruct( + nA: Int64 + ) -> [Tensor] { + _RawTFEager.simpleStruct(nA: nA) + } + + /// Computes sine of x element-wise. + /// + /// Given an input tensor, this function computes sine of every + /// element in the tensor. Input range is `(-inf, inf)` and + /// output range is `[-1,1]`. + /// + /// ```python + /// x = tf.constant([-float("inf"), -9, -0.5, 1, 1.2, 200, 10, float("inf")]) + /// tf.math.sin(x) ==> [nan -0.4121185 -0.47942555 0.84147096 0.9320391 -0.87329733 -0.54402107 nan] + /// ``` + @inlinable @inline(__always) + public static func sin( + _ x: Tensor + ) -> Tensor { + switch x.handle.backend { + case .XLA: + return _RawXLA.sin(x) + case .TF_EAGER: + return _RawTFEager.sin(x) + } + + } + + /// Computes hyperbolic sine of x element-wise. + /// + /// Given an input tensor, this function computes hyperbolic sine of every + /// element in the tensor. Input range is `[-inf,inf]` and output range + /// is `[-inf,inf]`. + /// + /// ```python + /// x = tf.constant([-float("inf"), -9, -0.5, 1, 1.2, 2, 10, float("inf")]) + /// tf.math.sinh(x) ==> [-inf -4.0515420e+03 -5.2109528e-01 1.1752012e+00 1.5094614e+00 3.6268604e+00 1.1013232e+04 inf] + /// ``` + @inlinable @inline(__always) + public static func sinh( + _ x: Tensor + ) -> Tensor { + switch x.handle.backend { + case .XLA: + return _RawXLA.sinh(x) + case .TF_EAGER: + return _RawTFEager.sinh(x) + } + + } + + /// Returns the size of a tensor. + /// + /// This operation returns an integer representing the number of elements in + /// `input`. + /// + /// For example: + /// + /// ``` + /// # 't' is [[[1, 1,, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]] + /// size(t) ==> 12 + /// ``` + @inlinable @inline(__always) + public static func size< + T: TensorFlowScalar, + OutType: TensorFlowIndex + >( + _ input: Tensor + ) -> Tensor { + switch input.handle.backend { + case .XLA: + return _RawXLA.size(input) + case .TF_EAGER: + return _RawTFEager.size(input) + } + + } + + /// Creates a dataset that skips `count` elements from the `input_dataset`. + /// + /// - Parameter count: A scalar representing the number of elements from the `input_dataset` + /// that should be skipped. If count is -1, skips everything. + @inlinable @inline(__always) + public static func skipDataset( + inputDataset: VariantHandle, + count: Tensor, + outputTypes: [TensorDataType], + outputShapes: [TensorShape?] + ) -> VariantHandle { + _RawTFEager.skipDataset( + inputDataset: inputDataset, count: count, outputTypes: outputTypes, + outputShapes: outputShapes) + } + + /// Parses a text file and creates a batch of examples. + /// + /// - Attrs: + /// - filename: The corpus's text file name. + /// - batch_size: The size of produced batch. + /// - window_size: The number of words to predict to the left and right of the target. 
+ /// - min_count: The minimum number of word occurrences for it to be included in the + /// vocabulary. + /// - subsample: Threshold for word occurrence. Words that appear with higher + /// frequency will be randomly down-sampled. Set to 0 to disable. + /// + /// - Outputs: + /// - vocab_word: A vector of words in the corpus. + /// - vocab_freq: Frequencies of words. Sorted in the non-ascending order. + /// - words_per_epoch: Number of words per epoch in the data file. + /// - current_epoch: The current epoch number. + /// - total_words_processed: The total number of words processed so far. + /// - examples: A vector of word ids. + /// - labels: A vector of word ids. + @inlinable @inline(__always) + public static func skipgram( + filename: String, + batchSize: Int64, + windowSize: Int64 = 5, + minCount: Int64 = 5, + subsample: Double = 0.001 + ) -> ( + vocabWord: StringTensor, vocabFreq: Tensor, wordsPerEpoch: Tensor, + currentEpoch: Tensor, totalWordsProcessed: Tensor, examples: Tensor, + labels: Tensor + ) { + _RawTFEager.skipgram( + filename: filename, batchSize: batchSize, windowSize: windowSize, minCount: minCount, + subsample: subsample) + } + + @inlinable @inline(__always) + public static func sleepDataset( + inputDataset: VariantHandle, + sleepMicroseconds: Tensor, + outputTypes: [TensorDataType], + outputShapes: [TensorShape?] + ) -> VariantHandle { + _RawTFEager.sleepDataset( + inputDataset: inputDataset, sleepMicroseconds: sleepMicroseconds, outputTypes: outputTypes, + outputShapes: outputShapes) + } + + /// Return a slice from 'input'. + /// + /// The output tensor is a tensor with dimensions described by 'size' + /// whose values are extracted from 'input' starting at the offsets in + /// 'begin'. + /// + /// *Requirements*: + /// 0 <= begin[i] <= begin[i] + size[i] <= Di for i in [0, n) + /// + /// - Parameters: + /// - begin: begin[i] specifies the offset into the 'i'th dimension of + /// 'input' to slice from. + /// - size: size[i] specifies the number of elements of the 'i'th dimension + /// of 'input' to slice. If size[i] is -1, all remaining elements in dimension + /// i are included in the slice (i.e. this is equivalent to setting + /// size[i] = input.dim_size(i) - begin[i]). + @inlinable @inline(__always) + public static func slice< + T: TensorFlowScalar, + Index: TensorFlowIndex + >( + _ input: Tensor, + begin: Tensor, + size: Tensor + ) -> Tensor { + switch commonBackend( + commonBackend(input.handle.backend, begin.handle.backend), size.handle.backend) + { + case .XLA: + return _RawXLA.slice(input, begin: begin, size: size) + case .TF_EAGER: + return _RawTFEager.slice(input, begin: begin, size: size) + } + + } + + /// Creates a dataset that passes a sliding window over `input_dataset`. + /// + /// - Parameters: + /// - window_size: A scalar representing the number of elements in the + /// sliding window. + /// - window_shift: A scalar representing the steps moving the sliding window + /// forward in one iteration. It must be positive. + /// - window_stride: A scalar representing the stride of the input elements of the sliding window. + /// It must be positive. + @inlinable @inline(__always) + public static func slidingWindowDataset( + inputDataset: VariantHandle, + windowSize: Tensor, + windowShift: Tensor, + windowStride: Tensor, + outputTypes: [TensorDataType], + outputShapes: [TensorShape?] 
+ ) -> VariantHandle { + _RawTFEager.slidingWindowDataset( + inputDataset: inputDataset, windowSize: windowSize, windowShift: windowShift, + windowStride: windowStride, outputTypes: outputTypes, outputShapes: outputShapes) + } + + /// Returns a copy of the input tensor. + @inlinable @inline(__always) + public static func snapshot( + _ input: Tensor + ) -> Tensor { + switch input.handle.backend { + case .XLA: + let output_device = input.device + let input = Tensor(copying: input, to: .defaultTFEager) + return Tensor(copying: _RawTFEager.snapshot(input), to: output_device) + case .TF_EAGER: + return _RawTFEager.snapshot(input) + } + + } + + /// Creates a dataset that will write to / read from a snapshot. + /// + /// This dataset attempts to determine whether a valid snapshot exists at the + /// `snapshot_path`, and reads from the snapshot in lieu of using `input_dataset`. + /// If not, it will run the preprocessing pipeline as usual, and write out a + /// snapshot of the data processed for future use. + /// + /// - Parameters: + /// - input_dataset: A variant tensor representing the input dataset. + /// - path: The path we should write snapshots to / read snapshots from. + @inlinable @inline(__always) + public static func snapshotDataset( + inputDataset: VariantHandle, + path: StringTensor, + outputTypes: [TensorDataType], + outputShapes: [TensorShape?], + compression: String, + readerPathPrefix: String, + writerPathPrefix: String, + shardSizeBytes: Int64 = 10_737_418_240, + pendingSnapshotExpirySeconds: Int64 = 86400, + numReaderThreads: Int64 = 1, + readerBufferSize: Int64 = 1, + numWriterThreads: Int64 = 1, + writerBufferSize: Int64 = 1, + shuffleOnRead: Bool = false, + seed: Int64 = 0, + seed2: Int64 = 0 + ) -> VariantHandle { + _RawTFEager.snapshotDataset( + inputDataset: inputDataset, path: path, outputTypes: outputTypes, + outputShapes: outputShapes, compression: compression, readerPathPrefix: readerPathPrefix, + writerPathPrefix: writerPathPrefix, shardSizeBytes: shardSizeBytes, + pendingSnapshotExpirySeconds: pendingSnapshotExpirySeconds, + numReaderThreads: numReaderThreads, readerBufferSize: readerBufferSize, + numWriterThreads: numWriterThreads, writerBufferSize: writerBufferSize, + shuffleOnRead: shuffleOnRead, seed: seed, seed2: seed2) + } + + /// Computes softmax activations. + /// + /// For each batch `i` and class `j` we have + /// + /// $$softmax[i, j] = exp(logits[i, j]) / sum_j(exp(logits[i, j]))$$ + /// + /// - Parameter logits: 2-D with shape `[batch_size, num_classes]`. + /// + /// - Output softmax: Same shape as `logits`. + @inlinable @inline(__always) + public static func softmax( + logits: Tensor + ) -> Tensor { + switch logits.handle.backend { + case .XLA: + return _RawXLA.softmax(logits: logits) + case .TF_EAGER: + return _RawTFEager.softmax(logits: logits) + } + + } + + /// Computes softmax cross entropy cost and gradients to backpropagate. + /// + /// Inputs are the logits, not probabilities. + /// + /// - Parameters: + /// - features: batch_size x num_classes matrix + /// - labels: batch_size x num_classes matrix + /// The caller must ensure that each batch of labels represents a valid + /// probability distribution. + /// + /// - Outputs: + /// - loss: Per example loss (batch_size vector). + /// - backprop: backpropagated gradients (batch_size x num_classes matrix). 
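For orientation, a minimal sketch of the `softmaxCrossEntropyWithLogits` wrapper defined just below (not part of the patch; the labels here are one-hot rows, i.e. valid probability distributions, as the doc comment requires):

```swift
import TensorFlow

// Two examples, three classes; inputs are logits, not probabilities.
let logits: Tensor<Float> = [[2.0, 1.0, 0.1],
                             [0.5, 2.5, 0.0]]
let labels: Tensor<Float> = [[1, 0, 0],
                             [0, 1, 0]]

// Returns per-example loss ([batch_size]) and the gradient of the
// loss with respect to the logits ([batch_size, num_classes]).
let (loss, backprop) = _Raw.softmaxCrossEntropyWithLogits(
  features: logits, labels: labels)
print(loss.shape)      // [2]
print(backprop.shape)  // [2, 3]
```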
+ @inlinable @inline(__always) + public static func softmaxCrossEntropyWithLogits( + features: Tensor, + labels: Tensor + ) -> (loss: Tensor, backprop: Tensor) { + switch commonBackend(features.handle.backend, labels.handle.backend) { + case .XLA: + return _RawXLA.softmaxCrossEntropyWithLogits(features: features, labels: labels) + case .TF_EAGER: + return _RawTFEager.softmaxCrossEntropyWithLogits(features: features, labels: labels) + } + + } + + /// Computes softplus: `log(exp(features) + 1)`. + @inlinable @inline(__always) + public static func softplus( + features: Tensor + ) -> Tensor { + switch features.handle.backend { + case .XLA: + return _RawXLA.softplus(features: features) + case .TF_EAGER: + return _RawTFEager.softplus(features: features) + } + + } + + /// Computes softplus gradients for a softplus operation. + /// + /// - Parameters: + /// - gradients: The backpropagated gradients to the corresponding softplus operation. + /// - features: The features passed as input to the corresponding softplus operation. + /// + /// - Output backprops: The gradients: `gradients / (1 + exp(-features))`. + @inlinable @inline(__always) + public static func softplusGrad( + gradients: Tensor, + features: Tensor + ) -> Tensor { + switch commonBackend(gradients.handle.backend, features.handle.backend) { + case .XLA: + return _RawXLA.softplusGrad(gradients: gradients, features: features) + case .TF_EAGER: + return _RawTFEager.softplusGrad(gradients: gradients, features: features) + } + + } + + /// Computes softsign: `features / (abs(features) + 1)`. + @inlinable @inline(__always) + public static func softsign( + features: Tensor + ) -> Tensor { + switch features.handle.backend { + case .XLA: + return _RawXLA.softsign(features: features) + case .TF_EAGER: + return _RawTFEager.softsign(features: features) + } + + } + + /// Computes softsign gradients for a softsign operation. + /// + /// - Parameters: + /// - gradients: The backpropagated gradients to the corresponding softsign operation. + /// - features: The features passed as input to the corresponding softsign operation. + /// + /// - Output backprops: The gradients: `gradients / (1 + abs(features)) ** 2`. + @inlinable @inline(__always) + public static func softsignGrad( + gradients: Tensor, + features: Tensor + ) -> Tensor { + switch commonBackend(gradients.handle.backend, features.handle.backend) { + case .XLA: + return _RawXLA.softsignGrad(gradients: gradients, features: features) + case .TF_EAGER: + return _RawTFEager.softsignGrad(gradients: gradients, features: features) + } + + } + + /// SpaceToBatch for 4-D tensors of type T. + /// + /// This is a legacy version of the more general SpaceToBatchND. + /// + /// Zero-pads and then rearranges (permutes) blocks of spatial data into batch. + /// More specifically, this op outputs a copy of the input tensor where values from + /// the `height` and `width` dimensions are moved to the `batch` dimension. After + /// the zero-padding, both `height` and `width` of the input must be divisible by the + /// block size. + /// + /// - Parameters: + /// - input: 4-D with shape `[batch, height, width, depth]`. + /// - paddings: 2-D tensor of non-negative integers with shape `[2, 2]`. 
It specifies + /// the padding of the input with zeros across the spatial dimensions as follows: + /// + /// paddings = [[pad_top, pad_bottom], [pad_left, pad_right]] + /// + /// The effective spatial dimensions of the zero-padded input tensor will be: + /// + /// height_pad = pad_top + height + pad_bottom + /// width_pad = pad_left + width + pad_right + /// + /// The attr `block_size` must be greater than one. It indicates the block size. + /// + /// * Non-overlapping blocks of size `block_size x block size` in the height and + /// width dimensions are rearranged into the batch dimension at each location. + /// * The batch of the output tensor is `batch * block_size * block_size`. + /// * Both height_pad and width_pad must be divisible by block_size. + /// + /// The shape of the output will be: + /// + /// [batch*block_size*block_size, height_pad/block_size, width_pad/block_size, + /// depth] + /// + /// Some examples: + /// + /// (1) For the following input of shape `[1, 2, 2, 1]` and block_size of 2: + /// + /// ``` + /// x = [[[[1], [2]], [[3], [4]]]] + /// ``` + /// + /// The output tensor has shape `[4, 1, 1, 1]` and value: + /// + /// ``` + /// [[[[1]]], [[[2]]], [[[3]]], [[[4]]]] + /// ``` + /// + /// (2) For the following input of shape `[1, 2, 2, 3]` and block_size of 2: + /// + /// ``` + /// x = [[[[1, 2, 3], [4, 5, 6]], + /// [[7, 8, 9], [10, 11, 12]]]] + /// ``` + /// + /// The output tensor has shape `[4, 1, 1, 3]` and value: + /// + /// ``` + /// [[[[1, 2, 3]]], [[[4, 5, 6]]], [[[7, 8, 9]]], [[[10, 11, 12]]]] + /// ``` + /// + /// (3) For the following input of shape `[1, 4, 4, 1]` and block_size of 2: + /// + /// ``` + /// x = [[[[1], [2], [3], [4]], + /// [[5], [6], [7], [8]], + /// [[9], [10], [11], [12]], + /// [[13], [14], [15], [16]]]] + /// ``` + /// + /// The output tensor has shape `[4, 2, 2, 1]` and value: + /// + /// ``` + /// x = [[[[1], [3]], [[9], [11]]], + /// [[[2], [4]], [[10], [12]]], + /// [[[5], [7]], [[13], [15]]], + /// [[[6], [8]], [[14], [16]]]] + /// ``` + /// + /// (4) For the following input of shape `[2, 2, 4, 1]` and block_size of 2: + /// + /// ``` + /// x = [[[[1], [2], [3], [4]], + /// [[5], [6], [7], [8]]], + /// [[[9], [10], [11], [12]], + /// [[13], [14], [15], [16]]]] + /// ``` + /// + /// The output tensor has shape `[8, 1, 2, 1]` and value: + /// + /// ``` + /// x = [[[[1], [3]]], [[[9], [11]]], [[[2], [4]]], [[[10], [12]]], + /// [[[5], [7]]], [[[13], [15]]], [[[6], [8]]], [[[14], [16]]]] + /// ``` + /// + /// Among others, this operation is useful for reducing atrous convolution into + /// regular convolution. + @inlinable @inline(__always) + public static func spaceToBatch< + T: TensorFlowScalar, + Tpaddings: TensorFlowIndex + >( + _ input: Tensor, + paddings: Tensor, + blockSize: Int64 + ) -> Tensor { + switch commonBackend(input.handle.backend, paddings.handle.backend) { + case .XLA: + let output_device = paddings.device + let input = Tensor(copying: input, to: .defaultTFEager) + let paddings = Tensor(copying: paddings, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.spaceToBatch(input, paddings: paddings, blockSize: blockSize), + to: output_device) + case .TF_EAGER: + return _RawTFEager.spaceToBatch(input, paddings: paddings, blockSize: blockSize) + } + + } + + /// SpaceToBatch for N-D tensors of type T. 
+ /// + /// This operation divides "spatial" dimensions `[1, ..., M]` of the input into a + /// grid of blocks of shape `block_shape`, and interleaves these blocks with the + /// "batch" dimension (0) such that in the output, the spatial dimensions + /// `[1, ..., M]` correspond to the position within the grid, and the batch + /// dimension combines both the position within a spatial block and the original + /// batch position. Prior to division into blocks, the spatial dimensions of the + /// input are optionally zero padded according to `paddings`. See below for a + /// precise description. + /// + /// - Parameters: + /// - input: N-D with shape `input_shape = [batch] + spatial_shape + remaining_shape`, + /// where spatial_shape has `M` dimensions. + /// - block_shape: 1-D with shape `[M]`, all values must be >= 1. + /// - paddings: 2-D with shape `[M, 2]`, all values must be >= 0. + /// `paddings[i] = [pad_start, pad_end]` specifies the padding for input dimension + /// `i + 1`, which corresponds to spatial dimension `i`. It is required that + /// `block_shape[i]` divides `input_shape[i + 1] + pad_start + pad_end`. + /// + /// This operation is equivalent to the following steps: + /// + /// 1. Zero-pad the start and end of dimensions `[1, ..., M]` of the + /// input according to `paddings` to produce `padded` of shape `padded_shape`. + /// + /// 2. Reshape `padded` to `reshaped_padded` of shape: + /// + /// [batch] + + /// [padded_shape[1] / block_shape[0], + /// block_shape[0], + /// ..., + /// padded_shape[M] / block_shape[M-1], + /// block_shape[M-1]] + + /// remaining_shape + /// + /// 3. Permute dimensions of `reshaped_padded` to produce + /// `permuted_reshaped_padded` of shape: + /// + /// block_shape + + /// [batch] + + /// [padded_shape[1] / block_shape[0], + /// ..., + /// padded_shape[M] / block_shape[M-1]] + + /// remaining_shape + /// + /// 4. 
Reshape `permuted_reshaped_padded` to flatten `block_shape` into the batch + /// dimension, producing an output tensor of shape: + /// + /// [batch * prod(block_shape)] + + /// [padded_shape[1] / block_shape[0], + /// ..., + /// padded_shape[M] / block_shape[M-1]] + + /// remaining_shape + /// + /// Some examples: + /// + /// (1) For the following input of shape `[1, 2, 2, 1]`, `block_shape = [2, 2]`, and + /// `paddings = [[0, 0], [0, 0]]`: + /// + /// ``` + /// x = [[[[1], [2]], [[3], [4]]]] + /// ``` + /// + /// The output tensor has shape `[4, 1, 1, 1]` and value: + /// + /// ``` + /// [[[[1]]], [[[2]]], [[[3]]], [[[4]]]] + /// ``` + /// + /// (2) For the following input of shape `[1, 2, 2, 3]`, `block_shape = [2, 2]`, and + /// `paddings = [[0, 0], [0, 0]]`: + /// + /// ``` + /// x = [[[[1, 2, 3], [4, 5, 6]], + /// [[7, 8, 9], [10, 11, 12]]]] + /// ``` + /// + /// The output tensor has shape `[4, 1, 1, 3]` and value: + /// + /// ``` + /// [[[[1, 2, 3]]], [[[4, 5, 6]]], [[[7, 8, 9]]], [[[10, 11, 12]]]] + /// ``` + /// + /// (3) For the following input of shape `[1, 4, 4, 1]`, `block_shape = [2, 2]`, and + /// `paddings = [[0, 0], [0, 0]]`: + /// + /// ``` + /// x = [[[[1], [2], [3], [4]], + /// [[5], [6], [7], [8]], + /// [[9], [10], [11], [12]], + /// [[13], [14], [15], [16]]]] + /// ``` + /// + /// The output tensor has shape `[4, 2, 2, 1]` and value: + /// + /// ``` + /// x = [[[[1], [3]], [[9], [11]]], + /// [[[2], [4]], [[10], [12]]], + /// [[[5], [7]], [[13], [15]]], + /// [[[6], [8]], [[14], [16]]]] + /// ``` + /// + /// (4) For the following input of shape `[2, 2, 4, 1]`, block_shape = `[2, 2]`, and + /// paddings = `[[0, 0], [2, 0]]`: + /// + /// ``` + /// x = [[[[1], [2], [3], [4]], + /// [[5], [6], [7], [8]]], + /// [[[9], [10], [11], [12]], + /// [[13], [14], [15], [16]]]] + /// ``` + /// + /// The output tensor has shape `[8, 1, 3, 1]` and value: + /// + /// ``` + /// x = [[[[0], [1], [3]]], [[[0], [9], [11]]], + /// [[[0], [2], [4]]], [[[0], [10], [12]]], + /// [[[0], [5], [7]]], [[[0], [13], [15]]], + /// [[[0], [6], [8]]], [[[0], [14], [16]]]] + /// ``` + /// + /// Among others, this operation is useful for reducing atrous convolution into + /// regular convolution. + @inlinable @inline(__always) + public static func spaceToBatchND< + T: TensorFlowScalar, + TblockShape: TensorFlowIndex, + Tpaddings: TensorFlowIndex + >( + _ input: Tensor, + blockShape: Tensor, + paddings: Tensor + ) -> Tensor { + switch commonBackend( + commonBackend(input.handle.backend, blockShape.handle.backend), paddings.handle.backend) + { + case .XLA: + let output_device = paddings.device + let input = Tensor(copying: input, to: .defaultTFEager) + let blockShape = Tensor(copying: blockShape, to: .defaultTFEager) + let paddings = Tensor(copying: paddings, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.spaceToBatchND(input, blockShape: blockShape, paddings: paddings), + to: output_device) + case .TF_EAGER: + return _RawTFEager.spaceToBatchND(input, blockShape: blockShape, paddings: paddings) + } + + } + + /// SpaceToDepth for tensors of type T. + /// + /// Rearranges blocks of spatial data, into depth. More specifically, + /// this op outputs a copy of the input tensor where values from the `height` + /// and `width` dimensions are moved to the `depth` dimension. + /// The attr `block_size` indicates the input block size. + /// + /// * Non-overlapping blocks of size `block_size x block size` are rearranged + /// into depth at each location. 
+ /// * The depth of the output tensor is `block_size * block_size * input_depth`. + /// * The Y, X coordinates within each block of the input become the high order + /// component of the output channel index. + /// * The input tensor's height and width must be divisible by block_size. + /// + /// The `data_format` attr specifies the layout of the input and output tensors + /// with the following options: + /// "NHWC": `[ batch, height, width, channels ]` + /// "NCHW": `[ batch, channels, height, width ]` + /// "NCHW_VECT_C": + /// `qint8 [ batch, channels / 4, height, width, 4 ]` + /// + /// It is useful to consider the operation as transforming a 6-D Tensor. + /// e.g. for data_format = NHWC, + /// Each element in the input tensor can be specified via 6 coordinates, + /// ordered by decreasing memory layout significance as: + /// n,oY,bY,oX,bX,iC (where n=batch index, oX, oY means X or Y coordinates + /// within the output image, bX, bY means coordinates + /// within the input block, iC means input channels). + /// The output would be a transpose to the following layout: + /// n,oY,oX,bY,bX,iC + /// + /// This operation is useful for resizing the activations between convolutions + /// (but keeping all data), e.g. instead of pooling. It is also useful for training + /// purely convolutional models. + /// + /// For example, given an input of shape `[1, 2, 2, 1]`, data_format = "NHWC" and + /// block_size = 2: + /// + /// ``` + /// x = [[[[1], [2]], + /// [[3], [4]]]] + /// ``` + /// + /// This operation will output a tensor of shape `[1, 1, 1, 4]`: + /// + /// ``` + /// [[[[1, 2, 3, 4]]]] + /// ``` + /// + /// Here, the input has a batch of 1 and each batch element has shape `[2, 2, 1]`, + /// the corresponding output will have a single element (i.e. width and height are + /// both 1) and will have a depth of 4 channels (1 * block_size * block_size). + /// The output element shape is `[1, 1, 4]`. + /// + /// For an input tensor with larger depth, here of shape `[1, 2, 2, 3]`, e.g. + /// + /// ``` + /// x = [[[[1, 2, 3], [4, 5, 6]], + /// [[7, 8, 9], [10, 11, 12]]]] + /// ``` + /// + /// This operation, for block_size of 2, will return the following tensor of shape + /// `[1, 1, 1, 12]` + /// + /// ``` + /// [[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]] + /// ``` + /// + /// Similarly, for the following input of shape `[1 4 4 1]`, and a block size of 2: + /// + /// ``` + /// x = [[[[1], [2], [5], [6]], + /// [[3], [4], [7], [8]], + /// [[9], [10], [13], [14]], + /// [[11], [12], [15], [16]]]] + /// ``` + /// + /// the operator will return the following tensor of shape `[1 2 2 4]`: + /// + /// ``` + /// x = [[[[1, 2, 3, 4], + /// [5, 6, 7, 8]], + /// [[9, 10, 11, 12], + /// [13, 14, 15, 16]]]] + /// ``` + /// + /// - Attr block_size: The size of the spatial block. + @inlinable @inline(__always) + public static func spaceToDepth( + _ input: Tensor, + blockSize: Int64, + dataFormat: DataFormat2 = .nhwc + ) -> Tensor { + switch input.handle.backend { + case .XLA: + let output_device = input.device + let input = Tensor(copying: input, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.spaceToDepth(input, blockSize: blockSize, dataFormat: dataFormat), + to: output_device) + case .TF_EAGER: + return _RawTFEager.spaceToDepth(input, blockSize: blockSize, dataFormat: dataFormat) + } + + } + + /// Adds two `SparseTensor` objects to produce another `SparseTensor`. + /// + /// The input `SparseTensor` objects' indices are assumed ordered in standard + /// lexicographic order. 
If this is not the case, before this step run + /// `SparseReorder` to restore index ordering. + /// + /// By default, if two values sum to zero at some index, the output `SparseTensor` + /// would still include that particular location in its index, storing a zero in the + /// corresponding value slot. To override this, callers can specify `thresh`, + /// indicating that if the sum has a magnitude strictly smaller than `thresh`, its + /// corresponding value and index would then not be included. In particular, + /// `thresh == 0` (default) means everything is kept and actual thresholding happens + /// only for a positive value. + /// + /// In the following shapes, `nnz` is the count after taking `thresh` into account. + /// + /// - Parameters: + /// - a_indices: 2-D. The `indices` of the first `SparseTensor`, size `[nnz, ndims]` Matrix. + /// - a_values: 1-D. The `values` of the first `SparseTensor`, size `[nnz]` Vector. + /// - a_shape: 1-D. The `shape` of the first `SparseTensor`, size `[ndims]` Vector. + /// - b_indices: 2-D. The `indices` of the second `SparseTensor`, size `[nnz, ndims]` Matrix. + /// - b_values: 1-D. The `values` of the second `SparseTensor`, size `[nnz]` Vector. + /// - b_shape: 1-D. The `shape` of the second `SparseTensor`, size `[ndims]` Vector. + /// - thresh: 0-D. The magnitude threshold that determines if an output value/index + /// pair takes space. + @inlinable @inline(__always) + public static func sparseAdd< + T: TensorFlowNumeric, + Treal: TensorFlowNumeric + >( + aIndices: Tensor, + aValues: Tensor, + aShape: Tensor, + bIndices: Tensor, + bValues: Tensor, + bShape: Tensor, + thresh: Tensor + ) -> (sumIndices: Tensor, sumValues: Tensor, sumShape: Tensor) { + _RawTFEager.sparseAdd( + aIndices: aIndices, aValues: aValues, aShape: aShape, bIndices: bIndices, bValues: bValues, + bShape: bShape, thresh: thresh) + } + + /// The gradient operator for the SparseAdd op. + /// + /// The SparseAdd op calculates A + B, where A, B, and the sum are all represented + /// as `SparseTensor` objects. This op takes in the upstream gradient w.r.t. + /// non-empty values of the sum, and outputs the gradients w.r.t. the non-empty + /// values of A and B. + /// + /// - Parameters: + /// - backprop_val_grad: 1-D with shape `[nnz(sum)]`. The gradient with respect to + /// the non-empty values of the sum. + /// - a_indices: 2-D. The `indices` of the `SparseTensor` A, size `[nnz(A), ndims]`. + /// - b_indices: 2-D. The `indices` of the `SparseTensor` B, size `[nnz(B), ndims]`. + /// - sum_indices: 2-D. The `indices` of the sum `SparseTensor`, size + /// `[nnz(sum), ndims]`. + /// + /// - Outputs: + /// - a_val_grad: 1-D with shape `[nnz(A)]`. The gradient with respect to the + /// non-empty values of A. + /// - b_val_grad: 1-D with shape `[nnz(B)]`. The gradient with respect to the + /// non-empty values of B. + @inlinable @inline(__always) + public static func sparseAddGrad( + backpropValGrad: Tensor, + aIndices: Tensor, + bIndices: Tensor, + sumIndices: Tensor + ) -> (aValGrad: Tensor, bValGrad: Tensor) { + _RawTFEager.sparseAddGrad( + backpropValGrad: backpropValGrad, aIndices: aIndices, bIndices: bIndices, + sumIndices: sumIndices) + } + + /// Concatenates a list of `SparseTensor` along the specified dimension. + /// + /// Concatenation is with respect to the dense versions of these sparse tensors. + /// It is assumed that each input is a `SparseTensor` whose elements are ordered + /// along increasing dimension number. 
+ /// + /// All inputs' shapes must match, except for the concat dimension. The + /// `indices`, `values`, and `shapes` lists must have the same length. + /// + /// The output shape is identical to the inputs', except along the concat + /// dimension, where it is the sum of the inputs' sizes along that dimension. + /// + /// The output elements will be resorted to preserve the sort order along + /// increasing dimension number. + /// + /// This op runs in `O(M log M)` time, where `M` is the total number of non-empty + /// values across all inputs. This is due to the need for an internal sort in + /// order to concatenate efficiently across an arbitrary dimension. + /// + /// For example, if `concat_dim = 1` and the inputs are + /// + /// sp_inputs[0]: shape = [2, 3] + /// [0, 2]: "a" + /// [1, 0]: "b" + /// [1, 1]: "c" + /// + /// sp_inputs[1]: shape = [2, 4] + /// [0, 1]: "d" + /// [0, 2]: "e" + /// + /// then the output will be + /// + /// shape = [2, 7] + /// [0, 2]: "a" + /// [0, 4]: "d" + /// [0, 5]: "e" + /// [1, 0]: "b" + /// [1, 1]: "c" + /// + /// Graphically this is equivalent to doing + /// + /// [ a] concat [ d e ] = [ a d e ] + /// [b c ] [ ] [b c ] + /// + /// - Parameters: + /// - indices: 2-D. Indices of each input `SparseTensor`. + /// - values: 1-D. Non-empty values of each `SparseTensor`. + /// - shapes: 1-D. Shapes of each `SparseTensor`. + /// + /// - Attr concat_dim: Dimension to concatenate along. Must be in range [-rank, rank), + /// where rank is the number of dimensions in each input `SparseTensor`. + /// + /// - Outputs: + /// - output_indices: 2-D. Indices of the concatenated `SparseTensor`. + /// - output_values: 1-D. Non-empty values of the concatenated `SparseTensor`. + /// - output_shape: 1-D. Shape of the concatenated `SparseTensor`. + @inlinable @inline(__always) + public static func sparseConcat( + indices: [Tensor], + _ values: [Tensor], + shapes: [Tensor], + concatDim: Int64 + ) -> (outputIndices: Tensor, outputValues: Tensor, outputShape: Tensor) { + _RawTFEager.sparseConcat(indices: indices, values, shapes: shapes, concatDim: concatDim) + } + + /// Generates sparse cross from a list of sparse and dense tensors. + /// + /// The op takes two lists, one of 2D `SparseTensor` and one of 2D `Tensor`, each + /// representing features of one feature column. It outputs a 2D `SparseTensor` with + /// the batchwise crosses of these features. + /// + /// For example, if the inputs are + /// + /// inputs[0]: SparseTensor with shape = [2, 2] + /// [0, 0]: "a" + /// [1, 0]: "b" + /// [1, 1]: "c" + /// + /// inputs[1]: SparseTensor with shape = [2, 1] + /// [0, 0]: "d" + /// [1, 0]: "e" + /// + /// inputs[2]: Tensor [["f"], ["g"]] + /// + /// then the output will be + /// + /// shape = [2, 2] + /// [0, 0]: "a_X_d_X_f" + /// [1, 0]: "b_X_e_X_g" + /// [1, 1]: "c_X_e_X_g" + /// + /// if hashed_output=true then the output will be + /// + /// shape = [2, 2] + /// [0, 0]: FingerprintCat64( + /// Fingerprint64("f"), FingerprintCat64( + /// Fingerprint64("d"), Fingerprint64("a"))) + /// [1, 0]: FingerprintCat64( + /// Fingerprint64("g"), FingerprintCat64( + /// Fingerprint64("e"), Fingerprint64("b"))) + /// [1, 1]: FingerprintCat64( + /// Fingerprint64("g"), FingerprintCat64( + /// Fingerprint64("e"), Fingerprint64("c"))) + /// + /// - Parameters: + /// - indices: 2-D. Indices of each input `SparseTensor`. + /// - values: 1-D. values of each `SparseTensor`. + /// - shapes: 1-D. Shapes of each `SparseTensor`. + /// - dense_inputs: 2-D. Columns represented by dense `Tensor`. 
+  ///
+  /// - Attrs:
+  ///   - hashed_output: If true, returns the hash of the cross instead of the string.
+  ///     This will allow us to avoid string manipulations.
+  ///   - num_buckets: It is used if hashed_output is true.
+  ///     output = hashed_value%num_buckets if num_buckets > 0 else hashed_value.
+  ///   - hash_key: Specify the hash_key that will be used by the `FingerprintCat64`
+  ///     function to combine the crosses fingerprints.
+  ///
+  /// - Outputs:
+  ///   - output_indices: 2-D. Indices of the concatenated `SparseTensor`.
+  ///   - output_values: 1-D. Non-empty values of the concatenated or hashed
+  ///     `SparseTensor`.
+  ///   - output_shape: 1-D. Shape of the concatenated `SparseTensor`.
+  @inlinable @inline(__always)
+  public static func sparseCross<
+    SparseTypes: TensorArrayProtocol,
+    DenseTypes: TensorArrayProtocol,
+    OutType: TensorFlowIndex
+  >(
+    indices: [Tensor<Int64>],
+    _ values: SparseTypes,
+    shapes: [Tensor<Int64>],
+    denseInputs: DenseTypes,
+    hashedOutput: Bool,
+    numBuckets: Int64,
+    hashKey: Int64,
+    internalType: TensorDataType
+  ) -> (outputIndices: Tensor<Int64>, outputValues: Tensor<OutType>, outputShape: Tensor<Int64>) {
+    _RawTFEager.sparseCross(
+      indices: indices, values, shapes: shapes, denseInputs: denseInputs,
+      hashedOutput: hashedOutput, numBuckets: numBuckets, hashKey: hashKey,
+      internalType: internalType)
+  }
+
+  /// Generates sparse cross from a list of sparse and dense tensors.
+  ///
+  /// The op takes two lists, one of 2D `SparseTensor` and one of 2D `Tensor`, each
+  /// representing features of one feature column. It outputs a 2D `SparseTensor` with
+  /// the batchwise crosses of these features.
+  ///
+  /// For example, if the inputs are
+  ///
+  ///     inputs[0]: SparseTensor with shape = [2, 2]
+  ///     [0, 0]: "a"
+  ///     [1, 0]: "b"
+  ///     [1, 1]: "c"
+  ///
+  ///     inputs[1]: SparseTensor with shape = [2, 1]
+  ///     [0, 0]: "d"
+  ///     [1, 0]: "e"
+  ///
+  ///     inputs[2]: Tensor [["f"], ["g"]]
+  ///
+  /// then the output will be
+  ///
+  ///     shape = [2, 2]
+  ///     [0, 0]: "a_X_d_X_f"
+  ///     [1, 0]: "b_X_e_X_g"
+  ///     [1, 1]: "c_X_e_X_g"
+  ///
+  /// if hashed_output=true then the output will be
+  ///
+  ///     shape = [2, 2]
+  ///     [0, 0]: FingerprintCat64(
+  ///                 Fingerprint64("f"), FingerprintCat64(
+  ///                     Fingerprint64("d"), Fingerprint64("a")))
+  ///     [1, 0]: FingerprintCat64(
+  ///                 Fingerprint64("g"), FingerprintCat64(
+  ///                     Fingerprint64("e"), Fingerprint64("b")))
+  ///     [1, 1]: FingerprintCat64(
+  ///                 Fingerprint64("g"), FingerprintCat64(
+  ///                     Fingerprint64("e"), Fingerprint64("c")))
+  ///
+  /// - Parameters:
+  ///   - indices: 2-D. Indices of each input `SparseTensor`.
+  ///   - values: 1-D. values of each `SparseTensor`.
+  ///   - shapes: 1-D. Shapes of each `SparseTensor`.
+  ///   - dense_inputs: 2-D. Columns represented by dense `Tensor`.
+  ///
+  /// - Attrs:
+  ///   - hashed_output: If true, returns the hash of the cross instead of the string.
+  ///     This will allow us to avoid string manipulations.
+  ///   - num_buckets: It is used if hashed_output is true.
+  ///     output = hashed_value%num_buckets if num_buckets > 0 else hashed_value.
+  ///   - hash_key: Specify the hash_key that will be used by the `FingerprintCat64`
+  ///     function to combine the crosses fingerprints.
+  ///
+  /// - Outputs:
+  ///   - output_indices: 2-D. Indices of the concatenated `SparseTensor`.
+  ///   - output_values: 1-D. Non-empty values of the concatenated or hashed
+  ///     `SparseTensor`.
+  ///   - output_shape: 1-D. Shape of the concatenated `SparseTensor`.
+ @inlinable @inline(__always) + public static func sparseCross< + SparseTypes: TensorArrayProtocol, + DenseTypes: TensorArrayProtocol + >( + indices: [Tensor], + _ values: SparseTypes, + shapes: [Tensor], + denseInputs: DenseTypes, + hashedOutput: Bool, + numBuckets: Int64, + hashKey: Int64, + internalType: TensorDataType + ) -> (outputIndices: Tensor, outputValues: StringTensor, outputShape: Tensor) { + _RawTFEager.sparseCross( + indices: indices, values, shapes: shapes, denseInputs: denseInputs, + hashedOutput: hashedOutput, numBuckets: numBuckets, hashKey: hashKey, + internalType: internalType) + } + + /// Adds up a SparseTensor and a dense Tensor, using these special rules: + /// + /// (1) Broadcasts the dense side to have the same shape as the sparse side, if + /// eligible; + /// (2) Then, only the dense values pointed to by the indices of the SparseTensor + /// participate in the cwise addition. + /// + /// By these rules, the result is a logical SparseTensor with exactly the same + /// indices and shape, but possibly with different non-zero values. The output of + /// this Op is the resultant non-zero values. + /// + /// - Parameters: + /// - sp_indices: 2-D. `N x R` matrix with the indices of non-empty values in a + /// SparseTensor, possibly not in canonical ordering. + /// - sp_values: 1-D. `N` non-empty values corresponding to `sp_indices`. + /// - sp_shape: 1-D. Shape of the input SparseTensor. + /// - dense: `R`-D. The dense Tensor operand. + /// + /// - Output output: 1-D. The `N` values that are operated on. + @inlinable @inline(__always) + public static func sparseDenseCwiseAdd( + spIndices: Tensor, + spValues: Tensor, + spShape: Tensor, + dense: Tensor + ) -> Tensor { + switch commonBackend( + commonBackend( + commonBackend(spIndices.handle.backend, spValues.handle.backend), spShape.handle.backend), + dense.handle.backend) + { + case .XLA: + let output_device = dense.device + let spIndices = Tensor(copying: spIndices, to: .defaultTFEager) + let spValues = Tensor(copying: spValues, to: .defaultTFEager) + let spShape = Tensor(copying: spShape, to: .defaultTFEager) + let dense = Tensor(copying: dense, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.sparseDenseCwiseAdd( + spIndices: spIndices, spValues: spValues, spShape: spShape, dense: dense), + to: output_device) + case .TF_EAGER: + return _RawTFEager.sparseDenseCwiseAdd( + spIndices: spIndices, spValues: spValues, spShape: spShape, dense: dense) + } + + } + + /// Component-wise divides a SparseTensor by a dense Tensor. + /// + /// *Limitation*: this Op only broadcasts the dense side to the sparse side, but not + /// the other direction. + /// + /// - Parameters: + /// - sp_indices: 2-D. `N x R` matrix with the indices of non-empty values in a + /// SparseTensor, possibly not in canonical ordering. + /// - sp_values: 1-D. `N` non-empty values corresponding to `sp_indices`. + /// - sp_shape: 1-D. Shape of the input SparseTensor. + /// - dense: `R`-D. The dense Tensor operand. + /// + /// - Output output: 1-D. The `N` values that are operated on. 
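+  ///
+  /// As an illustrative sketch (not part of the upstream op documentation): for a
+  /// 2x2 sparse operand with two non-empty values, the op returns only those two
+  /// divided values, never a dense tensor. The literals below are assumptions
+  /// chosen for the example.
+  ///
+  /// ```swift
+  /// // Sparse operand: [[6, 0], [0, 8]] in COO form.
+  /// let spIndices: Tensor<Int64> = [[0, 0], [1, 1]]
+  /// let spValues: Tensor<Float> = [6, 8]
+  /// let spShape: Tensor<Int64> = [2, 2]
+  /// // Dense divisor, broadcast against the sparse side.
+  /// let dense: Tensor<Float> = [[2, 2], [4, 4]]
+  /// let quotients = _Raw.sparseDenseCwiseDiv(
+  ///   spIndices: spIndices, spValues: spValues, spShape: spShape, dense: dense)
+  /// // quotients == [3.0, 2.0]  -- one value per non-empty sparse entry.
+  /// ```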
+ @inlinable @inline(__always) + public static func sparseDenseCwiseDiv( + spIndices: Tensor, + spValues: Tensor, + spShape: Tensor, + dense: Tensor + ) -> Tensor { + switch commonBackend( + commonBackend( + commonBackend(spIndices.handle.backend, spValues.handle.backend), spShape.handle.backend), + dense.handle.backend) + { + case .XLA: + let output_device = dense.device + let spIndices = Tensor(copying: spIndices, to: .defaultTFEager) + let spValues = Tensor(copying: spValues, to: .defaultTFEager) + let spShape = Tensor(copying: spShape, to: .defaultTFEager) + let dense = Tensor(copying: dense, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.sparseDenseCwiseDiv( + spIndices: spIndices, spValues: spValues, spShape: spShape, dense: dense), + to: output_device) + case .TF_EAGER: + return _RawTFEager.sparseDenseCwiseDiv( + spIndices: spIndices, spValues: spValues, spShape: spShape, dense: dense) + } + + } + + /// Component-wise multiplies a SparseTensor by a dense Tensor. + /// + /// The output locations corresponding to the implicitly zero elements in the sparse + /// tensor will be zero (i.e., will not take up storage space), regardless of the + /// contents of the dense tensor (even if it's +/-INF and that INF*0 == NaN). + /// + /// *Limitation*: this Op only broadcasts the dense side to the sparse side, but not + /// the other direction. + /// + /// - Parameters: + /// - sp_indices: 2-D. `N x R` matrix with the indices of non-empty values in a + /// SparseTensor, possibly not in canonical ordering. + /// - sp_values: 1-D. `N` non-empty values corresponding to `sp_indices`. + /// - sp_shape: 1-D. Shape of the input SparseTensor. + /// - dense: `R`-D. The dense Tensor operand. + /// + /// - Output output: 1-D. The `N` values that are operated on. + @inlinable @inline(__always) + public static func sparseDenseCwiseMul( + spIndices: Tensor, + spValues: Tensor, + spShape: Tensor, + dense: Tensor + ) -> Tensor { + switch commonBackend( + commonBackend( + commonBackend(spIndices.handle.backend, spValues.handle.backend), spShape.handle.backend), + dense.handle.backend) + { + case .XLA: + let output_device = dense.device + let spIndices = Tensor(copying: spIndices, to: .defaultTFEager) + let spValues = Tensor(copying: spValues, to: .defaultTFEager) + let spShape = Tensor(copying: spShape, to: .defaultTFEager) + let dense = Tensor(copying: dense, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.sparseDenseCwiseMul( + spIndices: spIndices, spValues: spValues, spShape: spShape, dense: dense), + to: output_device) + case .TF_EAGER: + return _RawTFEager.sparseDenseCwiseMul( + spIndices: spIndices, spValues: spValues, spShape: spShape, dense: dense) + } + + } + + /// Fills empty rows in the input 2-D `SparseTensor` with a default value. + /// + /// The input `SparseTensor` is represented via the tuple of inputs + /// (`indices`, `values`, `dense_shape`). The output `SparseTensor` has the + /// same `dense_shape` but with indices `output_indices` and values + /// `output_values`. + /// + /// This op inserts a single entry for every row that doesn't have any values. + /// The index is created as `[row, 0, ..., 0]` and the inserted value + /// is `default_value`. 
+ /// + /// For example, suppose `sp_input` has shape `[5, 6]` and non-empty values: + /// + /// [0, 1]: a + /// [0, 3]: b + /// [2, 0]: c + /// [3, 1]: d + /// + /// Rows 1 and 4 are empty, so the output will be of shape `[5, 6]` with values: + /// + /// [0, 1]: a + /// [0, 3]: b + /// [1, 0]: default_value + /// [2, 0]: c + /// [3, 1]: d + /// [4, 0]: default_value + /// + /// The output `SparseTensor` will be in row-major order and will have the + /// same shape as the input. + /// + /// This op also returns an indicator vector shaped `[dense_shape[0]]` such that + /// + /// empty_row_indicator[i] = True iff row i was an empty row. + /// + /// And a reverse index map vector shaped `[indices.shape[0]]` that is used during + /// backpropagation, + /// + /// reverse_index_map[j] = out_j s.t. indices[j, :] == output_indices[out_j, :] + /// + /// - Parameters: + /// - indices: 2-D. the indices of the sparse tensor. + /// - values: 1-D. the values of the sparse tensor. + /// - dense_shape: 1-D. the shape of the sparse tensor. + /// - default_value: 0-D. default value to insert into location `[row, 0, ..., 0]` + /// for rows missing from the input sparse tensor. + /// output indices: 2-D. the indices of the filled sparse tensor. + /// + /// - Outputs: + /// - output_values: 1-D. the values of the filled sparse tensor. + /// - empty_row_indicator: 1-D. whether the dense row was missing in the + /// input sparse tensor. + /// - reverse_index_map: 1-D. a map from the input indices to the output indices. + @inlinable @inline(__always) + public static func sparseFillEmptyRows( + indices: Tensor, + _ values: Tensor, + denseShape: Tensor, + defaultValue: Tensor + ) -> ( + outputIndices: Tensor, outputValues: Tensor, emptyRowIndicator: Tensor, + reverseIndexMap: Tensor + ) { + _RawTFEager.sparseFillEmptyRows( + indices: indices, values, denseShape: denseShape, defaultValue: defaultValue) + } + + /// The gradient of SparseFillEmptyRows. + /// + /// Takes vectors reverse_index_map, shaped `[N]`, and grad_values, + /// shaped `[N_full]`, where `N_full >= N` and copies data into either + /// `d_values` or `d_default_value`. Here `d_values` is shaped `[N]` and + /// `d_default_value` is a scalar. + /// + /// d_values[j] = grad_values[reverse_index_map[j]] + /// d_default_value = sum_{k : 0 .. N_full - 1} ( + /// grad_values[k] * 1{k not in reverse_index_map}) + /// + /// - Parameters: + /// - reverse_index_map: 1-D. The reverse index map from SparseFillEmptyRows. + /// - grad_values: 1-D. The gradients from backprop. + /// + /// - Outputs: + /// - d_values: 1-D. The backprop into values. + /// - d_default_value: 0-D. The backprop into default_value. + @inlinable @inline(__always) + public static func sparseFillEmptyRowsGrad( + reverseIndexMap: Tensor, + gradValues: Tensor + ) -> (dValues: Tensor, dDefaultValue: Tensor) { + _RawTFEager.sparseFillEmptyRowsGrad(reverseIndexMap: reverseIndexMap, gradValues: gradValues) + } + + /// Multiply matrix "a" by matrix "b". + /// + /// The inputs must be two-dimensional matrices and the inner dimension of "a" must + /// match the outer dimension of "b". Both "a" and "b" must be `Tensor`s not + /// `SparseTensor`s. This op is optimized for the case where at least one of "a" or + /// "b" is sparse, in the sense that they have a large proportion of zero values. + /// The breakeven for using this versus a dense matrix multiply on one platform was + /// 30% zero values in the sparse matrix. 
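+  ///
+  /// As an illustrative sketch (not part of the upstream op documentation), with
+  /// assumed toy operands:
+  ///
+  /// ```swift
+  /// let a: Tensor<Float> = [[1, 0, 0], [0, 0, 2]]   // mostly zero
+  /// let b: Tensor<Float> = [[1, 2], [3, 4], [5, 6]]
+  /// let c = _Raw.sparseMatMul(a, b, aIsSparse: true)
+  /// // c == [[1.0, 2.0], [10.0, 12.0]], the same result as a regular matmul.
+  /// ```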
+ /// + /// The gradient computation of this operation will only take advantage of sparsity + /// in the input gradient when that gradient comes from a Relu. + @inlinable @inline(__always) + public static func sparseMatMul< + Ta: FloatingPoint & TensorFlowScalar, + Tb: FloatingPoint & TensorFlowScalar + >( + _ a: Tensor, + _ b: Tensor, + transposeA: Bool = false, + transposeB: Bool = false, + aIsSparse: Bool = false, + bIsSparse: Bool = false + ) -> Tensor { + switch commonBackend(a.handle.backend, b.handle.backend) { + case .XLA: + let output_device = b.device + let a = Tensor(copying: a, to: .defaultTFEager) + let b = Tensor(copying: b, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.sparseMatMul( + a, b, transposeA: transposeA, transposeB: transposeB, aIsSparse: aIsSparse, + bIsSparse: bIsSparse), to: output_device) + case .TF_EAGER: + return _RawTFEager.sparseMatMul( + a, b, transposeA: transposeA, transposeB: transposeB, aIsSparse: aIsSparse, + bIsSparse: bIsSparse) + } + + } + + /// Sparse addition of two CSR matrices, C = alpha * A + beta * B. + /// + /// The gradients of SparseMatrixAdd outputs with respect to alpha and beta are not + /// currently defined (TensorFlow will return zeros for these entries). + /// + /// - Parameters: + /// - a: A CSRSparseMatrix. + /// - b: A CSRSparseMatrix. + /// - alpha: A constant scalar. + /// - beta: A constant scalar. + /// + /// - Output c: A CSRSparseMatrix. + @inlinable @inline(__always) + public static func sparseMatrixAdd( + _ a: VariantHandle, + _ b: VariantHandle, + alpha: Tensor, + beta: Tensor + ) -> VariantHandle { + _RawTFEager.sparseMatrixAdd(a, b, alpha: alpha, beta: beta) + } + + /// Matrix-multiplies a sparse matrix with a dense matrix. + /// + /// Returns a dense matrix. + /// For inputs A and B, where A is CSR and B is dense; this op returns a dense C; + /// + /// If transpose_output is false, returns: + /// ``` + /// C = A . B + /// ``` + /// + /// If transpose_output is `true`, returns: + /// ``` + /// C = transpose(A . B) = transpose(B) . transpose(A) + /// ``` + /// where the transposition is performed along the two innermost (matrix) + /// dimensions. + /// + /// If conjugate_output is `true`, returns: + /// ``` + /// C = conjugate(A . B) = conjugate(A) . conjugate(B) + /// ``` + /// + /// If both conjugate_output and transpose_output are `true`, returns: + /// ``` + /// C = conjugate(transpose(A . B)) = conjugate(transpose(B)) . + /// conjugate(transpose(A)) + /// ``` + /// + /// - Parameters: + /// - a: A CSRSparseMatrix. + /// - b: A dense tensor. + /// + /// - Attrs: + /// - transpose_a: Indicates whether `a` should be transposed. + /// - transpose_b: Indicates whether `b` should be transposed. + /// - adjoint_a: Indicates whether `a` should be conjugate-transposed. + /// - adjoint_b: Indicates whether `b` should be conjugate-transposed. + /// - transpose_output: Transposes the product of `a` and `b`. + /// - conjugate_output: Conjugates the product of `a` and `b`. + /// + /// - Output output: A dense output tensor. 
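+  ///
+  /// As an illustrative sketch (not part of the upstream op documentation),
+  /// assuming `csr` is a CSRSparseMatrix `VariantHandle` with dense shape
+  /// `[4, 5]` obtained from an earlier conversion op:
+  ///
+  /// ```swift
+  /// let b = Tensor<Float>(randomNormal: [5, 3])
+  /// let c = _Raw.sparseMatrixMatMul(csr, b)
+  /// // c is a dense Tensor<Float> of shape [4, 3].
+  /// ```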
+  @inlinable @inline(__always)
+  public static func sparseMatrixMatMul<T: TensorFlowNumeric>(
+    _ a: VariantHandle,
+    _ b: Tensor<T>,
+    transposeA: Bool = false,
+    transposeB: Bool = false,
+    adjointA: Bool = false,
+    adjointB: Bool = false,
+    transposeOutput: Bool = false,
+    conjugateOutput: Bool = false
+  ) -> Tensor<T> {
+    switch b.handle.backend {
+    case .XLA:
+      let output_device = b.device
+      let b = Tensor(copying: b, to: .defaultTFEager)
+      return Tensor(
+        copying: _RawTFEager.sparseMatrixMatMul(
+          a, b, transposeA: transposeA, transposeB: transposeB, adjointA: adjointA,
+          adjointB: adjointB, transposeOutput: transposeOutput, conjugateOutput: conjugateOutput),
+        to: output_device)
+    case .TF_EAGER:
+      return _RawTFEager.sparseMatrixMatMul(
+        a, b, transposeA: transposeA, transposeB: transposeB, adjointA: adjointA,
+        adjointB: adjointB, transposeOutput: transposeOutput, conjugateOutput: conjugateOutput)
+    }
+
+  }
+
+  /// Element-wise multiplication of a sparse matrix with a dense tensor.
+  ///
+  /// Returns a sparse matrix.
+  ///
+  /// The dense tensor `b` may be a scalar; otherwise `a` must be a rank-3
+  /// `SparseMatrix`, `b` must be shaped `[batch_size, 1, 1]`, and the
+  /// multiply operation broadcasts.
+  ///
+  /// **NOTE** even if `b` is zero, the sparsity structure of the output does not
+  /// change.
+  ///
+  /// - Parameters:
+  ///   - a: A CSRSparseMatrix.
+  ///   - b: A dense tensor.
+  ///
+  /// - Output output: A CSRSparseMatrix.
+  @inlinable @inline(__always)
+  public static func sparseMatrixMul<T: TensorFlowScalar>(
+    _ a: VariantHandle,
+    _ b: Tensor<T>
+  ) -> VariantHandle {
+    _RawTFEager.sparseMatrixMul(a, b)
+  }
+
+  /// Returns the number of nonzeroes of `sparse_matrix`.
+  ///
+  /// - Parameter sparse_matrix: A CSRSparseMatrix.
+  ///
+  /// - Output nnz: The number of nonzeroes of `sparse_matrix`.
+  @inlinable @inline(__always)
+  public static func sparseMatrixNNZ(
+    sparseMatrix: VariantHandle
+  ) -> Tensor<Int32> {
+    _RawTFEager.sparseMatrixNNZ(sparseMatrix: sparseMatrix)
+  }
+
+  /// Computes the Approximate Minimum Degree (AMD) ordering of `input`.
+  ///
+  /// Computes the Approximate Minimum Degree (AMD) ordering for a sparse matrix.
+  ///
+  /// The returned permutation may be used to permute the rows and columns of the
+  /// given sparse matrix. This typically reduces the zero fill-in of the permuted
+  /// sparse matrix's sparse Cholesky (or other) decomposition compared to
+  /// decomposing the original matrix.
+  ///
+  /// The input sparse matrix may have rank 2 or rank 3. The output Tensor,
+  /// representing the permutation, would then have rank 1 or 2 respectively, with
+  /// the same batch shape as the input.
+  ///
+  /// Each component of the input sparse matrix must represent a square symmetric
+  /// matrix; only the lower triangular part of the matrix is read. The values of the
+  /// sparse matrix do not affect the returned permutation, only the sparsity
+  /// pattern of the sparse matrix is used. Hence, a single AMD ordering may be
+  /// reused for the Cholesky decompositions of sparse matrices with the same sparsity
+  /// pattern but with possibly different values.
+  ///
+  /// Each batch component of the output permutation represents a permutation of `N`
+  /// elements, where the input sparse matrix components each have `N` rows. That is,
+  /// the component contains each of the integers `{0, .. N-1}` exactly once. The
+  /// `i`th element represents the row index that the `i`th row maps to.
+  ///
+  /// Usage example:
+  ///
+  /// ```python
+  /// from tensorflow.python.ops.linalg.sparse import sparse_csr_matrix_ops
+  ///
+  /// a_indices = np.array([[0, 0], [1, 1], [2, 1], [2, 2], [3, 3]])
+  /// a_values = np.array([1.0, 2.0, 1.0, 3.0, 4.0], np.float32)
+  /// a_dense_shape = [4, 4]
+  ///
+  /// with tf.Session() as sess:
+  ///   # Define (COO format) SparseTensor over Numpy array.
+  ///   a_st = tf.SparseTensor(a_indices, a_values, a_dense_shape)
+  ///
+  ///   # Convert SparseTensors to CSR SparseMatrix.
+  ///   a_sm = sparse_csr_matrix_ops.sparse_tensor_to_csr_sparse_matrix(
+  ///       a_st.indices, a_st.values, a_st.dense_shape)
+  ///
+  ///   # Obtain the AMD Ordering for the CSR SparseMatrix.
+  ///   ordering_amd = sparse_csr_matrix_ops.sparse_matrix_ordering_amd(a_sm)
+  ///
+  ///   ordering_amd_value = sess.run(ordering_amd)
+  /// ```
+  ///
+  /// `ordering_amd_value` stores the AMD ordering: `[1 2 3 0]`.
+  ///
+  /// - Parameter input: A `CSRSparseMatrix`.
+  ///
+  /// - Output output: The Approximate Minimum Degree (AMD) ordering of `input`.
+  @inlinable @inline(__always)
+  public static func sparseMatrixOrderingAMD(
+    _ input: VariantHandle
+  ) -> Tensor<Int32> {
+    _RawTFEager.sparseMatrixOrderingAMD(input)
+  }
+
+  /// Calculates the softmax of a CSRSparseMatrix.
+  ///
+  /// Calculate the softmax of the innermost dimensions of a SparseMatrix.
+  ///
+  /// Missing values are treated as `-inf` (i.e., logits of zero probability); and
+  /// the output has the same sparsity structure as the input (though missing values
+  /// in the output may now be treated as having probability zero).
+  ///
+  /// - Parameter logits: A CSRSparseMatrix.
+  ///
+  /// - Output softmax: A CSRSparseMatrix.
+  @inlinable @inline(__always)
+  public static func sparseMatrixSoftmax(
+    logits: VariantHandle,
+    type: TensorDataType
+  ) -> VariantHandle {
+    _RawTFEager.sparseMatrixSoftmax(logits: logits, type: type)
+  }
+
+  /// Calculates the gradient of the SparseMatrixSoftmax op.
+  ///
+  /// - Parameters:
+  ///   - softmax: A CSRSparseMatrix.
+  ///   - grad_softmax: The gradient of `softmax`.
+  ///
+  /// - Output gradient: The output gradient.
+  @inlinable @inline(__always)
+  public static func sparseMatrixSoftmaxGrad(
+    softmax: VariantHandle,
+    gradSoftmax: VariantHandle,
+    type: TensorDataType
+  ) -> VariantHandle {
+    _RawTFEager.sparseMatrixSoftmaxGrad(softmax: softmax, gradSoftmax: gradSoftmax, type: type)
+  }
+
+  /// Computes the sparse Cholesky decomposition of `input`.
+  ///
+  /// Computes the Sparse Cholesky decomposition of a sparse matrix, with the given
+  /// fill-in reducing permutation.
+  ///
+  /// The input sparse matrix and the fill-in reducing permutation `permutation` must
+  /// have compatible shapes. If the sparse matrix has rank 3, with the batch
+  /// dimension `B`, then the `permutation` must be of rank 2, with the same batch
+  /// dimension `B`. There is no support for broadcasting.
+  ///
+  /// Furthermore, each component vector of `permutation` must be of length `N`,
+  /// containing each of the integers {0, 1, ..., N - 1} exactly once, where `N` is
+  /// the number of rows of each component of the sparse matrix.
+  ///
+  /// Each component of the input sparse matrix must represent a symmetric positive
+  /// definite (SPD) matrix, although only the lower triangular part of the matrix is
+  /// read. If any individual component is not SPD, then an InvalidArgument error is
+  /// thrown.
+  ///
+  /// The returned sparse matrix has the same dense shape as the input sparse matrix.
+  /// For each component `A` of the input sparse matrix, the corresponding output
+  /// sparse matrix represents `L`, the lower triangular Cholesky factor satisfying
+  /// the following identity:
+  ///
+  /// ```
+  ///   A = L * Lt
+  /// ```
+  ///
+  /// where Lt denotes the transpose of L (or its conjugate transpose, if `type` is
+  /// `complex64` or `complex128`).
+  ///
+  /// The `type` parameter denotes the type of the matrix elements. The supported
+  /// types are: `float32`, `float64`, `complex64` and `complex128`.
+  ///
+  /// Usage example:
+  ///
+  /// ```python
+  /// from tensorflow.python.ops.linalg.sparse import sparse_csr_matrix_ops
+  ///
+  /// a_indices = np.array([[0, 0], [1, 1], [2, 1], [2, 2], [3, 3]])
+  /// a_values = np.array([1.0, 2.0, 1.0, 3.0, 4.0], np.float32)
+  /// a_dense_shape = [4, 4]
+  ///
+  /// with tf.Session() as sess:
+  ///   # Define (COO format) SparseTensor over Numpy array.
+  ///   a_st = tf.SparseTensor(a_indices, a_values, a_dense_shape)
+  ///
+  ///   # Convert SparseTensors to CSR SparseMatrix.
+  ///   a_sm = sparse_csr_matrix_ops.sparse_tensor_to_csr_sparse_matrix(
+  ///       a_st.indices, a_st.values, a_st.dense_shape)
+  ///
+  ///   # Obtain the Sparse Cholesky factor using AMD Ordering for reducing zero
+  ///   # fill-in (number of structural non-zeros in the sparse Cholesky factor).
+  ///   ordering_amd = sparse_csr_matrix_ops.sparse_matrix_ordering_amd(a_sm)
+  ///   cholesky_sparse_matrices = (
+  ///       sparse_csr_matrix_ops.sparse_matrix_sparse_cholesky(
+  ///           a_sm, ordering_amd, type=tf.float32))
+  ///
+  ///   # Convert the CSRSparseMatrix Cholesky factor to a dense Tensor
+  ///   dense_cholesky = sparse_csr_matrix_ops.csr_sparse_matrix_to_dense(
+  ///       cholesky_sparse_matrices, tf.float32)
+  ///
+  ///   # Evaluate the dense Tensor value.
+  ///   dense_cholesky_value = sess.run(dense_cholesky)
+  /// ```
+  ///
+  /// `dense_cholesky_value` stores the dense Cholesky factor:
+  ///
+  /// ```
+  ///     [[  1.  0.    0.    0.]
+  ///      [  0.  1.41  0.    0.]
+  ///      [  0.  0.70  1.58  0.]
+  ///      [  0.  0.    0.    2.]]
+  /// ```
+  ///
+  /// - Parameters:
+  ///   - input: A `CSRSparseMatrix`.
+  ///   - permutation: A fill-in reducing permutation matrix.
+  ///
+  /// - Attr type: The type of `input`.
+  ///
+  /// - Output output: The sparse Cholesky decomposition of `input`.
+  @inlinable @inline(__always)
+  public static func sparseMatrixSparseCholesky(
+    _ input: VariantHandle,
+    permutation: Tensor<Int32>,
+    type: TensorDataType
+  ) -> VariantHandle {
+    _RawTFEager.sparseMatrixSparseCholesky(input, permutation: permutation, type: type)
+  }
+
+  /// Sparse-matrix-multiplies two CSR matrices `a` and `b`.
+  ///
+  /// Performs a matrix multiplication of a sparse matrix `a` with a sparse matrix
+  /// `b`; returns a sparse matrix `a * b`, unless either `a` or `b` is transposed or
+  /// adjointed.
+  ///
+  /// Each matrix may be transposed or adjointed (conjugated and transposed)
+  /// according to the Boolean parameters `transpose_a`, `adjoint_a`, `transpose_b`
+  /// and `adjoint_b`. At most one of `transpose_a` or `adjoint_a` may be True.
+  /// Similarly, at most one of `transpose_b` or `adjoint_b` may be True.
+  ///
+  /// The inputs must have compatible shapes. That is, the inner dimension of `a`
+  /// must be equal to the outer dimension of `b`. This requirement is adjusted
+  /// according to whether either `a` or `b` is transposed or adjointed.
+  ///
+  /// The `type` parameter denotes the type of the matrix elements. Both `a` and `b`
+  /// must have the same type. The supported types are: `float32`, `float64`,
+  /// `complex64` and `complex128`.
+  ///
+  /// Both `a` and `b` must have the same rank. Broadcasting is not supported. If they
+  /// have rank 3, each batch of 2D CSRSparseMatrices within `a` and `b` must have the
+  /// same dense shape.
+  ///
+  /// The sparse matrix product may have numeric (non-structural) zeros.
+  /// TODO(anudhyan): Consider adding a boolean attribute to control whether to prune
+  /// zeros.
+  ///
+  /// Usage example:
+  ///
+  /// ```python
+  /// from tensorflow.python.ops.linalg.sparse import sparse_csr_matrix_ops
+  ///
+  /// a_indices = np.array([[0, 0], [2, 3], [2, 4], [3, 0]])
+  /// a_values = np.array([1.0, 5.0, -1.0, -2.0], np.float32)
+  /// a_dense_shape = [4, 5]
+  ///
+  /// b_indices = np.array([[0, 0], [3, 0], [3, 1]])
+  /// b_values = np.array([2.0, 7.0, 8.0], np.float32)
+  /// b_dense_shape = [5, 3]
+  ///
+  /// with tf.Session() as sess:
+  ///   # Define (COO format) Sparse Tensors over Numpy arrays
+  ///   a_st = tf.SparseTensor(a_indices, a_values, a_dense_shape)
+  ///   b_st = tf.SparseTensor(b_indices, b_values, b_dense_shape)
+  ///
+  ///   # Convert SparseTensors to CSR SparseMatrix
+  ///   a_sm = sparse_csr_matrix_ops.sparse_tensor_to_csr_sparse_matrix(
+  ///       a_st.indices, a_st.values, a_st.dense_shape)
+  ///   b_sm = sparse_csr_matrix_ops.sparse_tensor_to_csr_sparse_matrix(
+  ///       b_st.indices, b_st.values, b_st.dense_shape)
+  ///
+  ///   # Compute the CSR SparseMatrix matrix multiplication
+  ///   c_sm = sparse_csr_matrix_ops.sparse_matrix_sparse_mat_mul(
+  ///       a=a_sm, b=b_sm, type=tf.float32)
+  ///
+  ///   # Convert the CSR SparseMatrix product to a dense Tensor
+  ///   c_sm_dense = sparse_csr_matrix_ops.csr_sparse_matrix_to_dense(
+  ///       c_sm, tf.float32)
+  ///   # Evaluate the dense Tensor value
+  ///   c_sm_dense_value = sess.run(c_sm_dense)
+  /// ```
+  ///
+  /// `c_sm_dense_value` stores the dense matrix product:
+  ///
+  /// ```
+  ///     [[  2.   0.  0.]
+  ///      [  0.   0.  0.]
+  ///      [ 35.  40.  0.]
+  ///      [ -4.   0.  0.]]
+  /// ```
+  ///
+  /// - Parameters:
+  ///   - a: A CSRSparseMatrix.
+  ///   - b: A CSRSparseMatrix with the same type and rank as `a`.
+  ///
+  /// - Attrs:
+  ///   - type: The type of both `a` and `b`.
+  ///   - transpose_a: Indicates whether `a` should be transposed before multiplication.
+  ///   - transpose_b: Indicates whether `b` should be transposed before multiplication.
+  ///   - adjoint_a: Indicates whether `a` should be conjugate-transposed.
+  ///   - adjoint_b: Indicates whether `b` should be conjugate-transposed.
+  ///
+  /// - Output c: A CSRSparseMatrix.
+  @inlinable @inline(__always)
+  public static func sparseMatrixSparseMatMul(
+    _ a: VariantHandle,
+    _ b: VariantHandle,
+    type: TensorDataType,
+    transposeA: Bool = false,
+    transposeB: Bool = false,
+    adjointA: Bool = false,
+    adjointB: Bool = false
+  ) -> VariantHandle {
+    _RawTFEager.sparseMatrixSparseMatMul(
+      a, b, type: type, transposeA: transposeA, transposeB: transposeB, adjointA: adjointA,
+      adjointB: adjointB)
+  }
+
+  /// Transposes the inner (matrix) dimensions of a CSRSparseMatrix.
+ /// + /// Transposes the inner (matrix) dimensions of a SparseMatrix and optionally + /// conjugates its values. + /// + /// - Parameter input: A CSRSparseMatrix. + /// + /// - Attr conjugate: Indicates whether `input` should be conjugated. + /// + /// - Output output: A CSRSparseMatrix. + @inlinable @inline(__always) + public static func sparseMatrixTranspose( + _ input: VariantHandle, + conjugate: Bool = false, + type: TensorDataType + ) -> VariantHandle { + _RawTFEager.sparseMatrixTranspose(input, conjugate: conjugate, type: type) + } + + /// Creates an all-zeros CSRSparseMatrix with shape `dense_shape`. + /// + /// - Parameter dense_shape: The desired matrix shape. + /// + /// - Output sparse_matrix: An empty CSR matrix with shape `dense_shape`. + @inlinable @inline(__always) + public static func sparseMatrixZeros( + denseShape: Tensor, + type: TensorDataType + ) -> VariantHandle { + _RawTFEager.sparseMatrixZeros(denseShape: denseShape, type: type) + } + + /// Computes the max of elements across dimensions of a SparseTensor. + /// + /// This Op takes a SparseTensor and is the sparse counterpart to + /// `tf.reduce_max()`. In particular, this Op also returns a dense `Tensor` + /// instead of a sparse one. + /// + /// Reduces `sp_input` along the dimensions given in `reduction_axes`. Unless + /// `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in + /// `reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained + /// with length 1. + /// + /// If `reduction_axes` has no entries, all dimensions are reduced, and a tensor + /// with a single element is returned. Additionally, the axes can be negative, + /// which are interpreted according to the indexing rules in Python. + /// + /// - Parameters: + /// - input_indices: 2-D. `N x R` matrix with the indices of non-empty values in a + /// SparseTensor, possibly not in canonical ordering. + /// - input_values: 1-D. `N` non-empty values corresponding to `input_indices`. + /// - input_shape: 1-D. Shape of the input SparseTensor. + /// - reduction_axes: 1-D. Length-`K` vector containing the reduction axes. + /// + /// - Attr keep_dims: If true, retain reduced dimensions with length 1. + /// + /// - Output output: `R-K`-D. The reduced Tensor. + @inlinable @inline(__always) + public static func sparseReduceMax( + inputIndices: Tensor, + inputValues: Tensor, + inputShape: Tensor, + reductionAxes: Tensor, + keepDims: Bool = false + ) -> Tensor { + switch commonBackend( + commonBackend( + commonBackend(inputIndices.handle.backend, inputValues.handle.backend), + inputShape.handle.backend), reductionAxes.handle.backend) + { + case .XLA: + let output_device = reductionAxes.device + let inputIndices = Tensor(copying: inputIndices, to: .defaultTFEager) + let inputValues = Tensor(copying: inputValues, to: .defaultTFEager) + let inputShape = Tensor(copying: inputShape, to: .defaultTFEager) + let reductionAxes = Tensor(copying: reductionAxes, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.sparseReduceMax( + inputIndices: inputIndices, inputValues: inputValues, inputShape: inputShape, + reductionAxes: reductionAxes, keepDims: keepDims), to: output_device) + case .TF_EAGER: + return _RawTFEager.sparseReduceMax( + inputIndices: inputIndices, inputValues: inputValues, inputShape: inputShape, + reductionAxes: reductionAxes, keepDims: keepDims) + } + + } + + /// Computes the max of elements across dimensions of a SparseTensor. 
+ /// + /// This Op takes a SparseTensor and is the sparse counterpart to + /// `tf.reduce_max()`. In contrast to SparseReduceMax, this Op returns a + /// SparseTensor. + /// + /// Reduces `sp_input` along the dimensions given in `reduction_axes`. Unless + /// `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in + /// `reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained + /// with length 1. + /// + /// If `reduction_axes` has no entries, all dimensions are reduced, and a tensor + /// with a single element is returned. Additionally, the axes can be negative, + /// which are interpreted according to the indexing rules in Python. + /// + /// - Parameters: + /// - input_indices: 2-D. `N x R` matrix with the indices of non-empty values in a + /// SparseTensor, possibly not in canonical ordering. + /// - input_values: 1-D. `N` non-empty values corresponding to `input_indices`. + /// - input_shape: 1-D. Shape of the input SparseTensor. + /// - reduction_axes: 1-D. Length-`K` vector containing the reduction axes. + /// + /// - Attr keep_dims: If true, retain reduced dimensions with length 1. + @inlinable @inline(__always) + public static func sparseReduceMaxSparse( + inputIndices: Tensor, + inputValues: Tensor, + inputShape: Tensor, + reductionAxes: Tensor, + keepDims: Bool = false + ) -> (outputIndices: Tensor, outputValues: Tensor, outputShape: Tensor) { + _RawTFEager.sparseReduceMaxSparse( + inputIndices: inputIndices, inputValues: inputValues, inputShape: inputShape, + reductionAxes: reductionAxes, keepDims: keepDims) + } + + /// Computes the sum of elements across dimensions of a SparseTensor. + /// + /// This Op takes a SparseTensor and is the sparse counterpart to + /// `tf.reduce_sum()`. In particular, this Op also returns a dense `Tensor` + /// instead of a sparse one. + /// + /// Reduces `sp_input` along the dimensions given in `reduction_axes`. Unless + /// `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in + /// `reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained + /// with length 1. + /// + /// If `reduction_axes` has no entries, all dimensions are reduced, and a tensor + /// with a single element is returned. Additionally, the axes can be negative, + /// which are interpreted according to the indexing rules in Python. + /// + /// - Parameters: + /// - input_indices: 2-D. `N x R` matrix with the indices of non-empty values in a + /// SparseTensor, possibly not in canonical ordering. + /// - input_values: 1-D. `N` non-empty values corresponding to `input_indices`. + /// - input_shape: 1-D. Shape of the input SparseTensor. + /// - reduction_axes: 1-D. Length-`K` vector containing the reduction axes. + /// + /// - Attr keep_dims: If true, retain reduced dimensions with length 1. + /// + /// - Output output: `R-K`-D. The reduced Tensor. 
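+  ///
+  /// As an illustrative sketch (not part of the upstream op documentation), with
+  /// assumed toy inputs describing the dense matrix [[1, 0, 0], [0, 0, 2]]:
+  ///
+  /// ```swift
+  /// let inputIndices: Tensor<Int64> = [[0, 0], [1, 2]]
+  /// let inputValues: Tensor<Float> = [1, 2]
+  /// let inputShape: Tensor<Int64> = [2, 3]
+  /// let rowSums = _Raw.sparseReduceSum(
+  ///   inputIndices: inputIndices, inputValues: inputValues, inputShape: inputShape,
+  ///   reductionAxes: Tensor<Int32>([1]))
+  /// // rowSums == [1.0, 2.0]  -- a dense result, one sum per row.
+  /// ```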
+ @inlinable @inline(__always) + public static func sparseReduceSum( + inputIndices: Tensor, + inputValues: Tensor, + inputShape: Tensor, + reductionAxes: Tensor, + keepDims: Bool = false + ) -> Tensor { + switch commonBackend( + commonBackend( + commonBackend(inputIndices.handle.backend, inputValues.handle.backend), + inputShape.handle.backend), reductionAxes.handle.backend) + { + case .XLA: + let output_device = reductionAxes.device + let inputIndices = Tensor(copying: inputIndices, to: .defaultTFEager) + let inputValues = Tensor(copying: inputValues, to: .defaultTFEager) + let inputShape = Tensor(copying: inputShape, to: .defaultTFEager) + let reductionAxes = Tensor(copying: reductionAxes, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.sparseReduceSum( + inputIndices: inputIndices, inputValues: inputValues, inputShape: inputShape, + reductionAxes: reductionAxes, keepDims: keepDims), to: output_device) + case .TF_EAGER: + return _RawTFEager.sparseReduceSum( + inputIndices: inputIndices, inputValues: inputValues, inputShape: inputShape, + reductionAxes: reductionAxes, keepDims: keepDims) + } + + } + + /// Computes the sum of elements across dimensions of a SparseTensor. + /// + /// This Op takes a SparseTensor and is the sparse counterpart to + /// `tf.reduce_sum()`. In contrast to SparseReduceSum, this Op returns a + /// SparseTensor. + /// + /// Reduces `sp_input` along the dimensions given in `reduction_axes`. Unless + /// `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in + /// `reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained + /// with length 1. + /// + /// If `reduction_axes` has no entries, all dimensions are reduced, and a tensor + /// with a single element is returned. Additionally, the axes can be negative, + /// which are interpreted according to the indexing rules in Python. + /// + /// - Parameters: + /// - input_indices: 2-D. `N x R` matrix with the indices of non-empty values in a + /// SparseTensor, possibly not in canonical ordering. + /// - input_values: 1-D. `N` non-empty values corresponding to `input_indices`. + /// - input_shape: 1-D. Shape of the input SparseTensor. + /// - reduction_axes: 1-D. Length-`K` vector containing the reduction axes. + /// + /// - Attr keep_dims: If true, retain reduced dimensions with length 1. + @inlinable @inline(__always) + public static func sparseReduceSumSparse( + inputIndices: Tensor, + inputValues: Tensor, + inputShape: Tensor, + reductionAxes: Tensor, + keepDims: Bool = false + ) -> (outputIndices: Tensor, outputValues: Tensor, outputShape: Tensor) { + _RawTFEager.sparseReduceSumSparse( + inputIndices: inputIndices, inputValues: inputValues, inputShape: inputShape, + reductionAxes: reductionAxes, keepDims: keepDims) + } + + /// Reorders a SparseTensor into the canonical, row-major ordering. + /// + /// Note that by convention, all sparse ops preserve the canonical ordering along + /// increasing dimension number. The only time ordering can be violated is during + /// manual manipulation of the indices and values vectors to add entries. + /// + /// Reordering does not affect the shape of the SparseTensor. + /// + /// If the tensor has rank `R` and `N` non-empty values, `input_indices` has + /// shape `[N, R]`, input_values has length `N`, and input_shape has length `R`. + /// + /// - Parameters: + /// - input_indices: 2-D. `N x R` matrix with the indices of non-empty values in a + /// SparseTensor, possibly not in canonical ordering. + /// - input_values: 1-D. 
`N` non-empty values corresponding to `input_indices`. + /// - input_shape: 1-D. Shape of the input SparseTensor. + /// + /// - Outputs: + /// - output_indices: 2-D. `N x R` matrix with the same indices as input_indices, but + /// in canonical row-major ordering. + /// - output_values: 1-D. `N` non-empty values corresponding to `output_indices`. + @inlinable @inline(__always) + public static func sparseReorder( + inputIndices: Tensor, + inputValues: Tensor, + inputShape: Tensor + ) -> (outputIndices: Tensor, outputValues: Tensor) { + _RawTFEager.sparseReorder( + inputIndices: inputIndices, inputValues: inputValues, inputShape: inputShape) + } + + /// Reshapes a SparseTensor to represent values in a new dense shape. + /// + /// This operation has the same semantics as reshape on the represented dense + /// tensor. The `input_indices` are recomputed based on the requested `new_shape`. + /// + /// If one component of `new_shape` is the special value -1, the size of that + /// dimension is computed so that the total dense size remains constant. At + /// most one component of `new_shape` can be -1. The number of dense elements + /// implied by `new_shape` must be the same as the number of dense elements + /// originally implied by `input_shape`. + /// + /// Reshaping does not affect the order of values in the SparseTensor. + /// + /// If the input tensor has rank `R_in` and `N` non-empty values, and `new_shape` + /// has length `R_out`, then `input_indices` has shape `[N, R_in]`, + /// `input_shape` has length `R_in`, `output_indices` has shape `[N, R_out]`, and + /// `output_shape` has length `R_out`. + /// + /// - Parameters: + /// - input_indices: 2-D. `N x R_in` matrix with the indices of non-empty values in a + /// SparseTensor. + /// - input_shape: 1-D. `R_in` vector with the input SparseTensor's dense shape. + /// - new_shape: 1-D. `R_out` vector with the requested new dense shape. + /// + /// - Outputs: + /// - output_indices: 2-D. `N x R_out` matrix with the updated indices of non-empty + /// values in the output SparseTensor. + /// - output_shape: 1-D. `R_out` vector with the full dense shape of the output + /// SparseTensor. This is the same as `new_shape` but with any -1 dimensions + /// filled in. + @inlinable @inline(__always) + public static func sparseReshape( + inputIndices: Tensor, + inputShape: Tensor, + newShape: Tensor + ) -> (outputIndices: Tensor, outputShape: Tensor) { + _RawTFEager.sparseReshape( + inputIndices: inputIndices, inputShape: inputShape, newShape: newShape) + } + + /// Computes the mean along sparse segments of a tensor. + /// + /// See `tf.sparse.segment_sum` for usage examples. + /// + /// Like `SegmentMean`, but `segment_ids` can have rank less than `data`'s first + /// dimension, selecting a subset of dimension 0, specified by `indices`. + /// + /// - Parameters: + /// - indices: A 1-D tensor. Has same rank as `segment_ids`. + /// - segment_ids: A 1-D tensor. Values should be sorted and can be repeated. + /// + /// - Output output: Has same shape as data, except for dimension 0 which + /// has size `k`, the number of segments. 
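+  ///
+  /// As an illustrative sketch (not part of the upstream op documentation), with
+  /// assumed toy values:
+  ///
+  /// ```swift
+  /// let data: Tensor<Float> = [[1, 2], [10, 20], [5, 6]]
+  /// // Average rows 0 and 2 into segment 0; row 1 alone forms segment 1.
+  /// let means = _Raw.sparseSegmentMean(
+  ///   data: data,
+  ///   indices: Tensor<Int32>([0, 2, 1]),
+  ///   segmentIds: Tensor<Int32>([0, 0, 1]))
+  /// // means == [[3.0, 4.0], [10.0, 20.0]]
+  /// ```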
+  @inlinable @inline(__always)
+  public static func sparseSegmentMean<
+    T: FloatingPoint & TensorFlowScalar,
+    Tidx: TensorFlowIndex
+  >(
+    data: Tensor<T>,
+    indices: Tensor<Tidx>,
+    segmentIds: Tensor<Int32>
+  ) -> Tensor<T> {
+    switch commonBackend(
+      commonBackend(data.handle.backend, indices.handle.backend), segmentIds.handle.backend)
+    {
+    case .XLA:
+      let output_device = segmentIds.device
+      let data = Tensor(copying: data, to: .defaultTFEager)
+      let indices = Tensor(copying: indices, to: .defaultTFEager)
+      let segmentIds = Tensor(copying: segmentIds, to: .defaultTFEager)
+      return Tensor(
+        copying: _RawTFEager.sparseSegmentMean(
+          data: data, indices: indices, segmentIds: segmentIds), to: output_device)
+    case .TF_EAGER:
+      return _RawTFEager.sparseSegmentMean(data: data, indices: indices, segmentIds: segmentIds)
+    }
+
+  }
+
+  /// Computes gradients for SparseSegmentMean.
+  ///
+  /// Returns tensor "output" with same shape as grad, except for dimension 0 whose
+  /// value is output_dim0.
+  ///
+  /// - Parameters:
+  ///   - grad: gradient propagated to the SparseSegmentMean op.
+  ///   - indices: indices passed to the corresponding SparseSegmentMean op.
+  ///   - segment_ids: segment_ids passed to the corresponding SparseSegmentMean op.
+  ///   - output_dim0: dimension 0 of "data" passed to SparseSegmentMean op.
+  @inlinable @inline(__always)
+  public static func sparseSegmentMeanGrad<
+    T: FloatingPoint & TensorFlowScalar,
+    Tidx: TensorFlowIndex
+  >(
+    grad: Tensor<T>,
+    indices: Tensor<Tidx>,
+    segmentIds: Tensor<Int32>,
+    outputDim0: Tensor<Int32>
+  ) -> Tensor<T> {
+    switch commonBackend(
+      commonBackend(
+        commonBackend(grad.handle.backend, indices.handle.backend), segmentIds.handle.backend),
+      outputDim0.handle.backend)
+    {
+    case .XLA:
+      let output_device = outputDim0.device
+      let grad = Tensor(copying: grad, to: .defaultTFEager)
+      let indices = Tensor(copying: indices, to: .defaultTFEager)
+      let segmentIds = Tensor(copying: segmentIds, to: .defaultTFEager)
+      let outputDim0 = Tensor(copying: outputDim0, to: .defaultTFEager)
+      return Tensor(
+        copying: _RawTFEager.sparseSegmentMeanGrad(
+          grad: grad, indices: indices, segmentIds: segmentIds, outputDim0: outputDim0),
+        to: output_device)
+    case .TF_EAGER:
+      return _RawTFEager.sparseSegmentMeanGrad(
+        grad: grad, indices: indices, segmentIds: segmentIds, outputDim0: outputDim0)
+    }
+
+  }
+
+  /// Computes the mean along sparse segments of a tensor.
+  ///
+  /// Like `SparseSegmentMean`, but allows missing ids in `segment_ids`. If an id is
+  /// missing, the `output` tensor at that position will be zeroed.
+  ///
+  /// Read
+  /// [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation)
+  /// for an explanation of segments.
+  ///
+  /// - Parameters:
+  ///   - indices: A 1-D tensor. Has same rank as `segment_ids`.
+  ///   - segment_ids: A 1-D tensor. Values should be sorted and can be repeated.
+  ///   - num_segments: Should equal the number of distinct segment IDs.
+  ///
+  /// - Output output: Has same shape as data, except for dimension 0 which has size
+  ///   `num_segments`.
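+  ///
+  /// As an illustrative sketch (not part of the upstream op documentation): with
+  /// `numSegments = 3` and only segments 0 and 2 present, the output row for the
+  /// missing segment 1 is all zeros. The values below are assumptions.
+  ///
+  /// ```swift
+  /// let data: Tensor<Float> = [[1, 2], [5, 6]]
+  /// let means = _Raw.sparseSegmentMeanWithNumSegments(
+  ///   data: data,
+  ///   indices: Tensor<Int32>([0, 1]),
+  ///   segmentIds: Tensor<Int32>([0, 2]),
+  ///   numSegments: Tensor<Int32>(3))
+  /// // means == [[1.0, 2.0], [0.0, 0.0], [5.0, 6.0]]
+  /// ```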
+ @inlinable @inline(__always) + public static func sparseSegmentMeanWithNumSegments< + T: FloatingPoint & TensorFlowScalar, + Tidx: TensorFlowIndex, + Tnumsegments: TensorFlowIndex + >( + data: Tensor, + indices: Tensor, + segmentIds: Tensor, + numSegments: Tensor + ) -> Tensor { + switch commonBackend( + commonBackend( + commonBackend(data.handle.backend, indices.handle.backend), segmentIds.handle.backend), + numSegments.handle.backend) + { + case .XLA: + let output_device = numSegments.device + let data = Tensor(copying: data, to: .defaultTFEager) + let indices = Tensor(copying: indices, to: .defaultTFEager) + let segmentIds = Tensor(copying: segmentIds, to: .defaultTFEager) + let numSegments = Tensor(copying: numSegments, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.sparseSegmentMeanWithNumSegments( + data: data, indices: indices, segmentIds: segmentIds, numSegments: numSegments), + to: output_device) + case .TF_EAGER: + return _RawTFEager.sparseSegmentMeanWithNumSegments( + data: data, indices: indices, segmentIds: segmentIds, numSegments: numSegments) + } + + } + + /// Computes the sum along sparse segments of a tensor divided by the sqrt of N. + /// + /// N is the size of the segment being reduced. + /// + /// See `tf.sparse.segment_sum` for usage examples. + /// + /// + /// - Parameters: + /// - indices: A 1-D tensor. Has same rank as `segment_ids`. + /// - segment_ids: A 1-D tensor. Values should be sorted and can be repeated. + /// + /// - Output output: Has same shape as data, except for dimension 0 which + /// has size `k`, the number of segments. + @inlinable @inline(__always) + public static func sparseSegmentSqrtN< + T: FloatingPoint & TensorFlowScalar, + Tidx: TensorFlowIndex + >( + data: Tensor, + indices: Tensor, + segmentIds: Tensor + ) -> Tensor { + switch commonBackend( + commonBackend(data.handle.backend, indices.handle.backend), segmentIds.handle.backend) + { + case .XLA: + let output_device = segmentIds.device + let data = Tensor(copying: data, to: .defaultTFEager) + let indices = Tensor(copying: indices, to: .defaultTFEager) + let segmentIds = Tensor(copying: segmentIds, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.sparseSegmentSqrtN( + data: data, indices: indices, segmentIds: segmentIds), to: output_device) + case .TF_EAGER: + return _RawTFEager.sparseSegmentSqrtN(data: data, indices: indices, segmentIds: segmentIds) + } + + } + + /// Computes gradients for SparseSegmentSqrtN. + /// + /// Returns tensor "output" with same shape as grad, except for dimension 0 whose + /// value is output_dim0. + /// + /// - Parameters: + /// - grad: gradient propagated to the SparseSegmentSqrtN op. + /// - indices: indices passed to the corresponding SparseSegmentSqrtN op. + /// - segment_ids: segment_ids passed to the corresponding SparseSegmentSqrtN op. + /// - output_dim0: dimension 0 of "data" passed to SparseSegmentSqrtN op. 
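+  ///
+  /// As an illustrative sketch (not part of the upstream op documentation), wiring
+  /// the backward op to the shapes of an assumed forward `SparseSegmentSqrtN` call
+  /// that reduced three selected rows into two segments:
+  ///
+  /// ```swift
+  /// let grad: Tensor<Float> = [[1, 1], [2, 2]]      // upstream grad, [numSegments, 2]
+  /// let dData = _Raw.sparseSegmentSqrtNGrad(
+  ///   grad: grad,
+  ///   indices: Tensor<Int32>([0, 1, 2]),            // indices from the forward op
+  ///   segmentIds: Tensor<Int32>([0, 0, 1]),         // segment_ids from the forward op
+  ///   outputDim0: Tensor<Int32>(3))                 // dim 0 of the forward `data`
+  /// // dData has shape [3, 2]; the rows of segment 0 receive grad[0] / sqrt(2).
+  /// ```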
+  @inlinable @inline(__always)
+  public static func sparseSegmentSqrtNGrad<
+    T: FloatingPoint & TensorFlowScalar,
+    Tidx: TensorFlowIndex
+  >(
+    grad: Tensor<T>,
+    indices: Tensor<Tidx>,
+    segmentIds: Tensor<Int32>,
+    outputDim0: Tensor<Int32>
+  ) -> Tensor<T> {
+    switch commonBackend(
+      commonBackend(
+        commonBackend(grad.handle.backend, indices.handle.backend), segmentIds.handle.backend),
+      outputDim0.handle.backend)
+    {
+    case .XLA:
+      let output_device = outputDim0.device
+      let grad = Tensor(copying: grad, to: .defaultTFEager)
+      let indices = Tensor(copying: indices, to: .defaultTFEager)
+      let segmentIds = Tensor(copying: segmentIds, to: .defaultTFEager)
+      let outputDim0 = Tensor(copying: outputDim0, to: .defaultTFEager)
+      return Tensor(
+        copying: _RawTFEager.sparseSegmentSqrtNGrad(
+          grad: grad, indices: indices, segmentIds: segmentIds, outputDim0: outputDim0),
+        to: output_device)
+    case .TF_EAGER:
+      return _RawTFEager.sparseSegmentSqrtNGrad(
+        grad: grad, indices: indices, segmentIds: segmentIds, outputDim0: outputDim0)
+    }
+
+  }
+
+  /// Computes the sum along sparse segments of a tensor divided by the sqrt of N.
+  ///
+  /// N is the size of the segment being reduced.
+  ///
+  /// Like `SparseSegmentSqrtN`, but allows missing ids in `segment_ids`. If an id is
+  /// missing, the `output` tensor at that position will be zeroed.
+  ///
+  /// Read
+  /// [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation)
+  /// for an explanation of segments.
+  ///
+  /// - Parameters:
+  ///   - indices: A 1-D tensor. Has same rank as `segment_ids`.
+  ///   - segment_ids: A 1-D tensor. Values should be sorted and can be repeated.
+  ///   - num_segments: Should equal the number of distinct segment IDs.
+  ///
+  /// - Output output: Has same shape as data, except for dimension 0 which
+  ///   has size `k`, the number of segments.
+  @inlinable @inline(__always)
+  public static func sparseSegmentSqrtNWithNumSegments<
+    T: FloatingPoint & TensorFlowScalar,
+    Tidx: TensorFlowIndex,
+    Tnumsegments: TensorFlowIndex
+  >(
+    data: Tensor<T>,
+    indices: Tensor<Tidx>,
+    segmentIds: Tensor<Int32>,
+    numSegments: Tensor<Tnumsegments>
+  ) -> Tensor<T> {
+    switch commonBackend(
+      commonBackend(
+        commonBackend(data.handle.backend, indices.handle.backend), segmentIds.handle.backend),
+      numSegments.handle.backend)
+    {
+    case .XLA:
+      let output_device = numSegments.device
+      let data = Tensor(copying: data, to: .defaultTFEager)
+      let indices = Tensor(copying: indices, to: .defaultTFEager)
+      let segmentIds = Tensor(copying: segmentIds, to: .defaultTFEager)
+      let numSegments = Tensor(copying: numSegments, to: .defaultTFEager)
+      return Tensor(
+        copying: _RawTFEager.sparseSegmentSqrtNWithNumSegments(
+          data: data, indices: indices, segmentIds: segmentIds, numSegments: numSegments),
+        to: output_device)
+    case .TF_EAGER:
+      return _RawTFEager.sparseSegmentSqrtNWithNumSegments(
+        data: data, indices: indices, segmentIds: segmentIds, numSegments: numSegments)
+    }
+
+  }
+
+  /// Computes the sum along sparse segments of a tensor.
+  ///
+  /// Read
+  /// [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation)
+  /// for an explanation of segments.
+  ///
+  /// Like `SegmentSum`, but `segment_ids` can have rank less than `data`'s first
+  /// dimension, selecting a subset of dimension 0, specified by `indices`.
+  ///
+  /// For example:
+  ///
+  /// ```python
+  /// c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])
+  ///
+  /// # Select two rows, one segment.
+  /// tf.sparse_segment_sum(c, tf.constant([0, 1]), tf.constant([0, 0]))
+  /// # => [[0 0 0 0]]
+  ///
+  /// # Select two rows, two segments.
+  /// tf.sparse_segment_sum(c, tf.constant([0, 1]), tf.constant([0, 1]))
+  /// # => [[ 1  2  3  4]
+  /// #     [-1 -2 -3 -4]]
+  ///
+  /// # Select all rows, two segments.
+  /// tf.sparse_segment_sum(c, tf.constant([0, 1, 2]), tf.constant([0, 0, 1]))
+  /// # => [[0 0 0 0]
+  /// #     [5 6 7 8]]
+  ///
+  /// # Which is equivalent to:
+  /// tf.segment_sum(c, tf.constant([0, 0, 1]))
+  /// ```
+  ///
+  /// - Parameters:
+  ///   - indices: A 1-D tensor. Has same rank as `segment_ids`.
+  ///   - segment_ids: A 1-D tensor. Values should be sorted and can be repeated.
+  ///
+  /// - Output output: Has same shape as data, except for dimension 0 which
+  ///   has size `k`, the number of segments.
+  @inlinable @inline(__always)
+  public static func sparseSegmentSum<
+    T: TensorFlowNumeric,
+    Tidx: TensorFlowIndex
+  >(
+    data: Tensor<T>,
+    indices: Tensor<Tidx>,
+    segmentIds: Tensor<Int32>
+  ) -> Tensor<T> {
+    switch commonBackend(
+      commonBackend(data.handle.backend, indices.handle.backend), segmentIds.handle.backend)
+    {
+    case .XLA:
+      let output_device = segmentIds.device
+      let data = Tensor(copying: data, to: .defaultTFEager)
+      let indices = Tensor(copying: indices, to: .defaultTFEager)
+      let segmentIds = Tensor(copying: segmentIds, to: .defaultTFEager)
+      return Tensor(
+        copying: _RawTFEager.sparseSegmentSum(
+          data: data, indices: indices, segmentIds: segmentIds), to: output_device)
+    case .TF_EAGER:
+      return _RawTFEager.sparseSegmentSum(data: data, indices: indices, segmentIds: segmentIds)
+    }
+
+  }
+
+  /// Computes the sum along sparse segments of a tensor.
+  ///
+  /// Like `SparseSegmentSum`, but allows missing ids in `segment_ids`. If an id is
+  /// missing, the `output` tensor at that position will be zeroed.
+  ///
+  /// Read
+  /// [the section on segmentation](https://tensorflow.org/api_docs/python/tf/sparse#Segmentation)
+  /// for an explanation of segments.
+  ///
+  /// For example:
+  ///
+  /// ```python
+  /// c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])
+  ///
+  /// tf.sparse_segment_sum_with_num_segments(
+  ///     c, tf.constant([0, 1]), tf.constant([0, 0]), num_segments=3)
+  /// # => [[0 0 0 0]
+  /// #     [0 0 0 0]
+  /// #     [0 0 0 0]]
+  ///
+  /// tf.sparse_segment_sum_with_num_segments(c,
+  ///                                         tf.constant([0, 1]),
+  ///                                         tf.constant([0, 2]),
+  ///                                         num_segments=4)
+  /// # => [[ 1  2  3  4]
+  /// #     [ 0  0  0  0]
+  /// #     [-1 -2 -3 -4]
+  /// #     [ 0  0  0  0]]
+  /// ```
+  ///
+  /// - Parameters:
+  ///   - indices: A 1-D tensor. Has same rank as `segment_ids`.
+  ///   - segment_ids: A 1-D tensor. Values should be sorted and can be repeated.
+  ///   - num_segments: Should equal the number of distinct segment IDs.
+  ///
+  /// - Output output: Has same shape as data, except for dimension 0 which
+  ///   has size `num_segments`.
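+  ///
+  /// As an illustrative sketch (not part of the upstream op documentation), the
+  /// first Python example above expressed through this Swift wrapper:
+  ///
+  /// ```swift
+  /// let c: Tensor<Int32> = [[1, 2, 3, 4], [-1, -2, -3, -4], [5, 6, 7, 8]]
+  /// let sums = _Raw.sparseSegmentSumWithNumSegments(
+  ///   data: c,
+  ///   indices: Tensor<Int32>([0, 1]),
+  ///   segmentIds: Tensor<Int32>([0, 0]),
+  ///   numSegments: Tensor<Int32>(3))
+  /// // sums == [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]
+  /// ```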
+ @inlinable @inline(__always) + public static func sparseSegmentSumWithNumSegments< + T: TensorFlowNumeric, + Tidx: TensorFlowIndex, + Tnumsegments: TensorFlowIndex + >( + data: Tensor, + indices: Tensor, + segmentIds: Tensor, + numSegments: Tensor + ) -> Tensor { + switch commonBackend( + commonBackend( + commonBackend(data.handle.backend, indices.handle.backend), segmentIds.handle.backend), + numSegments.handle.backend) + { + case .XLA: + let output_device = numSegments.device + let data = Tensor(copying: data, to: .defaultTFEager) + let indices = Tensor(copying: indices, to: .defaultTFEager) + let segmentIds = Tensor(copying: segmentIds, to: .defaultTFEager) + let numSegments = Tensor(copying: numSegments, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.sparseSegmentSumWithNumSegments( + data: data, indices: indices, segmentIds: segmentIds, numSegments: numSegments), + to: output_device) + case .TF_EAGER: + return _RawTFEager.sparseSegmentSumWithNumSegments( + data: data, indices: indices, segmentIds: segmentIds, numSegments: numSegments) + } + + } + + /// Slice a `SparseTensor` based on the `start` and `size`. + /// + /// For example, if the input is + /// + /// input_tensor = shape = [2, 7] + /// [ a d e ] + /// [b c ] + /// + /// Graphically the output tensors are: + /// + /// sparse_slice([0, 0], [2, 4]) = shape = [2, 4] + /// [ a ] + /// [b c ] + /// + /// sparse_slice([0, 4], [2, 3]) = shape = [2, 3] + /// [ d e ] + /// [ ] + /// + /// - Parameters: + /// - indices: 2-D tensor represents the indices of the sparse tensor. + /// - values: 1-D tensor represents the values of the sparse tensor. + /// - shape: 1-D. tensor represents the shape of the sparse tensor. + /// - start: 1-D. tensor represents the start of the slice. + /// - size: 1-D. tensor represents the size of the slice. + /// output indices: A list of 1-D tensors represents the indices of the output + /// sparse tensors. + /// + /// - Outputs: + /// - output_values: A list of 1-D tensors represents the values of the output sparse + /// tensors. + /// - output_shape: A list of 1-D tensors represents the shape of the output sparse + /// tensors. + @inlinable @inline(__always) + public static func sparseSlice( + indices: Tensor, + _ values: Tensor, + shape: Tensor, + start: Tensor, + size: Tensor + ) -> (outputIndices: Tensor, outputValues: Tensor, outputShape: Tensor) { + _RawTFEager.sparseSlice(indices: indices, values, shape: shape, start: start, size: size) + } + + /// The gradient operator for the SparseSlice op. + /// + /// This op takes in the upstream gradient w.r.t. non-empty values of + /// the sliced `SparseTensor`, and outputs the gradients w.r.t. + /// the non-empty values of input `SparseTensor`. + /// + /// - Parameters: + /// - backprop_val_grad: 1-D. The gradient with respect to + /// the non-empty values of the sliced `SparseTensor`. + /// - input_indices: 2-D. The `indices` of the input `SparseTensor`. + /// - input_start: 1-D. tensor represents the start of the slice. + /// - output_indices: 2-D. The `indices` of the sliced `SparseTensor`. + /// + /// - Output val_grad: 1-D. The gradient with respect to the non-empty values of input `SparseTensor`. 
+ @inlinable @inline(__always) + public static func sparseSliceGrad( + backpropValGrad: Tensor, + inputIndices: Tensor, + inputStart: Tensor, + outputIndices: Tensor + ) -> Tensor { + switch commonBackend( + commonBackend( + commonBackend(backpropValGrad.handle.backend, inputIndices.handle.backend), + inputStart.handle.backend), outputIndices.handle.backend) + { + case .XLA: + let output_device = outputIndices.device + let backpropValGrad = Tensor(copying: backpropValGrad, to: .defaultTFEager) + let inputIndices = Tensor(copying: inputIndices, to: .defaultTFEager) + let inputStart = Tensor(copying: inputStart, to: .defaultTFEager) + let outputIndices = Tensor(copying: outputIndices, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.sparseSliceGrad( + backpropValGrad: backpropValGrad, inputIndices: inputIndices, inputStart: inputStart, + outputIndices: outputIndices), to: output_device) + case .TF_EAGER: + return _RawTFEager.sparseSliceGrad( + backpropValGrad: backpropValGrad, inputIndices: inputIndices, inputStart: inputStart, + outputIndices: outputIndices) + } + + } + + /// Applies softmax to a batched N-D `SparseTensor`. + /// + /// The inputs represent an N-D SparseTensor with logical shape `[..., B, C]` + /// (where `N >= 2`), and with indices sorted in the canonical lexicographic order. + /// + /// This op is equivalent to applying the normal `tf.nn.softmax()` to each innermost + /// logical submatrix with shape `[B, C]`, but with the catch that *the implicitly + /// zero elements do not participate*. Specifically, the algorithm is equivalent + /// to the following: + /// + /// (1) Applies `tf.nn.softmax()` to a densified view of each innermost submatrix + /// with shape `[B, C]`, along the size-C dimension; + /// (2) Masks out the original implicitly-zero locations; + /// (3) Renormalizes the remaining elements. + /// + /// Hence, the `SparseTensor` result has exactly the same non-zero indices and + /// shape. + /// + /// - Parameters: + /// - sp_indices: 2-D. `NNZ x R` matrix with the indices of non-empty values in a + /// SparseTensor, in canonical ordering. + /// - sp_values: 1-D. `NNZ` non-empty values corresponding to `sp_indices`. + /// - sp_shape: 1-D. Shape of the input SparseTensor. + /// + /// - Output output: 1-D. The `NNZ` values for the result `SparseTensor`. + @inlinable @inline(__always) + public static func sparseSoftmax( + spIndices: Tensor, + spValues: Tensor, + spShape: Tensor + ) -> Tensor { + switch commonBackend( + commonBackend(spIndices.handle.backend, spValues.handle.backend), spShape.handle.backend) + { + case .XLA: + let output_device = spShape.device + let spIndices = Tensor(copying: spIndices, to: .defaultTFEager) + let spValues = Tensor(copying: spValues, to: .defaultTFEager) + let spShape = Tensor(copying: spShape, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.sparseSoftmax( + spIndices: spIndices, spValues: spValues, spShape: spShape), to: output_device) + case .TF_EAGER: + return _RawTFEager.sparseSoftmax(spIndices: spIndices, spValues: spValues, spShape: spShape) + } + + } + + /// Computes softmax cross entropy cost and gradients to backpropagate. + /// + /// Unlike `SoftmaxCrossEntropyWithLogits`, this operation does not accept + /// a matrix of label probabilities, but rather a single label per row + /// of features. This label is considered to have probability 1.0 for the + /// given row. + /// + /// Inputs are the logits, not probabilities. 
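+  ///
+  /// A minimal illustrative call (the logits and labels are made-up values):
+  ///
+  /// ```swift
+  /// let logits = Tensor<Float>(shape: [2, 3], scalars: [1, 2, 3, 1, 2, 3])
+  /// let labels = Tensor<Int32>([2, 0])
+  /// let (loss, backprop) = _Raw.sparseSoftmaxCrossEntropyWithLogits(
+  ///   features: logits, labels: labels)
+  /// // loss has shape [2]; backprop has shape [2, 3].
+  /// ```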
+  ///
+  /// - Parameters:
+  ///   - features: batch_size x num_classes matrix
+  ///   - labels: batch_size vector with values in [0, num_classes).
+  ///     This is the label for the given minibatch entry.
+  ///
+  /// - Outputs:
+  ///   - loss: Per example loss (batch_size vector).
+  ///   - backprop: backpropagated gradients (batch_size x num_classes matrix).
+  @inlinable @inline(__always)
+  public static func sparseSoftmaxCrossEntropyWithLogits<
+    T: FloatingPoint & TensorFlowScalar,
+    Tlabels: TensorFlowIndex
+  >(
+    features: Tensor<T>,
+    labels: Tensor<Tlabels>
+  ) -> (loss: Tensor<T>, backprop: Tensor<T>) {
+    switch commonBackend(features.handle.backend, labels.handle.backend) {
+    case .XLA:
+      return _RawXLA.sparseSoftmaxCrossEntropyWithLogits(features: features, labels: labels)
+    case .TF_EAGER:
+      return _RawTFEager.sparseSoftmaxCrossEntropyWithLogits(features: features, labels: labels)
+    }
+
+  }
+
+  /// Returns the element-wise max of two SparseTensors.
+  ///
+  /// Assumes the two SparseTensors have the same shape, i.e., no broadcasting.
+  ///
+  /// - Parameters:
+  ///   - a_indices: 2-D. `N x R` matrix with the indices of non-empty values in a
+  ///     SparseTensor, in the canonical lexicographic ordering.
+  ///   - a_values: 1-D. `N` non-empty values corresponding to `a_indices`.
+  ///   - a_shape: 1-D. Shape of the input SparseTensor.
+  ///   - b_indices: counterpart to `a_indices` for the other operand.
+  ///   - b_values: counterpart to `a_values` for the other operand; must be of the same dtype.
+  ///   - b_shape: counterpart to `a_shape` for the other operand; the two shapes must be equal.
+  ///
+  /// - Outputs:
+  ///   - output_indices: 2-D. The indices of the output SparseTensor.
+  ///   - output_values: 1-D. The values of the output SparseTensor.
+  @inlinable @inline(__always)
+  public static func sparseSparseMaximum<T: TensorFlowNumeric>(
+    aIndices: Tensor<Int64>,
+    aValues: Tensor<T>,
+    aShape: Tensor<Int64>,
+    bIndices: Tensor<Int64>,
+    bValues: Tensor<T>,
+    bShape: Tensor<Int64>
+  ) -> (outputIndices: Tensor<Int64>, outputValues: Tensor<T>) {
+    _RawTFEager.sparseSparseMaximum(
+      aIndices: aIndices, aValues: aValues, aShape: aShape, bIndices: bIndices, bValues: bValues,
+      bShape: bShape)
+  }
+
+  /// Returns the element-wise min of two SparseTensors.
+  ///
+  /// Assumes the two SparseTensors have the same shape, i.e., no broadcasting.
+  ///
+  /// - Parameters:
+  ///   - a_indices: 2-D. `N x R` matrix with the indices of non-empty values in a
+  ///     SparseTensor, in the canonical lexicographic ordering.
+  ///   - a_values: 1-D. `N` non-empty values corresponding to `a_indices`.
+  ///   - a_shape: 1-D. Shape of the input SparseTensor.
+  ///   - b_indices: counterpart to `a_indices` for the other operand.
+  ///   - b_values: counterpart to `a_values` for the other operand; must be of the same dtype.
+  ///   - b_shape: counterpart to `a_shape` for the other operand; the two shapes must be equal.
+  ///
+  /// - Outputs:
+  ///   - output_indices: 2-D. The indices of the output SparseTensor.
+  ///   - output_values: 1-D. The values of the output SparseTensor.
+  @inlinable @inline(__always)
+  public static func sparseSparseMinimum<T: TensorFlowNumeric>(
+    aIndices: Tensor<Int64>,
+    aValues: Tensor<T>,
+    aShape: Tensor<Int64>,
+    bIndices: Tensor<Int64>,
+    bValues: Tensor<T>,
+    bShape: Tensor<Int64>
+  ) -> (outputIndices: Tensor<Int64>, outputValues: Tensor<T>) {
+    _RawTFEager.sparseSparseMinimum(
+      aIndices: aIndices, aValues: aValues, aShape: aShape, bIndices: bIndices, bValues: bValues,
+      bShape: bShape)
+  }
+
+  /// Split a `SparseTensor` into `num_split` tensors along one dimension.
+  ///
+  /// If the `shape[split_dim]` is not an integer multiple of `num_split`, slices
+  /// `[0 : shape[split_dim] % num_split]` get one extra dimension.
+  /// For example, if `split_dim = 1` and `num_split = 2` and the input is
+  ///
+  ///     input_tensor = shape = [2, 7]
+  ///     [    a   d e  ]
+  ///     [b c          ]
+  ///
+  /// Graphically the output tensors are:
+  ///
+  ///     output_tensor[0] = shape = [2, 4]
+  ///     [    a  ]
+  ///     [b c    ]
+  ///
+  ///     output_tensor[1] = shape = [2, 3]
+  ///     [ d e  ]
+  ///     [      ]
+  ///
+  /// - Parameters:
+  ///   - split_dim: 0-D. The dimension along which to split. Must be in the range
+  ///     `[0, rank(shape))`.
+  ///   - indices: 2-D tensor represents the indices of the sparse tensor.
+  ///   - values: 1-D tensor represents the values of the sparse tensor.
+  ///   - shape: 1-D. tensor represents the shape of the sparse tensor.
+  ///     output indices: A list of 1-D tensors represents the indices of the output
+  ///     sparse tensors.
+  ///
+  /// - Attr num_split: The number of ways to split.
+  ///
+  /// - Outputs:
+  ///   - output_values: A list of 1-D tensors represents the values of the output sparse
+  ///     tensors.
+  ///   - output_shape: A list of 1-D tensors represents the shape of the output sparse
+  ///     tensors.
+  @inlinable @inline(__always)
+  public static func sparseSplit<T: TensorFlowScalar>(
+    splitDim: Tensor<Int64>,
+    indices: Tensor<Int64>,
+    _ values: Tensor<T>,
+    shape: Tensor<Int64>,
+    numSplit: Int64
+  ) -> (outputIndices: [Tensor<Int64>], outputValues: [Tensor<T>], outputShape: [Tensor<Int64>]) {
+    _RawTFEager.sparseSplit(
+      splitDim: splitDim, indices: indices, values, shape: shape, numSplit: numSplit)
+  }
+
+  /// Adds up a `SparseTensor` and a dense `Tensor`, producing a dense `Tensor`.
+  ///
+  /// This Op does not require `a_indices` be sorted in standard lexicographic order.
+  ///
+  /// - Parameters:
+  ///   - a_indices: 2-D. The `indices` of the `SparseTensor`, with shape `[nnz, ndims]`.
+  ///   - a_values: 1-D. The `values` of the `SparseTensor`, with shape `[nnz]`.
+  ///   - a_shape: 1-D. The `shape` of the `SparseTensor`, with shape `[ndims]`.
+  ///   - b: `ndims`-D Tensor. With shape `a_shape`.
+  @inlinable @inline(__always)
+  public static func sparseTensorDenseAdd<
+    T: TensorFlowNumeric,
+    Tindices: TensorFlowIndex
+  >(
+    aIndices: Tensor<Tindices>,
+    aValues: Tensor<T>,
+    aShape: Tensor<Tindices>,
+    _ b: Tensor<T>
+  ) -> Tensor<T> {
+    switch commonBackend(
+      commonBackend(
+        commonBackend(aIndices.handle.backend, aValues.handle.backend), aShape.handle.backend),
+      b.handle.backend)
+    {
+    case .XLA:
+      let output_device = b.device
+      let aIndices = Tensor(copying: aIndices, to: .defaultTFEager)
+      let aValues = Tensor(copying: aValues, to: .defaultTFEager)
+      let aShape = Tensor(copying: aShape, to: .defaultTFEager)
+      let b = Tensor(copying: b, to: .defaultTFEager)
+      return Tensor(
+        copying: _RawTFEager.sparseTensorDenseAdd(
+          aIndices: aIndices, aValues: aValues, aShape: aShape, b), to: output_device)
+    case .TF_EAGER:
+      return _RawTFEager.sparseTensorDenseAdd(
+        aIndices: aIndices, aValues: aValues, aShape: aShape, b)
+    }
+
+  }
+
+  /// Multiply SparseTensor (of rank 2) "A" by dense matrix "B".
+  ///
+  /// No validity checking is performed on the indices of A. However, the following
+  /// input format is recommended for optimal behavior:
+  ///
+  /// if adjoint_a == false:
+  ///   A should be sorted in lexicographically increasing order. Use SparseReorder
+  ///   if you're not sure.
+  /// if adjoint_a == true:
+  ///   A should be sorted in order of increasing dimension 1 (i.e., "column major"
+  ///   order instead of "row major" order).
+  ///
+  /// - Parameters:
+  ///   - a_indices: 2-D. The `indices` of the `SparseTensor`, size `[nnz, 2]` Matrix.
+ /// - a_values: 1-D. The `values` of the `SparseTensor`, size `[nnz]` Vector. + /// - a_shape: 1-D. The `shape` of the `SparseTensor`, size `[2]` Vector. + /// - b: 2-D. A dense Matrix. + /// + /// - Attrs: + /// - adjoint_a: Use the adjoint of A in the matrix multiply. If A is complex, this + /// is transpose(conj(A)). Otherwise it's transpose(A). + /// - adjoint_b: Use the adjoint of B in the matrix multiply. If B is complex, this + /// is transpose(conj(B)). Otherwise it's transpose(B). + @inlinable @inline(__always) + public static func sparseTensorDenseMatMul< + T: TensorFlowScalar, + Tindices: TensorFlowIndex + >( + aIndices: Tensor, + aValues: Tensor, + aShape: Tensor, + _ b: Tensor, + adjointA: Bool = false, + adjointB: Bool = false + ) -> Tensor { + switch commonBackend( + commonBackend( + commonBackend(aIndices.handle.backend, aValues.handle.backend), aShape.handle.backend), + b.handle.backend) + { + case .XLA: + let output_device = b.device + let aIndices = Tensor(copying: aIndices, to: .defaultTFEager) + let aValues = Tensor(copying: aValues, to: .defaultTFEager) + let aShape = Tensor(copying: aShape, to: .defaultTFEager) + let b = Tensor(copying: b, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.sparseTensorDenseMatMul( + aIndices: aIndices, aValues: aValues, aShape: aShape, b, adjointA: adjointA, + adjointB: adjointB), to: output_device) + case .TF_EAGER: + return _RawTFEager.sparseTensorDenseMatMul( + aIndices: aIndices, aValues: aValues, aShape: aShape, b, adjointA: adjointA, + adjointB: adjointB) + } + + } + + /// Creates a dataset that splits a SparseTensor into elements row-wise. + @inlinable @inline(__always) + public static func sparseTensorSliceDataset( + indices: Tensor, + _ values: Tensor, + denseShape: Tensor + ) -> VariantHandle { + _RawTFEager.sparseTensorSliceDataset(indices: indices, values, denseShape: denseShape) + } + + /// Converts a SparseTensor to a (possibly batched) CSRSparseMatrix. + /// + /// - Parameters: + /// - indices: SparseTensor indices. + /// - values: SparseTensor values. + /// - dense_shape: SparseTensor dense shape. + /// + /// - Output sparse_matrix: A (possibly batched) CSRSparseMatrix. + @inlinable @inline(__always) + public static func sparseTensorToCSRSparseMatrix( + indices: Tensor, + _ values: Tensor, + denseShape: Tensor + ) -> VariantHandle { + _RawTFEager.sparseTensorToCSRSparseMatrix(indices: indices, values, denseShape: denseShape) + } + + /// Converts a sparse representation into a dense tensor. + /// + /// Builds an array `dense` with shape `output_shape` such that + /// + /// ``` + /// # If sparse_indices is scalar + /// dense[i] = (i == sparse_indices ? sparse_values : default_value) + /// + /// # If sparse_indices is a vector, then for each i + /// dense[sparse_indices[i]] = sparse_values[i] + /// + /// # If sparse_indices is an n by d matrix, then for each i in [0, n) + /// dense[sparse_indices[i][0], ..., sparse_indices[i][d-1]] = sparse_values[i] + /// ``` + /// + /// All other values in `dense` are set to `default_value`. If `sparse_values` is a + /// scalar, all sparse indices are set to this single value. + /// + /// Indices should be sorted in lexicographic order, and indices must not + /// contain any repeats. If `validate_indices` is true, these properties + /// are checked during execution. + /// + /// - Parameters: + /// - sparse_indices: 0-D, 1-D, or 2-D. `sparse_indices[i]` contains the complete + /// index where `sparse_values[i]` will be placed. + /// - output_shape: 1-D. 
Shape of the dense output tensor. + /// - sparse_values: 1-D. Values corresponding to each row of `sparse_indices`, + /// or a scalar value to be used for all sparse indices. + /// - default_value: Scalar value to set for indices not specified in + /// `sparse_indices`. + /// + /// - Attr validate_indices: If true, indices are checked to make sure they are sorted in + /// lexicographic order and that there are no repeats. + /// + /// - Output dense: Dense output tensor of shape `output_shape`. + @inlinable @inline(__always) + public static func sparseToDense< + T: TensorFlowScalar, + Tindices: TensorFlowIndex + >( + sparseIndices: Tensor, + outputShape: Tensor, + sparseValues: Tensor, + defaultValue: Tensor, + validateIndices: Bool = true + ) -> Tensor { + switch commonBackend( + commonBackend( + commonBackend(sparseIndices.handle.backend, outputShape.handle.backend), + sparseValues.handle.backend), defaultValue.handle.backend) + { + case .XLA: + let output_device = defaultValue.device + let sparseIndices = Tensor(copying: sparseIndices, to: .defaultTFEager) + let outputShape = Tensor(copying: outputShape, to: .defaultTFEager) + let sparseValues = Tensor(copying: sparseValues, to: .defaultTFEager) + let defaultValue = Tensor(copying: defaultValue, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.sparseToDense( + sparseIndices: sparseIndices, outputShape: outputShape, sparseValues: sparseValues, + defaultValue: defaultValue, validateIndices: validateIndices), to: output_device) + case .TF_EAGER: + return _RawTFEager.sparseToDense( + sparseIndices: sparseIndices, outputShape: outputShape, sparseValues: sparseValues, + defaultValue: defaultValue, validateIndices: validateIndices) + } + + } + + /// Applies set operation along last dimension of 2 `SparseTensor` inputs. + /// + /// See SetOperationOp::SetOperationFromContext for values of `set_operation`. + /// + /// If `validate_indices` is `True`, `SparseToSparseSetOperation` validates the + /// order and range of `set1` and `set2` indices. + /// + /// Input `set1` is a `SparseTensor` represented by `set1_indices`, `set1_values`, + /// and `set1_shape`. For `set1` ranked `n`, 1st `n-1` dimensions must be the same + /// as `set2`. Dimension `n` contains values in a set, duplicates are allowed but + /// ignored. + /// + /// Input `set2` is a `SparseTensor` represented by `set2_indices`, `set2_values`, + /// and `set2_shape`. For `set2` ranked `n`, 1st `n-1` dimensions must be the same + /// as `set1`. Dimension `n` contains values in a set, duplicates are allowed but + /// ignored. + /// + /// If `validate_indices` is `True`, this op validates the order and range of `set1` + /// and `set2` indices. + /// + /// Output `result` is a `SparseTensor` represented by `result_indices`, + /// `result_values`, and `result_shape`. For `set1` and `set2` ranked `n`, this + /// has rank `n` and the same 1st `n-1` dimensions as `set1` and `set2`. The `nth` + /// dimension contains the result of `set_operation` applied to the corresponding + /// `[0...n-1]` dimension of `set`. + /// + /// - Parameters: + /// - set1_indices: 2D `Tensor`, indices of a `SparseTensor`. Must be in row-major + /// order. + /// - set1_values: 1D `Tensor`, values of a `SparseTensor`. Must be in row-major + /// order. + /// - set1_shape: 1D `Tensor`, shape of a `SparseTensor`. `set1_shape[0...n-1]` must + /// be the same as `set2_shape[0...n-1]`, `set1_shape[n]` is the + /// max set size across `0...n-1` dimensions. 
+ /// - set2_indices: 2D `Tensor`, indices of a `SparseTensor`. Must be in row-major + /// order. + /// - set2_values: 1D `Tensor`, values of a `SparseTensor`. Must be in row-major + /// order. + /// - set2_shape: 1D `Tensor`, shape of a `SparseTensor`. `set2_shape[0...n-1]` must + /// be the same as `set1_shape[0...n-1]`, `set2_shape[n]` is the + /// max set size across `0...n-1` dimensions. + /// + /// - Outputs: + /// - result_indices: 2D indices of a `SparseTensor`. + /// - result_values: 1D values of a `SparseTensor`. + /// - result_shape: 1D `Tensor` shape of a `SparseTensor`. `result_shape[0...n-1]` is + /// the same as the 1st `n-1` dimensions of `set1` and `set2`, `result_shape[n]` + /// is the max result set size across all `0...n-1` dimensions. + @inlinable @inline(__always) + public static func sparseToSparseSetOperation( + set1Indices: Tensor, + set1Values: Tensor, + set1Shape: Tensor, + set2Indices: Tensor, + set2Values: Tensor, + set2Shape: Tensor, + setOperation: String, + validateIndices: Bool = true + ) -> (resultIndices: Tensor, resultValues: Tensor, resultShape: Tensor) { + _RawTFEager.sparseToSparseSetOperation( + set1Indices: set1Indices, set1Values: set1Values, set1Shape: set1Shape, + set2Indices: set2Indices, set2Values: set2Values, set2Shape: set2Shape, + setOperation: setOperation, validateIndices: validateIndices) + } + + /// Applies set operation along last dimension of 2 `SparseTensor` inputs. + /// + /// See SetOperationOp::SetOperationFromContext for values of `set_operation`. + /// + /// If `validate_indices` is `True`, `SparseToSparseSetOperation` validates the + /// order and range of `set1` and `set2` indices. + /// + /// Input `set1` is a `SparseTensor` represented by `set1_indices`, `set1_values`, + /// and `set1_shape`. For `set1` ranked `n`, 1st `n-1` dimensions must be the same + /// as `set2`. Dimension `n` contains values in a set, duplicates are allowed but + /// ignored. + /// + /// Input `set2` is a `SparseTensor` represented by `set2_indices`, `set2_values`, + /// and `set2_shape`. For `set2` ranked `n`, 1st `n-1` dimensions must be the same + /// as `set1`. Dimension `n` contains values in a set, duplicates are allowed but + /// ignored. + /// + /// If `validate_indices` is `True`, this op validates the order and range of `set1` + /// and `set2` indices. + /// + /// Output `result` is a `SparseTensor` represented by `result_indices`, + /// `result_values`, and `result_shape`. For `set1` and `set2` ranked `n`, this + /// has rank `n` and the same 1st `n-1` dimensions as `set1` and `set2`. The `nth` + /// dimension contains the result of `set_operation` applied to the corresponding + /// `[0...n-1]` dimension of `set`. + /// + /// - Parameters: + /// - set1_indices: 2D `Tensor`, indices of a `SparseTensor`. Must be in row-major + /// order. + /// - set1_values: 1D `Tensor`, values of a `SparseTensor`. Must be in row-major + /// order. + /// - set1_shape: 1D `Tensor`, shape of a `SparseTensor`. `set1_shape[0...n-1]` must + /// be the same as `set2_shape[0...n-1]`, `set1_shape[n]` is the + /// max set size across `0...n-1` dimensions. + /// - set2_indices: 2D `Tensor`, indices of a `SparseTensor`. Must be in row-major + /// order. + /// - set2_values: 1D `Tensor`, values of a `SparseTensor`. Must be in row-major + /// order. + /// - set2_shape: 1D `Tensor`, shape of a `SparseTensor`. `set2_shape[0...n-1]` must + /// be the same as `set1_shape[0...n-1]`, `set2_shape[n]` is the + /// max set size across `0...n-1` dimensions. 
+ /// + /// - Outputs: + /// - result_indices: 2D indices of a `SparseTensor`. + /// - result_values: 1D values of a `SparseTensor`. + /// - result_shape: 1D `Tensor` shape of a `SparseTensor`. `result_shape[0...n-1]` is + /// the same as the 1st `n-1` dimensions of `set1` and `set2`, `result_shape[n]` + /// is the max result set size across all `0...n-1` dimensions. + @inlinable @inline(__always) + public static func sparseToSparseSetOperation( + set1Indices: Tensor, + set1Values: StringTensor, + set1Shape: Tensor, + set2Indices: Tensor, + set2Values: StringTensor, + set2Shape: Tensor, + setOperation: String, + validateIndices: Bool = true + ) -> (resultIndices: Tensor, resultValues: StringTensor, resultShape: Tensor) { + _RawTFEager.sparseToSparseSetOperation( + set1Indices: set1Indices, set1Values: set1Values, set1Shape: set1Shape, + set2Indices: set2Indices, set2Values: set2Values, set2Shape: set2Shape, + setOperation: setOperation, validateIndices: validateIndices) + } + + /// Splits a tensor into `num_split` tensors along one dimension. + /// + /// - Parameters: + /// - split_dim: 0-D. The dimension along which to split. Must be in the range + /// `[-rank(value), rank(value))`. + /// - value: The tensor to split. + /// + /// - Attr num_split: The number of ways to split. Must evenly divide + /// `value.shape[split_dim]`. + /// + /// - Output output: They are identically shaped tensors, whose shape matches that of `value` + /// except along `axis`, where their sizes are + /// `values.shape[split_dim] / num_split`. + @inlinable @inline(__always) + public static func split( + splitDim: Tensor, + value: Tensor, + numSplit: Int64 + ) -> [Tensor] { + switch commonBackend(splitDim.handle.backend, value.handle.backend) { + case .XLA: + return _RawXLA.split(splitDim: splitDim, value: value, numSplit: numSplit) + case .TF_EAGER: + return _RawTFEager.split(splitDim: splitDim, value: value, numSplit: numSplit) + } + + } + + /// Splits a tensor into `num_split` tensors along one dimension. + /// + /// - Parameters: + /// - value: The tensor to split. + /// - size_splits: list containing the sizes of each output tensor along the split + /// dimension. Must sum to the dimension of value along split_dim. + /// Can contain one -1 indicating that dimension is to be inferred. + /// - split_dim: 0-D. The dimension along which to split. Must be in the range + /// `[-rank(value), rank(value))`. + /// + /// - Output output: Tensors whose shape matches that of `value` + /// except along `axis`, where their sizes are + /// `size_splits[i]`. + @inlinable @inline(__always) + public static func splitV< + T: TensorFlowScalar, + Tlen: TensorFlowIndex + >( + value: Tensor, + sizeSplits: Tensor, + splitDim: Tensor, + numSplit: Int64 + ) -> [Tensor] { + switch commonBackend( + commonBackend(value.handle.backend, sizeSplits.handle.backend), splitDim.handle.backend) + { + case .XLA: + return _RawXLA.splitV( + value: value, sizeSplits: sizeSplits, splitDim: splitDim, numSplit: numSplit) + case .TF_EAGER: + return _RawTFEager.splitV( + value: value, sizeSplits: sizeSplits, splitDim: splitDim, numSplit: numSplit) + } + + } + + /// Creates a dataset that executes a SQL query and emits rows of the result set. + /// + /// - Parameters: + /// - driver_name: The database type. Currently, the only supported type is 'sqlite'. + /// - data_source_name: A connection string to connect to the database. + /// - query: A SQL query to execute. 
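+  ///
+  /// A hypothetical invocation (the database path, query, and output signature
+  /// below are illustrative only):
+  ///
+  /// ```swift
+  /// let dataset = _Raw.sqlDataset(
+  ///   driverName: StringTensor("sqlite"),
+  ///   dataSourceName: StringTensor("/tmp/example.db"),
+  ///   query: StringTensor("SELECT id FROM users"),
+  ///   outputTypes: [Int64.tensorFlowDataType],
+  ///   outputShapes: [TensorShape([])])
+  /// ```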
+ @inlinable @inline(__always) + public static func sqlDataset( + driverName: StringTensor, + dataSourceName: StringTensor, + query: StringTensor, + outputTypes: [TensorDataType], + outputShapes: [TensorShape?] + ) -> VariantHandle { + _RawTFEager.sqlDataset( + driverName: driverName, dataSourceName: dataSourceName, query: query, + outputTypes: outputTypes, outputShapes: outputShapes) + } + + /// Computes square root of x element-wise. + /// + /// I.e., \\(y = \sqrt{x} = x^{1/2}\\). + @inlinable @inline(__always) + public static func sqrt( + _ x: Tensor + ) -> Tensor { + switch x.handle.backend { + case .XLA: + return _RawXLA.sqrt(x) + case .TF_EAGER: + return _RawTFEager.sqrt(x) + } + + } + + /// Computes the gradient for the sqrt of `x` wrt its input. + /// + /// Specifically, `grad = dy * 0.5 / y`, where `y = sqrt(x)`, and `dy` + /// is the corresponding input gradient. + @inlinable @inline(__always) + public static func sqrtGrad( + _ y: Tensor, + dy: Tensor + ) -> Tensor { + switch commonBackend(y.handle.backend, dy.handle.backend) { + case .XLA: + let output_device = dy.device + let y = Tensor(copying: y, to: .defaultTFEager) + let dy = Tensor(copying: dy, to: .defaultTFEager) + return Tensor(copying: _RawTFEager.sqrtGrad(y, dy: dy), to: output_device) + case .TF_EAGER: + return _RawTFEager.sqrtGrad(y, dy: dy) + } + + } + + /// Computes square of x element-wise. + /// + /// I.e., \\(y = x * x = x^2\\). + @inlinable @inline(__always) + public static func square( + _ x: Tensor + ) -> Tensor { + switch x.handle.backend { + case .XLA: + return _RawXLA.square(x) + case .TF_EAGER: + return _RawTFEager.square(x) + } + + } + + /// Returns (x - y)(x - y) element-wise. + /// + /// *NOTE*: `SquaredDifference` supports broadcasting. More about broadcasting + /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + @inlinable @inline(__always) + public static func squaredDifference( + _ x: Tensor, + _ y: Tensor + ) -> Tensor { + switch commonBackend(x.handle.backend, y.handle.backend) { + case .XLA: + return _RawXLA.squaredDifference(x, y) + case .TF_EAGER: + return _RawTFEager.squaredDifference(x, y) + } + + } + + /// Removes dimensions of size 1 from the shape of a tensor. + /// + /// Given a tensor `input`, this operation returns a tensor of the same type with + /// all dimensions of size 1 removed. If you don't want to remove all size 1 + /// dimensions, you can remove specific size 1 dimensions by specifying + /// `axis`. + /// + /// For example: + /// + /// ``` + /// # 't' is a tensor of shape [1, 2, 1, 3, 1, 1] + /// shape(squeeze(t)) ==> [2, 3] + /// ``` + /// + /// Or, to remove specific size 1 dimensions: + /// + /// ``` + /// # 't' is a tensor of shape [1, 2, 1, 3, 1, 1] + /// shape(squeeze(t, [2, 4])) ==> [1, 2, 3, 1] + /// ``` + /// + /// - Parameter input: The `input` to squeeze. + /// + /// - Attr squeeze_dims: If specified, only squeezes the dimensions listed. The dimension + /// index starts at 0. It is an error to squeeze a dimension that is not 1. Must + /// be in the range `[-rank(input), rank(input))`. + /// + /// - Output output: Contains the same data as `input`, but has one or more dimensions of + /// size 1 removed. 
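+  ///
+  /// A minimal Swift sketch of the second example above (shapes illustrative):
+  ///
+  /// ```swift
+  /// let t = Tensor<Float>(zeros: [1, 2, 1, 3, 1, 1])
+  /// let s = _Raw.squeeze(t, squeezeDims: [2, 4])
+  /// // s.shape == [1, 2, 3, 1]
+  /// ```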
+ @inlinable @inline(__always) + public static func squeeze( + _ input: Tensor, + squeezeDims: [Int32] + ) -> Tensor { + switch input.handle.backend { + case .XLA: + return _RawXLA.squeeze(input, squeezeDims: squeezeDims) + case .TF_EAGER: + return _RawTFEager.squeeze(input, squeezeDims: squeezeDims) + } + + } + + /// Delete the stack from its resource container. + /// + /// - Parameter handle: The handle to a stack. + @inlinable @inline(__always) + public static func stackCloseV2( + handle: ResourceHandle + ) { + _RawTFEager.stackCloseV2(handle: handle) + } + + /// Pop the element at the top of the stack. + /// + /// - Parameter handle: The handle to a stack. + /// + /// - Attr elem_type: The type of the elem that is popped. + /// + /// - Output elem: The tensor that is popped from the top of the stack. + @inlinable @inline(__always) + public static func stackPopV2( + handle: ResourceHandle + ) -> Tensor { + _RawTFEager.stackPopV2(handle: handle) + } + + /// Push an element onto the stack. + /// + /// - Parameters: + /// - handle: The handle to a stack. + /// - elem: The tensor to be pushed onto the stack. + /// + /// - Attr swap_memory: Swap `elem` to CPU. Default to false. + /// + /// - Output output: The same tensor as the input 'elem'. + @inlinable @inline(__always) + public static func stackPushV2( + handle: ResourceHandle, + elem: Tensor, + swapMemory: Bool = false + ) -> Tensor { + switch elem.handle.backend { + case .XLA: + let output_device = elem.device + let elem = Tensor(copying: elem, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.stackPushV2(handle: handle, elem: elem, swapMemory: swapMemory), + to: output_device) + case .TF_EAGER: + return _RawTFEager.stackPushV2(handle: handle, elem: elem, swapMemory: swapMemory) + } + + } + + /// A stack that produces elements in first-in last-out order. + /// + /// - Parameter max_size: The maximum size of the stack if non-negative. If negative, the stack + /// size is unlimited. + /// + /// - Attrs: + /// - elem_type: The type of the elements on the stack. + /// - stack_name: Overrides the name used for the temporary stack resource. Default + /// value is the name of the 'Stack' op (which is guaranteed unique). + /// + /// - Output handle: The handle to the stack. + @inlinable @inline(__always) + public static func stackV2( + maxSize: Tensor, + elemType: TensorDataType, + stackName: String + ) -> ResourceHandle { + _RawTFEager.stackV2(maxSize: maxSize, elemType: elemType, stackName: stackName) + } + + /// Stage values similar to a lightweight Enqueue. + /// + /// The basic functionality of this Op is similar to a queue with many + /// fewer capabilities and options. This Op is optimized for performance. + /// + /// - Parameter values: a list of tensors + /// dtypes A list of data types that inserted values should adhere to. + /// + /// - Attrs: + /// - capacity: Maximum number of elements in the Staging Area. If > 0, inserts + /// on the container will block when the capacity is reached. + /// - memory_limit: The maximum number of bytes allowed for Tensors in the Staging Area. + /// If > 0, inserts will block until sufficient space is available. + /// - container: If non-empty, this queue is placed in the given container. Otherwise, + /// a default container is used. + /// - shared_name: It is necessary to match this name to the matching Unstage Op. 
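+  ///
+  /// A hypothetical call staging a single tensor (the staging-area name is
+  /// illustrative; `capacity` and `memoryLimit` keep their defaults):
+  ///
+  /// ```swift
+  /// _Raw.stage(
+  ///   Tensor<Float>([1, 2, 3]),
+  ///   container: "",
+  ///   sharedName: "staging_area_0")
+  /// ```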
+ @inlinable @inline(__always) + public static func stage( + _ values: Dtypes, + capacity: Int64 = 0, + memoryLimit: Int64 = 0, + container: String, + sharedName: String + ) { + _RawTFEager.stage( + values, capacity: capacity, memoryLimit: memoryLimit, container: container, + sharedName: sharedName) + } + + /// Op removes all elements in the underlying container. + @inlinable @inline(__always) + public static func stageClear( + capacity: Int64 = 0, + memoryLimit: Int64 = 0, + dtypes: [TensorDataType], + container: String, + sharedName: String + ) { + _RawTFEager.stageClear( + capacity: capacity, memoryLimit: memoryLimit, dtypes: dtypes, container: container, + sharedName: sharedName) + } + + /// Op peeks at the values at the specified index. If the + /// + /// underlying container does not contain sufficient elements + /// this op will block until it does. This Op is optimized for + /// performance. + @inlinable @inline(__always) + public static func stagePeek( + index: Tensor, + capacity: Int64 = 0, + memoryLimit: Int64 = 0, + container: String, + sharedName: String + ) -> Dtypes { + _RawTFEager.stagePeek( + index: index, capacity: capacity, memoryLimit: memoryLimit, container: container, + sharedName: sharedName) + } + + /// Op returns the number of elements in the underlying container. + @inlinable @inline(__always) + public static func stageSize( + capacity: Int64 = 0, + memoryLimit: Int64 = 0, + dtypes: [TensorDataType], + container: String, + sharedName: String + ) -> Tensor { + _RawTFEager.stageSize( + capacity: capacity, memoryLimit: memoryLimit, dtypes: dtypes, container: container, + sharedName: sharedName) + } + + /// returns `f(inputs)`, where `f`'s body is placed and partitioned. + /// + /// - Parameter args: A list of input tensors. + /// + /// - Attrs: + /// - Tin: A list of input types. + /// - Tout: A list of output types. + /// - f: A function that takes 'args', a list of tensors, and returns 'output', + /// another list of tensors. Input and output types are specified by 'Tin' + /// and 'Tout'. The function body of f will be placed and partitioned across + /// devices, setting this op apart from the regular Call op. This op is + /// stateful. + /// + /// - Output output: A list of return values. 
+ @inlinable @inline(__always) + public static func statefulPartitionedCall< + Tin: TensorArrayProtocol, + Tout: TensorGroup, + FIn: TensorGroup, + FOut: TensorGroup + >( + args: Tin, + f: (FIn) -> FOut, + config: String, + configProto: String, + executorType: String + ) -> Tout { + _RawTFEager.statefulPartitionedCall( + args: args, f: f, config: config, configProto: configProto, executorType: executorType) + } + + @inlinable @inline(__always) + public static func statefulRandomBinomial< + S: TensorFlowIndex, + T: TensorFlowNumeric, + Dtype: TensorFlowNumeric + >( + resource: ResourceHandle, + algorithm: Tensor, + shape: Tensor, + counts: Tensor, + probs: Tensor + ) -> Tensor { + switch commonBackend( + commonBackend( + commonBackend(algorithm.handle.backend, shape.handle.backend), counts.handle.backend), + probs.handle.backend) + { + case .XLA: + let output_device = probs.device + let algorithm = Tensor(copying: algorithm, to: .defaultTFEager) + let shape = Tensor(copying: shape, to: .defaultTFEager) + let counts = Tensor(copying: counts, to: .defaultTFEager) + let probs = Tensor(copying: probs, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.statefulRandomBinomial( + resource: resource, algorithm: algorithm, shape: shape, counts: counts, probs: probs), + to: output_device) + case .TF_EAGER: + return _RawTFEager.statefulRandomBinomial( + resource: resource, algorithm: algorithm, shape: shape, counts: counts, probs: probs) + } + + } + + /// Outputs random values from a normal distribution. This op is deprecated in favor of op 'StatefulStandardNormalV2' + /// + /// The generated values will have mean 0 and standard deviation 1. + /// + /// - Parameters: + /// - resource: The handle of the resource variable that stores the state of the RNG. + /// - shape: The shape of the output tensor. + /// + /// - Attr dtype: The type of the output. + /// + /// - Output output: A tensor of the specified shape filled with random normal values. + @inlinable @inline(__always) + public static func statefulStandardNormal< + Dtype: TensorFlowScalar, + ShapeDtype: TensorFlowScalar + >( + resource: ResourceHandle, + shape: Tensor + ) -> Tensor { + switch shape.handle.backend { + case .XLA: + let output_device = shape.device + let shape = Tensor(copying: shape, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.statefulStandardNormal(resource: resource, shape: shape), + to: output_device) + case .TF_EAGER: + return _RawTFEager.statefulStandardNormal(resource: resource, shape: shape) + } + + } + + /// Outputs random values from a normal distribution. + /// + /// The generated values will have mean 0 and standard deviation 1. + /// + /// - Parameters: + /// - resource: The handle of the resource variable that stores the state of the RNG. + /// - algorithm: The RNG algorithm. + /// - shape: The shape of the output tensor. + /// + /// - Attr dtype: The type of the output. + /// + /// - Output output: A tensor of the specified shape filled with random normal values. 
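+  ///
+  /// Sketch of a call, assuming `rngState` is an existing resource variable
+  /// holding the generator state (its creation is not shown here):
+  ///
+  /// ```swift
+  /// let sample: Tensor<Float> = _Raw.statefulStandardNormalV2(
+  ///   resource: rngState,
+  ///   algorithm: Tensor<Int64>(1),  // 1 selects the Philox algorithm.
+  ///   shape: Tensor<Int32>([2, 3]))
+  /// ```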
+ @inlinable @inline(__always) + public static func statefulStandardNormalV2< + Dtype: TensorFlowScalar, + ShapeDtype: TensorFlowScalar + >( + resource: ResourceHandle, + algorithm: Tensor, + shape: Tensor + ) -> Tensor { + switch commonBackend(algorithm.handle.backend, shape.handle.backend) { + case .XLA: + let output_device = shape.device + let algorithm = Tensor(copying: algorithm, to: .defaultTFEager) + let shape = Tensor(copying: shape, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.statefulStandardNormalV2( + resource: resource, algorithm: algorithm, shape: shape), to: output_device) + case .TF_EAGER: + return _RawTFEager.statefulStandardNormalV2( + resource: resource, algorithm: algorithm, shape: shape) + } + + } + + /// Outputs random values from a truncated normal distribution. + /// + /// The generated values follow a normal distribution with mean 0 and standard + /// deviation 1, except that values whose magnitude is more than 2 standard + /// deviations from the mean are dropped and re-picked. + /// + /// - Parameters: + /// - resource: The handle of the resource variable that stores the state of the RNG. + /// - algorithm: The RNG algorithm. + /// - shape: The shape of the output tensor. + /// + /// - Attr dtype: The type of the output. + /// + /// - Output output: Random values with specified shape. + @inlinable @inline(__always) + public static func statefulTruncatedNormal< + Dtype: TensorFlowScalar, + ShapeDtype: TensorFlowScalar + >( + resource: ResourceHandle, + algorithm: Tensor, + shape: Tensor + ) -> Tensor { + switch commonBackend(algorithm.handle.backend, shape.handle.backend) { + case .XLA: + let output_device = shape.device + let algorithm = Tensor(copying: algorithm, to: .defaultTFEager) + let shape = Tensor(copying: shape, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.statefulTruncatedNormal( + resource: resource, algorithm: algorithm, shape: shape), to: output_device) + case .TF_EAGER: + return _RawTFEager.statefulTruncatedNormal( + resource: resource, algorithm: algorithm, shape: shape) + } + + } + + /// Outputs random values from a uniform distribution. + /// + /// The generated values follow a uniform distribution in the range `[0, 1)`. The + /// lower bound 0 is included in the range, while the upper bound 1 is excluded. + /// + /// - Parameters: + /// - resource: The handle of the resource variable that stores the state of the RNG. + /// - algorithm: The RNG algorithm. + /// - shape: The shape of the output tensor. + /// + /// - Attr dtype: The type of the output. + /// + /// - Output output: Random values with specified shape. + @inlinable @inline(__always) + public static func statefulUniform< + Dtype: TensorFlowScalar, + ShapeDtype: TensorFlowScalar + >( + resource: ResourceHandle, + algorithm: Tensor, + shape: Tensor + ) -> Tensor { + switch commonBackend(algorithm.handle.backend, shape.handle.backend) { + case .XLA: + let output_device = shape.device + let algorithm = Tensor(copying: algorithm, to: .defaultTFEager) + let shape = Tensor(copying: shape, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.statefulUniform( + resource: resource, algorithm: algorithm, shape: shape), to: output_device) + case .TF_EAGER: + return _RawTFEager.statefulUniform(resource: resource, algorithm: algorithm, shape: shape) + } + + } + + /// Outputs random integers from a uniform distribution. + /// + /// The generated values are uniform integers covering the whole range of `dtype`. 
+ /// + /// - Parameters: + /// - resource: The handle of the resource variable that stores the state of the RNG. + /// - algorithm: The RNG algorithm. + /// - shape: The shape of the output tensor. + /// + /// - Attr dtype: The type of the output. + /// + /// - Output output: Random values with specified shape. + @inlinable @inline(__always) + public static func statefulUniformFullInt< + Dtype: TensorFlowScalar, + ShapeDtype: TensorFlowScalar + >( + resource: ResourceHandle, + algorithm: Tensor, + shape: Tensor + ) -> Tensor { + switch commonBackend(algorithm.handle.backend, shape.handle.backend) { + case .XLA: + let output_device = shape.device + let algorithm = Tensor(copying: algorithm, to: .defaultTFEager) + let shape = Tensor(copying: shape, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.statefulUniformFullInt( + resource: resource, algorithm: algorithm, shape: shape), to: output_device) + case .TF_EAGER: + return _RawTFEager.statefulUniformFullInt( + resource: resource, algorithm: algorithm, shape: shape) + } + + } + + /// Outputs random integers from a uniform distribution. + /// + /// The generated values are uniform integers in the range `[minval, maxval)`. + /// The lower bound `minval` is included in the range, while the upper bound + /// `maxval` is excluded. + /// + /// The random integers are slightly biased unless `maxval - minval` is an exact + /// power of two. The bias is small for values of `maxval - minval` significantly + /// smaller than the range of the output (either `2^32` or `2^64`). + /// + /// - Parameters: + /// - resource: The handle of the resource variable that stores the state of the RNG. + /// - algorithm: The RNG algorithm. + /// - shape: The shape of the output tensor. + /// - minval: Minimum value (inclusive, scalar). + /// - maxval: Maximum value (exclusive, scalar). + /// + /// - Attr dtype: The type of the output. + /// + /// - Output output: Random values with specified shape. + @inlinable @inline(__always) + public static func statefulUniformInt< + Dtype: TensorFlowScalar, + ShapeDtype: TensorFlowScalar + >( + resource: ResourceHandle, + algorithm: Tensor, + shape: Tensor, + minval: Tensor, + maxval: Tensor + ) -> Tensor { + switch commonBackend( + commonBackend( + commonBackend(algorithm.handle.backend, shape.handle.backend), minval.handle.backend), + maxval.handle.backend) + { + case .XLA: + let output_device = maxval.device + let algorithm = Tensor(copying: algorithm, to: .defaultTFEager) + let shape = Tensor(copying: shape, to: .defaultTFEager) + let minval = Tensor(copying: minval, to: .defaultTFEager) + let maxval = Tensor(copying: maxval, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.statefulUniformInt( + resource: resource, algorithm: algorithm, shape: shape, minval: minval, maxval: maxval), + to: output_device) + case .TF_EAGER: + return _RawTFEager.statefulUniformInt( + resource: resource, algorithm: algorithm, shape: shape, minval: minval, maxval: maxval) + } + + } + + /// output = cond ? then_branch(input) : else_branch(input) + /// + /// - Parameters: + /// - cond: A Tensor. If the tensor is a scalar of non-boolean type, the + /// scalar is converted to a boolean according to the + /// following rule: if the scalar is a numerical value, non-zero means + /// `True` and zero means False; if the scalar is a string, non-empty + /// means `True` and empty means `False`. If the tensor is not a scalar, + /// being empty means False and being non-empty means True. 
+ /// + /// This should only be used when the if then/else body functions do not + /// have stateful ops. + /// - input: A list of input tensors. + /// + /// - Attrs: + /// - Tin: A list of input types. + /// - Tout: A list of output types. + /// - then_branch: A function that takes 'inputs' and returns a list of tensors, whose + /// types are the same as what else_branch returns. + /// - else_branch: A function that takes 'inputs' and returns a list of tensors, whose + /// types are the same as what then_branch returns. + /// + /// - Output output: A list of return values. + @inlinable @inline(__always) + public static func statelessIf< + Tcond: TensorFlowScalar, + Tin: TensorArrayProtocol, + Tout: TensorGroup, + ThenbranchIn: TensorGroup, + ThenbranchOut: TensorGroup, + ElsebranchIn: TensorGroup, + ElsebranchOut: TensorGroup + >( + cond: Tensor, + _ input: Tin, + thenBranch: (ThenbranchIn) -> ThenbranchOut, + elseBranch: (ElsebranchIn) -> ElsebranchOut, + outputShapes: [TensorShape?] + ) -> Tout { + _RawTFEager.statelessIf( + cond: cond, input, thenBranch: thenBranch, elseBranch: elseBranch, + outputShapes: outputShapes) + } + + /// Draws samples from a multinomial distribution. + /// + /// - Parameters: + /// - logits: 2-D Tensor with shape `[batch_size, num_classes]`. Each slice `[i, :]` + /// represents the unnormalized log probabilities for all classes. + /// - num_samples: 0-D. Number of independent samples to draw for each row slice. + /// - seed: 2 seeds (shape [2]). + /// + /// - Output output: 2-D Tensor with shape `[batch_size, num_samples]`. Each slice `[i, :]` + /// contains the drawn class labels with range `[0, num_classes)`. + @inlinable @inline(__always) + public static func statelessMultinomial< + T: TensorFlowNumeric, + Tseed: TensorFlowIndex, + OutputDtype: TensorFlowIndex + >( + logits: Tensor, + numSamples: Tensor, + seed: Tensor + ) -> Tensor { + switch commonBackend( + commonBackend(logits.handle.backend, numSamples.handle.backend), seed.handle.backend) + { + case .XLA: + return _RawXLA.statelessMultinomial(logits: logits, numSamples: numSamples, seed: seed) + case .TF_EAGER: + return _RawTFEager.statelessMultinomial(logits: logits, numSamples: numSamples, seed: seed) + } + + } + + /// Outputs deterministic pseudorandom values from a normal distribution. + /// + /// The generated values will have mean 0 and standard deviation 1. + /// + /// The outputs are a deterministic function of `shape` and `seed`. + /// + /// - Parameters: + /// - shape: The shape of the output tensor. + /// - seed: 2 seeds (shape [2]). + /// + /// - Attr dtype: The type of the output. + /// + /// - Output output: Random values with specified shape. + @inlinable @inline(__always) + public static func statelessRandomNormal< + Dtype: FloatingPoint & TensorFlowScalar, + T: TensorFlowIndex, + Tseed: TensorFlowIndex + >( + shape: Tensor, + seed: Tensor + ) -> Tensor { + switch commonBackend(shape.handle.backend, seed.handle.backend) { + case .XLA: + return _RawXLA.statelessRandomNormal(shape: shape, seed: seed) + case .TF_EAGER: + return _RawTFEager.statelessRandomNormal(shape: shape, seed: seed) + } + + } + + /// Outputs deterministic pseudorandom random values from a uniform distribution. + /// + /// The generated values follow a uniform distribution in the range `[0, 1)`. The + /// lower bound 0 is included in the range, while the upper bound 1 is excluded. + /// + /// The outputs are a deterministic function of `shape` and `seed`. 
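+  ///
+  /// For example (the seed values are arbitrary):
+  ///
+  /// ```swift
+  /// let u: Tensor<Float> = _Raw.statelessRandomUniform(
+  ///   shape: Tensor<Int32>([2, 2]),
+  ///   seed: Tensor<Int32>([42, 7]))
+  /// // Re-running with the same shape and seed yields identical values.
+  /// ```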
+ /// + /// - Parameters: + /// - shape: The shape of the output tensor. + /// - seed: 2 seeds (shape [2]). + /// + /// - Attr dtype: The type of the output. + /// + /// - Output output: Random values with specified shape. + @inlinable @inline(__always) + public static func statelessRandomUniform< + Dtype: FloatingPoint & TensorFlowScalar, + T: TensorFlowIndex, + Tseed: TensorFlowIndex + >( + shape: Tensor, + seed: Tensor + ) -> Tensor { + switch commonBackend(shape.handle.backend, seed.handle.backend) { + case .XLA: + return _RawXLA.statelessRandomUniform(shape: shape, seed: seed) + case .TF_EAGER: + return _RawTFEager.statelessRandomUniform(shape: shape, seed: seed) + } + + } + + /// Outputs deterministic pseudorandom random integers from a uniform distribution. + /// + /// The generated values follow a uniform distribution in the range `[minval, maxval)`. + /// + /// The outputs are a deterministic function of `shape`, `seed`, `minval`, and `maxval`. + /// + /// - Parameters: + /// - shape: The shape of the output tensor. + /// - seed: 2 seeds (shape [2]). + /// - minval: Minimum value (inclusive, scalar). + /// - maxval: Maximum value (exclusive, scalar). + /// + /// - Attr dtype: The type of the output. + /// + /// - Output output: Random values with specified shape. + @inlinable @inline(__always) + public static func statelessRandomUniformInt< + Dtype: TensorFlowIndex, + T: TensorFlowIndex, + Tseed: TensorFlowIndex + >( + shape: Tensor, + seed: Tensor, + minval: Tensor, + maxval: Tensor + ) -> Tensor { + switch commonBackend( + commonBackend( + commonBackend(shape.handle.backend, seed.handle.backend), minval.handle.backend), + maxval.handle.backend) + { + case .XLA: + return _RawXLA.statelessRandomUniformInt( + shape: shape, seed: seed, minval: minval, maxval: maxval) + case .TF_EAGER: + return _RawTFEager.statelessRandomUniformInt( + shape: shape, seed: seed, minval: minval, maxval: maxval) + } + + } + + /// Outputs deterministic pseudorandom values from a truncated normal distribution. + /// + /// The generated values follow a normal distribution with mean 0 and standard + /// deviation 1, except that values whose magnitude is more than 2 standard + /// deviations from the mean are dropped and re-picked. + /// + /// The outputs are a deterministic function of `shape` and `seed`. + /// + /// - Parameters: + /// - shape: The shape of the output tensor. + /// - seed: 2 seeds (shape [2]). + /// + /// - Attr dtype: The type of the output. + /// + /// - Output output: Random values with specified shape. + @inlinable @inline(__always) + public static func statelessTruncatedNormal< + Dtype: FloatingPoint & TensorFlowScalar, + T: TensorFlowIndex, + Tseed: TensorFlowIndex + >( + shape: Tensor, + seed: Tensor + ) -> Tensor { + switch commonBackend(shape.handle.backend, seed.handle.backend) { + case .XLA: + return _RawXLA.statelessTruncatedNormal(shape: shape, seed: seed) + case .TF_EAGER: + return _RawTFEager.statelessTruncatedNormal(shape: shape, seed: seed) + } + + } + + /// output = input; While (Cond(output)) { output = Body(output) } + /// + /// - Parameter input: A list of input tensors whose types are T. + /// + /// - Attrs: + /// - T: dtype in use. + /// - cond: A function takes 'input' and returns a tensor. 
If the tensor is + /// a scalar of non-boolean, the scalar is converted to a boolean + /// according to the following rule: if the scalar is a numerical + /// value, non-zero means True and zero means False; if the scalar is + /// a string, non-empty means True and empty means False. If the + /// tensor is not a scalar, non-emptiness means True and False + /// otherwise. + /// + /// This should only be used when the while condition and body functions + /// do not have stateful ops. + /// - body: A function that takes a list of tensors and returns another + /// list of tensors. Both lists have the same types as specified + /// by T. + /// + /// - Output output: A list of output tensors whose types are T. + @inlinable @inline(__always) + public static func statelessWhile< + T: TensorArrayProtocol, + CondIn: TensorGroup, + CondOut: TensorGroup, + BodyIn: TensorGroup, + BodyOut: TensorGroup + >( + _ input: T, + cond: (CondIn) -> CondOut, + body: (BodyIn) -> BodyOut, + outputShapes: [TensorShape?], + parallelIterations: Int64 = 10 + ) -> T { + _RawTFEager.statelessWhile( + input, cond: cond, body: body, outputShapes: outputShapes, + parallelIterations: parallelIterations) + } + + /// Check if the input matches the regex pattern. + /// + /// The input is a string tensor of any shape. The pattern is the + /// regular expression to be matched with every element of the input tensor. + /// The boolean values (True or False) of the output tensor indicate + /// if the input matches the regex pattern provided. + /// + /// The pattern follows the re2 syntax (https://github.com/google/re2/wiki/Syntax) + /// + /// - Parameter input: A string tensor of the text to be processed. + /// + /// - Attr pattern: The regular expression to match the input. + /// + /// - Output output: A bool tensor with the same shape as `input`. + @inlinable @inline(__always) + public static func staticRegexFullMatch( + _ input: StringTensor, + pattern: String + ) -> Tensor { + _RawTFEager.staticRegexFullMatch(input, pattern: pattern) + } + + /// Replaces the match of pattern in input with rewrite. + /// + /// It follows the re2 syntax (https://github.com/google/re2/wiki/Syntax) + /// + /// - Parameter input: The text to be processed. + /// + /// - Attrs: + /// - pattern: The regular expression to match the input. + /// - rewrite: The rewrite to be applied to the matched expression. + /// - replace_global: If True, the replacement is global, otherwise the replacement + /// is done only on the first match. + /// + /// - Output output: The text after applying pattern and rewrite. + @inlinable @inline(__always) + public static func staticRegexReplace( + _ input: StringTensor, + pattern: String, + rewrite: String, + replaceGlobal: Bool = true + ) -> StringTensor { + _RawTFEager.staticRegexReplace( + input, pattern: pattern, rewrite: rewrite, replaceGlobal: replaceGlobal) + } + + /// Creates a statistics manager resource. + @inlinable @inline(__always) + public static func statsAggregatorHandle( + container: String, + sharedName: String + ) -> ResourceHandle { + _RawTFEager.statsAggregatorHandle(container: container, sharedName: sharedName) + } + + @inlinable @inline(__always) + public static func statsAggregatorHandleV2( + container: String, + sharedName: String + ) -> ResourceHandle { + _RawTFEager.statsAggregatorHandleV2(container: container, sharedName: sharedName) + } + + /// Set a summary_writer_interface to record statistics using given stats_aggregator. 
+  @inlinable @inline(__always)
+  public static func statsAggregatorSetSummaryWriter(
+    statsAggregator: ResourceHandle,
+    summary: ResourceHandle
+  ) {
+    _RawTFEager.statsAggregatorSetSummaryWriter(
+      statsAggregator: statsAggregator, summary: summary)
+  }
+
+  /// Produces a summary of any statistics recorded by the given statistics manager.
+  @inlinable @inline(__always)
+  public static func statsAggregatorSummary(
+    iterator: ResourceHandle
+  ) -> StringTensor {
+    _RawTFEager.statsAggregatorSummary(iterator: iterator)
+  }
+
+  /// Stops gradient computation.
+  ///
+  /// When executed in a graph, this op outputs its input tensor as-is.
+  ///
+  /// When building ops to compute gradients, this op prevents the contribution of
+  /// its inputs to be taken into account. Normally, the gradient generator adds ops
+  /// to a graph to compute the derivatives of a specified 'loss' by recursively
+  /// finding out inputs that contributed to its computation. If you insert this op
+  /// in the graph, its inputs are masked from the gradient generator. They are not
+  /// taken into account for computing gradients.
+  ///
+  /// This is useful any time you want to compute a value with TensorFlow but need
+  /// to pretend that the value was a constant. Some examples include:
+  ///
+  /// * The *EM* algorithm where the *M-step* should not involve backpropagation
+  ///   through the output of the *E-step*.
+  /// * Contrastive divergence training of Boltzmann machines where, when
+  ///   differentiating the energy function, the training must not backpropagate
+  ///   through the graph that generated the samples from the model.
+  /// * Adversarial training, where no backprop should happen through the adversarial
+  ///   example generation process.
+  @inlinable @inline(__always)
+  public static func stopGradient<T: TensorFlowScalar>(
+    _ input: Tensor<T>
+  ) -> Tensor<T> {
+    switch input.handle.backend {
+    case .XLA:
+      let output_device = input.device
+      let input = Tensor(copying: input, to: .defaultTFEager)
+      return Tensor(copying: _RawTFEager.stopGradient(input), to: output_device)
+    case .TF_EAGER:
+      return _RawTFEager.stopGradient(input)
+    }
+
+  }
+
+  /// Return a strided slice from `input`.
+  ///
+  /// Note, most python users will want to use the Python `Tensor.__getitem__`
+  /// or `Variable.__getitem__` rather than this op directly.
+  ///
+  /// The goal of this op is to produce a new tensor with a subset of
+  /// the elements from the `n` dimensional `input` tensor. The subset is chosen using
+  /// a sequence of `m` sparse range specifications encoded into the arguments
+  /// of this function. Note, in some cases
+  /// `m` could be equal to `n`, but this need not be the case. Each
+  /// range specification entry can be one of the following:
+  ///
+  /// - An ellipsis (...). Ellipses are used to imply zero or more
+  ///   dimensions of full-dimension selection and are produced using
+  ///   `ellipsis_mask`. For example, `foo[...]` is the identity slice.
+  ///
+  /// - A new axis. This is used to insert a new shape=1 dimension and is
+  ///   produced using `new_axis_mask`. For example, `foo[:, ...]` where
+  ///   `foo` is shape `(3, 4)` produces a `(1, 3, 4)` tensor.
+  ///
+  /// - A range `begin:end:stride`. This is used to specify how much to choose from
+  ///   a given dimension. `stride` can be any integer but 0. `begin` is an integer
+  ///   which represents the index of the first value to select while `end` represents
+  ///   the index of the last value to select. The number of values selected in each
+  ///   dimension is `end - begin` if `stride > 0` and `begin - end` if `stride < 0`.
+  ///   `begin` and `end` can be negative where `-1` is the last element, `-2` is
+  ///   the second to last. `begin_mask` controls whether to replace the explicitly
+  ///   given `begin` with an implicit effective value of `0` if `stride > 0` and
+  ///   `-1` if `stride < 0`. `end_mask` is analogous but produces the number
+  ///   required to create the largest open interval. For example, given a shape
+  ///   `(3,)` tensor `foo[:]`, the effective `begin` and `end` are `0` and `3`. Do
+  ///   not assume this is equivalent to `foo[0:-1]` which has an effective `begin`
+  ///   and `end` of `0` and `2`. Another example is `foo[-2::-1]` which reverses the
+  ///   first dimension of a tensor while dropping the last two (in the original
+  ///   order) elements. For example `foo = [1,2,3,4]; foo[-2::-1]` is `[4,3]`.
+  ///
+  /// - A single index. This is used to keep only elements that have a given
+  ///   index. For example, `foo[2, :]` on a shape `(5, 6)` tensor produces a
+  ///   shape `(6,)` tensor. This is encoded in `begin` and `end` and
+  ///   `shrink_axis_mask`.
+  ///
+  /// Each conceptual range specification is encoded in the op's argument. This
+  /// encoding is best understood by considering a non-trivial example. In
+  /// particular,
+  /// `foo[1, 2:4, None, ..., :-3:-1, :]` will be encoded as
+  ///
+  /// ```
+  /// begin = [1, 2, x, x, 0, x] # x denotes don't care (usually 0)
+  /// end = [2, 4, x, x, -3, x]
+  /// strides = [1, 1, x, x, -1, 1]
+  /// begin_mask = 1<<4 | 1<<5 = 48
+  /// end_mask = 1<<5 = 32
+  /// ellipsis_mask = 1<<3 = 8
+  /// new_axis_mask = 1<<2 = 4
+  /// shrink_axis_mask = 1<<0 = 1
+  /// ```
+  ///
+  /// In this case if `foo.shape` is (5, 5, 5, 5, 5, 5) the final shape of
+  /// the slice becomes (2, 1, 5, 5, 2, 5).
+  /// Let us walk step by step through each argument specification.
+  ///
+  /// 1. The first argument in the example slice is turned into `begin = 1` and
+  ///    `end = begin + 1 = 2`. To disambiguate from the original spec `2:4` we
+  ///    also set the appropriate bit in `shrink_axis_mask`.
+  ///
+  /// 2. `2:4` contributes 2, 4, 1 to begin, end, and stride. All masks have
+  ///    zero bits contributed.
+  ///
+  /// 3. None is a synonym for `tf.newaxis`. This means insert a size 1
+  ///    dimension in the final shape. Dummy values are contributed to begin,
+  ///    end and stride, while the new_axis_mask bit is set.
+  ///
+  /// 4. `...` grabs the full ranges from as many dimensions as needed to
+  ///    fully specify a slice for every dimension of the input shape.
+  ///
+  /// 5. `:-3:-1` shows the use of negative indices. A negative index `i` associated
+  ///    with a dimension that has shape `s` is converted to a positive index
+  ///    `s + i`. So `-1` becomes `s-1` (i.e. the last element). This conversion
+  ///    is done internally so begin, end and strides receive x, -3, and -1.
+  ///    The appropriate begin_mask bit is set to indicate the start range is the
+  ///    full range (ignoring the x).
+  ///
+  /// 6. `:` indicates that the entire contents of the corresponding dimension
+  ///    is selected. This is equivalent to `::` or `0::1`. begin, end, and strides
+  ///    receive 0, 0, and 1, respectively. The appropriate bits in `begin_mask` and
+  ///    `end_mask` are also set.
+  ///
+  /// *Requirements*:
+  ///   `0 != strides[i] for i in [0, m)`
+  ///   `ellipsis_mask must be a power of two (only one ellipsis)`
+  ///
+  /// - Parameters:
+  ///   - begin: `begin[k]` specifies the offset into the `k`th range specification.
+  ///     The exact dimension this corresponds to will be determined by context.
+  ///     Out-of-bounds values will be silently clamped. If the `k`th bit of
+  ///     `begin_mask` is set, then `begin[k]` is ignored and the full range of the
+  ///     appropriate dimension is used instead. Negative values cause indexing
+  ///     to start from the highest element e.g. If `foo==[1,2,3]` then `foo[-1]==3`.
+  ///   - end: `end[i]` is like `begin` with the exception that `end_mask` is
+  ///     used to determine full ranges.
+  ///   - strides: `strides[i]` specifies the increment in the `i`th specification
+  ///     after extracting a given element. Negative indices will reverse
+  ///     the original order. Out-of-range values are
+  ///     clamped to `[0,dim[i]) if slice[i]>0` or `[-1,dim[i]-1] if slice[i] < 0`
+  ///
+  /// - Attrs:
+  ///   - begin_mask: a bitmask where a bit i being 1 means to ignore the begin
+  ///     value and instead use the largest interval possible. At runtime
+  ///     begin[i] will be replaced with `[0, n-1)` if `stride[i] > 0` or
+  ///     `[-1, n-1]` if `stride[i] < 0`
+  ///   - end_mask: analogous to `begin_mask`
+  ///   - ellipsis_mask: a bitmask where bit `i` being 1 means the `i`th
+  ///     position is actually an ellipsis. At most one bit can be 1.
+  ///     If `ellipsis_mask == 0`, then an implicit ellipsis mask of `1 << (m+1)`
+  ///     is provided. This means that `foo[3:5] == foo[3:5, ...]`. An ellipsis
+  ///     implicitly creates as many range specifications as necessary to fully
+  ///     specify the sliced range for every dimension. For example for a 4-dimensional
+  ///     tensor `foo` the slice `foo[2, ..., 5:8]` implies `foo[2, :, :, 5:8]`.
+  ///   - new_axis_mask: a bitmask where bit `i` being 1 means the `i`th
+  ///     specification creates a new shape 1 dimension. For example
+  ///     `foo[:4, tf.newaxis, :2]` would produce a shape `(4, 1, 2)` tensor.
+  ///   - shrink_axis_mask: a bitmask where bit `i` implies that the `i`th
+  ///     specification should shrink the dimensionality. begin and end
+  ///     must imply a slice of size 1 in the dimension. For example in
+  ///     python one might do `foo[:, 3, :]` which would result in
+  ///     `shrink_axis_mask` being 2.
+  @inlinable @inline(__always)
+  public static func stridedSlice<
+    T: TensorFlowScalar,
+    Index: TensorFlowIndex
+  >(
+    _ input: Tensor<T>,
+    begin: Tensor<Index>,
+    end: Tensor<Index>,
+    strides: Tensor<Index>,
+    beginMask: Int64 = 0,
+    endMask: Int64 = 0,
+    ellipsisMask: Int64 = 0,
+    newAxisMask: Int64 = 0,
+    shrinkAxisMask: Int64 = 0
+  ) -> Tensor<T> {
+    switch commonBackend(
+      commonBackend(
+        commonBackend(input.handle.backend, begin.handle.backend), end.handle.backend),
+      strides.handle.backend)
+    {
+    case .XLA:
+      return _RawXLA.stridedSlice(
+        input, begin: begin, end: end, strides: strides, beginMask: beginMask, endMask: endMask,
+        ellipsisMask: ellipsisMask, newAxisMask: newAxisMask, shrinkAxisMask: shrinkAxisMask)
+    case .TF_EAGER:
+      return _RawTFEager.stridedSlice(
+        input, begin: begin, end: end, strides: strides, beginMask: beginMask, endMask: endMask,
+        ellipsisMask: ellipsisMask, newAxisMask: newAxisMask, shrinkAxisMask: shrinkAxisMask)
+    }
+
+  }
+
+  /// Returns the gradient of `StridedSlice`.
+  ///
+  /// Since `StridedSlice` cuts out pieces of its `input` which is size
+  /// `shape`, its gradient will have the same shape (which is passed here
+  /// as `shape`).
The gradient will be zero in any element that the slice + /// does not select. + /// + /// Arguments are the same as StridedSliceGrad with the exception that + /// `dy` is the input gradient to be propagated and `shape` is the + /// shape of `StridedSlice`'s `input`. + @inlinable @inline(__always) + public static func stridedSliceGrad< + T: TensorFlowScalar, + Index: TensorFlowIndex + >( + shape: Tensor, + begin: Tensor, + end: Tensor, + strides: Tensor, + dy: Tensor, + beginMask: Int64 = 0, + endMask: Int64 = 0, + ellipsisMask: Int64 = 0, + newAxisMask: Int64 = 0, + shrinkAxisMask: Int64 = 0 + ) -> Tensor { + switch commonBackend( + commonBackend( + commonBackend( + commonBackend(shape.handle.backend, begin.handle.backend), end.handle.backend), + strides.handle.backend), dy.handle.backend) + { + case .XLA: + return _RawXLA.stridedSliceGrad( + shape: shape, begin: begin, end: end, strides: strides, dy: dy, beginMask: beginMask, + endMask: endMask, ellipsisMask: ellipsisMask, newAxisMask: newAxisMask, + shrinkAxisMask: shrinkAxisMask) + case .TF_EAGER: + return _RawTFEager.stridedSliceGrad( + shape: shape, begin: begin, end: end, strides: strides, dy: dy, beginMask: beginMask, + endMask: endMask, ellipsisMask: ellipsisMask, newAxisMask: newAxisMask, + shrinkAxisMask: shrinkAxisMask) + } + + } + + /// Formats a string template using a list of tensors. + /// + /// Formats a string template using a list of tensors, pretty-printing tensor summaries. + /// + /// - Parameter inputs: The list of tensors to format into the placeholder string. + /// + /// - Attrs: + /// - template: A string, the template to format tensor summaries into. + /// - placeholder: A string, at each placeholder in the template a subsequent tensor summary will be inserted. + /// - summarize: When formatting the tensor summaries print the first and last summarize entries of each tensor dimension. + /// + /// - Output output: = The resulting string scalar. + @inlinable @inline(__always) + public static func stringFormat( + inputs: T, + template: String = "%s", + placeholder: String = "%s", + summarize: Int64 = 3 + ) -> StringTensor { + _RawTFEager.stringFormat( + inputs: inputs, template: template, placeholder: placeholder, summarize: summarize) + } + + /// Joins the strings in the given list of string tensors into one tensor; + /// + /// with the given separator (default is an empty separator). + /// + /// Examples: + /// + /// >>> s = ["hello", "world", "tensorflow"] + /// >>> tf.strings.join(s, " ") + /// + /// + /// - Parameter inputs: A list of string tensors. The tensors must all have the same shape, + /// or be scalars. Scalars may be mixed in; these will be broadcast to the shape + /// of non-scalar inputs. + /// + /// - Attr separator: string, an optional join separator. + @inlinable @inline(__always) + public static func stringJoin( + inputs: [StringTensor], + separator: String + ) -> StringTensor { + _RawTFEager.stringJoin(inputs: inputs, separator: separator) + } + + /// String lengths of `input`. + /// + /// Computes the length of each string given in the input tensor. + /// + /// >>> strings = tf.constant(['Hello','TensorFlow', '\U0001F642']) + /// >>> tf.strings.length(strings).numpy() # default counts bytes + /// array([ 5, 10, 4], dtype=int32) + /// >>> tf.strings.length(strings, unit="UTF8_CHAR").numpy() + /// array([ 5, 10, 1], dtype=int32) + /// + /// + /// - Parameter input: The strings for which to compute the length for each element. 
+  ///
+  /// - Attr unit: The unit that is counted to compute string length. One of: `"BYTE"` (for
+  ///   the number of bytes in each string) or `"UTF8_CHAR"` (for the number of UTF-8
+  ///   encoded Unicode code points in each string). Results are undefined
+  ///   if `unit=UTF8_CHAR` and the `input` strings do not contain structurally
+  ///   valid UTF-8.
+  ///
+  /// - Output output: Integer tensor that has the same shape as `input`. The output contains the
+  ///   element-wise string lengths of `input`.
+  @inlinable @inline(__always)
+  public static func stringLength(
+    _ input: StringTensor,
+    unit: Unit = .byte
+  ) -> Tensor<Int32> {
+    _RawTFEager.stringLength(input, unit: unit)
+  }
+
+  @inlinable @inline(__always)
+  public static func stringListAttr(
+    _ a: [String],
+    _ b: String
+  ) {
+    _RawTFEager.stringListAttr(a, b)
+  }
+
+  /// Converts all uppercase characters into their respective lowercase replacements.
+  ///
+  /// Example:
+  ///
+  /// >>> tf.strings.lower("CamelCase string and ALL CAPS")
+  ///
+  ///
+  @inlinable @inline(__always)
+  public static func stringLower(
+    _ input: StringTensor,
+    encoding: String
+  ) -> StringTensor {
+    _RawTFEager.stringLower(input, encoding: encoding)
+  }
+
+  /// Creates ngrams from ragged string data.
+  ///
+  /// This op accepts a ragged tensor with 1 ragged dimension containing only
+  /// strings and outputs a ragged tensor with 1 ragged dimension containing ngrams
+  /// of that string, joined along the innermost axis.
+  ///
+  /// - Parameters:
+  ///   - data: The values tensor of the ragged string tensor to make ngrams out of. Must be a
+  ///     1D string tensor.
+  ///   - data_splits: The splits tensor of the ragged string tensor to make ngrams out of.
+  ///
+  /// - Attrs:
+  ///   - separator: The string to append between elements of the token. Use "" for no separator.
+  ///   - ngram_widths: The sizes of the ngrams to create.
+  ///   - left_pad: The string to use to pad the left side of the ngram sequence. Only used if
+  ///     pad_width != 0.
+  ///   - right_pad: The string to use to pad the right side of the ngram sequence. Only used if
+  ///     pad_width != 0.
+  ///   - pad_width: The number of padding elements to add to each side of each
+  ///     sequence. Note that padding will never be greater than 'ngram_widths'-1
+  ///     regardless of this value. If `pad_width=-1`, then add `max(ngram_widths)-1`
+  ///     elements.
+  ///
+  /// - Outputs:
+  ///   - ngrams: The values tensor of the output ngrams ragged tensor.
+  ///   - ngrams_splits: The splits tensor of the output ngrams ragged tensor.
+  @inlinable @inline(__always)
+  public static func stringNGrams<Tsplits: TensorFlowIndex>(
+    data: StringTensor,
+    dataSplits: Tensor<Tsplits>,
+    separator: String,
+    ngramWidths: [Int32],
+    leftPad: String,
+    rightPad: String,
+    padWidth: Int64,
+    preserveShortSequences: Bool
+  ) -> (ngrams: StringTensor, ngramsSplits: Tensor<Tsplits>) {
+    _RawTFEager.stringNGrams(
+      data: data, dataSplits: dataSplits, separator: separator, ngramWidths: ngramWidths,
+      leftPad: leftPad, rightPad: rightPad, padWidth: padWidth,
+      preserveShortSequences: preserveShortSequences)
+  }
+
+  /// Split elements of `input` based on `delimiter` into a `SparseTensor`.
+  ///
+  /// Let N be the size of source (typically N will be the batch size). Split each
+  /// element of `input` based on `delimiter` and return a `SparseTensor`
+  /// containing the split tokens. Empty tokens are ignored.
+  ///
+  /// `delimiter` can be empty, or a string of split characters.
+  /// If `delimiter` is an empty string, each element of `input` is split into
+  /// individual single-byte character strings, including splitting of UTF-8
+  /// multibyte sequences. Otherwise every character of `delimiter` is a
+  /// potential split point.
+  ///
+  /// For example:
+  ///   N = 2, input[0] is 'hello world' and input[1] is 'a b c', then the output
+  ///   will be
+  ///
+  ///   indices = [0, 0;
+  ///              0, 1;
+  ///              1, 0;
+  ///              1, 1;
+  ///              1, 2]
+  ///   shape = [2, 3]
+  ///   values = ['hello', 'world', 'a', 'b', 'c']
+  ///
+  /// - Parameters:
+  ///   - input: 1-D. Strings to split.
+  ///   - delimiter: 0-D. Delimiter characters (bytes), or empty string.
+  ///
+  /// - Attr skip_empty: A `bool`. If `True`, skip the empty strings from the result.
+  ///
+  /// - Outputs:
+  ///   - indices: A dense matrix of int64 representing the indices of the sparse tensor.
+  ///   - values: A vector of strings corresponding to the split values.
+  ///   - shape: a length-2 vector of int64 representing the shape of the sparse
+  ///     tensor, where the first value is N and the second value is the maximum number
+  ///     of tokens in a single input entry.
+  @inlinable @inline(__always)
+  public static func stringSplit(
+    _ input: StringTensor,
+    delimiter: StringTensor,
+    skipEmpty: Bool = true
+  ) -> (indices: Tensor<Int64>, values: StringTensor, shape: Tensor<Int64>) {
+    _RawTFEager.stringSplit(input, delimiter: delimiter, skipEmpty: skipEmpty)
+  }
+
+  /// Split elements of `source` based on `sep` into a `SparseTensor`.
+  ///
+  /// Let N be the size of source (typically N will be the batch size). Split each
+  /// element of `source` based on `sep` and return a `SparseTensor`
+  /// containing the split tokens. Empty tokens are ignored.
+  ///
+  /// For example, N = 2, source[0] is 'hello world' and source[1] is 'a b c',
+  /// then the output will be
+  /// ```
+  /// st.indices = [0, 0;
+  ///               0, 1;
+  ///               1, 0;
+  ///               1, 1;
+  ///               1, 2]
+  /// st.shape = [2, 3]
+  /// st.values = ['hello', 'world', 'a', 'b', 'c']
+  /// ```
+  ///
+  /// If `sep` is given, consecutive delimiters are not grouped together and are
+  /// deemed to delimit empty strings. For example, source of `"1<>2<><>3"` and
+  /// sep of `"<>"` returns `["1", "2", "", "3"]`. If `sep` is None or an empty
+  /// string, consecutive whitespace is regarded as a single separator, and the
+  /// result will contain no empty strings at the start or end if the string has
+  /// leading or trailing whitespace.
+  ///
+  /// Note that the above mentioned behavior matches python's str.split.
+  ///
+  /// - Parameters:
+  ///   - input: `1-D` string `Tensor`, the strings to split.
+  ///   - sep: `0-D` string `Tensor`, the delimiter character.
+  ///
+  /// - Attr maxsplit: An `int`. If `maxsplit > 0`, limit of the split of the result.
+  @inlinable @inline(__always)
+  public static func stringSplitV2(
+    _ input: StringTensor,
+    sep: StringTensor,
+    maxsplit: Int64 = -1
+  ) -> (indices: Tensor<Int64>, values: StringTensor, shape: Tensor<Int64>) {
+    _RawTFEager.stringSplitV2(input, sep: sep, maxsplit: maxsplit)
+  }
+
+  /// Strip leading and trailing whitespaces from the Tensor.
+  ///
+  /// - Parameter input: A string `Tensor` of any shape.
+  ///
+  /// - Output output: A string `Tensor` of the same shape as the input.
+ /// + /// Examples: + /// + /// >>> tf.strings.strip(["\nTensorFlow", " The python library "]).numpy() + /// array([b'TensorFlow', b'The python library'], dtype=object) + @inlinable @inline(__always) + public static func stringStrip( + _ input: StringTensor + ) -> StringTensor { + _RawTFEager.stringStrip(input) + } + + /// Converts each string in the input Tensor to its hash mod by a number of buckets. + /// + /// The hash function is deterministic on the content of the string within the + /// process. + /// + /// Note that the hash function may change from time to time. + /// This functionality will be deprecated and it's recommended to use + /// `tf.string_to_hash_bucket_fast()` or `tf.string_to_hash_bucket_strong()`. + /// + /// - Attr num_buckets: The number of buckets. + /// + /// - Output output: A Tensor of the same shape as the input `string_tensor`. + @inlinable @inline(__always) + public static func stringToHashBucket( + stringTensor: StringTensor, + numBuckets: Int64 + ) -> Tensor { + _RawTFEager.stringToHashBucket(stringTensor: stringTensor, numBuckets: numBuckets) + } + + /// Converts each string in the input Tensor to its hash mod by a number of buckets. + /// + /// The hash function is deterministic on the content of the string within the + /// process and will never change. However, it is not suitable for cryptography. + /// This function may be used when CPU time is scarce and inputs are trusted or + /// unimportant. There is a risk of adversaries constructing inputs that all hash + /// to the same bucket. To prevent this problem, use a strong hash function with + /// `tf.string_to_hash_bucket_strong`. + /// + /// Examples: + /// + /// >>> tf.strings.to_hash_bucket_fast(["Hello", "TensorFlow", "2.x"], 3).numpy() + /// array([0, 2, 2]) + /// + /// - Parameter input: The strings to assign a hash bucket. + /// + /// - Attr num_buckets: The number of buckets. + /// + /// - Output output: A Tensor of the same shape as the input `string_tensor`. + @inlinable @inline(__always) + public static func stringToHashBucketFast( + _ input: StringTensor, + numBuckets: Int64 + ) -> Tensor { + _RawTFEager.stringToHashBucketFast(input, numBuckets: numBuckets) + } + + /// Converts each string in the input Tensor to its hash mod by a number of buckets. + /// + /// The hash function is deterministic on the content of the string within the + /// process. The hash function is a keyed hash function, where attribute `key` + /// defines the key of the hash function. `key` is an array of 2 elements. + /// + /// A strong hash is important when inputs may be malicious, e.g. URLs with + /// additional components. Adversaries could try to make their inputs hash to the + /// same bucket for a denial-of-service attack or to skew the results. A strong + /// hash can be used to make it difficult to find inputs with a skewed hash value + /// distribution over buckets. This requires that the hash function is + /// seeded by a high-entropy (random) "key" unknown to the adversary. + /// + /// The additional robustness comes at a cost of roughly 4x higher compute + /// time than `tf.string_to_hash_bucket_fast`. + /// + /// Examples: + /// + /// >>> tf.strings.to_hash_bucket_strong(["Hello", "TF"], 3, [1, 2]).numpy() + /// array([2, 0]) + /// + /// - Parameter input: The strings to assign a hash bucket. + /// + /// - Attrs: + /// - num_buckets: The number of buckets. + /// - key: The key used to seed the hash function, passed as a list of two uint64 + /// elements. 
+ /// + /// - Output output: A Tensor of the same shape as the input `string_tensor`. + @inlinable @inline(__always) + public static func stringToHashBucketStrong( + _ input: StringTensor, + numBuckets: Int64, + key: [Int32] + ) -> Tensor { + _RawTFEager.stringToHashBucketStrong(input, numBuckets: numBuckets, key: key) + } + + /// Converts each string in the input Tensor to the specified numeric type. + /// + /// (Note that int32 overflow results in an error while float overflow + /// results in a rounded value.) + /// + /// Example: + /// + /// >>> strings = ["5.0", "3.0", "7.0"] + /// >>> tf.strings.to_number(strings) + /// + /// + /// + /// - Attr out_type: The numeric type to interpret each string in `string_tensor` as. + /// + /// - Output output: A Tensor of the same shape as the input `string_tensor`. + @inlinable @inline(__always) + public static func stringToNumber( + stringTensor: StringTensor + ) -> Tensor { + _RawTFEager.stringToNumber(stringTensor: stringTensor) + } + + /// Converts all lowercase characters into their respective uppercase replacements. + /// + /// Example: + /// + /// >>> tf.strings.upper("CamelCase string and ALL CAPS") + /// + /// + @inlinable @inline(__always) + public static func stringUpper( + _ input: StringTensor, + encoding: String + ) -> StringTensor { + _RawTFEager.stringUpper(input, encoding: encoding) + } + + @inlinable @inline(__always) + public static func stubResourceHandleOp( + container: String, + sharedName: String + ) -> ResourceHandle { + _RawTFEager.stubResourceHandleOp(container: container, sharedName: sharedName) + } + + /// Returns x - y element-wise. + /// + /// *NOTE*: `Subtract` supports broadcasting. More about broadcasting + /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + @inlinable @inline(__always) + public static func sub( + _ x: Tensor, + _ y: Tensor + ) -> Tensor { + switch commonBackend(x.handle.backend, y.handle.backend) { + case .XLA: + return _RawXLA.sub(x, y) + case .TF_EAGER: + return _RawTFEager.sub(x, y) + } + + } + + /// Return substrings from `Tensor` of strings. + /// + /// For each string in the input `Tensor`, creates a substring starting at index + /// `pos` with a total length of `len`. + /// + /// If `len` defines a substring that would extend beyond the length of the input + /// string, or if `len` is negative, then as many characters as possible are used. + /// + /// A negative `pos` indicates distance within the string backwards from the end. + /// + /// If `pos` specifies an index which is out of range for any of the input strings, + /// then an `InvalidArgumentError` is thrown. + /// + /// `pos` and `len` must have the same shape, otherwise a `ValueError` is thrown on + /// Op creation. + /// + /// *NOTE*: `Substr` supports broadcasting up to two dimensions. 
More about
+  /// broadcasting
+  /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
+  ///
+  /// ---
+  ///
+  /// Examples
+  ///
+  /// Using scalar `pos` and `len`:
+  ///
+  /// ```python
+  /// input = [b'Hello', b'World']
+  /// position = 1
+  /// length = 3
+  ///
+  /// output = [b'ell', b'orl']
+  /// ```
+  ///
+  /// Using `pos` and `len` with same shape as `input`:
+  ///
+  /// ```python
+  /// input = [[b'ten', b'eleven', b'twelve'],
+  ///          [b'thirteen', b'fourteen', b'fifteen'],
+  ///          [b'sixteen', b'seventeen', b'eighteen']]
+  /// position = [[1, 2, 3],
+  ///             [1, 2, 3],
+  ///             [1, 2, 3]]
+  /// length = [[2, 3, 4],
+  ///           [4, 3, 2],
+  ///           [5, 5, 5]]
+  ///
+  /// output = [[b'en', b'eve', b'lve'],
+  ///           [b'hirt', b'urt', b'te'],
+  ///           [b'ixtee', b'vente', b'hteen']]
+  /// ```
+  ///
+  /// Broadcasting `pos` and `len` onto `input`:
+  ///
+  /// ```
+  /// input = [[b'ten', b'eleven', b'twelve'],
+  ///          [b'thirteen', b'fourteen', b'fifteen'],
+  ///          [b'sixteen', b'seventeen', b'eighteen'],
+  ///          [b'nineteen', b'twenty', b'twentyone']]
+  /// position = [1, 2, 3]
+  /// length = [1, 2, 3]
+  ///
+  /// output = [[b'e', b'ev', b'lve'],
+  ///           [b'h', b'ur', b'tee'],
+  ///           [b'i', b've', b'hte'],
+  ///           [b'i', b'en', b'nty']]
+  /// ```
+  ///
+  /// Broadcasting `input` onto `pos` and `len`:
+  ///
+  /// ```
+  /// input = b'thirteen'
+  /// position = [1, 5, 7]
+  /// length = [3, 2, 1]
+  ///
+  /// output = [b'hir', b'ee', b'n']
+  /// ```
+  ///
+  /// Raises:
+  ///
+  ///   * `ValueError`: If the first argument cannot be converted to a
+  ///     Tensor of `dtype string`.
+  ///   * `InvalidArgumentError`: If indices are out of range.
+  ///   * `ValueError`: If `pos` and `len` are not the same shape.
+  ///
+  /// - Parameters:
+  ///   - input: Tensor of strings
+  ///   - pos: Scalar defining the position of first character in each substring
+  ///   - len: Scalar defining the number of characters to include in each substring
+  ///
+  /// - Attr unit: The unit that is used to create the substring. One of: `"BYTE"` (for
+  ///   defining position and length by bytes) or `"UTF8_CHAR"` (for the UTF-8
+  ///   encoded Unicode code points). The default is `"BYTE"`. Results are undefined if
+  ///   `unit=UTF8_CHAR` and the `input` strings do not contain structurally valid
+  ///   UTF-8.
+  ///
+  /// - Output output: Tensor of substrings
+  @inlinable @inline(__always)
+  public static func substr<T: TensorFlowIndex>(
+    _ input: StringTensor,
+    pos: Tensor<T>,
+    len: Tensor<T>,
+    unit: Unit = .byte
+  ) -> StringTensor {
+    _RawTFEager.substr(input, pos: pos, len: len, unit: unit)
+  }
+
+  /// Computes the sum of elements across dimensions of a tensor.
+  ///
+  /// Reduces `input` along the dimensions given in `axis`. Unless
+  /// `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
+  /// `axis`. If `keep_dims` is true, the reduced dimensions are
+  /// retained with length 1.
+  ///
+  /// - Parameters:
+  ///   - input: The tensor to reduce.
+  ///   - reduction_indices: The dimensions to reduce. Must be in the range
+  ///     `[-rank(input), rank(input))`.
+  ///
+  /// - Attr keep_dims: If true, retain reduced dimensions with length 1.
+  ///
+  /// - Output output: The reduced tensor.
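+  ///
+  /// A minimal usage sketch of this dispatcher (illustrative only; the tensor
+  /// values and shapes here are assumptions, not part of this patch):
+  ///
+  /// ```swift
+  /// let x = Tensor<Float>(shape: [2, 3], scalars: [1, 2, 3, 4, 5, 6])
+  /// // Reduce across columns (axis 1): yields [6.0, 15.0].
+  /// let rowSums = _Raw.sum(x, reductionIndices: Tensor<Int32>([1]))
+  /// ```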
+  @inlinable @inline(__always)
+  public static func sum<
+    T: TensorFlowNumeric,
+    Tidx: TensorFlowIndex
+  >(
+    _ input: Tensor<T>,
+    reductionIndices: Tensor<Tidx>,
+    keepDims: Bool = false
+  ) -> Tensor<T> {
+    switch commonBackend(input.handle.backend, reductionIndices.handle.backend) {
+    case .XLA:
+      return _RawXLA.sum(input, reductionIndices: reductionIndices, keepDims: keepDims)
+    case .TF_EAGER:
+      return _RawTFEager.sum(input, reductionIndices: reductionIndices, keepDims: keepDims)
+    }
+
+  }
+
+  @inlinable @inline(__always)
+  public static func summaryWriter(
+    sharedName: String,
+    container: String
+  ) -> ResourceHandle {
+    _RawTFEager.summaryWriter(sharedName: sharedName, container: container)
+  }
+
+  /// Computes the singular value decompositions of one or more matrices.
+  ///
+  /// Computes the SVD of each inner matrix in `input` such that
+  /// `input[..., :, :] = u[..., :, :] * diag(s[..., :, :]) * transpose(v[..., :, :])`
+  ///
+  /// ```python
+  /// # a is a tensor containing a batch of matrices.
+  /// # s is a tensor of singular values for each matrix.
+  /// # u is the tensor containing the left singular vectors for each matrix.
+  /// # v is the tensor containing the right singular vectors for each matrix.
+  /// s, u, v = svd(a)
+  /// s, _, _ = svd(a, compute_uv=False)
+  /// ```
+  ///
+  /// - Parameter input: A tensor of shape `[..., M, N]` whose inner-most 2 dimensions
+  ///   form matrices of size `[M, N]`. Let `P` be the minimum of `M` and `N`.
+  ///
+  /// - Attrs:
+  ///   - compute_uv: If true, left and right singular vectors will be
+  ///     computed and returned in `u` and `v`, respectively.
+  ///     If false, `u` and `v` are not set and should never be referenced.
+  ///   - full_matrices: If true, compute full-sized `u` and `v`. If false
+  ///     (the default), compute only the leading `P` singular vectors.
+  ///     Ignored if `compute_uv` is `False`.
+  ///
+  /// - Outputs:
+  ///   - s: Singular values. Shape is `[..., P]`.
+  ///   - u: Left singular vectors. If `full_matrices` is `False` then shape is
+  ///     `[..., M, P]`; if `full_matrices` is `True` then shape is
+  ///     `[..., M, M]`. Undefined if `compute_uv` is `False`.
+  ///   - v: Right singular vectors. If `full_matrices` is `False` then shape is
+  ///     `[..., N, P]`. If `full_matrices` is `True` then shape is `[..., N, N]`.
+  ///     Undefined if `compute_uv` is false.
+  @inlinable @inline(__always)
+  public static func svd<T: FloatingPoint & TensorFlowScalar>(
+    _ input: Tensor<T>,
+    computeUv: Bool = true,
+    fullMatrices: Bool = false
+  ) -> (s: Tensor<T>, u: Tensor<T>, v: Tensor<T>) {
+    switch input.handle.backend {
+    case .XLA:
+      return _RawXLA.svd(input, computeUv: computeUv, fullMatrices: fullMatrices)
+    case .TF_EAGER:
+      return _RawTFEager.svd(input, computeUv: computeUv, fullMatrices: fullMatrices)
+    }
+  }
+
+  /// Forwards `data` to the output port determined by `pred`.
+  ///
+  /// If `pred` is true, the `data` input is forwarded to `output_true`. Otherwise,
+  /// the data goes to `output_false`.
+  ///
+  /// See also `RefSwitch` and `Merge`.
+  ///
+  /// - Parameters:
+  ///   - data: The tensor to be forwarded to the appropriate output.
+  ///   - pred: A scalar that specifies which output port will receive data.
+  ///
+  /// - Outputs:
+  ///   - output_false: If `pred` is false, data will be forwarded to this output.
+  ///   - output_true: If `pred` is true, data will be forwarded to this output.
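+  ///
+  /// A minimal dispatch sketch (illustrative only; the values are assumptions,
+  /// and in eager mode only the output selected by `pred` carries valid data):
+  ///
+  /// ```swift
+  /// let (ifFalse, ifTrue) = _Raw.switch_(
+  ///   data: Tensor<Float>([1, 2, 3]), pred: Tensor<Bool>(true))
+  /// // Here `pred` is true, so `ifTrue` receives the data.
+  /// ```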
+  @inlinable @inline(__always)
+  public static func switch_<T: TensorFlowScalar>(
+    data: Tensor<T>,
+    pred: Tensor<Bool>
+  ) -> (outputFalse: Tensor<T>, outputTrue: Tensor<T>) {
+    _RawTFEager.switch_(data: data, pred: pred)
+  }
+
+  /// Computes the gradient function for function f via backpropagation.
+  ///
+  /// - Parameter input: a list of input tensors of size N + M;
+  ///
+  /// - Attrs:
+  ///   - Tin: the type list for the input list.
+  ///   - Tout: the type list for the output list.
+  ///   - f: The function we want to compute the gradient for.
+  ///
+  ///     The function 'f' must be a numerical function which takes N inputs and
+  ///     produces M outputs. Its gradient function 'g', which is computed by
+  ///     this SymbolicGradient op, is a function taking N + M inputs and
+  ///     producing N outputs.
+  ///
+  ///     I.e. if we have
+  ///       (y1, y2, ..., y_M) = f(x1, x2, ..., x_N),
+  ///     then g is
+  ///       (dL/dx1, dL/dx2, ..., dL/dx_N) = g(x1, x2, ..., x_N,
+  ///                                          dL/dy1, dL/dy2, ..., dL/dy_M),
+  ///
+  ///     where L is a scalar-valued function of (x1, x2, ..., x_N) (e.g., the
+  ///     loss function). dL/dx_i is the partial derivative of L with respect
+  ///     to x_i.
+  ///
+  /// - Output output: a list of output tensors of size N;
+  @inlinable @inline(__always)
+  public static func symbolicGradient<
+    Tin: TensorArrayProtocol,
+    Tout: TensorGroup,
+    FIn: TensorGroup,
+    FOut: TensorGroup
+  >(
+    _ input: Tin,
+    f: (FIn) -> FOut
+  ) -> Tout {
+    _RawTFEager.symbolicGradient(input, f: f)
+  }
+
+  /// Creates a dataset that emits the records from one or more TFRecord files.
+  ///
+  /// - Parameters:
+  ///   - filenames: A scalar or vector containing the name(s) of the file(s) to be
+  ///     read.
+  ///   - compression_type: A scalar containing either (i) the empty string (no
+  ///     compression), (ii) "ZLIB", or (iii) "GZIP".
+  ///   - buffer_size: A scalar representing the number of bytes to buffer. A value of
+  ///     0 means no buffering will be performed.
+  @inlinable @inline(__always)
+  public static func tFRecordDataset(
+    filenames: StringTensor,
+    compressionType: StringTensor,
+    bufferSize: Tensor<Int64>
+  ) -> VariantHandle {
+    _RawTFEager.tFRecordDataset(
+      filenames: filenames, compressionType: compressionType, bufferSize: bufferSize)
+  }
+
+  /// A Reader that outputs the records from a TensorFlow Records file.
+  ///
+  /// - Attrs:
+  ///   - container: If non-empty, this reader is placed in the given container.
+  ///     Otherwise, a default container is used.
+  ///   - shared_name: If non-empty, this reader is named in the given bucket
+  ///     with this shared_name. Otherwise, the node name is used instead.
+  ///
+  /// - Output reader_handle: The handle to reference the Reader.
+  @inlinable @inline(__always)
+  public static func tFRecordReaderV2(
+    container: String,
+    sharedName: String,
+    compressionType: String
+  ) -> ResourceHandle {
+    _RawTFEager.tFRecordReaderV2(
+      container: container, sharedName: sharedName, compressionType: compressionType)
+  }
+
+  /// Returns the result of a TPU compilation.
+  ///
+  /// This operation returns the result of a TPU compilation as a serialized
+  /// CompilationResultProto, which holds a status and an error message if an error
+  /// occurred during compilation.
+  @inlinable @inline(__always)
+  public static func tPUCompilationResult() -> StringTensor {
+    _RawTFEager.tPUCompilationResult()
+  }
+
+  /// An op enabling differentiation of TPU Embeddings.
+  ///
+  /// This op simply returns its first input, which is assumed to have been sliced
+  /// from the Tensors returned by TPUEmbeddingDequeueActivations. The presence of
+  /// this op, and its first argument being a trainable Variable, enables automatic
+  /// differentiation of graphs containing embeddings via the TPU Embedding Python
+  /// libraries.
+  ///
+  /// - Parameters:
+  ///   - embedding_variable: A trainable variable, enabling optimizers to find this op.
+  ///   - sliced_activations: The embedding activations Tensor to return.
+  ///
+  /// - Attrs:
+  ///   - table_id: The id of the table in the embedding layer configuration from which
+  ///     these activations were computed.
+  ///   - lookup_id: Identifier of the set of embedding indices which produced these
+  ///     activations.
+  @inlinable @inline(__always)
+  public static func tPUEmbeddingActivations(
+    embeddingVariable: Tensor<Float>,
+    slicedActivations: Tensor<Float>,
+    tableId: Int64,
+    lookupId: Int64
+  ) -> Tensor<Float> {
+    switch commonBackend(embeddingVariable.handle.backend, slicedActivations.handle.backend) {
+    case .XLA:
+      let output_device = slicedActivations.device
+      let embeddingVariable = Tensor(copying: embeddingVariable, to: .defaultTFEager)
+      let slicedActivations = Tensor(copying: slicedActivations, to: .defaultTFEager)
+      return Tensor(
+        copying: _RawTFEager.tPUEmbeddingActivations(
+          embeddingVariable: embeddingVariable, slicedActivations: slicedActivations,
+          tableId: tableId, lookupId: lookupId), to: output_device)
+    case .TF_EAGER:
+      return _RawTFEager.tPUEmbeddingActivations(
+        embeddingVariable: embeddingVariable, slicedActivations: slicedActivations,
+        tableId: tableId, lookupId: lookupId)
+    }
+
+  }
+
+  /// A TPU core selector Op.
+  ///
+  /// This Op produces a set of TPU cores (for warm-up) or a single TPU core
+  /// (for regular inference) to execute the TPU program on. The output is
+  /// consumed by TPUPartitionedCall.
+  ///
+  /// - Output device_ordinals: A vector of 1 or more TPU cores.
+  @inlinable @inline(__always)
+  public static func tPUOrdinalSelector() -> Tensor<Int32> {
+    _RawTFEager.tPUOrdinalSelector()
+  }
+
+  /// Calls a function placed on a specified TPU device.
+  ///
+  /// - Parameters:
+  ///   - args: The arguments to the function.
+  ///   - device_ordinal: The TPU device ordinal to run the function on.
+  ///
+  /// - Attrs:
+  ///   - Tin: The types of the arguments to the function.
+  ///   - Tout: The types of the outputs of the function.
+  ///   - f: The function to call.
+  ///
+  /// - Output output: The output of the function call.
+  @inlinable @inline(__always)
+  public static func tPUPartitionedCall<
+    Tin: TensorArrayProtocol,
+    Tout: TensorGroup,
+    FIn: TensorGroup,
+    FOut: TensorGroup
+  >(
+    args: Tin,
+    deviceOrdinal: Tensor<Int32>,
+    f: (FIn) -> FOut,
+    autotunerThresh: Int64 = 0
+  ) -> Tout {
+    _RawTFEager.tPUPartitionedCall(
+      args: args, deviceOrdinal: deviceOrdinal, f: f, autotunerThresh: autotunerThresh)
+  }
+
+  /// Metadata indicating how the TPU computation should be replicated.
+  ///
+  /// This operation holds the metadata common to operations of a `tpu.replicate()` computation subgraph.
+  ///
+  /// - Attrs:
+  ///   - num_replicas: Number of replicas of the computation
+  ///   - num_cores_per_replica: Number of cores per replica. Used for model parallelism.
+  ///   - topology: TopologyProto indicating the topology of the TPU pod slice.
+  ///   - use_tpu: Whether to place the computation on the TPU.
+  ///   - device_assignment: The assignment of devices for the computation.
+  ///   - computation_shape: DEPRECATED.
Use num_cores_per_replica instead. + @inlinable @inline(__always) + public static func tPUReplicateMetadata( + numReplicas: Int64, + numCoresPerReplica: Int64 = 1, + topology: String, + useTpu: Bool = true, + deviceAssignment: [Int32], + computationShape: [Int32], + hostComputeCore: [String], + paddingMap: [String], + stepMarkerLocation: String = "STEP_MARK_AT_ENTRY", + allowSoftPlacement: Bool = false + ) { + _RawTFEager.tPUReplicateMetadata( + numReplicas: numReplicas, numCoresPerReplica: numCoresPerReplica, topology: topology, + useTpu: useTpu, deviceAssignment: deviceAssignment, computationShape: computationShape, + hostComputeCore: hostComputeCore, paddingMap: paddingMap, + stepMarkerLocation: stepMarkerLocation, allowSoftPlacement: allowSoftPlacement) + } + + /// Connects N inputs to an N-way replicated TPU computation. + /// + /// This operation holds a replicated input to a `tpu.replicate()` computation subgraph. + /// Each replicated input has the same shape and type alongside the output. + /// + /// For example: + /// ``` + /// %a = "tf.opA"() + /// %b = "tf.opB"() + /// %replicated_input = "tf.TPUReplicatedInput"(%a, %b) + /// %computation = "tf.Computation"(%replicated_input) + /// ``` + /// The above computation has a replicated input of two replicas. + @inlinable @inline(__always) + public static func tPUReplicatedInput( + inputs: [Tensor], + isMirroredVariable: Bool = false, + index: Int64 = -1 + ) -> Tensor { + _RawTFEager.tPUReplicatedInput( + inputs: inputs, isMirroredVariable: isMirroredVariable, index: index) + } + + /// Connects N outputs from an N-way replicated TPU computation. + /// + /// This operation holds a replicated output from a `tpu.replicate()` computation subgraph. + /// Each replicated output has the same shape and type alongside the input. + /// + /// For example: + /// ``` + /// %computation = "tf.Computation"() + /// %replicated_output:2 = "tf.TPUReplicatedOutput"(%computation) + /// ``` + /// The above computation has a replicated output of two replicas. 
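+  ///
+  /// A shape-level sketch (illustrative only; `output` stands in for the
+  /// result of a replicated computation and is an assumption, as is the
+  /// replica count):
+  ///
+  /// ```swift
+  /// // Splits one replicated result into two per-replica tensors.
+  /// let replicas: [Tensor<Float>] = _Raw.tPUReplicatedOutput(output, numReplicas: 2)
+  /// ```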
+ @inlinable @inline(__always) + public static func tPUReplicatedOutput( + _ input: Tensor, + numReplicas: Int64 + ) -> [Tensor] { + switch input.handle.backend { + case .XLA: + let output_device = input.device + let input = Tensor(copying: input, to: .defaultTFEager) + return [Tensor]( + copying: _RawTFEager.tPUReplicatedOutput(input, numReplicas: numReplicas), + to: output_device) + case .TF_EAGER: + return _RawTFEager.tPUReplicatedOutput(input, numReplicas: numReplicas) + } + + } + + @inlinable @inline(__always) + public static func tRTEngineOp< + SegmentfuncIn: TensorGroup, + SegmentfuncOut: TensorGroup, + Intt: TensorArrayProtocol, + Outt: TensorGroup + >( + inTensor: Intt, + serializedSegment: String, + segmentFunc: (SegmentfuncIn) -> SegmentfuncOut, + maxCachedEnginesCount: Int64 = 1, + workspaceSizeBytes: Int64, + precisionMode: PrecisionMode, + calibrationData: String, + useCalibration: Bool = true, + segmentFuncdefName: String, + cachedEngineBatches: [Int32], + fixedInputSize: Bool = true, + inputShapes: [TensorShape?], + outputShapes: [TensorShape?], + staticEngine: Bool = true + ) -> Outt { + _RawTFEager.tRTEngineOp( + inTensor: inTensor, serializedSegment: serializedSegment, segmentFunc: segmentFunc, + maxCachedEnginesCount: maxCachedEnginesCount, workspaceSizeBytes: workspaceSizeBytes, + precisionMode: precisionMode, calibrationData: calibrationData, + useCalibration: useCalibration, segmentFuncdefName: segmentFuncdefName, + cachedEngineBatches: cachedEngineBatches, fixedInputSize: fixedInputSize, + inputShapes: inputShapes, outputShapes: outputShapes, staticEngine: staticEngine) + } + + /// Creates a dataset that contains `count` elements from the `input_dataset`. + /// + /// - Parameter count: A scalar representing the number of elements from the `input_dataset` + /// that should be taken. A value of `-1` indicates that all of `input_dataset` + /// is taken. + @inlinable @inline(__always) + public static func takeDataset( + inputDataset: VariantHandle, + count: Tensor, + outputTypes: [TensorDataType], + outputShapes: [TensorShape?] + ) -> VariantHandle { + _RawTFEager.takeDataset( + inputDataset: inputDataset, count: count, outputTypes: outputTypes, + outputShapes: outputShapes) + } + + /// Read `SparseTensors` from a `SparseTensorsMap` and concatenate them. + /// + /// The input `sparse_handles` must be an `int64` matrix of shape `[N, 1]` where + /// `N` is the minibatch size and the rows correspond to the output handles of + /// `AddSparseToTensorsMap` or `AddManySparseToTensorsMap`. The ranks of the + /// original `SparseTensor` objects that went into the given input ops must all + /// match. When the final `SparseTensor` is created, it has rank one + /// higher than the ranks of the incoming `SparseTensor` objects + /// (they have been concatenated along a new row dimension on the left). + /// + /// The output `SparseTensor` object's shape values for all dimensions but the + /// first are the max across the input `SparseTensor` objects' shape values + /// for the corresponding dimensions. Its first shape value is `N`, the minibatch + /// size. + /// + /// The input `SparseTensor` objects' indices are assumed ordered in + /// standard lexicographic order. If this is not the case, after this + /// step run `SparseReorder` to restore index ordering. 
+  ///
+  /// For example, if the handles represent an input, which is a `[2, 3]` matrix
+  /// representing two original `SparseTensor` objects:
+  ///
+  /// ```
+  /// index = [ 0]
+  ///         [10]
+  ///         [20]
+  /// values = [1, 2, 3]
+  /// shape = [50]
+  /// ```
+  ///
+  /// and
+  ///
+  /// ```
+  /// index = [ 2]
+  ///         [10]
+  /// values = [4, 5]
+  /// shape = [30]
+  /// ```
+  ///
+  /// then the final `SparseTensor` will be:
+  ///
+  /// ```
+  /// index = [0  0]
+  ///         [0 10]
+  ///         [0 20]
+  ///         [1  2]
+  ///         [1 10]
+  /// values = [1, 2, 3, 4, 5]
+  /// shape = [2 50]
+  /// ```
+  ///
+  /// - Parameter sparse_handles: 1-D, The `N` serialized `SparseTensor` objects.
+  ///   Shape: `[N]`.
+  ///
+  /// - Attrs:
+  ///   - dtype: The `dtype` of the `SparseTensor` objects stored in the
+  ///     `SparseTensorsMap`.
+  ///   - container: The container name for the `SparseTensorsMap` read by this op.
+  ///   - shared_name: The shared name for the `SparseTensorsMap` read by this op.
+  ///     It should not be blank; rather the `shared_name` or unique Operation name
+  ///     of the Op that created the original `SparseTensorsMap` should be used.
+  ///
+  /// - Outputs:
+  ///   - sparse_indices: 2-D. The `indices` of the minibatch `SparseTensor`.
+  ///   - sparse_values: 1-D. The `values` of the minibatch `SparseTensor`.
+  ///   - sparse_shape: 1-D. The `shape` of the minibatch `SparseTensor`.
+  @inlinable @inline(__always)
+  public static func takeManySparseFromTensorsMap<Dtype: TensorFlowScalar>(
+    sparseHandles: Tensor<Int64>,
+    container: String,
+    sharedName: String
+  ) -> (sparseIndices: Tensor<Int64>, sparseValues: Tensor<Dtype>, sparseShape: Tensor<Int64>) {
+    _RawTFEager.takeManySparseFromTensorsMap(
+      sparseHandles: sparseHandles, container: container, sharedName: sharedName)
+  }
+
+  /// Creates a dataset that stops iteration when `predicate` is false.
+  ///
+  /// The `predicate` function must return a scalar boolean and accept the
+  /// following arguments:
+  ///
+  /// * One tensor for each component of an element of `input_dataset`.
+  /// * One tensor for each value in `other_arguments`.
+  ///
+  /// - Parameter other_arguments: A list of tensors, typically values that were captured when
+  ///   building a closure for `predicate`.
+  ///
+  /// - Attr predicate: A function returning a scalar boolean.
+  @inlinable @inline(__always)
+  public static func takeWhileDataset<
+    PredicateIn: TensorGroup,
+    PredicateOut: TensorGroup,
+    Targuments: TensorArrayProtocol
+  >(
+    inputDataset: VariantHandle,
+    otherArguments: Targuments,
+    predicate: (PredicateIn) -> PredicateOut,
+    outputTypes: [TensorDataType],
+    outputShapes: [TensorShape?]
+  ) -> VariantHandle {
+    _RawTFEager.takeWhileDataset(
+      inputDataset: inputDataset, otherArguments: otherArguments, predicate: predicate,
+      outputTypes: outputTypes, outputShapes: outputShapes)
+  }
+
+  /// Computes tan of x element-wise.
+  ///
+  /// Given an input tensor, this function computes tangent of every
+  /// element in the tensor. Input range is `(-inf, inf)` and
+  /// output range is `(-inf, inf)`. If input lies outside the boundary, `nan`
+  /// is returned.
+  ///
+  /// ```python
+  /// x = tf.constant([-float("inf"), -9, -0.5, 1, 1.2, 200, 10000, float("inf")])
+  /// tf.math.tan(x) ==> [nan 0.45231566 -0.5463025 1.5574077 2.572152 -1.7925274 0.32097113 nan]
+  /// ```
+  @inlinable @inline(__always)
+  public static func tan<T: FloatingPoint & TensorFlowScalar>(
+    _ x: Tensor<T>
+  ) -> Tensor<T> {
+    switch x.handle.backend {
+    case .XLA:
+      return _RawXLA.tan(x)
+    case .TF_EAGER:
+      return _RawTFEager.tan(x)
+    }
+
+  }
+
+  /// Computes hyperbolic tangent of `x` element-wise.
+  ///
+  /// Given an input tensor, this function computes hyperbolic tangent of every
+  /// element in the tensor. Input range is `[-inf, inf]` and
+  /// output range is `[-1,1]`.
+  ///
+  /// ```python
+  /// x = tf.constant([-float("inf"), -5, -0.5, 1, 1.2, 2, 3, float("inf")])
+  /// tf.math.tanh(x) ==> [-1. -0.99990916 -0.46211717 0.7615942 0.8336547 0.9640276 0.9950547 1.]
+  /// ```
+  @inlinable @inline(__always)
+  public static func tanh<T: FloatingPoint & TensorFlowScalar>(
+    _ x: Tensor<T>
+  ) -> Tensor<T> {
+    switch x.handle.backend {
+    case .XLA:
+      return _RawXLA.tanh(x)
+    case .TF_EAGER:
+      return _RawTFEager.tanh(x)
+    }
+
+  }
+
+  /// Computes the gradient for the tanh of `x` wrt its input.
+  ///
+  /// Specifically, `grad = dy * (1 - y*y)`, where `y = tanh(x)`, and `dy`
+  /// is the corresponding input gradient.
+  @inlinable @inline(__always)
+  public static func tanhGrad<T: FloatingPoint & TensorFlowScalar>(
+    _ y: Tensor<T>,
+    dy: Tensor<T>
+  ) -> Tensor<T> {
+    switch commonBackend(y.handle.backend, dy.handle.backend) {
+    case .XLA:
+      let output_device = dy.device
+      let y = Tensor(copying: y, to: .defaultTFEager)
+      let dy = Tensor(copying: dy, to: .defaultTFEager)
+      return Tensor(copying: _RawTFEager.tanhGrad(y, dy: dy), to: output_device)
+    case .TF_EAGER:
+      return _RawTFEager.tanhGrad(y, dy: dy)
+    }
+
+  }
+
+  /// Deprecated. Use TensorArrayCloseV3
+  @inlinable @inline(__always)
+  public static func tensorArrayCloseV2(
+    handle: StringTensor
+  ) {
+    _RawTFEager.tensorArrayCloseV2(handle: handle)
+  }
+
+  /// Delete the TensorArray from its resource container.
+  ///
+  /// This enables the user to close and release the resource in the middle
+  /// of a step/run.
+  ///
+  /// - Parameter handle: The handle to a TensorArray (output of TensorArray or TensorArrayGrad).
+  @inlinable @inline(__always)
+  public static func tensorArrayCloseV3(
+    handle: ResourceHandle
+  ) {
+    _RawTFEager.tensorArrayCloseV3(handle: handle)
+  }
+
+  /// Deprecated. Use TensorArrayConcatV3
+  @inlinable @inline(__always)
+  public static func tensorArrayConcatV2<Dtype: TensorFlowScalar>(
+    handle: StringTensor,
+    flowIn: Tensor<Float>,
+    elementShapeExcept0: TensorShape?
+  ) -> (value: Tensor<Dtype>, lengths: Tensor<Int64>) {
+    _RawTFEager.tensorArrayConcatV2(
+      handle: handle, flowIn: flowIn, elementShapeExcept0: elementShapeExcept0)
+  }
+
+  /// Concat the elements from the TensorArray into value `value`.
+  ///
+  /// Takes `T` elements of shapes
+  ///
+  /// ```
+  /// (n0 x d0 x d1 x ...), (n1 x d0 x d1 x ...), ..., (n(T-1) x d0 x d1 x ...)
+  /// ```
+  ///
+  /// and concatenates them into a Tensor of shape:
+  ///
+  /// ```(n0 + n1 + ... + n(T-1) x d0 x d1 x ...)```
+  ///
+  /// All elements must have the same shape (except for the first dimension).
+  ///
+  /// - Parameters:
+  ///   - handle: The handle to a TensorArray.
+  ///   - flow_in: A float scalar that enforces proper chaining of operations.
+  ///
+  /// - Attrs:
+  ///   - dtype: The type of the elem that is returned.
+  ///   - element_shape_except0: The expected shape of an element, if known,
+  ///     excluding the first dimension. Used to validate the shapes of
+  ///     TensorArray elements. If this shape is not fully specified, concatenating
+  ///     zero-size TensorArrays is an error.
+  ///
+  /// - Outputs:
+  ///   - value: All of the elements in the TensorArray, concatenated along the first
+  ///     axis.
+  ///   - lengths: A vector of the row sizes of the original T elements in the
+  ///     value output. In the example above, this would be the values:
+  ///     `(n0, n1, ..., n(T-1))`.
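+  ///
+  /// For instance (a shape-level sketch; these element shapes are assumptions):
+  ///
+  /// ```
+  /// elements: (2 x 4), (3 x 4), (1 x 4)   # T = 3
+  /// value:    (6 x 4)                     # 2 + 3 + 1 along the first axis
+  /// lengths:  [2, 3, 1]
+  /// ```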
+ @inlinable @inline(__always) + public static func tensorArrayConcatV3( + handle: ResourceHandle, + flowIn: Tensor, + elementShapeExcept0: TensorShape? + ) -> (value: Tensor, lengths: Tensor) { + _RawTFEager.tensorArrayConcatV3( + handle: handle, flowIn: flowIn, elementShapeExcept0: elementShapeExcept0) + } + + /// Deprecated. Use TensorArrayGatherV3 + @inlinable @inline(__always) + public static func tensorArrayGatherV2( + handle: StringTensor, + indices: Tensor, + flowIn: Tensor, + elementShape: TensorShape? + ) -> Tensor { + switch commonBackend(indices.handle.backend, flowIn.handle.backend) { + case .XLA: + let output_device = flowIn.device + let indices = Tensor(copying: indices, to: .defaultTFEager) + let flowIn = Tensor(copying: flowIn, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.tensorArrayGatherV2( + handle: handle, indices: indices, flowIn: flowIn, elementShape: elementShape), + to: output_device) + case .TF_EAGER: + return _RawTFEager.tensorArrayGatherV2( + handle: handle, indices: indices, flowIn: flowIn, elementShape: elementShape) + } + + } + + /// Gather specific elements from the TensorArray into output `value`. + /// + /// All elements selected by `indices` must have the same shape. + /// + /// - Parameters: + /// - handle: The handle to a TensorArray. + /// - indices: The locations in the TensorArray from which to read tensor elements. + /// - flow_in: A float scalar that enforces proper chaining of operations. + /// + /// - Attrs: + /// - dtype: The type of the elem that is returned. + /// - element_shape: The expected shape of an element, if known. Used to + /// validate the shapes of TensorArray elements. If this shape is not + /// fully specified, gathering zero-size TensorArrays is an error. + /// + /// - Output value: All of the elements in the TensorArray, concatenated along a new + /// axis (the new dimension 0). + @inlinable @inline(__always) + public static func tensorArrayGatherV3( + handle: ResourceHandle, + indices: Tensor, + flowIn: Tensor, + elementShape: TensorShape? + ) -> Tensor { + switch commonBackend(indices.handle.backend, flowIn.handle.backend) { + case .XLA: + let output_device = flowIn.device + let indices = Tensor(copying: indices, to: .defaultTFEager) + let flowIn = Tensor(copying: flowIn, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.tensorArrayGatherV3( + handle: handle, indices: indices, flowIn: flowIn, elementShape: elementShape), + to: output_device) + case .TF_EAGER: + return _RawTFEager.tensorArrayGatherV3( + handle: handle, indices: indices, flowIn: flowIn, elementShape: elementShape) + } + + } + + /// Deprecated. Use TensorArrayGradV3 + @inlinable @inline(__always) + public static func tensorArrayGradV2( + handle: StringTensor, + flowIn: Tensor, + source: String + ) -> StringTensor { + _RawTFEager.tensorArrayGradV2(handle: handle, flowIn: flowIn, source: source) + } + + /// Creates a TensorArray for storing the gradients of values in the given handle. + /// + /// If the given TensorArray gradient already exists, returns a reference to it. + /// + /// Locks the size of the original TensorArray by disabling its dynamic size flag. + /// + /// **A note about the input flow_in:** + /// + /// The handle flow_in forces the execution of the gradient lookup to occur + /// only after certain other operations have occurred. For example, when + /// the forward TensorArray is dynamically sized, writes to this TensorArray + /// may resize the object. 
The gradient TensorArray is statically sized based
+  /// on the size of the forward TensorArray when this operation executes.
+  /// Furthermore, the size of the forward TensorArray is frozen by this call.
+  /// As a result, the flow is used to ensure that the call to generate the gradient
+  /// TensorArray only happens after all writes are executed.
+  ///
+  /// In the case of dynamically sized TensorArrays, gradient computation should
+  /// only be performed on read operations that have themselves been chained via
+  /// flow to occur only after all writes have executed. That way the final size
+  /// of the forward TensorArray is known when this operation is called.
+  ///
+  /// **A note about the source attribute:**
+  ///
+  /// TensorArray gradient calls use an accumulator TensorArray object. If
+  /// multiple gradients are calculated and run in the same session, the multiple
+  /// gradient nodes may accidentally flow through the same accumulator TensorArray.
+  /// This double counts and generally breaks the TensorArray gradient flow.
+  ///
+  /// The solution is to identify which gradient call this particular
+  /// TensorArray gradient is being called in. This is performed by identifying
+  /// a unique string (e.g. "gradients", "gradients_1", ...) from the input
+  /// gradient Tensor's name. This string is used as a suffix when creating
+  /// the TensorArray gradient object here (the attribute `source`).
+  ///
+  /// The attribute `source` is added as a suffix to the forward TensorArray's
+  /// name when performing the creation / lookup, so that each separate gradient
+  /// calculation gets its own TensorArray accumulator.
+  ///
+  /// - Parameters:
+  ///   - handle: The handle to the forward TensorArray.
+  ///   - flow_in: A float scalar that enforces proper chaining of operations.
+  ///
+  /// - Attr source: The gradient source string, used to decide which gradient TensorArray
+  ///   to return.
+  @inlinable @inline(__always)
+  public static func tensorArrayGradV3(
+    handle: ResourceHandle,
+    flowIn: Tensor<Float>,
+    source: String
+  ) -> (gradHandle: ResourceHandle, flowOut: Tensor<Float>) {
+    _RawTFEager.tensorArrayGradV3(handle: handle, flowIn: flowIn, source: source)
+  }
+
+  /// Creates a TensorArray for storing multiple gradients of values in the given handle.
+  ///
+  /// Similar to TensorArrayGradV3. However, it creates an accumulator with an
+  /// expanded shape compared to the input TensorArray whose gradient is being
+  /// computed. This enables multiple gradients for the same TensorArray to be
+  /// calculated using the same accumulator.
+  ///
+  /// - Parameters:
+  ///   - handle: The handle to the forward TensorArray.
+  ///   - flow_in: A float scalar that enforces proper chaining of operations.
+  ///   - shape_to_prepend: An int32 vector representing a shape. Elements in the gradient
+  ///     accumulator will have a shape which is this shape_to_prepend value concatenated
+  ///     with the shape of the elements in the TensorArray corresponding to the input
+  ///     handle.
+  ///
+  /// - Attr source: The gradient source string, used to decide which gradient TensorArray
+  ///   to return.
+  @inlinable @inline(__always)
+  public static func tensorArrayGradWithShape(
+    handle: ResourceHandle,
+    flowIn: Tensor<Float>,
+    shapeToPrepend: Tensor<Int32>,
+    source: String
+  ) -> (gradHandle: ResourceHandle, flowOut: Tensor<Float>) {
+    _RawTFEager.tensorArrayGradWithShape(
+      handle: handle, flowIn: flowIn, shapeToPrepend: shapeToPrepend, source: source)
+  }
+
+  /// Deprecated.
Use TensorArrayReadV3 + @inlinable @inline(__always) + public static func tensorArrayReadV2( + handle: StringTensor, + index: Tensor, + flowIn: Tensor + ) -> Tensor { + switch commonBackend(index.handle.backend, flowIn.handle.backend) { + case .XLA: + let output_device = flowIn.device + let index = Tensor(copying: index, to: .defaultTFEager) + let flowIn = Tensor(copying: flowIn, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.tensorArrayReadV2(handle: handle, index: index, flowIn: flowIn), + to: output_device) + case .TF_EAGER: + return _RawTFEager.tensorArrayReadV2(handle: handle, index: index, flowIn: flowIn) + } + + } + + /// Read an element from the TensorArray into output `value`. + /// + /// - Parameters: + /// - handle: The handle to a TensorArray. + /// - flow_in: A float scalar that enforces proper chaining of operations. + /// + /// - Attr dtype: The type of the elem that is returned. + /// + /// - Output value: The tensor that is read from the TensorArray. + @inlinable @inline(__always) + public static func tensorArrayReadV3( + handle: ResourceHandle, + index: Tensor, + flowIn: Tensor + ) -> Tensor { + switch commonBackend(index.handle.backend, flowIn.handle.backend) { + case .XLA: + let output_device = flowIn.device + let index = Tensor(copying: index, to: .defaultTFEager) + let flowIn = Tensor(copying: flowIn, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.tensorArrayReadV3(handle: handle, index: index, flowIn: flowIn), + to: output_device) + case .TF_EAGER: + return _RawTFEager.tensorArrayReadV3(handle: handle, index: index, flowIn: flowIn) + } + + } + + /// Deprecated. Use TensorArrayScatterV3 + @inlinable @inline(__always) + public static func tensorArrayScatterV2( + handle: StringTensor, + indices: Tensor, + value: Tensor, + flowIn: Tensor + ) -> Tensor { + switch commonBackend( + commonBackend(indices.handle.backend, value.handle.backend), flowIn.handle.backend) + { + case .XLA: + let output_device = flowIn.device + let indices = Tensor(copying: indices, to: .defaultTFEager) + let value = Tensor(copying: value, to: .defaultTFEager) + let flowIn = Tensor(copying: flowIn, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.tensorArrayScatterV2( + handle: handle, indices: indices, value: value, flowIn: flowIn), to: output_device) + case .TF_EAGER: + return _RawTFEager.tensorArrayScatterV2( + handle: handle, indices: indices, value: value, flowIn: flowIn) + } + + } + + /// Scatter the data from the input value into specific TensorArray elements. + /// + /// `indices` must be a vector, its length must match the first dim of `value`. + /// + /// - Parameters: + /// - handle: The handle to a TensorArray. + /// - indices: The locations at which to write the tensor elements. + /// - value: The concatenated tensor to write to the TensorArray. + /// - flow_in: A float scalar that enforces proper chaining of operations. + /// + /// - Output flow_out: A float scalar that enforces proper chaining of operations. 
+ @inlinable @inline(__always) + public static func tensorArrayScatterV3( + handle: ResourceHandle, + indices: Tensor, + value: Tensor, + flowIn: Tensor + ) -> Tensor { + switch commonBackend( + commonBackend(indices.handle.backend, value.handle.backend), flowIn.handle.backend) + { + case .XLA: + let output_device = flowIn.device + let indices = Tensor(copying: indices, to: .defaultTFEager) + let value = Tensor(copying: value, to: .defaultTFEager) + let flowIn = Tensor(copying: flowIn, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.tensorArrayScatterV3( + handle: handle, indices: indices, value: value, flowIn: flowIn), to: output_device) + case .TF_EAGER: + return _RawTFEager.tensorArrayScatterV3( + handle: handle, indices: indices, value: value, flowIn: flowIn) + } + + } + + /// Deprecated. Use TensorArraySizeV3 + @inlinable @inline(__always) + public static func tensorArraySizeV2( + handle: StringTensor, + flowIn: Tensor + ) -> Tensor { + switch flowIn.handle.backend { + case .XLA: + let output_device = flowIn.device + let flowIn = Tensor(copying: flowIn, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.tensorArraySizeV2(handle: handle, flowIn: flowIn), to: output_device) + case .TF_EAGER: + return _RawTFEager.tensorArraySizeV2(handle: handle, flowIn: flowIn) + } + + } + + /// Get the current size of the TensorArray. + /// + /// - Parameters: + /// - handle: The handle to a TensorArray (output of TensorArray or TensorArrayGrad). + /// - flow_in: A float scalar that enforces proper chaining of operations. + /// + /// - Output size: The current size of the TensorArray. + @inlinable @inline(__always) + public static func tensorArraySizeV3( + handle: ResourceHandle, + flowIn: Tensor + ) -> Tensor { + switch flowIn.handle.backend { + case .XLA: + let output_device = flowIn.device + let flowIn = Tensor(copying: flowIn, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.tensorArraySizeV3(handle: handle, flowIn: flowIn), to: output_device) + case .TF_EAGER: + return _RawTFEager.tensorArraySizeV3(handle: handle, flowIn: flowIn) + } + + } + + /// Deprecated. Use TensorArraySplitV3 + @inlinable @inline(__always) + public static func tensorArraySplitV2( + handle: StringTensor, + value: Tensor, + lengths: Tensor, + flowIn: Tensor + ) -> Tensor { + switch commonBackend( + commonBackend(value.handle.backend, lengths.handle.backend), flowIn.handle.backend) + { + case .XLA: + let output_device = flowIn.device + let value = Tensor(copying: value, to: .defaultTFEager) + let lengths = Tensor(copying: lengths, to: .defaultTFEager) + let flowIn = Tensor(copying: flowIn, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.tensorArraySplitV2( + handle: handle, value: value, lengths: lengths, flowIn: flowIn), to: output_device) + case .TF_EAGER: + return _RawTFEager.tensorArraySplitV2( + handle: handle, value: value, lengths: lengths, flowIn: flowIn) + } + + } + + /// Split the data from the input value into TensorArray elements. + /// + /// Assuming that `lengths` takes on values + /// + /// ```(n0, n1, ..., n(T-1))``` + /// + /// and that `value` has shape + /// + /// ```(n0 + n1 + ... + n(T-1) x d0 x d1 x ...)```, + /// + /// this splits values into a TensorArray with T tensors. + /// + /// TensorArray index t will be the subtensor of values with starting position + /// + /// ```(n0 + n1 + ... + n(t-1), 0, 0, ...)``` + /// + /// and having size + /// + /// ```nt x d0 x d1 x ...``` + /// + /// - Parameters: + /// - handle: The handle to a TensorArray. 
+  ///   - value: The concatenated tensor to write to the TensorArray.
+  ///   - lengths: The vector of lengths, how to split the rows of value into the
+  ///     TensorArray.
+  ///   - flow_in: A float scalar that enforces proper chaining of operations.
+  ///
+  /// - Output flow_out: A float scalar that enforces proper chaining of operations.
+  @inlinable @inline(__always)
+  public static func tensorArraySplitV3<T: TensorFlowScalar>(
+    handle: ResourceHandle,
+    value: Tensor<T>,
+    lengths: Tensor<Int64>,
+    flowIn: Tensor<Float>
+  ) -> Tensor<Float> {
+    switch commonBackend(
+      commonBackend(value.handle.backend, lengths.handle.backend), flowIn.handle.backend)
+    {
+    case .XLA:
+      let output_device = flowIn.device
+      let value = Tensor(copying: value, to: .defaultTFEager)
+      let lengths = Tensor(copying: lengths, to: .defaultTFEager)
+      let flowIn = Tensor(copying: flowIn, to: .defaultTFEager)
+      return Tensor(
+        copying: _RawTFEager.tensorArraySplitV3(
+          handle: handle, value: value, lengths: lengths, flowIn: flowIn), to: output_device)
+    case .TF_EAGER:
+      return _RawTFEager.tensorArraySplitV3(
+        handle: handle, value: value, lengths: lengths, flowIn: flowIn)
+    }
+
+  }
+
+  /// Deprecated. Use TensorArrayV3
+  @inlinable @inline(__always)
+  public static func tensorArrayV2(
+    size: Tensor<Int32>,
+    dtype: TensorDataType,
+    elementShape: TensorShape?,
+    dynamicSize: Bool = false,
+    clearAfterRead: Bool = true,
+    tensorArrayName: String
+  ) -> StringTensor {
+    _RawTFEager.tensorArrayV2(
+      size: size, dtype: dtype, elementShape: elementShape, dynamicSize: dynamicSize,
+      clearAfterRead: clearAfterRead, tensorArrayName: tensorArrayName)
+  }
+
+  /// An array of Tensors of given size.
+  ///
+  /// Write data via Write and read via Read or Pack.
+  ///
+  /// - Parameter size: The size of the array.
+  ///
+  /// - Attrs:
+  ///   - dtype: The type of the elements on the tensor_array.
+  ///   - element_shape: The expected shape of an element, if known. Used to
+  ///     validate the shapes of TensorArray elements. If this shape is not
+  ///     fully specified, gathering zero-size TensorArrays is an error.
+  ///   - dynamic_size: A boolean that determines whether writes to the TensorArray
+  ///     are allowed to grow the size. By default, this is not allowed.
+  ///   - clear_after_read: If true (default), Tensors in the TensorArray are cleared
+  ///     after being read. This disables multiple read semantics but allows early
+  ///     release of memory.
+  ///   - identical_element_shapes: If true (default is false), then all
+  ///     elements in the TensorArray will be expected to have identical shapes.
+  ///     This allows certain behaviors, like dynamically checking for
+  ///     consistent shapes on write, and being able to fill in properly
+  ///     shaped zero tensors on stack -- even if the element_shape attribute
+  ///     is not fully defined.
+  ///   - tensor_array_name: Overrides the name used for the temporary tensor_array
+  ///     resource. Default value is the name of the 'TensorArray' op (which
+  ///     is guaranteed unique).
+  ///
+  /// - Outputs:
+  ///   - handle: The handle to the TensorArray.
+  ///   - flow: A scalar used to control gradient flow.
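For illustration, here is a minimal sketch of how the flow scalar threads consecutive TensorArray ops together (an editor's aside, not part of this patch; the array name, shapes, and indices are made up):

// Create a TensorArray, write one element, then read it back. Each op
// consumes the previous flow scalar and returns a new one; threading the
// scalar through is what enforces write-before-read ordering.
let (handle, flow0) = _Raw.tensorArrayV3(
  size: Tensor<Int32>(4, on: .defaultTFEager),
  dtype: Float.tensorFlowDataType,
  elementShape: TensorShape([2]),
  tensorArrayName: "scratch")
let flow1 = _Raw.tensorArrayWriteV3(
  handle: handle,
  index: Tensor<Int32>(0, on: .defaultTFEager),
  value: Tensor<Float>([1, 2], on: .defaultTFEager),
  flowIn: flow0)
let element: Tensor<Float> = _Raw.tensorArrayReadV3(
  handle: handle,
  index: Tensor<Int32>(0, on: .defaultTFEager),
  flowIn: flow1)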
+  @inlinable @inline(__always)
+  public static func tensorArrayV3(
+    size: Tensor<Int32>,
+    dtype: TensorDataType,
+    elementShape: TensorShape?,
+    dynamicSize: Bool = false,
+    clearAfterRead: Bool = true,
+    identicalElementShapes: Bool = false,
+    tensorArrayName: String
+  ) -> (handle: ResourceHandle, flow: Tensor<Float>) {
+    _RawTFEager.tensorArrayV3(
+      size: size, dtype: dtype, elementShape: elementShape, dynamicSize: dynamicSize,
+      clearAfterRead: clearAfterRead, identicalElementShapes: identicalElementShapes,
+      tensorArrayName: tensorArrayName)
+  }
+
+  /// Deprecated. Use TensorArrayWriteV3
+  @inlinable @inline(__always)
+  public static func tensorArrayWriteV2<T: TensorFlowScalar>(
+    handle: StringTensor,
+    index: Tensor<Int32>,
+    value: Tensor<T>,
+    flowIn: Tensor<Float>
+  ) -> Tensor<Float> {
+    switch commonBackend(
+      commonBackend(index.handle.backend, value.handle.backend), flowIn.handle.backend)
+    {
+    case .XLA:
+      let output_device = flowIn.device
+      let index = Tensor(copying: index, to: .defaultTFEager)
+      let value = Tensor(copying: value, to: .defaultTFEager)
+      let flowIn = Tensor(copying: flowIn, to: .defaultTFEager)
+      return Tensor(
+        copying: _RawTFEager.tensorArrayWriteV2(
+          handle: handle, index: index, value: value, flowIn: flowIn), to: output_device)
+    case .TF_EAGER:
+      return _RawTFEager.tensorArrayWriteV2(
+        handle: handle, index: index, value: value, flowIn: flowIn)
+    }
+
+  }
+
+  /// Push an element onto the tensor_array.
+  ///
+  /// - Parameters:
+  ///   - handle: The handle to a TensorArray.
+  ///   - index: The position to write to inside the TensorArray.
+  ///   - value: The tensor to write to the TensorArray.
+  ///   - flow_in: A float scalar that enforces proper chaining of operations.
+  ///
+  /// - Output flow_out: A float scalar that enforces proper chaining of operations.
+  @inlinable @inline(__always)
+  public static func tensorArrayWriteV3<T: TensorFlowScalar>(
+    handle: ResourceHandle,
+    index: Tensor<Int32>,
+    value: Tensor<T>,
+    flowIn: Tensor<Float>
+  ) -> Tensor<Float> {
+    switch commonBackend(
+      commonBackend(index.handle.backend, value.handle.backend), flowIn.handle.backend)
+    {
+    case .XLA:
+      let output_device = flowIn.device
+      let index = Tensor(copying: index, to: .defaultTFEager)
+      let value = Tensor(copying: value, to: .defaultTFEager)
+      let flowIn = Tensor(copying: flowIn, to: .defaultTFEager)
+      return Tensor(
+        copying: _RawTFEager.tensorArrayWriteV3(
+          handle: handle, index: index, value: value, flowIn: flowIn), to: output_device)
+    case .TF_EAGER:
+      return _RawTFEager.tensorArrayWriteV3(
+        handle: handle, index: index, value: value, flowIn: flowIn)
+    }
+
+  }
+
+  /// Creates a dataset that emits `components` as a tuple of tensors once.
+  @inlinable @inline(__always)
+  public static func tensorDataset<ToutputTypes: TensorArrayProtocol>(
+    components: ToutputTypes,
+    outputShapes: [TensorShape?]
+  ) -> VariantHandle {
+    _RawTFEager.tensorDataset(components: components, outputShapes: outputShapes)
+  }
+
+  /// Creates a tree resource and returns a handle to it.
+  ///
+  /// - Parameters:
+  ///   - tree_handle: Handle to the tree resource to be created.
+  ///   - tree_config: Serialized proto string of the boosted_trees.Tree.
+  @inlinable @inline(__always)
+  public static func tensorForestCreateTreeVariable(
+    treeHandle: ResourceHandle,
+    treeConfig: StringTensor
+  ) {
+    _RawTFEager.tensorForestCreateTreeVariable(treeHandle: treeHandle, treeConfig: treeConfig)
+  }
+
+  /// Deserializes a proto into the tree handle
+  ///
+  /// - Parameters:
+  ///   - tree_handle: Handle to the tree resource to be restored.
+  ///   - tree_config: Serialized proto string of the boosted_trees.Tree proto.
+  @inlinable @inline(__always)
+  public static func tensorForestTreeDeserialize(
+    treeHandle: ResourceHandle,
+    treeConfig: StringTensor
+  ) {
+    _RawTFEager.tensorForestTreeDeserialize(treeHandle: treeHandle, treeConfig: treeConfig)
+  }
+
+  /// Checks whether a tree has been initialized.
+  ///
+  /// - Parameter tree_handle: Handle to the tree.
+  ///
+  /// - Output is_initialized: Whether the tree is initialized.
+  @inlinable @inline(__always)
+  public static func tensorForestTreeIsInitializedOp(
+    treeHandle: ResourceHandle
+  ) -> Tensor<Bool> {
+    _RawTFEager.tensorForestTreeIsInitializedOp(treeHandle: treeHandle)
+  }
+
+  /// Output the logits for the given input data
+  ///
+  /// - Parameters:
+  ///   - tree_handle: Handle to the tree resource.
+  ///   - dense_features: Rank 2 dense features tensor.
+  ///
+  /// - Attr logits_dimension: Scalar, dimension of the logits.
+  ///
+  /// - Output logits: The logits predictions from the tree for each instance in the batch.
+  @inlinable @inline(__always)
+  public static func tensorForestTreePredict(
+    treeHandle: ResourceHandle,
+    denseFeatures: Tensor<Float>,
+    logitsDimension: Int64
+  ) -> Tensor<Float> {
+    switch denseFeatures.handle.backend {
+    case .XLA:
+      let output_device = denseFeatures.device
+      let denseFeatures = Tensor(copying: denseFeatures, to: .defaultTFEager)
+      return Tensor(
+        copying: _RawTFEager.tensorForestTreePredict(
+          treeHandle: treeHandle, denseFeatures: denseFeatures, logitsDimension: logitsDimension),
+        to: output_device)
+    case .TF_EAGER:
+      return _RawTFEager.tensorForestTreePredict(
+        treeHandle: treeHandle, denseFeatures: denseFeatures, logitsDimension: logitsDimension)
+    }
+
+  }
+
+  /// Creates a handle to a TensorForestTreeResource
+  @inlinable @inline(__always)
+  public static func tensorForestTreeResourceHandleOp(
+    container: String,
+    sharedName: String
+  ) -> ResourceHandle {
+    _RawTFEager.tensorForestTreeResourceHandleOp(container: container, sharedName: sharedName)
+  }
+
+  /// Serializes the tree handle to a proto
+  ///
+  /// - Parameter tree_handle: Handle to the tree resource to be serialized.
+  ///
+  /// - Output tree_config: Serialized proto string of the tree resource.
+  @inlinable @inline(__always)
+  public static func tensorForestTreeSerialize(
+    treeHandle: ResourceHandle
+  ) -> StringTensor {
+    _RawTFEager.tensorForestTreeSerialize(treeHandle: treeHandle)
+  }
+
+  /// Get the number of nodes in a tree
+  ///
+  /// - Parameter tree_handle: Handle to the tree resource.
+  ///
+  /// - Output tree_size: The size of the tree.
+  @inlinable @inline(__always)
+  public static func tensorForestTreeSize(
+    treeHandle: ResourceHandle
+  ) -> Tensor<Int32> {
+    _RawTFEager.tensorForestTreeSize(treeHandle: treeHandle)
+  }
+
+  /// Concats all tensors in the list along the 0th dimension.
+  ///
+  /// Requires that all tensors have the same shape except the first dimension.
+  ///
+  /// input_handle: The input list.
+  /// tensor: The concatenated result.
+  /// lengths: Output tensor containing sizes of the 0th dimension of tensors in the list, used for computing the gradient.
+  ///
+  @inlinable @inline(__always)
+  public static func tensorListConcat<ElementDtype: TensorFlowScalar>(
+    inputHandle: VariantHandle,
+    elementShape: TensorShape?
+  ) -> (tensor: Tensor<ElementDtype>, lengths: Tensor<Int64>) {
+    _RawTFEager.tensorListConcat(inputHandle: inputHandle, elementShape: elementShape)
+  }
+
+  @inlinable @inline(__always)
+  public static func tensorListConcatLists(
+    inputA: VariantHandle,
+    inputB: VariantHandle,
+    elementDtype: TensorDataType
+  ) -> VariantHandle {
+    _RawTFEager.tensorListConcatLists(inputA: inputA, inputB: inputB, elementDtype: elementDtype)
+  }
+
+  /// Concats all tensors in the list along the 0th dimension.
+  ///
+  /// Requires that all tensors have the same shape except the first dimension.
+  ///
+  /// input_handle: The input list.
+  /// element_shape: The shape of the uninitialized elements in the list. If the first
+  ///   dimension is not -1, it is assumed that all list elements have the same
+  ///   leading dim.
+  /// leading_dims: The list of leading dims of uninitialized list elements. Used if
+  ///   the leading dim of input_handle.element_shape or the element_shape input arg
+  ///   is not already set.
+  /// tensor: The concatenated result.
+  /// lengths: Output tensor containing sizes of the 0th dimension of tensors in the list, used for computing the gradient.
+  ///
+  @inlinable @inline(__always)
+  public static func tensorListConcatV2<
+    ElementDtype: TensorFlowScalar,
+    ShapeType: TensorFlowIndex
+  >(
+    inputHandle: VariantHandle,
+    elementShape: Tensor<ShapeType>,
+    leadingDims: Tensor<Int64>
+  ) -> (tensor: Tensor<ElementDtype>, lengths: Tensor<Int64>) {
+    _RawTFEager.tensorListConcatV2(
+      inputHandle: inputHandle, elementShape: elementShape, leadingDims: leadingDims)
+  }
+
+  /// The shape of the elements of the given list, as a tensor.
+  ///
+  ///   input_handle: the list
+  ///   element_shape: the shape of elements of the list
+  @inlinable @inline(__always)
+  public static func tensorListElementShape<ShapeType: TensorFlowIndex>(
+    inputHandle: VariantHandle
+  ) -> Tensor<ShapeType> {
+    _RawTFEager.tensorListElementShape(inputHandle: inputHandle)
+  }
+
+  /// Creates a TensorList which, when stacked, has the value of `tensor`.
+  ///
+  /// Each tensor in the result list corresponds to one row of the input tensor.
+  ///
+  /// tensor: The input tensor.
+  /// output_handle: The list.
+  @inlinable @inline(__always)
+  public static func tensorListFromTensor<
+    ElementDtype: TensorFlowScalar,
+    ShapeType: TensorFlowIndex
+  >(
+    _ tensor: Tensor<ElementDtype>,
+    elementShape: Tensor<ShapeType>
+  ) -> VariantHandle {
+    _RawTFEager.tensorListFromTensor(tensor, elementShape: elementShape)
+  }
+
+  /// Creates a Tensor by indexing into the TensorList.
+  ///
+  /// Each row in the produced Tensor corresponds to the element in the TensorList
+  /// specified by the given index (see `tf.gather`).
+  ///
+  /// input_handle: The input tensor list.
+  /// indices: The indices used to index into the list.
+  /// values: The tensor.
+  @inlinable @inline(__always)
+  public static func tensorListGather<ElementDtype: TensorFlowScalar>(
+    inputHandle: VariantHandle,
+    indices: Tensor<Int32>,
+    elementShape: Tensor<Int32>
+  ) -> Tensor<ElementDtype> {
+    switch commonBackend(indices.handle.backend, elementShape.handle.backend) {
+    case .XLA:
+      let output_device = elementShape.device
+      let indices = Tensor(copying: indices, to: .defaultTFEager)
+      let elementShape = Tensor(copying: elementShape, to: .defaultTFEager)
+      return Tensor(
+        copying: _RawTFEager.tensorListGather(
+          inputHandle: inputHandle, indices: indices, elementShape: elementShape),
+        to: output_device)
+    case .TF_EAGER:
+      return _RawTFEager.tensorListGather(
+        inputHandle: inputHandle, indices: indices, elementShape: elementShape)
+    }
+  }
-    /// Op returns the number of elements in the underlying container.
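Before moving on to the map-container ops below, here is a small sketch of the TensorList round trip described above (an editor's illustration with hypothetical values, not part of this patch):

// Turn a [3, 2] tensor into a TensorList with one element per row, then
// gather rows 0 and 2 back into a single [2, 2] tensor.
let matrix = Tensor<Float>(shape: [3, 2], scalars: [1, 2, 3, 4, 5, 6], on: .defaultTFEager)
let list = _Raw.tensorListFromTensor(
  matrix, elementShape: Tensor<Int32>([2], on: .defaultTFEager))
let gathered: Tensor<Float> = _Raw.tensorListGather(
  inputHandle: list,
  indices: Tensor<Int32>([0, 2], on: .defaultTFEager),
  elementShape: Tensor<Int32>([2], on: .defaultTFEager))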
- @inlinable @inline(__always) - public static func mapSize( - capacity: Int64 = 0, - memoryLimit: Int64 = 0, - dtypes: [TensorDataType], - container: String, - sharedName: String - ) -> Tensor { - _RawTFEager.mapSize( - capacity: capacity, memoryLimit: memoryLimit, dtypes: dtypes, container: container, - sharedName: sharedName) - } - - /// Stage (key, values) in the underlying container which behaves like a hashtable. - /// - /// - Parameters: - /// - key: int64 - /// - values: a list of tensors - /// dtypes A list of data types that inserted values should adhere to. - /// - /// - Attrs: - /// - capacity: Maximum number of elements in the Staging Area. If > 0, inserts - /// on the container will block when the capacity is reached. - /// - container: If non-empty, this queue is placed in the given container. Otherwise, - /// a default container is used. - /// - shared_name: It is necessary to match this name to the matching Unstage Op. - @inlinable @inline(__always) - public static func mapStage( - key: Tensor, - indices: Tensor, - _ values: FakeDtypes, - capacity: Int64 = 0, - memoryLimit: Int64 = 0, - dtypes: [TensorDataType], - container: String, - sharedName: String - ) { - _RawTFEager.mapStage( - key: key, indices: indices, values, capacity: capacity, memoryLimit: memoryLimit, - dtypes: dtypes, container: container, sharedName: sharedName) - } - - /// Op removes and returns the values associated with the key - /// - /// from the underlying container. If the underlying container - /// does not contain this key, the op will block until it does. - @inlinable @inline(__always) - public static func mapUnstage( - key: Tensor, - indices: Tensor, - capacity: Int64 = 0, - memoryLimit: Int64 = 0, - container: String, - sharedName: String - ) -> Dtypes { - _RawTFEager.mapUnstage( - key: key, indices: indices, capacity: capacity, memoryLimit: memoryLimit, - container: container, sharedName: sharedName) + } + + @inlinable @inline(__always) + public static func tensorListGetItem( + inputHandle: VariantHandle, + index: Tensor, + elementShape: Tensor + ) -> Tensor { + switch commonBackend(index.handle.backend, elementShape.handle.backend) { + case .XLA: + let output_device = elementShape.device + let index = Tensor(copying: index, to: .defaultTFEager) + let elementShape = Tensor(copying: elementShape, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.tensorListGetItem( + inputHandle: inputHandle, index: index, elementShape: elementShape), to: output_device) + case .TF_EAGER: + return _RawTFEager.tensorListGetItem( + inputHandle: inputHandle, index: index, elementShape: elementShape) } - /// Op removes and returns a random (key, value) - /// - /// from the underlying container. If the underlying container - /// does not contain elements, the op will block until it does. - @inlinable @inline(__always) - public static func mapUnstageNoKey( - indices: Tensor, - capacity: Int64 = 0, - memoryLimit: Int64 = 0, - container: String, - sharedName: String - ) -> (key: Tensor, values: Dtypes) { - _RawTFEager.mapUnstageNoKey( - indices: indices, capacity: capacity, memoryLimit: memoryLimit, container: container, - sharedName: sharedName) - } - - /// Multiply the matrix "a" by the matrix "b". - /// - /// The inputs must be two-dimensional matrices and the inner dimension of - /// "a" (after being transposed if transpose_a is true) must match the - /// outer dimension of "b" (after being transposed if transposed_b is - /// true). 
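As a quick shape check on the rule just stated (an editor's sketch with hypothetical values, not part of this patch):

// A [2, 3] matrix times a [3, 5] matrix yields a [2, 5] result; with
// transposeA: true, `a` would instead be supplied as [3, 2].
let a = Tensor<Float>(shape: [2, 3], scalars: Array(repeating: 1, count: 6))
let b = Tensor<Float>(shape: [3, 5], scalars: Array(repeating: 1, count: 15))
let c = _Raw.matMul(a, b)  // c.shape == [2, 5]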
- /// - /// *Note*: The default kernel implementation for MatMul on GPUs uses - /// cublas. - /// - /// - Attrs: - /// - transpose_a: If true, "a" is transposed before multiplication. - /// - transpose_b: If true, "b" is transposed before multiplication. - @inlinable @inline(__always) - public static func matMul( - _ a: Tensor, - _ b: Tensor, - transposeA: Bool = false, - transposeB: Bool = false - ) -> Tensor { - switch commonBackend(a.handle.backend, b.handle.backend) { - case .XLA: - return _RawXLA.matMul(a, b, transposeA: transposeA, transposeB: transposeB) - case .TF_EAGER: - return _RawTFEager.matMul(a, b, transposeA: transposeA, transposeB: transposeB) - } - - } - - /// Returns the set of files matching one or more glob patterns. - /// - /// Note that this routine only supports wildcard characters in the - /// basename portion of the pattern, not in the directory portion. - /// Note also that the order of filenames returned is deterministic. - /// - /// - Parameter pattern: Shell wildcard pattern(s). Scalar or vector of type string. - /// - /// - Output filenames: A vector of matching filenames. - @inlinable @inline(__always) - public static func matchingFiles( - pattern: StringTensor - ) -> StringTensor { - _RawTFEager.matchingFiles(pattern: pattern) - } - - @inlinable @inline(__always) - public static func matchingFilesDataset( - patterns: StringTensor - ) -> VariantHandle { - _RawTFEager.matchingFilesDataset(patterns: patterns) - } - - /// Copy a tensor setting everything outside a central band in each innermost matrix - /// - /// to zero. - /// - /// The `band` part is computed as follows: - /// Assume `input` has `k` dimensions `[I, J, K, ..., M, N]`, then the output is a - /// tensor with the same shape where - /// - /// `band[i, j, k, ..., m, n] = in_band(m, n) * input[i, j, k, ..., m, n]`. - /// - /// The indicator function - /// - /// `in_band(m, n) = (num_lower < 0 || (m-n) <= num_lower)) && - /// (num_upper < 0 || (n-m) <= num_upper)`. - /// - /// For example: - /// - /// ``` - /// # if 'input' is [[ 0, 1, 2, 3] - /// [-1, 0, 1, 2] - /// [-2, -1, 0, 1] - /// [-3, -2, -1, 0]], - /// - /// tf.matrix_band_part(input, 1, -1) ==> [[ 0, 1, 2, 3] - /// [-1, 0, 1, 2] - /// [ 0, -1, 0, 1] - /// [ 0, 0, -1, 0]], - /// - /// tf.matrix_band_part(input, 2, 1) ==> [[ 0, 1, 0, 0] - /// [-1, 0, 1, 0] - /// [-2, -1, 0, 1] - /// [ 0, -2, -1, 0]] - /// ``` - /// - /// Useful special cases: - /// - /// ``` - /// tf.matrix_band_part(input, 0, -1) ==> Upper triangular part. - /// tf.matrix_band_part(input, -1, 0) ==> Lower triangular part. - /// tf.matrix_band_part(input, 0, 0) ==> Diagonal. - /// ``` - /// - /// - Parameters: - /// - input: Rank `k` tensor. - /// - num_lower: 0-D tensor. Number of subdiagonals to keep. If negative, keep entire - /// lower triangle. - /// - num_upper: 0-D tensor. Number of superdiagonals to keep. If negative, keep - /// entire upper triangle. - /// - /// - Output band: Rank `k` tensor of the same shape as input. The extracted banded tensor. 
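The `in_band` indicator above reads directly as a predicate; a sketch in plain Swift (an editor's illustration, not part of this patch):

// Element (m, n) survives when it lies within numLower subdiagonals and
// numUpper superdiagonals; a negative bound keeps the whole triangle.
func inBand(m: Int, n: Int, numLower: Int, numUpper: Int) -> Bool {
  (numLower < 0 || (m - n) <= numLower) && (numUpper < 0 || (n - m) <= numUpper)
}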
- @inlinable @inline(__always) - public static func matrixBandPart< - T: TensorFlowScalar, - Tindex: TensorFlowIndex - >( - _ input: Tensor, - numLower: Tensor, - numUpper: Tensor - ) -> Tensor { - switch commonBackend( - commonBackend(input.handle.backend, numLower.handle.backend), numUpper.handle.backend) - { - case .XLA: - let output_device = numUpper.device - let input = Tensor(copying: input, to: .defaultTFEager) - let numLower = Tensor(copying: numLower, to: .defaultTFEager) - let numUpper = Tensor(copying: numUpper, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.matrixBandPart(input, numLower: numLower, numUpper: numUpper), - to: output_device) - case .TF_EAGER: - return _RawTFEager.matrixBandPart(input, numLower: numLower, numUpper: numUpper) - } - - } - - /// Computes the determinant of one or more square matrices. - /// - /// The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions - /// form square matrices. The output is a tensor containing the determinants - /// for all input submatrices `[..., :, :]`. - /// - /// - Parameter input: Shape is `[..., M, M]`. - /// - /// - Output output: Shape is `[...]`. - @inlinable @inline(__always) - public static func matrixDeterminant( - _ input: Tensor - ) -> Tensor { - switch input.handle.backend { - case .XLA: - let output_device = input.device - let input = Tensor(copying: input, to: .defaultTFEager) - return Tensor(copying: _RawTFEager.matrixDeterminant(input), to: output_device) - case .TF_EAGER: - return _RawTFEager.matrixDeterminant(input) - } - - } - - /// Returns a batched diagonal tensor with a given batched diagonal values. - /// - /// Given a `diagonal`, this operation returns a tensor with the `diagonal` and - /// everything else padded with zeros. The diagonal is computed as follows: - /// - /// Assume `diagonal` has `k` dimensions `[I, J, K, ..., N]`, then the output is a - /// tensor of rank `k+1` with dimensions [I, J, K, ..., N, N]` where: - /// - /// `output[i, j, k, ..., m, n] = 1{m=n} * diagonal[i, j, k, ..., n]`. - /// - /// For example: - /// - /// ``` - /// # 'diagonal' is [[1, 2, 3, 4], [5, 6, 7, 8]] - /// - /// and diagonal.shape = (2, 4) - /// - /// tf.matrix_diag(diagonal) ==> [[[1, 0, 0, 0] - /// [0, 2, 0, 0] - /// [0, 0, 3, 0] - /// [0, 0, 0, 4]], - /// [[5, 0, 0, 0] - /// [0, 6, 0, 0] - /// [0, 0, 7, 0] - /// [0, 0, 0, 8]]] - /// - /// which has shape (2, 4, 4) - /// ``` - /// - /// - Parameter diagonal: Rank `k`, where `k >= 1`. - /// - /// - Output output: Rank `k+1`, with `output.shape = diagonal.shape + [diagonal.shape[-1]]`. - @inlinable @inline(__always) - public static func matrixDiag( - diagonal: Tensor - ) -> Tensor { - switch diagonal.handle.backend { - case .XLA: - let output_device = diagonal.device - let diagonal = Tensor(copying: diagonal, to: .defaultTFEager) - return Tensor(copying: _RawTFEager.matrixDiag(diagonal: diagonal), to: output_device) - case .TF_EAGER: - return _RawTFEager.matrixDiag(diagonal: diagonal) - } - - } - - /// Returns the batched diagonal part of a batched tensor. - /// - /// This operation returns a tensor with the `diagonal` part - /// of the batched `input`. The `diagonal` part is computed as follows: - /// - /// Assume `input` has `k` dimensions `[I, J, K, ..., M, N]`, then the output is a - /// tensor of rank `k - 1` with dimensions `[I, J, K, ..., min(M, N)]` where: - /// - /// `diagonal[i, j, k, ..., n] = input[i, j, k, ..., n, n]`. - /// - /// The input must be at least a matrix. 
- /// - /// For example: - /// - /// ``` - /// # 'input' is [[[1, 0, 0, 0] - /// [0, 2, 0, 0] - /// [0, 0, 3, 0] - /// [0, 0, 0, 4]], - /// [[5, 0, 0, 0] - /// [0, 6, 0, 0] - /// [0, 0, 7, 0] - /// [0, 0, 0, 8]]] - /// - /// and input.shape = (2, 4, 4) - /// - /// tf.matrix_diag_part(input) ==> [[1, 2, 3, 4], [5, 6, 7, 8]] - /// - /// which has shape (2, 4) - /// ``` - /// - /// - Parameter input: Rank `k` tensor where `k >= 2`. - /// - /// - Output diagonal: The extracted diagonal(s) having shape - /// `diagonal.shape = input.shape[:-2] + [min(input.shape[-2:])]`. - @inlinable @inline(__always) - public static func matrixDiagPart( - _ input: Tensor - ) -> Tensor { - switch input.handle.backend { - case .XLA: - let output_device = input.device - let input = Tensor(copying: input, to: .defaultTFEager) - return Tensor(copying: _RawTFEager.matrixDiagPart(input), to: output_device) - case .TF_EAGER: - return _RawTFEager.matrixDiagPart(input) - } - - } - - /// Returns the batched diagonal part of a batched tensor. - /// - /// Returns a tensor with the `k[0]`-th to `k[1]`-th diagonals of the batched - /// `input`. - /// - /// Assume `input` has `r` dimensions `[I, J, ..., L, M, N]`. - /// Let `max_diag_len` be the maximum length among all diagonals to be extracted, - /// `max_diag_len = min(M + min(k[1], 0), N + min(-k[0], 0))` - /// Let `num_diags` be the number of diagonals to extract, - /// `num_diags = k[1] - k[0] + 1`. - /// - /// If `num_diags == 1`, the output tensor is of rank `r - 1` with shape - /// `[I, J, ..., L, max_diag_len]` and values: - /// - /// ``` - /// diagonal[i, j, ..., l, n] - /// = input[i, j, ..., l, n+y, n+x] ; if 0 <= n+y < M and 0 <= n+x < N, - /// padding_value ; otherwise. - /// ``` - /// where `y = max(-k[1], 0)`, `x = max(k[1], 0)`. - /// - /// Otherwise, the output tensor has rank `r` with dimensions - /// `[I, J, ..., L, num_diags, max_diag_len]` with values: - /// - /// ``` - /// diagonal[i, j, ..., l, m, n] - /// = input[i, j, ..., l, n+y, n+x] ; if 0 <= n+y < M and 0 <= n+x < N, - /// padding_value ; otherwise. - /// ``` - /// where `d = k[1] - m`, `y = max(-d, 0)`, and `x = max(d, 0)`. - /// - /// The input must be at least a matrix. - /// - /// For example: - /// - /// ``` - /// input = np.array([[[1, 2, 3, 4], # Input shape: (2, 3, 4) - /// [5, 6, 7, 8], - /// [9, 8, 7, 6]], - /// [[5, 4, 3, 2], - /// [1, 2, 3, 4], - /// [5, 6, 7, 8]]]) - /// - /// # A main diagonal from each batch. - /// tf.matrix_diag_part(input) ==> [[1, 6, 7], # Output shape: (2, 3) - /// [5, 2, 7]] - /// - /// # A superdiagonal from each batch. - /// tf.matrix_diag_part(input, k = 1) - /// ==> [[2, 7, 6], # Output shape: (2, 3) - /// [4, 3, 8]] - /// - /// # A tridiagonal band from each batch. - /// tf.matrix_diag_part(input, k = (-1, 1)) - /// ==> [[[2, 7, 6], # Output shape: (2, 3, 3) - /// [1, 6, 7], - /// [5, 8, 0]], - /// [[4, 3, 8], - /// [5, 2, 7], - /// [1, 6, 0]]] - /// - /// # Padding value = 9 - /// tf.matrix_diag_part(input, k = (1, 3), padding_value = 9) - /// ==> [[[4, 9, 9], # Output shape: (2, 3, 3) - /// [3, 8, 9], - /// [2, 7, 6]], - /// [[2, 9, 9], - /// [3, 4, 9], - /// [4, 3, 8]]] - /// ``` - /// - /// - Parameters: - /// - input: Rank `r` tensor where `r >= 2`. - /// - k: Diagonal offset(s). Positive value means superdiagonal, 0 refers to the main - /// diagonal, and negative value means subdiagonals. `k` can be a single integer - /// (for a single diagonal) or a pair of integers specifying the low and high ends - /// of a matrix band. 
`k[0]` must not be larger than `k[1]`. - /// - padding_value: The value to fill the area outside the specified diagonal band with. - /// Default is 0. - /// - /// - Output diagonal: The extracted diagonal(s). - @inlinable @inline(__always) - public static func matrixDiagPartV2( - _ input: Tensor, - k: Tensor, - paddingValue: Tensor - ) -> Tensor { - switch commonBackend( - commonBackend(input.handle.backend, k.handle.backend), paddingValue.handle.backend) - { - case .XLA: - let output_device = paddingValue.device - let input = Tensor(copying: input, to: .defaultTFEager) - let k = Tensor(copying: k, to: .defaultTFEager) - let paddingValue = Tensor(copying: paddingValue, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.matrixDiagPartV2(input, k: k, paddingValue: paddingValue), - to: output_device) - case .TF_EAGER: - return _RawTFEager.matrixDiagPartV2(input, k: k, paddingValue: paddingValue) - } - - } - - /// Returns a batched diagonal tensor with given batched diagonal values. - /// - /// Returns a tensor with the contents in `diagonal` as `k[0]`-th to `k[1]`-th - /// diagonals of a matrix, with everything else padded with `padding`. `num_rows` - /// and `num_cols` specify the dimension of the innermost matrix of the output. If - /// both are not specified, the op assumes the innermost matrix is square and infers - /// its size from `k` and the innermost dimension of `diagonal`. If only one of them - /// is specified, the op assumes the unspecified value is the smallest possible - /// based on other criteria. - /// - /// Let `diagonal` have `r` dimensions `[I, J, ..., L, M, N]`. The output tensor has - /// rank `r+1` with shape `[I, J, ..., L, M, num_rows, num_cols]` when only one - /// diagonal is given (`k` is an integer or `k[0] == k[1]`). Otherwise, it has rank - /// `r` with shape `[I, J, ..., L, num_rows, num_cols]`. - /// - /// The second innermost dimension of `diagonal` has double meaning. - /// When `k` is scalar or `k[0] == k[1]`, `M` is part of the batch size - /// [I, J, ..., M], and the output tensor is: - /// - /// ``` - /// output[i, j, ..., l, m, n] - /// = diagonal[i, j, ..., l, n-max(d_upper, 0)] ; if n - m == d_upper - /// padding_value ; otherwise - /// ``` - /// - /// Otherwise, `M` is treated as the number of diagonals for the matrix in the - /// same batch (`M = k[1]-k[0]+1`), and the output tensor is: - /// - /// ``` - /// output[i, j, ..., l, m, n] - /// = diagonal[i, j, ..., l, diag_index, index_in_diag] ; if k[0] <= d <= k[1] - /// padding_value ; otherwise - /// ``` - /// where `d = n - m`, `diag_index = k[1] - d`, and `index_in_diag = n - max(d, 0)`. - /// - /// For example: - /// - /// ``` - /// # The main diagonal. - /// diagonal = np.array([[1, 2, 3, 4], # Input shape: (2, 4) - /// [5, 6, 7, 8]]) - /// tf.matrix_diag(diagonal) ==> [[[1, 0, 0, 0], # Output shape: (2, 4, 4) - /// [0, 2, 0, 0], - /// [0, 0, 3, 0], - /// [0, 0, 0, 4]], - /// [[5, 0, 0, 0], - /// [0, 6, 0, 0], - /// [0, 0, 7, 0], - /// [0, 0, 0, 8]]] - /// - /// # A superdiagonal (per batch). - /// diagonal = np.array([[1, 2, 3], # Input shape: (2, 3) - /// [4, 5, 6]]) - /// tf.matrix_diag(diagonal, k = 1) - /// ==> [[[0, 1, 0, 0], # Output shape: (2, 4, 4) - /// [0, 0, 2, 0], - /// [0, 0, 0, 3], - /// [0, 0, 0, 0]], - /// [[0, 4, 0, 0], - /// [0, 0, 5, 0], - /// [0, 0, 0, 6], - /// [0, 0, 0, 0]]] - /// - /// # A band of diagonals. 
- /// diagonals = np.array([[[1, 2, 3], # Input shape: (2, 2, 3) - /// [4, 5, 0]], - /// [[6, 7, 9], - /// [9, 1, 0]]]) - /// tf.matrix_diag(diagonals, k = (-1, 0)) - /// ==> [[[1, 0, 0], # Output shape: (2, 3, 3) - /// [4, 2, 0], - /// [0, 5, 3]], - /// [[6, 0, 0], - /// [9, 7, 0], - /// [0, 1, 9]]] - /// - /// # Rectangular matrix. - /// diagonal = np.array([1, 2]) # Input shape: (2) - /// tf.matrix_diag(diagonal, k = -1, num_rows = 3, num_cols = 4) - /// ==> [[0, 0, 0, 0], # Output shape: (3, 4) - /// [1, 0, 0, 0], - /// [0, 2, 0, 0]] - /// - /// # Rectangular matrix with inferred num_cols and padding_value = 9. - /// tf.matrix_diag(diagonal, k = -1, num_rows = 3, padding_value = 9) - /// ==> [[9, 9], # Output shape: (3, 2) - /// [1, 9], - /// [9, 2]] - /// ``` - /// - /// - Parameters: - /// - diagonal: Rank `r`, where `r >= 1` - /// - k: Diagonal offset(s). Positive value means superdiagonal, 0 refers to the main - /// diagonal, and negative value means subdiagonals. `k` can be a single integer - /// (for a single diagonal) or a pair of integers specifying the low and high ends - /// of a matrix band. `k[0]` must not be larger than `k[1]`. - /// - num_rows: The number of rows of the output matrix. If it is not provided, the op assumes - /// the output matrix is a square matrix and infers the matrix size from k and the - /// innermost dimension of `diagonal`. - /// - num_cols: The number of columns of the output matrix. If it is not provided, the op - /// assumes the output matrix is a square matrix and infers the matrix size from - /// k and the innermost dimension of `diagonal`. - /// - padding_value: The number to fill the area outside the specified diagonal band with. - /// Default is 0. - /// - /// - Output output: Has rank `r+1` when `k` is an integer or `k[0] == k[1]`, rank `r` otherwise. - @inlinable @inline(__always) - public static func matrixDiagV2( - diagonal: Tensor, - k: Tensor, - numRows: Tensor, - numCols: Tensor, - paddingValue: Tensor - ) -> Tensor { - switch commonBackend( - commonBackend( - commonBackend( - commonBackend(diagonal.handle.backend, k.handle.backend), numRows.handle.backend), - numCols.handle.backend), paddingValue.handle.backend) - { - case .XLA: - let output_device = paddingValue.device - let diagonal = Tensor(copying: diagonal, to: .defaultTFEager) - let k = Tensor(copying: k, to: .defaultTFEager) - let numRows = Tensor(copying: numRows, to: .defaultTFEager) - let numCols = Tensor(copying: numCols, to: .defaultTFEager) - let paddingValue = Tensor(copying: paddingValue, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.matrixDiagV2( - diagonal: diagonal, k: k, numRows: numRows, numCols: numCols, paddingValue: paddingValue - ), to: output_device) - case .TF_EAGER: - return _RawTFEager.matrixDiagV2( - diagonal: diagonal, k: k, numRows: numRows, numCols: numCols, paddingValue: paddingValue) - } - - } - - /// Deprecated, use python implementation tf.linalg.matrix_exponential. - @inlinable @inline(__always) - public static func matrixExponential( - _ input: Tensor - ) -> Tensor { - switch input.handle.backend { - case .XLA: - let output_device = input.device - let input = Tensor(copying: input, to: .defaultTFEager) - return Tensor(copying: _RawTFEager.matrixExponential(input), to: output_device) - case .TF_EAGER: - return _RawTFEager.matrixExponential(input) - } - - } - - /// Computes the inverse of one or more square invertible matrices or their - /// - /// adjoints (conjugate transposes). 
-    ///
-    /// The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions
-    /// form square matrices. The output is a tensor of the same shape as the input
-    /// containing the inverse for all input submatrices `[..., :, :]`.
-    ///
-    /// The op uses LU decomposition with partial pivoting to compute the inverses.
-    ///
-    /// If a matrix is not invertible there is no guarantee what the op does. It
-    /// may detect the condition and raise an exception or it may simply return a
-    /// garbage result.
-    ///
-    /// - Parameter input: Shape is `[..., M, M]`.
-    ///
-    /// - Output output: Shape is `[..., M, M]`.
-    ///
-    /// @compatibility(numpy)
-    /// Equivalent to np.linalg.inv
-    /// @end_compatibility
-    @inlinable @inline(__always)
-    public static func matrixInverse<T: FloatingPoint & TensorFlowScalar>(
-      _ input: Tensor<T>,
-      adjoint: Bool = false
-    ) -> Tensor<T> {
-      switch input.handle.backend {
-      case .XLA:
-        let output_device = input.device
-        let input = Tensor(copying: input, to: .defaultTFEager)
-        return Tensor(
-          copying: _RawTFEager.matrixInverse(input, adjoint: adjoint), to: output_device)
-      case .TF_EAGER:
-        return _RawTFEager.matrixInverse(input, adjoint: adjoint)
-      }
-
-    }
-
-    /// Computes the matrix logarithm of one or more square matrices:
-    ///
-    ///
-    /// \\(log(exp(A)) = A\\)
-    ///
-    /// This op is only defined for complex matrices. If A is positive-definite and
-    /// real, then casting to a complex matrix, taking the logarithm and casting back
-    /// to a real matrix will give the correct result.
-    ///
-    /// This function computes the matrix logarithm using the Schur-Parlett algorithm.
-    /// Details of the algorithm can be found in Section 11.6.2 of:
-    /// Nicholas J. Higham, Functions of Matrices: Theory and Computation, SIAM 2008.
-    /// ISBN 978-0-898716-46-7.
-    ///
-    /// The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions
-    /// form square matrices. The output is a tensor of the same shape as the input
-    /// containing the logarithm for all input submatrices `[..., :, :]`.
-    ///
-    /// - Parameter input: Shape is `[..., M, M]`.
-    ///
-    /// - Output output: Shape is `[..., M, M]`.
-    ///
-    /// @compatibility(scipy)
-    /// Equivalent to scipy.linalg.logm
-    /// @end_compatibility
-    @inlinable @inline(__always)
-    public static func matrixLogarithm<T: TensorFlowScalar>(
-      _ input: Tensor<T>
-    ) -> Tensor<T> {
-      switch input.handle.backend {
-      case .XLA:
-        let output_device = input.device
-        let input = Tensor(copying: input, to: .defaultTFEager)
-        return Tensor(copying: _RawTFEager.matrixLogarithm(input), to: output_device)
-      case .TF_EAGER:
-        return _RawTFEager.matrixLogarithm(input)
-      }
-
-    }
-
-    /// Returns a batched matrix tensor with new batched diagonal values.
-    ///
-    /// Given `input` and `diagonal`, this operation returns a tensor with the
-    /// same shape and values as `input`, except for the main diagonal of the
-    /// innermost matrices. These will be overwritten by the values in `diagonal`.
-    ///
-    /// The output is computed as follows:
-    ///
-    /// Assume `input` has `k+1` dimensions `[I, J, K, ..., M, N]` and `diagonal` has
-    /// `k` dimensions `[I, J, K, ..., min(M, N)]`. Then the output is a
-    /// tensor of rank `k+1` with dimensions `[I, J, K, ..., M, N]` where:
-    ///
-    /// * `output[i, j, k, ..., m, n] = diagonal[i, j, k, ..., n]` for `m == n`.
-    /// * `output[i, j, k, ..., m, n] = input[i, j, k, ..., m, n]` for `m != n`.
-    ///
-    /// - Parameters:
-    ///   - input: Rank `k+1`, where `k >= 1`.
-    ///   - diagonal: Rank `k`, where `k >= 1`.
- /// - /// - Output output: Rank `k+1`, with `output.shape = input.shape`. - @inlinable @inline(__always) - public static func matrixSetDiag( - _ input: Tensor, - diagonal: Tensor - ) -> Tensor { - switch commonBackend(input.handle.backend, diagonal.handle.backend) { - case .XLA: - let output_device = diagonal.device - let input = Tensor(copying: input, to: .defaultTFEager) - let diagonal = Tensor(copying: diagonal, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.matrixSetDiag(input, diagonal: diagonal), to: output_device) - case .TF_EAGER: - return _RawTFEager.matrixSetDiag(input, diagonal: diagonal) - } - - } - - /// Returns a batched matrix tensor with new batched diagonal values. - /// - /// Given `input` and `diagonal`, this operation returns a tensor with the - /// same shape and values as `input`, except for the specified diagonals of the - /// innermost matrices. These will be overwritten by the values in `diagonal`. - /// - /// `input` has `r+1` dimensions `[I, J, ..., L, M, N]`. When `k` is scalar or - /// `k[0] == k[1]`, `diagonal` has `r` dimensions `[I, J, ..., L, max_diag_len]`. - /// Otherwise, it has `r+1` dimensions `[I, J, ..., L, num_diags, max_diag_len]`. - /// `num_diags` is the number of diagonals, `num_diags = k[1] - k[0] + 1`. - /// `max_diag_len` is the longest diagonal in the range `[k[0], k[1]]`, - /// `max_diag_len = min(M + min(k[1], 0), N + min(-k[0], 0))` - /// - /// The output is a tensor of rank `k+1` with dimensions `[I, J, ..., L, M, N]`. - /// If `k` is scalar or `k[0] == k[1]`: - /// - /// ``` - /// output[i, j, ..., l, m, n] - /// = diagonal[i, j, ..., l, n-max(k[1], 0)] ; if n - m == k[1] - /// input[i, j, ..., l, m, n] ; otherwise - /// ``` - /// - /// Otherwise, - /// - /// ``` - /// output[i, j, ..., l, m, n] - /// = diagonal[i, j, ..., l, diag_index, index_in_diag] ; if k[0] <= d <= k[1] - /// input[i, j, ..., l, m, n] ; otherwise - /// ``` - /// where `d = n - m`, `diag_index = k[1] - d`, and `index_in_diag = n - max(d, 0)`. - /// - /// For example: - /// - /// ``` - /// # The main diagonal. - /// input = np.array([[[7, 7, 7, 7], # Input shape: (2, 3, 4) - /// [7, 7, 7, 7], - /// [7, 7, 7, 7]], - /// [[7, 7, 7, 7], - /// [7, 7, 7, 7], - /// [7, 7, 7, 7]]]) - /// diagonal = np.array([[1, 2, 3], # Diagonal shape: (2, 3) - /// [4, 5, 6]]) - /// tf.matrix_set_diag(diagonal) ==> [[[1, 7, 7, 7], # Output shape: (2, 3, 4) - /// [7, 2, 7, 7], - /// [7, 7, 3, 7]], - /// [[4, 7, 7, 7], - /// [7, 5, 7, 7], - /// [7, 7, 6, 7]]] - /// - /// # A superdiagonal (per batch). - /// tf.matrix_set_diag(diagonal, k = 1) - /// ==> [[[7, 1, 7, 7], # Output shape: (2, 3, 4) - /// [7, 7, 2, 7], - /// [7, 7, 7, 3]], - /// [[7, 4, 7, 7], - /// [7, 7, 5, 7], - /// [7, 7, 7, 6]]] - /// - /// # A band of diagonals. - /// diagonals = np.array([[[1, 2, 3], # Diagonal shape: (2, 2, 3) - /// [4, 5, 0]], - /// [[6, 1, 2], - /// [3, 4, 0]]]) - /// tf.matrix_set_diag(diagonals, k = (-1, 0)) - /// ==> [[[1, 7, 7, 7], # Output shape: (2, 3, 4) - /// [4, 2, 7, 7], - /// [0, 5, 3, 7]], - /// [[6, 7, 7, 7], - /// [3, 1, 7, 7], - /// [7, 4, 2, 7]]] - /// - /// ``` - /// - /// - Parameters: - /// - input: Rank `r+1`, where `r >= 1`. - /// - diagonal: Rank `r` when `k` is an integer or `k[0] == k[1]`. Otherwise, it has rank `r+1`. - /// `k >= 1`. - /// - k: Diagonal offset(s). Positive value means superdiagonal, 0 refers to the main - /// diagonal, and negative value means subdiagonals. 
`k` can be a single integer - /// (for a single diagonal) or a pair of integers specifying the low and high ends - /// of a matrix band. `k[0]` must not be larger than `k[1]`. - /// - /// - Output output: Rank `r+1`, with `output.shape = input.shape`. - @inlinable @inline(__always) - public static func matrixSetDiagV2( - _ input: Tensor, - diagonal: Tensor, - k: Tensor - ) -> Tensor { - switch commonBackend( - commonBackend(input.handle.backend, diagonal.handle.backend), k.handle.backend) - { - case .XLA: - let output_device = k.device - let input = Tensor(copying: input, to: .defaultTFEager) - let diagonal = Tensor(copying: diagonal, to: .defaultTFEager) - let k = Tensor(copying: k, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.matrixSetDiagV2(input, diagonal: diagonal, k: k), to: output_device) - case .TF_EAGER: - return _RawTFEager.matrixSetDiagV2(input, diagonal: diagonal, k: k) - } - - } - - /// Solves systems of linear equations. - /// - /// `Matrix` is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions - /// form square matrices. `Rhs` is a tensor of shape `[..., M, K]`. The `output` is - /// a tensor shape `[..., M, K]`. If `adjoint` is `False` then each output matrix - /// satisfies `matrix[..., :, :] * output[..., :, :] = rhs[..., :, :]`. - /// If `adjoint` is `True` then each output matrix satisfies - /// `adjoint(matrix[..., :, :]) * output[..., :, :] = rhs[..., :, :]`. - /// - /// - Parameters: - /// - matrix: Shape is `[..., M, M]`. - /// - rhs: Shape is `[..., M, K]`. - /// - /// - Attr adjoint: Boolean indicating whether to solve with `matrix` or its (block-wise) - /// adjoint. - /// - /// - Output output: Shape is `[..., M, K]`. - @inlinable @inline(__always) - public static func matrixSolve( - matrix: Tensor, - rhs: Tensor, - adjoint: Bool = false - ) -> Tensor { - switch commonBackend(matrix.handle.backend, rhs.handle.backend) { - case .XLA: - let output_device = rhs.device - let matrix = Tensor(copying: matrix, to: .defaultTFEager) - let rhs = Tensor(copying: rhs, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.matrixSolve(matrix: matrix, rhs: rhs, adjoint: adjoint), - to: output_device) - case .TF_EAGER: - return _RawTFEager.matrixSolve(matrix: matrix, rhs: rhs, adjoint: adjoint) - } - - } - - /// Solves one or more linear least-squares problems. - /// - /// `matrix` is a tensor of shape `[..., M, N]` whose inner-most 2 dimensions - /// form real or complex matrices of size `[M, N]`. `Rhs` is a tensor of the same - /// type as `matrix` and shape `[..., M, K]`. - /// The output is a tensor shape `[..., N, K]` where each output matrix solves - /// each of the equations - /// `matrix[..., :, :]` * `output[..., :, :]` = `rhs[..., :, :]` - /// in the least squares sense. - /// - /// We use the following notation for (complex) matrix and right-hand sides - /// in the batch: - /// - /// `matrix`=\\(A \in \mathbb{C}^{m \times n}\\), - /// `rhs`=\\(B \in \mathbb{C}^{m \times k}\\), - /// `output`=\\(X \in \mathbb{C}^{n \times k}\\), - /// `l2_regularizer`=\\(\lambda \in \mathbb{R}\\). - /// - /// If `fast` is `True`, then the solution is computed by solving the normal - /// equations using Cholesky decomposition. Specifically, if \\(m \ge n\\) then - /// \\(X = (A^H A + \lambda I)^{-1} A^H B\\), which solves the least-squares - /// problem \\(X = \mathrm{argmin}_{Z \in \Re^{n \times k} } ||A Z - B||_F^2 + \lambda ||Z||_F^2\\). 
- /// If \\(m \lt n\\) then `output` is computed as - /// \\(X = A^H (A A^H + \lambda I)^{-1} B\\), which (for \\(\lambda = 0\\)) is the - /// minimum-norm solution to the under-determined linear system, i.e. - /// \\(X = \mathrm{argmin}_{Z \in \mathbb{C}^{n \times k} } ||Z||_F^2 \\), - /// subject to \\(A Z = B\\). Notice that the fast path is only numerically stable - /// when \\(A\\) is numerically full rank and has a condition number - /// \\(\mathrm{cond}(A) \lt \frac{1}{\sqrt{\epsilon_{mach} } }\\) or \\(\lambda\\) is - /// sufficiently large. - /// - /// If `fast` is `False` an algorithm based on the numerically robust complete - /// orthogonal decomposition is used. This computes the minimum-norm - /// least-squares solution, even when \\(A\\) is rank deficient. This path is - /// typically 6-7 times slower than the fast path. If `fast` is `False` then - /// `l2_regularizer` is ignored. - /// - /// - Parameters: - /// - matrix: Shape is `[..., M, N]`. - /// - rhs: Shape is `[..., M, K]`. - /// - l2_regularizer: Scalar tensor. - /// - /// @compatibility(numpy) - /// Equivalent to np.linalg.lstsq - /// @end_compatibility - /// - /// - Output output: Shape is `[..., N, K]`. - @inlinable @inline(__always) - public static func matrixSolveLs( - matrix: Tensor, - rhs: Tensor, - l2Regularizer: Tensor, - fast: Bool = true - ) -> Tensor { - switch commonBackend( - commonBackend(matrix.handle.backend, rhs.handle.backend), l2Regularizer.handle.backend) - { - case .XLA: - let output_device = l2Regularizer.device - let matrix = Tensor(copying: matrix, to: .defaultTFEager) - let rhs = Tensor(copying: rhs, to: .defaultTFEager) - let l2Regularizer = Tensor(copying: l2Regularizer, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.matrixSolveLs( - matrix: matrix, rhs: rhs, l2Regularizer: l2Regularizer, fast: fast), to: output_device) - case .TF_EAGER: - return _RawTFEager.matrixSolveLs( - matrix: matrix, rhs: rhs, l2Regularizer: l2Regularizer, fast: fast) - } - - } - - /// Computes the matrix square root of one or more square matrices: - /// - /// matmul(sqrtm(A), sqrtm(A)) = A - /// - /// The input matrix should be invertible. If the input matrix is real, it should - /// have no eigenvalues which are real and negative (pairs of complex conjugate - /// eigenvalues are allowed). - /// - /// The matrix square root is computed by first reducing the matrix to - /// quasi-triangular form with the real Schur decomposition. The square root - /// of the quasi-triangular matrix is then computed directly. Details of - /// the algorithm can be found in: Nicholas J. Higham, "Computing real - /// square roots of a real matrix", Linear Algebra Appl., 1987. - /// - /// The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions - /// form square matrices. The output is a tensor of the same shape as the input - /// containing the matrix square root for all input submatrices `[..., :, :]`. - /// - /// - Parameter input: Shape is `[..., M, M]`. - /// - /// - Output output: Shape is `[..., M, M]`. 
-    ///
-    /// @compatibility(scipy)
-    /// Equivalent to scipy.linalg.sqrtm
-    /// @end_compatibility
-    @inlinable @inline(__always)
-    public static func matrixSquareRoot<T: FloatingPoint & TensorFlowScalar>(
-      _ input: Tensor<T>
-    ) -> Tensor<T> {
-      switch input.handle.backend {
-      case .XLA:
-        let output_device = input.device
-        let input = Tensor(copying: input, to: .defaultTFEager)
-        return Tensor(copying: _RawTFEager.matrixSquareRoot(input), to: output_device)
-      case .TF_EAGER:
-        return _RawTFEager.matrixSquareRoot(input)
-      }
-
-    }
-
-    /// Solves systems of linear equations with upper or lower triangular matrices by backsubstitution.
-    ///
-    ///
-    /// `matrix` is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions form
-    /// square matrices. If `lower` is `True` then the strictly upper triangular part
-    /// of each inner-most matrix is assumed to be zero and not accessed.
-    /// If `lower` is `False` then the strictly lower triangular part of each inner-most
-    /// matrix is assumed to be zero and not accessed.
-    /// `rhs` is a tensor of shape `[..., M, N]`.
-    ///
-    /// The output is a tensor of shape `[..., M, N]`. If `adjoint` is
-    /// `False` then the innermost matrices in `output` satisfy matrix equations
-    /// `matrix[..., :, :] * output[..., :, :] = rhs[..., :, :]`.
-    /// If `adjoint` is `True` then the innermost matrices in
-    /// `output` satisfy matrix equations
-    /// `adjoint(matrix[..., i, k]) * output[..., k, j] = rhs[..., i, j]`.
-    ///
-    /// Note, the batch shapes for the inputs only need to broadcast.
-    ///
-    /// Example:
-    /// ```python
-    ///
-    /// a = tf.constant([[3, 0, 0, 0],
-    ///                  [2, 1, 0, 0],
-    ///                  [1, 0, 1, 0],
-    ///                  [1, 1, 1, 1]], dtype=tf.float32)
-    ///
-    /// b = tf.constant([[4],
-    ///                  [2],
-    ///                  [4],
-    ///                  [2]], dtype=tf.float32)
-    ///
-    /// x = tf.linalg.triangular_solve(a, b, lower=True)
-    /// x
-    /// # <tf.Tensor: shape=(4, 1), dtype=float32, numpy=
-    /// # array([[ 1.3333334 ],
-    /// #        [-0.66666675],
-    /// #        [ 2.6666665 ],
-    /// #        [-1.3333331 ]], dtype=float32)>
-    ///
-    /// # in python3 one can use `a@x`
-    /// tf.matmul(a, x)
-    /// # <tf.Tensor: shape=(4, 1), dtype=float32, numpy=
-    /// # array([[4.],
-    /// #        [2.],
-    /// #        [4.],
-    /// #        [2.]], dtype=float32)>
-    /// ```
-    ///
-    /// - Parameters:
-    ///   - matrix: Shape is `[..., M, M]`.
-    ///   - rhs: Shape is `[..., M, K]`.
-    ///
-    /// - Attrs:
-    ///   - lower: Boolean indicating whether the innermost matrices in `matrix` are
-    ///     lower or upper triangular.
-    ///   - adjoint: Boolean indicating whether to solve with `matrix` or its (block-wise)
-    ///     adjoint.
-    ///
-    /// @compatibility(numpy)
-    /// Equivalent to scipy.linalg.solve_triangular
-    /// @end_compatibility
-    ///
-    /// - Output output: Shape is `[..., M, K]`.
-    @inlinable @inline(__always)
-    public static func matrixTriangularSolve<T: FloatingPoint & TensorFlowScalar>(
-      matrix: Tensor<T>,
-      rhs: Tensor<T>,
-      lower: Bool = true,
-      adjoint: Bool = false
-    ) -> Tensor<T> {
-      switch commonBackend(matrix.handle.backend, rhs.handle.backend) {
-      case .XLA:
-        let output_device = rhs.device
-        let matrix = Tensor(copying: matrix, to: .defaultTFEager)
-        let rhs = Tensor(copying: rhs, to: .defaultTFEager)
-        return Tensor(
-          copying: _RawTFEager.matrixTriangularSolve(
-            matrix: matrix, rhs: rhs, lower: lower, adjoint: adjoint), to: output_device)
-      case .TF_EAGER:
-        return _RawTFEager.matrixTriangularSolve(
-          matrix: matrix, rhs: rhs, lower: lower, adjoint: adjoint)
-      }
-
-    }
-
-    /// Computes the maximum of elements across dimensions of a tensor.
-    ///
-    /// Reduces `input` along the dimensions given in `axis`. Unless
-    /// `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
-    /// `axis`. If `keep_dims` is true, the reduced dimensions are
-    /// retained with length 1.
-    ///
-    /// - Parameters:
-    ///   - input: The tensor to reduce.
-    ///   - reduction_indices: The dimensions to reduce.
Must be in the range - /// `[-rank(input), rank(input))`. - /// - /// - Attr keep_dims: If true, retain reduced dimensions with length 1. - /// - /// - Output output: The reduced tensor. - @inlinable @inline(__always) - public static func max< - T: TensorFlowNumeric, - Tidx: TensorFlowIndex - >( - _ input: Tensor, - reductionIndices: Tensor, - keepDims: Bool = false - ) -> Tensor { - switch commonBackend(input.handle.backend, reductionIndices.handle.backend) { - case .XLA: - return _RawXLA.max(input, reductionIndices: reductionIndices, keepDims: keepDims) - case .TF_EAGER: - return _RawTFEager.max(input, reductionIndices: reductionIndices, keepDims: keepDims) - } - - } - - /// Creates a dataset that overrides the maximum intra-op parallelism. - /// - /// - Parameter max_intra_op_parallelism: Identifies the maximum intra-op parallelism to use. - @inlinable @inline(__always) - public static func maxIntraOpParallelismDataset( - inputDataset: VariantHandle, - maxIntraOpParallelism: Tensor, - outputTypes: [TensorDataType], - outputShapes: [TensorShape?] - ) -> VariantHandle { - _RawTFEager.maxIntraOpParallelismDataset( - inputDataset: inputDataset, maxIntraOpParallelism: maxIntraOpParallelism, - outputTypes: outputTypes, outputShapes: outputShapes) - } - - /// Performs max pooling on the input. - /// - /// - Parameter input: 4-D input to pool over. - /// - /// - Attrs: - /// - ksize: The size of the window for each dimension of the input tensor. - /// - strides: The stride of the sliding window for each dimension of the - /// input tensor. - /// - padding: The type of padding algorithm to use. - /// - data_format: Specify the data format of the input and output data. With the - /// default format "NHWC", the data is stored in the order of: - /// [batch, in_height, in_width, in_channels]. - /// Alternatively, the format could be "NCHW", the data storage order of: - /// [batch, in_channels, in_height, in_width]. - /// - /// - Output output: The max pooled output tensor. - @inlinable @inline(__always) - public static func maxPool( - _ input: Tensor, - ksize: [Int32], - strides: [Int32], - padding: Padding, - dataFormat: DataFormat2 = .nhwc - ) -> Tensor { - switch input.handle.backend { - case .XLA: - let output_device = input.device - let input = Tensor(copying: input, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.maxPool( - input, ksize: ksize, strides: strides, padding: padding, dataFormat: dataFormat), - to: output_device) - case .TF_EAGER: - return _RawTFEager.maxPool( - input, ksize: ksize, strides: strides, padding: padding, dataFormat: dataFormat) - } - - } - - /// Performs 3D max pooling on the input. - /// - /// - Parameter input: Shape `[batch, depth, rows, cols, channels]` tensor to pool over. - /// - /// - Attrs: - /// - ksize: 1-D tensor of length 5. The size of the window for each dimension of - /// the input tensor. Must have `ksize[0] = ksize[4] = 1`. - /// - strides: 1-D tensor of length 5. The stride of the sliding window for each - /// dimension of `input`. Must have `strides[0] = strides[4] = 1`. - /// - padding: The type of padding algorithm to use. - /// - data_format: The data format of the input and output data. With the - /// default format "NDHWC", the data is stored in the order of: - /// [batch, in_depth, in_height, in_width, in_channels]. - /// Alternatively, the format could be "NCDHW", the data storage order is: - /// [batch, in_channels, in_depth, in_height, in_width]. - /// - /// - Output output: The max pooled output tensor. 
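As a quick orientation to the `maxPool` wrapper above: the window and strides are plain `[Int32]` attributes rather than tensors. A minimal sketch with hypothetical values; it assumes `import TensorFlow`, the usual swift-apis `Tensor(shape:scalars:)` initializer, and that the raw `Padding` enum exposes a `.valid` case:

```swift
import TensorFlow

// A 1x4x4x1 NHWC input holding 0...15, so each 2x2 block's maximum is easy to check.
let input = Tensor<Float>(shape: [1, 4, 4, 1], scalars: (0..<16).map(Float.init))

// 2x2 window, stride 2, no padding: one maximum per block.
let pooled = _Raw.maxPool(
  input, ksize: [1, 2, 2, 1], strides: [1, 2, 2, 1], padding: .valid)

print(pooled.shape)    // [1, 2, 2, 1]
print(pooled.scalars)  // [5.0, 7.0, 13.0, 15.0]
```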
- @inlinable @inline(__always) - public static func maxPool3D( - _ input: Tensor, - ksize: [Int32], - strides: [Int32], - padding: Padding, - dataFormat: DataFormat1 = .ndhwc - ) -> Tensor { - switch input.handle.backend { - case .XLA: - return _RawXLA.maxPool3D( - input, ksize: ksize, strides: strides, padding: padding, dataFormat: dataFormat) - case .TF_EAGER: - return _RawTFEager.maxPool3D( - input, ksize: ksize, strides: strides, padding: padding, dataFormat: dataFormat) - } - - } - - /// Computes gradients of max pooling function. - /// - /// - Parameters: - /// - orig_input: The original input tensor. - /// - orig_output: The original output tensor. - /// - grad: Output backprop of shape `[batch, depth, rows, cols, channels]`. - /// - /// - Attrs: - /// - ksize: 1-D tensor of length 5. The size of the window for each dimension of - /// the input tensor. Must have `ksize[0] = ksize[4] = 1`. - /// - strides: 1-D tensor of length 5. The stride of the sliding window for each - /// dimension of `input`. Must have `strides[0] = strides[4] = 1`. - /// - padding: The type of padding algorithm to use. - /// - data_format: The data format of the input and output data. With the - /// default format "NDHWC", the data is stored in the order of: - /// [batch, in_depth, in_height, in_width, in_channels]. - /// Alternatively, the format could be "NCDHW", the data storage order is: - /// [batch, in_channels, in_depth, in_height, in_width]. - @inlinable @inline(__always) - public static func maxPool3DGrad< - T: FloatingPoint & TensorFlowScalar, - Tinput: FloatingPoint & TensorFlowScalar - >( - origInput: Tensor, - origOutput: Tensor, - grad: Tensor, - ksize: [Int32], - strides: [Int32], - padding: Padding, - dataFormat: DataFormat1 = .ndhwc - ) -> Tensor { - switch commonBackend( - commonBackend(origInput.handle.backend, origOutput.handle.backend), grad.handle.backend) - { - case .XLA: - return _RawXLA.maxPool3DGrad( - origInput: origInput, origOutput: origOutput, grad: grad, ksize: ksize, strides: strides, - padding: padding, dataFormat: dataFormat) - case .TF_EAGER: - return _RawTFEager.maxPool3DGrad( - origInput: origInput, origOutput: origOutput, grad: grad, ksize: ksize, strides: strides, - padding: padding, dataFormat: dataFormat) - } - - } - - /// Computes second-order gradients of the maxpooling function. - /// - /// - Parameters: - /// - orig_input: The original input tensor. - /// - orig_output: The original output tensor. - /// - grad: Output backprop of shape `[batch, depth, rows, cols, channels]`. - /// - /// - Attrs: - /// - ksize: 1-D tensor of length 5. The size of the window for each dimension of - /// the input tensor. Must have `ksize[0] = ksize[4] = 1`. - /// - strides: 1-D tensor of length 5. The stride of the sliding window for each - /// dimension of `input`. Must have `strides[0] = strides[4] = 1`. - /// - padding: The type of padding algorithm to use. - /// - data_format: The data format of the input and output data. With the - /// default format "NDHWC", the data is stored in the order of: - /// [batch, in_depth, in_height, in_width, in_channels]. - /// Alternatively, the format could be "NCDHW", the data storage order is: - /// [batch, in_channels, in_depth, in_height, in_width]. - /// - /// - Output output: Gradients of gradients w.r.t. the input to `max_pool`. 
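The 3-D variant `maxPool3D` above follows the same pattern with NDHWC layout and length-5 window and stride attributes. A minimal sketch under the same assumptions as the 2-D example:

```swift
import TensorFlow

// A 1x2x2x2x1 NDHWC volume holding 0...7.
let volume = Tensor<Float>(shape: [1, 2, 2, 2, 1], scalars: (0..<8).map(Float.init))

// Pool the whole 2x2x2 volume down to its single maximum (7).
let pooled = _Raw.maxPool3D(
  volume, ksize: [1, 2, 2, 2, 1], strides: [1, 2, 2, 2, 1], padding: .valid)

print(pooled.shape)  // [1, 1, 1, 1, 1]
```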
- @inlinable @inline(__always) - public static func maxPool3DGradGrad( - origInput: Tensor, - origOutput: Tensor, - grad: Tensor, - ksize: [Int32], - strides: [Int32], - padding: Padding, - dataFormat: DataFormat1 = .ndhwc - ) -> Tensor { - switch commonBackend( - commonBackend(origInput.handle.backend, origOutput.handle.backend), grad.handle.backend) - { - case .XLA: - let output_device = grad.device - let origInput = Tensor(copying: origInput, to: .defaultTFEager) - let origOutput = Tensor(copying: origOutput, to: .defaultTFEager) - let grad = Tensor(copying: grad, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.maxPool3DGradGrad( - origInput: origInput, origOutput: origOutput, grad: grad, ksize: ksize, - strides: strides, padding: padding, dataFormat: dataFormat), to: output_device) - case .TF_EAGER: - return _RawTFEager.maxPool3DGradGrad( - origInput: origInput, origOutput: origOutput, grad: grad, ksize: ksize, strides: strides, - padding: padding, dataFormat: dataFormat) - } - - } - - /// Computes gradients of the maxpooling function. - /// - /// - Parameters: - /// - orig_input: The original input tensor. - /// - orig_output: The original output tensor. - /// - grad: 4-D. Gradients w.r.t. the output of `max_pool`. - /// - /// - Attrs: - /// - ksize: The size of the window for each dimension of the input tensor. - /// - strides: The stride of the sliding window for each dimension of the - /// input tensor. - /// - padding: The type of padding algorithm to use. - /// - data_format: Specify the data format of the input and output data. With the - /// default format "NHWC", the data is stored in the order of: - /// [batch, in_height, in_width, in_channels]. - /// Alternatively, the format could be "NCHW", the data storage order of: - /// [batch, in_channels, in_height, in_width]. - /// - /// - Output output: Gradients w.r.t. the input to `max_pool`. - @inlinable @inline(__always) - public static func maxPoolGrad( - origInput: Tensor, - origOutput: Tensor, - grad: Tensor, - ksize: [Int32], - strides: [Int32], - padding: Padding, - dataFormat: DataFormat = .nhwc - ) -> Tensor { - switch commonBackend( - commonBackend(origInput.handle.backend, origOutput.handle.backend), grad.handle.backend) - { - case .XLA: - let output_device = grad.device - let origInput = Tensor(copying: origInput, to: .defaultTFEager) - let origOutput = Tensor(copying: origOutput, to: .defaultTFEager) - let grad = Tensor(copying: grad, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.maxPoolGrad( - origInput: origInput, origOutput: origOutput, grad: grad, ksize: ksize, - strides: strides, padding: padding, dataFormat: dataFormat), to: output_device) - case .TF_EAGER: - return _RawTFEager.maxPoolGrad( - origInput: origInput, origOutput: origOutput, grad: grad, ksize: ksize, strides: strides, - padding: padding, dataFormat: dataFormat) - } - - } - - /// Computes second-order gradients of the maxpooling function. - /// - /// - Parameters: - /// - orig_input: The original input tensor. - /// - orig_output: The original output tensor. - /// - grad: 4-D. Gradients of gradients w.r.t. the input of `max_pool`. - /// - /// - Attrs: - /// - ksize: The size of the window for each dimension of the input tensor. - /// - strides: The stride of the sliding window for each dimension of the - /// input tensor. - /// - padding: The type of padding algorithm to use. - /// - data_format: Specify the data format of the input and output data. 
With the - /// default format "NHWC", the data is stored in the order of: - /// [batch, in_height, in_width, in_channels]. - /// Alternatively, the format could be "NCHW", the data storage order of: - /// [batch, in_channels, in_height, in_width]. - /// - /// - Output output: Gradients of gradients w.r.t. the input to `max_pool`. - @inlinable @inline(__always) - public static func maxPoolGradGrad( - origInput: Tensor, - origOutput: Tensor, - grad: Tensor, - ksize: [Int32], - strides: [Int32], - padding: Padding, - dataFormat: DataFormat = .nhwc - ) -> Tensor { - switch commonBackend( - commonBackend(origInput.handle.backend, origOutput.handle.backend), grad.handle.backend) - { - case .XLA: - let output_device = grad.device - let origInput = Tensor(copying: origInput, to: .defaultTFEager) - let origOutput = Tensor(copying: origOutput, to: .defaultTFEager) - let grad = Tensor(copying: grad, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.maxPoolGradGrad( - origInput: origInput, origOutput: origOutput, grad: grad, ksize: ksize, - strides: strides, padding: padding, dataFormat: dataFormat), to: output_device) - case .TF_EAGER: - return _RawTFEager.maxPoolGradGrad( - origInput: origInput, origOutput: origOutput, grad: grad, ksize: ksize, strides: strides, - padding: padding, dataFormat: dataFormat) - } - - } - - /// Computes second-order gradients of the maxpooling function. - /// - /// - Parameters: - /// - orig_input: The original input tensor. - /// - orig_output: The original output tensor. - /// - grad: 4-D. Gradients of gradients w.r.t. the input of `max_pool`. - /// - ksize: The size of the window for each dimension of the input tensor. - /// - strides: The stride of the sliding window for each dimension of the - /// input tensor. - /// - /// - Attrs: - /// - padding: The type of padding algorithm to use. - /// - data_format: Specify the data format of the input and output data. With the - /// default format "NHWC", the data is stored in the order of: - /// [batch, in_height, in_width, in_channels]. - /// Alternatively, the format could be "NCHW", the data storage order of: - /// [batch, in_channels, in_height, in_width]. - /// - /// - Output output: Gradients of gradients w.r.t. the input to `max_pool`. - @inlinable @inline(__always) - public static func maxPoolGradGradV2( - origInput: Tensor, - origOutput: Tensor, - grad: Tensor, - ksize: Tensor, - strides: Tensor, - padding: Padding, - dataFormat: DataFormat = .nhwc - ) -> Tensor { - switch commonBackend( - commonBackend( - commonBackend( - commonBackend(origInput.handle.backend, origOutput.handle.backend), grad.handle.backend), - ksize.handle.backend), strides.handle.backend) - { - case .XLA: - let output_device = strides.device - let origInput = Tensor(copying: origInput, to: .defaultTFEager) - let origOutput = Tensor(copying: origOutput, to: .defaultTFEager) - let grad = Tensor(copying: grad, to: .defaultTFEager) - let ksize = Tensor(copying: ksize, to: .defaultTFEager) - let strides = Tensor(copying: strides, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.maxPoolGradGradV2( - origInput: origInput, origOutput: origOutput, grad: grad, ksize: ksize, - strides: strides, padding: padding, dataFormat: dataFormat), to: output_device) - case .TF_EAGER: - return _RawTFEager.maxPoolGradGradV2( - origInput: origInput, origOutput: origOutput, grad: grad, ksize: ksize, strides: strides, - padding: padding, dataFormat: dataFormat) - } - - } - - /// Computes second-order gradients of the maxpooling function. 
- /// - /// - Parameters: - /// - input: The original input. - /// - grad: 4-D with shape `[batch, height, width, channels]`. Gradients w.r.t. the - /// input of `max_pool`. - /// - argmax: The indices of the maximum values chosen for each output of `max_pool`. - /// - /// - Attrs: - /// - ksize: The size of the window for each dimension of the input tensor. - /// - strides: The stride of the sliding window for each dimension of the - /// input tensor. - /// - padding: The type of padding algorithm to use. - /// - include_batch_in_index: Whether to include batch dimension in flattened index of `argmax`. - /// - /// - Output output: Gradients of gradients w.r.t. the input of `max_pool`. - @inlinable @inline(__always) - public static func maxPoolGradGradWithArgmax< - Targmax: TensorFlowIndex, - T: TensorFlowNumeric - >( - _ input: Tensor, - grad: Tensor, - argmax: Tensor, - ksize: [Int32], - strides: [Int32], - padding: Padding, - includeBatchInIndex: Bool = false - ) -> Tensor { - switch commonBackend( - commonBackend(input.handle.backend, grad.handle.backend), argmax.handle.backend) - { - case .XLA: - let output_device = argmax.device - let input = Tensor(copying: input, to: .defaultTFEager) - let grad = Tensor(copying: grad, to: .defaultTFEager) - let argmax = Tensor(copying: argmax, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.maxPoolGradGradWithArgmax( - input, grad: grad, argmax: argmax, ksize: ksize, strides: strides, padding: padding, - includeBatchInIndex: includeBatchInIndex), to: output_device) - case .TF_EAGER: - return _RawTFEager.maxPoolGradGradWithArgmax( - input, grad: grad, argmax: argmax, ksize: ksize, strides: strides, padding: padding, - includeBatchInIndex: includeBatchInIndex) - } - - } - - /// Computes gradients of the maxpooling function. - /// - /// - Parameters: - /// - orig_input: The original input tensor. - /// - orig_output: The original output tensor. - /// - grad: 4-D. Gradients w.r.t. the output of `max_pool`. - /// - ksize: The size of the window for each dimension of the input tensor. - /// - strides: The stride of the sliding window for each dimension of the - /// input tensor. - /// - /// - Attrs: - /// - padding: The type of padding algorithm to use. - /// - data_format: Specify the data format of the input and output data. With the - /// default format "NHWC", the data is stored in the order of: - /// [batch, in_height, in_width, in_channels]. - /// Alternatively, the format could be "NCHW", the data storage order of: - /// [batch, in_channels, in_height, in_width]. - /// - /// - Output output: Gradients w.r.t. the input to `max_pool`. - @inlinable @inline(__always) - public static func maxPoolGradV2( - origInput: Tensor, - origOutput: Tensor, - grad: Tensor, - ksize: Tensor, - strides: Tensor, - padding: Padding, - dataFormat: DataFormat = .nhwc - ) -> Tensor { - switch commonBackend( + } + + /// Returns the number of tensors in the input tensor list. + /// + /// input_handle: the input list + /// length: the number of tensors in the list + @inlinable @inline(__always) + public static func tensorListLength( + inputHandle: VariantHandle + ) -> Tensor { + _RawTFEager.tensorListLength(inputHandle: inputHandle) + } + + /// Returns the last element of the input list as well as a list with all but that element. + /// + /// Fails if the list is empty. 
+ /// + /// input_handle: the input list + /// tensor: the withdrawn last element of the list + /// element_dtype: the type of elements in the list + /// element_shape: the shape of the output tensor + @inlinable @inline(__always) + public static func tensorListPopBack( + inputHandle: VariantHandle, + elementShape: Tensor + ) -> (outputHandle: VariantHandle, tensor: Tensor) { + _RawTFEager.tensorListPopBack(inputHandle: inputHandle, elementShape: elementShape) + } + + /// Returns a list which has the passed-in `Tensor` as last element and the other elements of the given list in `input_handle`. + /// + /// tensor: The tensor to put on the list. + /// input_handle: The old list. + /// output_handle: A list with the elements of the old list followed by tensor. + /// element_dtype: the type of elements in the list. + /// element_shape: a shape compatible with that of elements in the list. + @inlinable @inline(__always) + public static func tensorListPushBack( + inputHandle: VariantHandle, + _ tensor: Tensor + ) -> VariantHandle { + _RawTFEager.tensorListPushBack(inputHandle: inputHandle, tensor) + } + + @inlinable @inline(__always) + public static func tensorListPushBackBatch( + inputHandles: VariantHandle, + _ tensor: Tensor + ) -> VariantHandle { + _RawTFEager.tensorListPushBackBatch(inputHandles: inputHandles, tensor) + } + + /// List of the given size with empty elements. + /// + /// element_shape: the shape of the future elements of the list + /// num_elements: the number of elements to reserve + /// handle: the output list + /// element_dtype: the desired type of elements in the list. + @inlinable @inline(__always) + public static func tensorListReserve( + elementShape: Tensor, + numElements: Tensor, + elementDtype: TensorDataType + ) -> VariantHandle { + _RawTFEager.tensorListReserve( + elementShape: elementShape, numElements: numElements, elementDtype: elementDtype) + } + + /// Resizes the list. + /// + /// + /// input_handle: the input list + /// size: size of the output list + /// + @inlinable @inline(__always) + public static func tensorListResize( + inputHandle: VariantHandle, + size: Tensor + ) -> VariantHandle { + _RawTFEager.tensorListResize(inputHandle: inputHandle, size: size) + } + + /// Creates a TensorList by indexing into a Tensor. + /// + /// Each member of the TensorList corresponds to one row of the input tensor, + /// specified by the given index (see `tf.gather`). + /// + /// tensor: The input tensor. + /// indices: The indices used to index into the list. + /// element_shape: The shape of the elements in the list (can be less specified than + /// the shape of the tensor). + /// output_handle: The TensorList. + @inlinable @inline(__always) + public static func tensorListScatter< + ElementDtype: TensorFlowScalar, + ShapeType: TensorFlowIndex + >( + _ tensor: Tensor, + indices: Tensor, + elementShape: Tensor + ) -> VariantHandle { + _RawTFEager.tensorListScatter(tensor, indices: indices, elementShape: elementShape) + } + + /// Scatters tensor at indices in an input list. + /// + /// Each member of the TensorList corresponds to one row of the input tensor, + /// specified by the given index (see `tf.gather`). + /// + /// input_handle: The list to scatter into. + /// tensor: The input tensor. + /// indices: The indices used to index into the list. + /// output_handle: The TensorList. 
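Taken together, the tensor-list wrappers above give a small stack-like API over `VariantHandle`. A round-trip sketch; it assumes that reserving zero elements and then pushing is valid, which matches TensorFlow's list semantics as documented, and uses the swift-apis `tensorFlowDataType` accessor:

```swift
import TensorFlow

// An empty list whose elements are length-2 Float vectors.
let elementShape = Tensor<Int32>([2])
var list = _Raw.tensorListReserve(
  elementShape: elementShape, numElements: Tensor<Int32>(0),
  elementDtype: Float.tensorFlowDataType)

// Push two elements, then pop the last one back off.
list = _Raw.tensorListPushBack(inputHandle: list, Tensor<Float>([1, 2]))
list = _Raw.tensorListPushBack(inputHandle: list, Tensor<Float>([3, 4]))
print(_Raw.tensorListLength(inputHandle: list))  // 2

let (_, last): (VariantHandle, Tensor<Float>) =
  _Raw.tensorListPopBack(inputHandle: list, elementShape: elementShape)
print(last)  // [3.0, 4.0]
```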
+ @inlinable @inline(__always) + public static func tensorListScatterIntoExistingList( + inputHandle: VariantHandle, + _ tensor: Tensor, + indices: Tensor + ) -> VariantHandle { + _RawTFEager.tensorListScatterIntoExistingList( + inputHandle: inputHandle, tensor, indices: indices) + } + + /// Creates a TensorList by indexing into a Tensor. + /// + /// Each member of the TensorList corresponds to one row of the input tensor, + /// specified by the given index (see `tf.gather`). + /// + /// tensor: The input tensor. + /// indices: The indices used to index into the list. + /// element_shape: The shape of the elements in the list (can be less specified than + /// the shape of the tensor). + /// num_elements: The size of the output list. Must be large enough to accommodate + /// the largest index in indices. If -1, the list is just large enough to include + /// the largest index in indices. + /// output_handle: The TensorList. + @inlinable @inline(__always) + public static func tensorListScatterV2< + ElementDtype: TensorFlowScalar, + ShapeType: TensorFlowIndex + >( + _ tensor: Tensor, + indices: Tensor, + elementShape: Tensor, + numElements: Tensor + ) -> VariantHandle { + _RawTFEager.tensorListScatterV2( + tensor, indices: indices, elementShape: elementShape, numElements: numElements) + } + + @inlinable @inline(__always) + public static func tensorListSetItem( + inputHandle: VariantHandle, + index: Tensor, + item: Tensor + ) -> VariantHandle { + _RawTFEager.tensorListSetItem(inputHandle: inputHandle, index: index, item: item) + } + + /// Splits a tensor into a list. + /// + /// list[i] corresponds to lengths[i] tensors from the input tensor. + /// The tensor must have rank at least 1 and contain exactly sum(lengths) elements. + /// + /// tensor: The input tensor. + /// element_shape: A shape compatible with that of elements in the tensor. + /// lengths: Vector of sizes of the 0th dimension of tensors in the list. + /// output_handle: The list. + @inlinable @inline(__always) + public static func tensorListSplit< + ElementDtype: TensorFlowScalar, + ShapeType: TensorFlowIndex + >( + _ tensor: Tensor, + elementShape: Tensor, + lengths: Tensor + ) -> VariantHandle { + _RawTFEager.tensorListSplit(tensor, elementShape: elementShape, lengths: lengths) + } + + /// Stacks all tensors in the list. + /// + /// Requires that all tensors have the same shape. + /// + /// input_handle: the input list + /// tensor: the gathered result + /// num_elements: optional. If not -1, the number of elements in the list. + /// + @inlinable @inline(__always) + public static func tensorListStack( + inputHandle: VariantHandle, + elementShape: Tensor, + numElements: Int64 = -1 + ) -> Tensor { + switch elementShape.handle.backend { + case .XLA: + let output_device = elementShape.device + let elementShape = Tensor(copying: elementShape, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.tensorListStack( + inputHandle: inputHandle, elementShape: elementShape, numElements: numElements), + to: output_device) + case .TF_EAGER: + return _RawTFEager.tensorListStack( + inputHandle: inputHandle, elementShape: elementShape, numElements: numElements) + } + + } + + /// Adds sparse `updates` to an existing tensor according to `indices`. + /// + /// This operation creates a new tensor by adding sparse `updates` to the passed + /// in `tensor`. + /// This operation is very similar to `tf.scatter_nd_add`, except that the updates + /// are added onto an existing tensor (as opposed to a variable). 
If the memory
+  /// for the existing tensor cannot be re-used, a copy is made and updated.
+  ///
+  /// `indices` is an integer tensor containing indices into a new tensor of shape
+  /// `shape`. The last dimension of `indices` can be at most the rank of `shape`:
+  ///
+  /// indices.shape[-1] <= shape.rank
+  ///
+  /// The last dimension of `indices` corresponds to indices into elements
+  /// (if `indices.shape[-1] = shape.rank`) or slices
+  /// (if `indices.shape[-1] < shape.rank`) along dimension `indices.shape[-1]` of
+  /// `shape`. `updates` is a tensor with shape
+  ///
+  /// indices.shape[:-1] + shape[indices.shape[-1]:]
+  ///
+  /// The simplest form of tensor_scatter_add is to add individual elements to a
+  /// tensor by index. For example, say we want to add 4 elements in a rank-1
+  /// tensor with 8 elements.
+  ///
+  /// In Python, this scatter add operation would look like this:
+  ///
+  /// ```python
+  /// indices = tf.constant([[4], [3], [1], [7]])
+  /// updates = tf.constant([9, 10, 11, 12])
+  /// tensor = tf.ones([8], dtype=tf.int32)
+  /// updated = tf.tensor_scatter_nd_add(tensor, indices, updates)
+  /// print(updated)
+  /// ```
+  ///
+  /// The resulting tensor would look like this:
+  ///
+  /// [1, 12, 1, 11, 10, 1, 1, 13]
+  ///
+  /// We can also insert entire slices of a higher-rank tensor all at once. For
+  /// example, we might insert two slices in the first dimension of a
+  /// rank-3 tensor with two matrices of new values.
+  ///
+  /// In Python, this scatter add operation would look like this:
+  ///
+  /// ```python
+  /// indices = tf.constant([[0], [2]])
+  /// updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6],
+  ///                         [7, 7, 7, 7], [8, 8, 8, 8]],
+  ///                        [[5, 5, 5, 5], [6, 6, 6, 6],
+  ///                         [7, 7, 7, 7], [8, 8, 8, 8]]])
+  /// tensor = tf.ones([4, 4, 4], dtype=tf.int32)
+  /// updated = tf.tensor_scatter_nd_add(tensor, indices, updates)
+  /// print(updated)
+  /// ```
+  ///
+  /// The resulting tensor would look like this:
+  ///
+  /// [[[6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8], [9, 9, 9, 9]],
+  ///  [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]],
+  ///  [[6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8], [9, 9, 9, 9]],
+  ///  [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]]
+  ///
+  /// Note that on CPU, if an out of bound index is found, an error is returned.
+  /// On GPU, if an out of bound index is found, the index is ignored.
+  ///
+  /// - Parameters:
+  ///   - tensor: Tensor to copy/update.
+  ///   - indices: Index tensor.
+  ///   - updates: Updates to scatter into output.
+  ///
+  /// - Output output: A new tensor copied from tensor and updates added according to the indices.
+  @inlinable @inline(__always)
+  public static func tensorScatterAdd<
+    T: TensorFlowScalar,
+    Tindices: TensorFlowIndex
+  >(
+    _ tensor: Tensor<T>,
+    indices: Tensor<Tindices>,
+    updates: Tensor<T>
+  ) -> Tensor<T> {
+    switch commonBackend(
+      commonBackend(tensor.handle.backend, indices.handle.backend), updates.handle.backend)
+    {
+    case .XLA:
+      let output_device = updates.device
+      let tensor = Tensor(copying: tensor, to: .defaultTFEager)
+      let indices = Tensor(copying: indices, to: .defaultTFEager)
+      let updates = Tensor(copying: updates, to: .defaultTFEager)
+      return Tensor(
+        copying: _RawTFEager.tensorScatterAdd(tensor, indices: indices, updates: updates),
+        to: output_device)
+    case .TF_EAGER:
+      return _RawTFEager.tensorScatterAdd(tensor, indices: indices, updates: updates)
+    }
+
+  }
+
+  /// Subtracts sparse `updates` from an existing tensor according to `indices`.
+  ///
+  /// This operation creates a new tensor by subtracting sparse `updates` from the
+  /// passed in `tensor`.
+  /// This operation is very similar to `tf.scatter_nd_sub`, except that the updates
+  /// are subtracted from an existing tensor (as opposed to a variable). If the memory
+  /// for the existing tensor cannot be re-used, a copy is made and updated.
+  ///
+  /// `indices` is an integer tensor containing indices into a new tensor of shape
+  /// `shape`. The last dimension of `indices` can be at most the rank of `shape`:
+  ///
+  /// indices.shape[-1] <= shape.rank
+  ///
+  /// The last dimension of `indices` corresponds to indices into elements
+  /// (if `indices.shape[-1] = shape.rank`) or slices
+  /// (if `indices.shape[-1] < shape.rank`) along dimension `indices.shape[-1]` of
+  /// `shape`. `updates` is a tensor with shape
+  ///
+  /// indices.shape[:-1] + shape[indices.shape[-1]:]
+  ///
+  /// The simplest form of tensor_scatter_sub is to subtract individual elements
+  /// from a tensor by index. For example, say we want to subtract 4 scattered elements
+  /// from a rank-1 tensor with 8 elements.
+  ///
+  /// In Python, this scatter subtract operation would look like this:
+  ///
+  /// ```python
+  /// indices = tf.constant([[4], [3], [1], [7]])
+  /// updates = tf.constant([9, 10, 11, 12])
+  /// tensor = tf.ones([8], dtype=tf.int32)
+  /// updated = tf.tensor_scatter_nd_sub(tensor, indices, updates)
+  /// print(updated)
+  /// ```
+  ///
+  /// The resulting tensor would look like this:
+  ///
+  /// [1, -10, 1, -9, -8, 1, 1, -11]
+  ///
+  /// We can also insert entire slices of a higher-rank tensor all at once. For
+  /// example, we might insert two slices in the first dimension of a
+  /// rank-3 tensor with two matrices of new values.
+  ///
+  /// In Python, this scatter subtract operation would look like this:
+  ///
+  /// ```python
+  /// indices = tf.constant([[0], [2]])
+  /// updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6],
+  ///                         [7, 7, 7, 7], [8, 8, 8, 8]],
+  ///                        [[5, 5, 5, 5], [6, 6, 6, 6],
+  ///                         [7, 7, 7, 7], [8, 8, 8, 8]]])
+  /// tensor = tf.ones([4, 4, 4], dtype=tf.int32)
+  /// updated = tf.tensor_scatter_nd_sub(tensor, indices, updates)
+  /// print(updated)
+  /// ```
+  ///
+  /// The resulting tensor would look like this:
+  ///
+  /// [[[-4, -4, -4, -4], [-5, -5, -5, -5], [-6, -6, -6, -6], [-7, -7, -7, -7]],
+  ///  [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]],
+  ///  [[-4, -4, -4, -4], [-5, -5, -5, -5], [-6, -6, -6, -6], [-7, -7, -7, -7]],
+  ///  [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]]
+  ///
+  /// Note that on CPU, if an out of bound index is found, an error is returned.
+  /// On GPU, if an out of bound index is found, the index is ignored.
+  ///
+  /// - Parameters:
+  ///   - tensor: Tensor to copy/update.
+  ///   - indices: Index tensor.
+  ///   - updates: Updates to scatter into output.
+  ///
+  /// - Output output: A new tensor copied from tensor and updates subtracted according to the indices.
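For reference, the rank-1 Python examples in the two doc comments above translate directly to the Swift wrappers; the shapes, values, and expected outputs below are the ones documented there (assumes `import TensorFlow` and the swift-apis `init(shape:scalars:)` and `init(ones:)` initializers):

```swift
import TensorFlow

let indices = Tensor<Int32>(shape: [4, 1], scalars: [4, 3, 1, 7])
let updates = Tensor<Int32>([9, 10, 11, 12])
let base = Tensor<Int32>(ones: [8])

print(_Raw.tensorScatterAdd(base, indices: indices, updates: updates))
// [1, 12, 1, 11, 10, 1, 1, 13]
print(_Raw.tensorScatterSub(base, indices: indices, updates: updates))
// [1, -10, 1, -9, -8, 1, 1, -11]
```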
+  @inlinable @inline(__always)
+  public static func tensorScatterSub<
+    T: TensorFlowScalar,
+    Tindices: TensorFlowIndex
+  >(
+    _ tensor: Tensor<T>,
+    indices: Tensor<Tindices>,
+    updates: Tensor<T>
+  ) -> Tensor<T> {
+    switch commonBackend(
+      commonBackend(tensor.handle.backend, indices.handle.backend), updates.handle.backend)
+    {
+    case .XLA:
+      let output_device = updates.device
+      let tensor = Tensor(copying: tensor, to: .defaultTFEager)
+      let indices = Tensor(copying: indices, to: .defaultTFEager)
+      let updates = Tensor(copying: updates, to: .defaultTFEager)
+      return Tensor(
+        copying: _RawTFEager.tensorScatterSub(tensor, indices: indices, updates: updates),
+        to: output_device)
+    case .TF_EAGER:
+      return _RawTFEager.tensorScatterSub(tensor, indices: indices, updates: updates)
+    }
+
+  }
+
+  /// Scatter `updates` into an existing tensor according to `indices`.
+  ///
+  /// This operation creates a new tensor by applying sparse `updates` to the passed
+  /// in `tensor`.
+  /// This operation is very similar to `tf.scatter_nd`, except that the updates are
+  /// scattered onto an existing tensor (as opposed to a zero-tensor). If the memory
+  /// for the existing tensor cannot be re-used, a copy is made and updated.
+  ///
+  /// If `indices` contains duplicates, then their updates are accumulated (summed).
+  ///
+  /// **WARNING**: The order in which updates are applied is nondeterministic, so the
+  /// output will be nondeterministic if `indices` contains duplicates -- because
+  /// of some numerical approximation issues, numbers summed in different order
+  /// may yield different results.
+  ///
+  /// `indices` is an integer tensor containing indices into a new tensor of shape
+  /// `shape`. The last dimension of `indices` can be at most the rank of `shape`:
+  ///
+  /// indices.shape[-1] <= shape.rank
+  ///
+  /// The last dimension of `indices` corresponds to indices into elements
+  /// (if `indices.shape[-1] = shape.rank`) or slices
+  /// (if `indices.shape[-1] < shape.rank`) along dimension `indices.shape[-1]` of
+  /// `shape`. `updates` is a tensor with shape
+  ///
+  /// indices.shape[:-1] + shape[indices.shape[-1]:]
+  ///
+  /// The simplest form of scatter is to insert individual elements in a tensor by
+  /// index. For example, say we want to insert 4 scattered elements in a rank-1
+  /// tensor with 8 elements.
+  ///
+  /// In Python, this scatter operation would look like this:
+  ///
+  /// >>> indices = tf.constant([[4], [3], [1], [7]])
+  /// >>> updates = tf.constant([9, 10, 11, 12])
+  /// >>> tensor = tf.ones([8], dtype=tf.int32)
+  /// >>> print(tf.tensor_scatter_nd_update(tensor, indices, updates))
+  /// tf.Tensor([ 1 11  1 10  9  1  1 12], shape=(8,), dtype=int32)
+  ///
+  /// We can also insert entire slices of a higher-rank tensor all at once. For
+  /// example, we might insert two slices in the first dimension of a
+  /// rank-3 tensor with two matrices of new values.
+  ///
+  /// In Python, this scatter operation would look like this:
+  ///
+  /// >>> indices = tf.constant([[0], [2]])
+  /// >>> updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6],
+  /// ...                         [7, 7, 7, 7], [8, 8, 8, 8]],
+  /// ...                        [[5, 5, 5, 5], [6, 6, 6, 6],
+  /// ...                         [7, 7, 7, 7], [8, 8, 8, 8]]])
+  /// >>> tensor = tf.ones([4, 4, 4], dtype=tf.int32)
+  /// >>> print(tf.tensor_scatter_nd_update(tensor, indices, updates).numpy())
+  /// [[[5 5 5 5]
+  ///   [6 6 6 6]
+  ///   [7 7 7 7]
+  ///   [8 8 8 8]]
+  ///  [[1 1 1 1]
+  ///   [1 1 1 1]
+  ///   [1 1 1 1]
+  ///   [1 1 1 1]]
+  ///  [[5 5 5 5]
+  ///   [6 6 6 6]
+  ///   [7 7 7 7]
+  ///   [8 8 8 8]]
+  ///  [[1 1 1 1]
+  ///   [1 1 1 1]
+  ///   [1 1 1 1]
+  ///   [1 1 1 1]]]
+  ///
+  /// Note that on CPU, if an out of bound index is found, an error is returned.
+  /// On GPU, if an out of bound index is found, the index is ignored.
+  ///
+  /// - Parameters:
+  ///   - tensor: Tensor to copy/update.
+  ///   - indices: Index tensor.
+  ///   - updates: Updates to scatter into output.
+  ///
+  /// - Output output: A new tensor with the given shape and updates applied according
+  ///   to the indices.
+  @inlinable @inline(__always)
+  public static func tensorScatterUpdate<
+    T: TensorFlowScalar,
+    Tindices: TensorFlowIndex
+  >(
+    _ tensor: Tensor<T>,
+    indices: Tensor<Tindices>,
+    updates: Tensor<T>
+  ) -> Tensor<T> {
+    switch commonBackend(
+      commonBackend(tensor.handle.backend, indices.handle.backend), updates.handle.backend)
+    {
+    case .XLA:
+      let output_device = updates.device
+      let tensor = Tensor(copying: tensor, to: .defaultTFEager)
+      let indices = Tensor(copying: indices, to: .defaultTFEager)
+      let updates = Tensor(copying: updates, to: .defaultTFEager)
+      return Tensor(
+        copying: _RawTFEager.tensorScatterUpdate(tensor, indices: indices, updates: updates),
+        to: output_device)
+    case .TF_EAGER:
+      return _RawTFEager.tensorScatterUpdate(tensor, indices: indices, updates: updates)
+    }
+
+  }
+
+  /// Creates a dataset that emits each dim-0 slice of `components` once.
+  @inlinable @inline(__always)
+  public static func tensorSliceDataset<ToutputTypes: TensorArrayProtocol>(
+    components: ToutputTypes,
+    outputShapes: [TensorShape?]
+  ) -> VariantHandle {
+    _RawTFEager.tensorSliceDataset(components: components, outputShapes: outputShapes)
+  }
+
+  /// Assign `value` to the sliced l-value reference of `input`.
+  ///
+  /// The values of `value` are assigned to the positions in the tensor `input` that
+  /// are selected by the slice parameters. The slice parameters `begin`, `end`,
+  /// `strides`, etc. work exactly as in `StridedSlice`.
+  ///
+  /// NOTE this op currently does not support broadcasting and so `value`'s shape
+  /// must be exactly the shape produced by the slice of `input`.
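The two update ops above compose the same way from Swift. A sketch of the doc's slice example plus a strided-slice assignment; values are hypothetical, and the swift-apis `init(repeating:shape:)` and `init(ones:)` initializers are assumed:

```swift
import TensorFlow

// tensorScatterUpdate: overwrite rows 0 and 2 of a [4, 4, 4] ones tensor with fives.
let indices = Tensor<Int32>(shape: [2, 1], scalars: [0, 2])
let updates = Tensor<Int32>(repeating: 5, shape: [2, 4, 4])
let base = Tensor<Int32>(ones: [4, 4, 4])
let scattered = _Raw.tensorScatterUpdate(base, indices: indices, updates: updates)
// scattered[0] and scattered[2] are all 5s; rows 1 and 3 stay all 1s.

// tensorStridedSliceUpdate: assign into elements 2..<5 of a length-8 vector.
let vector = Tensor<Float>((0..<8).map(Float.init))
let updated = _Raw.tensorStridedSliceUpdate(
  vector,
  begin: Tensor<Int32>([2]), end: Tensor<Int32>([5]), strides: Tensor<Int32>([1]),
  value: Tensor<Float>([-1, -2, -3]))
print(updated)  // [0.0, 1.0, -1.0, -2.0, -3.0, 5.0, 6.0, 7.0]
```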
+ @inlinable @inline(__always) + public static func tensorStridedSliceUpdate< + T: TensorFlowScalar, + Index: TensorFlowIndex + >( + _ input: Tensor, + begin: Tensor, + end: Tensor, + strides: Tensor, + value: Tensor, + beginMask: Int64 = 0, + endMask: Int64 = 0, + ellipsisMask: Int64 = 0, + newAxisMask: Int64 = 0, + shrinkAxisMask: Int64 = 0 + ) -> Tensor { + switch commonBackend( + commonBackend( commonBackend( - commonBackend( - commonBackend(origInput.handle.backend, origOutput.handle.backend), grad.handle.backend), - ksize.handle.backend), strides.handle.backend) - { - case .XLA: - return _RawXLA.maxPoolGradV2( - origInput: origInput, origOutput: origOutput, grad: grad, ksize: ksize, strides: strides, - padding: padding, dataFormat: dataFormat) - case .TF_EAGER: - return _RawTFEager.maxPoolGradV2( - origInput: origInput, origOutput: origOutput, grad: grad, ksize: ksize, strides: strides, - padding: padding, dataFormat: dataFormat) - } - - } - - /// Computes gradients of the maxpooling function. - /// - /// - Parameters: - /// - input: The original input. - /// - grad: 4-D with shape `[batch, height, width, channels]`. Gradients w.r.t. the - /// output of `max_pool`. - /// - argmax: The indices of the maximum values chosen for each output of `max_pool`. - /// - /// - Attrs: - /// - ksize: The size of the window for each dimension of the input tensor. - /// - strides: The stride of the sliding window for each dimension of the - /// input tensor. - /// - padding: The type of padding algorithm to use. - /// - include_batch_in_index: Whether to include batch dimension in flattened index of `argmax`. - /// - /// - Output output: Gradients w.r.t. the input of `max_pool`. - @inlinable @inline(__always) - public static func maxPoolGradWithArgmax< - Targmax: TensorFlowIndex, - T: TensorFlowNumeric - >( - _ input: Tensor, - grad: Tensor, - argmax: Tensor, - ksize: [Int32], - strides: [Int32], - padding: Padding, - includeBatchInIndex: Bool = false - ) -> Tensor { - switch commonBackend( - commonBackend(input.handle.backend, grad.handle.backend), argmax.handle.backend) - { - case .XLA: - let output_device = argmax.device - let input = Tensor(copying: input, to: .defaultTFEager) - let grad = Tensor(copying: grad, to: .defaultTFEager) - let argmax = Tensor(copying: argmax, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.maxPoolGradWithArgmax( - input, grad: grad, argmax: argmax, ksize: ksize, strides: strides, padding: padding, - includeBatchInIndex: includeBatchInIndex), to: output_device) - case .TF_EAGER: - return _RawTFEager.maxPoolGradWithArgmax( - input, grad: grad, argmax: argmax, ksize: ksize, strides: strides, padding: padding, - includeBatchInIndex: includeBatchInIndex) - } - - } - - /// Performs max pooling on the input. - /// - /// - Parameters: - /// - input: 4-D input to pool over. - /// - ksize: The size of the window for each dimension of the input tensor. - /// - strides: The stride of the sliding window for each dimension of the - /// input tensor. - /// - /// - Attrs: - /// - padding: The type of padding algorithm to use. - /// - data_format: Specify the data format of the input and output data. With the - /// default format "NHWC", the data is stored in the order of: - /// [batch, in_height, in_width, in_channels]. - /// Alternatively, the format could be "NCHW", the data storage order of: - /// [batch, in_channels, in_height, in_width]. - /// - /// - Output output: The max pooled output tensor. 
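`maxPoolV2`, documented above, differs from `maxPool` only in taking the window and strides as tensors, which lets them be computed at runtime. A minimal sketch under the same assumptions as the earlier pooling example:

```swift
import TensorFlow

let input = Tensor<Float>(shape: [1, 4, 4, 1], scalars: (0..<16).map(Float.init))
let window = Tensor<Int32>([1, 2, 2, 1])  // could be produced by another op at runtime
let pooled = _Raw.maxPoolV2(input, ksize: window, strides: window, padding: .valid)
print(pooled.scalars)  // [5.0, 7.0, 13.0, 15.0]
```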
- @inlinable @inline(__always) - public static func maxPoolV2( - _ input: Tensor, - ksize: Tensor, - strides: Tensor, - padding: Padding, - dataFormat: DataFormat2 = .nhwc - ) -> Tensor { - switch commonBackend( - commonBackend(input.handle.backend, ksize.handle.backend), strides.handle.backend) - { - case .XLA: - return _RawXLA.maxPoolV2( - input, ksize: ksize, strides: strides, padding: padding, dataFormat: dataFormat) - case .TF_EAGER: - return _RawTFEager.maxPoolV2( - input, ksize: ksize, strides: strides, padding: padding, dataFormat: dataFormat) - } - - } - - /// Performs max pooling on the input and outputs both max values and indices. - /// - /// The indices in `argmax` are flattened, so that a maximum value at position - /// `[b, y, x, c]` becomes flattened index: - /// `(y * width + x) * channels + c` if `include_batch_in_index` is False; - /// `((b * height + y) * width + x) * channels + c` if `include_batch_in_index` is True. - /// - /// The indices returned are always in `[0, height) x [0, width)` before flattening, - /// even if padding is involved and the mathematically correct answer is outside - /// (either negative or too large). This is a bug, but fixing it is difficult to do - /// in a safe backwards compatible way, especially due to flattening. - /// - /// - Parameter input: 4-D with shape `[batch, height, width, channels]`. Input to pool over. - /// - /// - Attrs: - /// - ksize: The size of the window for each dimension of the input tensor. - /// - strides: The stride of the sliding window for each dimension of the - /// input tensor. - /// - padding: The type of padding algorithm to use. - /// - include_batch_in_index: Whether to include batch dimension in flattened index of `argmax`. - /// - /// - Outputs: - /// - output: The max pooled output tensor. - /// - argmax: 4-D. The flattened indices of the max values chosen for each output. - @inlinable @inline(__always) - public static func maxPoolWithArgmax< - Targmax: TensorFlowIndex, - T: TensorFlowNumeric - >( - _ input: Tensor, - ksize: [Int32], - strides: [Int32], - padding: Padding, - includeBatchInIndex: Bool = false - ) -> (output: Tensor, argmax: Tensor) { - _RawTFEager.maxPoolWithArgmax( - input, ksize: ksize, strides: strides, padding: padding, - includeBatchInIndex: includeBatchInIndex) + commonBackend(input.handle.backend, begin.handle.backend), end.handle.backend), + strides.handle.backend), value.handle.backend) + { + case .XLA: + return _RawXLA.tensorStridedSliceUpdate( + input, begin: begin, end: end, strides: strides, value: value, beginMask: beginMask, + endMask: endMask, ellipsisMask: ellipsisMask, newAxisMask: newAxisMask, + shrinkAxisMask: shrinkAxisMask) + case .TF_EAGER: + return _RawTFEager.tensorStridedSliceUpdate( + input, begin: begin, end: end, strides: strides, value: value, beginMask: beginMask, + endMask: endMask, ellipsisMask: ellipsisMask, newAxisMask: newAxisMask, + shrinkAxisMask: shrinkAxisMask) } - /// Returns the max of x and y (i.e. x > y ? x : y) element-wise. - /// - /// *NOTE*: `Maximum` supports broadcasting. More about broadcasting - /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) - @inlinable @inline(__always) - public static func maximum( - _ x: Tensor, - _ y: Tensor - ) -> Tensor { - switch commonBackend(x.handle.backend, y.handle.backend) { - case .XLA: - return _RawXLA.maximum(x, y) - case .TF_EAGER: - return _RawTFEager.maximum(x, y) - } - - } - - /// Computes the mean of elements across dimensions of a tensor. 
- /// - /// Reduces `input` along the dimensions given in `axis`. Unless - /// `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in - /// `axis`. If `keep_dims` is true, the reduced dimensions are - /// retained with length 1. - /// - /// - Parameters: - /// - input: The tensor to reduce. - /// - reduction_indices: The dimensions to reduce. Must be in the range - /// `[-rank(input), rank(input))`. - /// - /// - Attr keep_dims: If true, retain reduced dimensions with length 1. - /// - /// - Output output: The reduced tensor. - @inlinable @inline(__always) - public static func mean< - T: TensorFlowNumeric, - Tidx: TensorFlowIndex - >( - _ input: Tensor, - reductionIndices: Tensor, - keepDims: Bool = false - ) -> Tensor { - switch commonBackend(input.handle.backend, reductionIndices.handle.backend) { - case .XLA: - return _RawXLA.mean(input, reductionIndices: reductionIndices, keepDims: keepDims) - case .TF_EAGER: - return _RawTFEager.mean(input, reductionIndices: reductionIndices, keepDims: keepDims) - } - - } - - /// Forwards the value of an available tensor from `inputs` to `output`. - /// - /// `Merge` waits for at least one of the tensors in `inputs` to become available. - /// It is usually combined with `Switch` to implement branching. - /// - /// `Merge` forwards the first tensor to become available to `output`, and sets - /// `value_index` to its index in `inputs`. - /// - /// - Parameter inputs: The input tensors, exactly one of which will become available. - /// - /// - Outputs: - /// - output: Will be set to the available input tensor. - /// - value_index: The index of the chosen input tensor in `inputs`. - @inlinable @inline(__always) - public static func merge( - inputs: [Tensor] - ) -> (output: Tensor, valueIndex: Tensor) { - _RawTFEager.merge(inputs: inputs) - } - - /// Merges summaries. - /// - /// This op creates a - /// [`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto) - /// protocol buffer that contains the union of all the values in the input - /// summaries. - /// - /// When the Op is run, it reports an `InvalidArgument` error if multiple values - /// in the summaries to merge use the same tag. - /// - /// - Parameter inputs: Can be of any shape. Each must contain serialized `Summary` protocol - /// buffers. - /// - /// - Output summary: Scalar. Serialized `Summary` protocol buffer. - @inlinable @inline(__always) - public static func mergeSummary( - inputs: [StringTensor] - ) -> StringTensor { - _RawTFEager.mergeSummary(inputs: inputs) - } - - /// V2 format specific: merges the metadata files of sharded checkpoints. The - /// - /// result is one logical checkpoint, with one physical metadata file and renamed - /// data files. - /// - /// Intended for "grouping" multiple checkpoints in a sharded checkpoint setup. - /// - /// If delete_old_dirs is true, attempts to delete recursively the dirname of each - /// path in the input checkpoint_prefixes. This is useful when those paths are non - /// user-facing temporary locations. - /// - /// - Parameters: - /// - checkpoint_prefixes: prefixes of V2 checkpoints to merge. - /// - destination_prefix: scalar. The desired final prefix. Allowed to be the same - /// as one of the checkpoint_prefixes. - /// - /// - Attr delete_old_dirs: see above. 
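The tensor-valued `reductionIndices` overload of `mean` above behaves like its Python counterpart; a small sketch showing the effect of `keepDims`:

```swift
import TensorFlow

let m = Tensor<Float>(shape: [2, 3], scalars: [1, 2, 3, 4, 5, 6])
print(_Raw.mean(m, reductionIndices: Tensor<Int32>([1])))                  // [2.0, 5.0]
print(_Raw.mean(m, reductionIndices: Tensor<Int32>([1]), keepDims: true))  // [[2.0], [5.0]]
```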
-    @inlinable @inline(__always)
-    public static func mergeV2Checkpoints(
-      checkpointPrefixes: StringTensor,
-      destinationPrefix: StringTensor,
-      deleteOldDirs: Bool = true
-    ) {
-      _RawTFEager.mergeV2Checkpoints(
-        checkpointPrefixes: checkpointPrefixes, destinationPrefix: destinationPrefix,
-        deleteOldDirs: deleteOldDirs)
-    }
-
-    /// Transforms a spectrogram into a form that's useful for speech recognition.
-    ///
-    /// Mel Frequency Cepstral Coefficients are a way of representing audio data that's
-    /// been effective as an input feature for machine learning. They are created by
-    /// taking the spectrum of a spectrogram (a 'cepstrum'), and discarding some of the
-    /// higher frequencies that are less significant to the human ear. They have a long
-    /// history in the speech recognition world, and https://en.wikipedia.org/wiki/Mel-frequency_cepstrum
-    /// is a good resource to learn more.
-    ///
-    /// - Parameters:
-    ///   - spectrogram: Typically produced by the Spectrogram op, with magnitude_squared
-    ///     set to true.
-    ///   - sample_rate: How many samples per second the source audio uses.
-    ///
-    /// - Attrs:
-    ///   - upper_frequency_limit: The highest frequency to use when calculating the
-    ///     cepstrum.
-    ///   - lower_frequency_limit: The lowest frequency to use when calculating the
-    ///     cepstrum.
-    ///   - filterbank_channel_count: Resolution of the Mel bank used internally.
-    ///   - dct_coefficient_count: How many output channels to produce per time slice.
-    @inlinable @inline(__always)
-    public static func mfcc(
-      spectrogram: Tensor<Float>,
-      sampleRate: Tensor<Int32>,
-      upperFrequencyLimit: Double = 4000,
-      lowerFrequencyLimit: Double = 20,
-      filterbankChannelCount: Int64 = 40,
-      dctCoefficientCount: Int64 = 13
-    ) -> Tensor<Float> {
-      switch commonBackend(spectrogram.handle.backend, sampleRate.handle.backend) {
-      case .XLA:
-        let output_device = sampleRate.device
-        let spectrogram = Tensor(copying: spectrogram, to: .defaultTFEager)
-        let sampleRate = Tensor(copying: sampleRate, to: .defaultTFEager)
-        return Tensor(
-          copying: _RawTFEager.mfcc(
-            spectrogram: spectrogram, sampleRate: sampleRate,
-            upperFrequencyLimit: upperFrequencyLimit, lowerFrequencyLimit: lowerFrequencyLimit,
-            filterbankChannelCount: filterbankChannelCount, dctCoefficientCount: dctCoefficientCount
-          ), to: output_device)
-      case .TF_EAGER:
-        return _RawTFEager.mfcc(
-          spectrogram: spectrogram, sampleRate: sampleRate,
-          upperFrequencyLimit: upperFrequencyLimit, lowerFrequencyLimit: lowerFrequencyLimit,
-          filterbankChannelCount: filterbankChannelCount, dctCoefficientCount: dctCoefficientCount)
-      }
-
-    }
-
-    /// Computes the minimum of elements across dimensions of a tensor.
-    ///
-    /// Reduces `input` along the dimensions given in `axis`. Unless
-    /// `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
-    /// `axis`. If `keep_dims` is true, the reduced dimensions are
-    /// retained with length 1.
-    ///
-    /// - Parameters:
-    ///   - input: The tensor to reduce.
-    ///   - reduction_indices: The dimensions to reduce. Must be in the range
-    ///     `[-rank(input), rank(input))`.
-    ///
-    /// - Attr keep_dims: If true, retain reduced dimensions with length 1.
-    ///
-    /// - Output output: The reduced tensor.
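`min`, documented above, is the axis reduction; `minimum`, defined just below it, is the element-wise binary op, and the two are easy to confuse. A sketch contrasting them:

```swift
import TensorFlow

let m = Tensor<Float>(shape: [2, 2], scalars: [3, 1, 4, 2])
print(_Raw.min(m, reductionIndices: Tensor<Int32>([0])))           // column minima: [3.0, 1.0]
print(_Raw.minimum(Tensor<Float>([1, 5]), Tensor<Float>([4, 2])))  // [1.0, 2.0]
```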
-    @inlinable @inline(__always)
-    public static func min<
-      T: TensorFlowNumeric,
-      Tidx: TensorFlowIndex
-    >(
-      _ input: Tensor<T>,
-      reductionIndices: Tensor<Tidx>,
-      keepDims: Bool = false
-    ) -> Tensor<T> {
-      switch commonBackend(input.handle.backend, reductionIndices.handle.backend) {
-      case .XLA:
-        return _RawXLA.min(input, reductionIndices: reductionIndices, keepDims: keepDims)
-      case .TF_EAGER:
-        return _RawTFEager.min(input, reductionIndices: reductionIndices, keepDims: keepDims)
-      }
-
-    }
-
-    /// Returns the min of x and y (i.e. x < y ? x : y) element-wise.
-    ///
-    /// *NOTE*: `Minimum` supports broadcasting. More about broadcasting
-    /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
-    @inlinable @inline(__always)
-    public static func minimum<T: TensorFlowNumeric>(
-      _ x: Tensor<T>,
-      _ y: Tensor<T>
-    ) -> Tensor<T> {
-      switch commonBackend(x.handle.backend, y.handle.backend) {
-      case .XLA:
-        return _RawXLA.minimum(x, y)
-      case .TF_EAGER:
-        return _RawTFEager.minimum(x, y)
-      }
-
-    }
-
-    /// Pads a tensor with mirrored values.
-    ///
-    /// This operation pads an `input` with mirrored values according to the `paddings`
-    /// you specify. `paddings` is an integer tensor with shape `[n, 2]`, where n is
-    /// the rank of `input`. For each dimension D of `input`, `paddings[D, 0]` indicates
-    /// how many values to add before the contents of `input` in that dimension, and
-    /// `paddings[D, 1]` indicates how many values to add after the contents of `input`
-    /// in that dimension. Both `paddings[D, 0]` and `paddings[D, 1]` must be no greater
-    /// than `input.dim_size(D)` (or `input.dim_size(D) - 1`) if `copy_border` is true
-    /// (if false, respectively).
-    ///
-    /// The padded size of each dimension D of the output is:
-    ///
-    /// `paddings(D, 0) + input.dim_size(D) + paddings(D, 1)`
-    ///
-    /// For example:
-    ///
-    /// ```
-    /// # 't' is [[1, 2, 3], [4, 5, 6]].
-    /// # 'paddings' is [[1, 1], [2, 2]].
-    /// # 'mode' is SYMMETRIC.
-    /// # rank of 't' is 2.
-    /// pad(t, paddings) ==> [[2, 1, 1, 2, 3, 3, 2]
-    ///                       [2, 1, 1, 2, 3, 3, 2]
-    ///                       [5, 4, 4, 5, 6, 6, 5]
-    ///                       [5, 4, 4, 5, 6, 6, 5]]
-    /// ```
-    ///
-    /// - Parameters:
-    ///   - input: The input tensor to be padded.
-    ///   - paddings: A two-column matrix specifying the padding sizes. The number of
-    ///     rows must be the same as the rank of `input`.
-    ///
-    /// - Attr mode: Either `REFLECT` or `SYMMETRIC`. In reflect mode the padded regions
-    ///   do not include the borders, while in symmetric mode the padded regions
-    ///   do include the borders. For example, if `input` is `[1, 2, 3]` and `paddings`
-    ///   is `[0, 2]`, then the output is `[1, 2, 3, 2, 1]` in reflect mode, and
-    ///   it is `[1, 2, 3, 3, 2]` in symmetric mode.
-    ///
-    /// - Output output: The padded tensor.
-    @inlinable @inline(__always)
-    public static func mirrorPad<
-      T: TensorFlowScalar,
-      Tpaddings: TensorFlowIndex
-    >(
-      _ input: Tensor<T>,
-      paddings: Tensor<Tpaddings>,
-      mode: Mode1
-    ) -> Tensor<T> {
-      switch commonBackend(input.handle.backend, paddings.handle.backend) {
-      case .XLA:
-        return _RawXLA.mirrorPad(input, paddings: paddings, mode: mode)
-      case .TF_EAGER:
-        return _RawTFEager.mirrorPad(input, paddings: paddings, mode: mode)
-      }
-
-    }
-
-    /// Gradient op for `MirrorPad` op. This op folds a mirror-padded tensor.
-    ///
-    /// This operation folds the padded areas of `input` by `MirrorPad` according to the
-    /// `paddings` you specify. `paddings` must be the same as the `paddings` argument
-    /// given to the corresponding `MirrorPad` op.
-    ///
-    /// The folded size of each dimension D of the output is:
-    ///
-    /// `input.dim_size(D) - paddings(D, 0) - paddings(D, 1)`
-    ///
-    /// For example:
-    ///
-    /// ```
-    /// # 't' is [[1, 2, 3], [4, 5, 6], [7, 8, 9]].
-    /// # 'paddings' is [[0, 1], [0, 1]].
-    /// # 'mode' is SYMMETRIC.
-    /// # rank of 't' is 2.
-    /// pad(t, paddings) ==> [[ 1,  5]
-    ///                       [11, 28]]
-    /// ```
-    ///
-    /// - Parameters:
-    ///   - input: The input tensor to be folded.
-    ///   - paddings: A two-column matrix specifying the padding sizes. The number of
-    ///     rows must be the same as the rank of `input`.
-    ///
-    /// - Attr mode: The mode used in the `MirrorPad` op.
-    ///
-    /// - Output output: The folded tensor.
-    @inlinable @inline(__always)
-    public static func mirrorPadGrad<
-      T: TensorFlowScalar,
-      Tpaddings: TensorFlowIndex
-    >(
-      _ input: Tensor<T>,
-      paddings: Tensor<Tpaddings>,
-      mode: Mode1
-    ) -> Tensor<T> {
-      switch commonBackend(input.handle.backend, paddings.handle.backend) {
-      case .XLA:
-        return _RawXLA.mirrorPadGrad(input, paddings: paddings, mode: mode)
-      case .TF_EAGER:
-        return _RawTFEager.mirrorPadGrad(input, paddings: paddings, mode: mode)
-      }
-
-    }
-
-    @inlinable @inline(__always)
-    public static func mixedStruct(
-      nA: Int64
-    ) -> (a: [Tensor<Int32>], b: Tensor<Float>) {
-      _RawTFEager.mixedStruct(nA: nA)
-    }
-
-    /// Wraps an arbitrary MLIR computation expressed as a module with a main() function.
-    ///
-    /// This operation does not have an associated kernel and is not intended to be
-    /// executed in a regular TensorFlow session. Instead it is intended to be used for
-    /// testing or for special cases where a user intends to pass a custom MLIR computation
-    /// through a TensorFlow graph with the intent of having custom tooling process
-    /// it downstream (when targeting a different environment, like TensorFlow lite for
-    /// example).
-    /// The MLIR module is expected to have a main() function that will be used as an
-    /// entry point. The inputs to the operation will be passed as arguments to the
-    /// main() function and the returned values of the main function mapped to the
-    /// outputs.
-    /// Example usage:
-    ///
-    /// ```
-    /// import tensorflow as tf
-    /// from tensorflow.compiler.mlir.tensorflow.gen_mlir_passthrough_op import mlir_passthrough_op
-    ///
-    /// mlir_module = '''
-    /// func @main(%arg0 : tensor<10xf32>, %arg1 : tensor<10xf32>) -> tensor<10x10xf32> {
-    ///   %add = "magic.op"(%arg0, %arg1) : (tensor<10xf32>, tensor<10xf32>) -> tensor<10x10xf32>
-    ///   return %add : tensor<10x10xf32>
-    /// }
-    /// '''
-    ///
-    /// @tf.function
-    /// def foo(x, y):
-    ///   return mlir_passthrough_op([x, y], mlir_module, Toutputs=[tf.float32])
-    ///
-    /// graph_def = foo.get_concrete_function(tf.TensorSpec([10], tf.float32), tf.TensorSpec([10], tf.float32)).graph.as_graph_def()
-    /// ```
-    @inlinable @inline(__always)
-    public static func mlirPassthroughOp<
-      Tinputs: TensorArrayProtocol,
-      Toutputs: TensorGroup
-    >(
-      inputs: Tinputs,
-      mlirModule: String
-    ) -> Toutputs {
-      _RawTFEager.mlirPassthroughOp(inputs: inputs, mlirModule: mlirModule)
-    }
-
-    /// Returns element-wise remainder of division. This emulates C semantics in that
-    ///
-    /// the result here is consistent with a truncating divide. E.g.
-    /// `tf.truncatediv(x, y) * y + truncate_mod(x, y) = x`.
-    ///
-    /// *NOTE*: `Mod` supports broadcasting. 
More about broadcasting
-    /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
-    @inlinable @inline(__always)
-    public static func mod<T: TensorFlowNumeric>(
-      _ x: Tensor<T>,
-      _ y: Tensor<T>
-    ) -> Tensor<T> {
-      switch commonBackend(x.handle.backend, y.handle.backend) {
-      case .XLA:
-        return _RawXLA.mod(x, y)
-      case .TF_EAGER:
-        return _RawTFEager.mod(x, y)
-      }
-
-    }
-
-    /// Identity transformation that models performance.
-    ///
-    /// - Parameter input_dataset: A variant tensor representing the input dataset.
-    @inlinable @inline(__always)
-    public static func modelDataset(
-      inputDataset: VariantHandle,
-      algorithm: Int64 = 0,
-      cpuBudget: Int64 = 0,
-      outputTypes: [TensorDataType],
-      outputShapes: [TensorShape?]
-    ) -> VariantHandle {
-      _RawTFEager.modelDataset(
-        inputDataset: inputDataset, algorithm: algorithm, cpuBudget: cpuBudget,
-        outputTypes: outputTypes, outputShapes: outputShapes)
-    }
-
-    /// Returns x * y element-wise.
-    ///
-    /// *NOTE*: `Multiply` supports broadcasting. More about broadcasting
-    /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
-    @inlinable @inline(__always)
-    public static func mul<T: TensorFlowNumeric>(
-      _ x: Tensor<T>,
-      _ y: Tensor<T>
-    ) -> Tensor<T> {
-      switch commonBackend(x.handle.backend, y.handle.backend) {
-      case .XLA:
-        return _RawXLA.mul(x, y)
-      case .TF_EAGER:
-        return _RawTFEager.mul(x, y)
-      }
-
-    }
-
-    /// Returns x * y element-wise. Returns zero if y is zero, even if x is infinite or NaN.
-    ///
-    /// *NOTE*: `MulNoNan` supports broadcasting. More about broadcasting
-    /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
-    @inlinable @inline(__always)
-    public static func mulNoNan<T: FloatingPoint & TensorFlowScalar>(
-      _ x: Tensor<T>,
-      _ y: Tensor<T>
-    ) -> Tensor<T> {
-      switch commonBackend(x.handle.backend, y.handle.backend) {
-      case .XLA:
-        let output_device = y.device
-        let x = Tensor(copying: x, to: .defaultTFEager)
-        let y = Tensor(copying: y, to: .defaultTFEager)
-        return Tensor(copying: _RawTFEager.mulNoNan(x, y), to: output_device)
-      case .TF_EAGER:
-        return _RawTFEager.mulNoNan(x, y)
-      }
-
-    }
-
-    /// Creates a MultiDeviceIterator resource.
-    ///
-    /// - Attrs:
-    ///   - devices: A list of devices the iterator works across.
-    ///   - shared_name: If non-empty, this resource will be shared under the given name
-    ///     across multiple sessions.
-    ///   - container: If non-empty, this resource is placed in the given container.
-    ///     Otherwise, a default container is used.
-    ///   - output_types: The type list for the return values.
-    ///   - output_shapes: The list of shapes being produced.
-    ///
-    /// - Output handle: Handle to the resource created.
-    @inlinable @inline(__always)
-    public static func multiDeviceIterator(
-      devices: [String],
-      sharedName: String,
-      container: String,
-      outputTypes: [TensorDataType],
-      outputShapes: [TensorShape?]
-    ) -> ResourceHandle {
-      _RawTFEager.multiDeviceIterator(
-        devices: devices, sharedName: sharedName, container: container, outputTypes: outputTypes,
-        outputShapes: outputShapes)
-    }
-
-    /// Generates a MultiDeviceIterator resource from its provided string handle.
-    ///
-    /// - Parameter string_handle: String representing the resource.
-    ///
-    /// - Attrs:
-    ///   - output_types: The type list for the return values.
-    ///   - output_shapes: The list of shapes being produced.
-    ///
-    /// - Output multi_device_iterator: A MultiDeviceIterator resource.
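The truncating semantics called out in the `mod` comment above are easiest to see with a negative operand; a small sketch (the identity `truncatediv(x, y) * y + mod(x, y) == x` is the one quoted in the doc):

```swift
import TensorFlow

let x = Tensor<Float>([7, -7])
let y = Tensor<Float>([5, 5])
print(_Raw.mod(x, y))  // [2.0, -2.0]: truncated division rounds toward zero
print(_Raw.mul(x, y))  // [35.0, -35.0]
```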
-    /// Generates a MultiDeviceIterator resource from its provided string handle.
-    ///
-    /// - Parameter string_handle: String representing the resource.
-    ///
-    /// - Attrs:
-    ///   - output_types: The type list for the return values.
-    ///   - output_shapes: The list of shapes being produced.
-    ///
-    /// - Output multi_device_iterator: A MultiDeviceIterator resource.
-    @inlinable @inline(__always)
-    public static func multiDeviceIteratorFromStringHandle(
-      stringHandle: StringTensor,
-      outputTypes: [TensorDataType],
-      outputShapes: [TensorShape?]
-    ) -> ResourceHandle {
-      _RawTFEager.multiDeviceIteratorFromStringHandle(
-        stringHandle: stringHandle, outputTypes: outputTypes, outputShapes: outputShapes)
-    }
-
-    /// Gets next element for the provided shard number.
-    ///
-    /// - Parameters:
-    ///   - multi_device_iterator: A MultiDeviceIterator resource.
-    ///   - shard_num: Integer representing which shard to fetch data for.
-    ///   - incarnation_id: Which incarnation of the MultiDeviceIterator is running.
-    ///
-    /// - Attrs:
-    ///   - output_types: The type list for the return values.
-    ///   - output_shapes: The list of shapes being produced.
-    ///
-    /// - Output components: Result of the get_next on the dataset.
-    @inlinable @inline(__always)
-    public static func multiDeviceIteratorGetNextFromShard<OutputTypes: TensorGroup>(
-      multiDeviceIterator: ResourceHandle,
-      shardNum: Tensor<Int32>,
-      incarnationId: Tensor<Int64>,
-      outputShapes: [TensorShape?]
-    ) -> OutputTypes {
-      _RawTFEager.multiDeviceIteratorGetNextFromShard(
-        multiDeviceIterator: multiDeviceIterator, shardNum: shardNum, incarnationId: incarnationId,
-        outputShapes: outputShapes)
-    }
-
-    /// Initializes the multi device iterator with the given dataset.
-    ///
-    /// - Parameters:
-    ///   - dataset: Dataset to be iterated upon.
-    ///   - multi_device_iterator: A MultiDeviceIteratorResource.
-    ///   - max_buffer_size: The maximum size of the host side per device buffer to keep.
-    ///
-    /// - Output incarnation_id: An int64 indicating which incarnation of the MultiDeviceIterator
-    ///   is running.
-    @inlinable @inline(__always)
-    public static func multiDeviceIteratorInit(
-      dataset: VariantHandle,
-      multiDeviceIterator: ResourceHandle,
-      maxBufferSize: Tensor<Int64>
-    ) -> Tensor<Int64> {
-      switch maxBufferSize.handle.backend {
-      case .XLA:
-        let output_device = maxBufferSize.device
-        let maxBufferSize = Tensor(copying: maxBufferSize, to: .defaultTFEager)
-        return Tensor(
-          copying: _RawTFEager.multiDeviceIteratorInit(
-            dataset: dataset, multiDeviceIterator: multiDeviceIterator, maxBufferSize: maxBufferSize
-          ), to: output_device)
-      case .TF_EAGER:
-        return _RawTFEager.multiDeviceIteratorInit(
-          dataset: dataset, multiDeviceIterator: multiDeviceIterator, maxBufferSize: maxBufferSize)
-      }
-
-    }
-
-    /// Produces a string handle for the given MultiDeviceIterator.
-    ///
-    /// - Parameter multi_device_iterator: A MultiDeviceIterator resource.
-    ///
-    /// - Output string_handle: A string representing the resource.
-    @inlinable @inline(__always)
-    public static func multiDeviceIteratorToStringHandle(
-      multiDeviceIterator: ResourceHandle
-    ) -> StringTensor {
-      _RawTFEager.multiDeviceIteratorToStringHandle(multiDeviceIterator: multiDeviceIterator)
-    }
-
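Editor's note: the three `MultiDeviceIterator` wrappers above compose into a string-handle round trip. A hypothetical usage sketch (device string, names, and the unconstrained-shape list are placeholder values, not from the patch):

```swift
import TensorFlow

// Create an iterator resource, export its string handle, and recover a
// resource handle for the same iterator from that string.
let iterator = _Raw.multiDeviceIterator(
  devices: ["/device:CPU:0"], sharedName: "mdi", container: "",
  outputTypes: [Float.tensorFlowDataType], outputShapes: [nil])
let handle = _Raw.multiDeviceIteratorToStringHandle(multiDeviceIterator: iterator)
let restored = _Raw.multiDeviceIteratorFromStringHandle(
  stringHandle: handle, outputTypes: [Float.tensorFlowDataType], outputShapes: [nil])
```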
-    /// Draws samples from a multinomial distribution.
-    ///
-    /// - Parameters:
-    ///   - logits: 2-D Tensor with shape `[batch_size, num_classes]`. Each slice `[i, :]`
-    ///     represents the unnormalized log probabilities for all classes.
-    ///   - num_samples: 0-D. Number of independent samples to draw for each row slice.
-    ///
-    /// - Attrs:
-    ///   - seed: If either seed or seed2 is set to be non-zero, the internal random number
-    ///     generator is seeded by the given seed. Otherwise, a random seed is used.
-    ///   - seed2: A second seed to avoid seed collision.
-    ///
-    /// - Output output: 2-D Tensor with shape `[batch_size, num_samples]`. Each slice `[i, :]`
-    ///   contains the drawn class labels with range `[0, num_classes)`.
-    @inlinable @inline(__always)
-    public static func multinomial<
-      T: TensorFlowNumeric,
-      OutputDtype: TensorFlowIndex
-    >(
-      logits: Tensor<T>,
-      numSamples: Tensor<Int32>,
-      seed: Int64 = 0,
-      seed2: Int64 = 0
-    ) -> Tensor<OutputDtype> {
-      switch commonBackend(logits.handle.backend, numSamples.handle.backend) {
-      case .XLA:
-        let output_device = numSamples.device
-        let logits = Tensor(copying: logits, to: .defaultTFEager)
-        let numSamples = Tensor(copying: numSamples, to: .defaultTFEager)
-        return Tensor(
-          copying: _RawTFEager.multinomial(
-            logits: logits, numSamples: numSamples, seed: seed, seed2: seed2), to: output_device)
-      case .TF_EAGER:
-        return _RawTFEager.multinomial(
-          logits: logits, numSamples: numSamples, seed: seed, seed2: seed2)
-      }
-
-    }
-
-    /// Creates an empty hash table that uses tensors as the backing store.
-    ///
-    /// It uses "open addressing" with quadratic reprobing to resolve
-    /// collisions.
-    ///
-    /// This op creates a mutable hash table, specifying the type of its keys and
-    /// values. Each value must be a scalar. Data can be inserted into the table using
-    /// the insert operations. It does not support the initialization operation.
-    ///
-    /// - Parameter empty_key: The key used to represent empty key buckets internally. Must not
-    ///   be used in insert or lookup operations.
-    ///
-    /// - Attrs:
-    ///   - container: If non-empty, this table is placed in the given container.
-    ///     Otherwise, a default container is used.
-    ///   - shared_name: If non-empty, this table is shared under the given name across
-    ///     multiple sessions.
-    ///   - key_dtype: Type of the table keys.
-    ///   - value_dtype: Type of the table values.
-    ///   - value_shape: The shape of each value.
-    ///   - initial_num_buckets: The initial number of hash table buckets. Must be a power
-    ///     of 2.
-    ///   - max_load_factor: The maximum ratio between number of entries and number of
-    ///     buckets before growing the table. Must be between 0 and 1.
-    ///
-    /// - Output table_handle: Handle to a table.
-    @inlinable @inline(__always)
-    public static func mutableDenseHashTableV2<KeyDtype: TensorFlowScalar>(
-      emptyKey: Tensor<KeyDtype>,
-      deletedKey: Tensor<KeyDtype>,
-      container: String,
-      sharedName: String,
-      useNodeNameSharing: Bool = false,
-      valueDtype: TensorDataType,
-      valueShape: TensorShape?,
-      initialNumBuckets: Int64 = 131072,
-      maxLoadFactor: Double = 0.8
-    ) -> ResourceHandle {
-      _RawTFEager.mutableDenseHashTableV2(
-        emptyKey: emptyKey, deletedKey: deletedKey, container: container, sharedName: sharedName,
-        useNodeNameSharing: useNodeNameSharing, valueDtype: valueDtype, valueShape: valueShape,
-        initialNumBuckets: initialNumBuckets, maxLoadFactor: maxLoadFactor)
-    }
-
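Editor's note: a hedged sketch of creating a dense hash table with the wrapper directly above (table name and sentinel keys are illustrative only; the sentinels must never collide with real keys):

```swift
import TensorFlow

// Dense hash table keyed by Int64 with scalar Float values. -1/-2 serve as
// the reserved empty/deleted sentinel keys described in the doc comment.
let table = _Raw.mutableDenseHashTableV2(
  emptyKey: Tensor<Int64>(-1), deletedKey: Tensor<Int64>(-2),
  container: "", sharedName: "embedding_table",
  valueDtype: Float.tensorFlowDataType, valueShape: TensorShape([]))
```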
-    /// Creates an empty hash table.
-    ///
-    /// This op creates a mutable hash table, specifying the type of its keys and
-    /// values. Each value must be a vector. Data can be inserted into the table using
-    /// the insert operations. It does not support the initialization operation.
-    ///
-    /// - Attrs:
-    ///   - container: If non-empty, this table is placed in the given container.
-    ///     Otherwise, a default container is used.
-    ///   - shared_name: If non-empty, this table is shared under the given name across
-    ///     multiple sessions.
-    ///   - key_dtype: Type of the table keys.
-    ///   - value_dtype: Type of the table values.
-    ///
-    /// - Output table_handle: Handle to a table.
-    @inlinable @inline(__always)
-    public static func mutableHashTableOfTensorsV2(
-      container: String,
-      sharedName: String,
-      useNodeNameSharing: Bool = false,
-      keyDtype: TensorDataType,
-      valueDtype: TensorDataType,
-      valueShape: TensorShape?
-    ) -> ResourceHandle {
-      _RawTFEager.mutableHashTableOfTensorsV2(
-        container: container, sharedName: sharedName, useNodeNameSharing: useNodeNameSharing,
-        keyDtype: keyDtype, valueDtype: valueDtype, valueShape: valueShape)
-    }
-
-    /// Creates an empty hash table.
-    ///
-    /// This op creates a mutable hash table, specifying the type of its keys and
-    /// values. Each value must be a scalar. Data can be inserted into the table using
-    /// the insert operations. It does not support the initialization operation.
-    ///
-    /// - Attrs:
-    ///   - container: If non-empty, this table is placed in the given container.
-    ///     Otherwise, a default container is used.
-    ///   - shared_name: If non-empty, this table is shared under the given name across
-    ///     multiple sessions.
-    ///   - use_node_name_sharing: If true and shared_name is empty, the table is shared
-    ///     using the node name.
-    ///   - key_dtype: Type of the table keys.
-    ///   - value_dtype: Type of the table values.
-    ///
-    /// - Output table_handle: Handle to a table.
-    @inlinable @inline(__always)
-    public static func mutableHashTableV2(
-      container: String,
-      sharedName: String,
-      useNodeNameSharing: Bool = false,
-      keyDtype: TensorDataType,
-      valueDtype: TensorDataType
-    ) -> ResourceHandle {
-      _RawTFEager.mutableHashTableV2(
-        container: container, sharedName: sharedName, useNodeNameSharing: useNodeNameSharing,
-        keyDtype: keyDtype, valueDtype: valueDtype)
-    }
-
-    /// Locks a mutex resource. The output is the lock. So long as the lock tensor
-    ///
-    /// is alive, any other request to use `MutexLock` with this mutex will wait.
-    ///
-    /// This is particularly useful for creating a critical section when used in
-    /// conjunction with `MutexLockIdentity`:
-    ///
-    /// ```python
-    ///
-    /// mutex = mutex_v2(
-    ///    shared_name=handle_name, container=container, name=name)
-    ///
-    /// def execute_in_critical_section(fn, *args, **kwargs):
-    ///   lock = gen_resource_variable_ops.mutex_lock(mutex)
-    ///
-    ///   with ops.control_dependencies([lock]):
-    ///     r = fn(*args, **kwargs)
-    ///
-    ///   with ops.control_dependencies(nest.flatten(r)):
-    ///     with ops.colocate_with(mutex):
-    ///       ensure_lock_exists = mutex_lock_identity(lock)
-    ///
-    ///     # Make sure that if any element of r is accessed, all of
-    ///     # them are executed together.
-    ///     r = nest.map_structure(tf.identity, r)
-    ///
-    ///   with ops.control_dependencies([ensure_lock_exists]):
-    ///     return nest.map_structure(tf.identity, r)
-    /// ```
-    ///
-    /// While `fn` is running in the critical section, no other functions which wish to
-    /// use this critical section may run.
-    ///
-    /// Often the use case is that two executions of the same graph, in parallel,
-    /// wish to run `fn`; and we wish to ensure that only one of them executes
-    /// at a time. This is especially important if `fn` modifies one or more
-    /// variables at a time.
-    ///
-    /// It is also useful if two separate functions must share a resource, but we
-    /// wish to ensure the usage is exclusive.
-    ///
-    /// - Parameter mutex: The mutex resource to lock.
-    ///
-    /// - Output mutex_lock: A tensor that keeps a shared pointer to a lock on the mutex;
-    ///   when the Tensor is destroyed, the use count on the shared pointer is decreased
-    ///   by 1. When it reaches 0, the lock is released.
-    @inlinable @inline(__always)
-    public static func mutexLock(
-      mutex: ResourceHandle
-    ) -> VariantHandle {
-      _RawTFEager.mutexLock(mutex: mutex)
-    }
-
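Editor's note: the critical-section pattern above is written against the Python API. A rough Swift transliteration using the two wrappers in this hunk might look like the following sketch (the mutex name is hypothetical; per the doc comment, the lock is held as long as the returned variant stays alive):

```swift
import TensorFlow

// Create (or look up) a shared mutex, then hold its lock for the duration
// of a critical section.
let mutex = _Raw.mutexV2(container: "", sharedName: "training_step_mutex")
do {
  let lock = _Raw.mutexLock(mutex: mutex)
  // ... mutate shared state here while `lock` is alive ...
  _ = lock  // keep the lock alive to the end of this scope
}
```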
-    /// Creates a Mutex resource that can be locked by `MutexLock`.
-    ///
-    /// - Attrs:
-    ///   - container: If non-empty, this variable is placed in the given container.
-    ///     Otherwise, a default container is used.
-    ///   - shared_name: If non-empty, this variable is named in the given bucket
-    ///     with this shared_name. Otherwise, the node name is used instead.
-    ///
-    /// - Output resource: The mutex resource.
-    @inlinable @inline(__always)
-    public static func mutexV2(
-      container: String,
-      sharedName: String
-    ) -> ResourceHandle {
-      _RawTFEager.mutexV2(container: container, sharedName: sharedName)
-    }
-
-    @inlinable @inline(__always)
-    public static func nInPolymorphicTwice<T: TensorFlowScalar>(
-      _ a: [Tensor<T>],
-      _ b: [Tensor<T>]
-    ) {
-      _RawTFEager.nInPolymorphicTwice(a, b)
+  }
+
+  /// Outputs a `Summary` protocol buffer with a tensor.
+  ///
+  /// This op is being phased out in favor of TensorSummaryV2, which lets callers pass
+  /// a tag as well as a serialized SummaryMetadata proto string that contains
+  /// plugin-specific data. We will keep this op to maintain backwards compatibility.
+  ///
+  /// - Parameter tensor: A tensor to serialize.
+  ///
+  /// - Attrs:
+  ///   - description: A json-encoded SummaryDescription proto.
+  ///   - labels: An unused list of strings.
+  ///   - display_name: An unused string.
+  @inlinable @inline(__always)
+  public static func tensorSummary<T: TensorFlowScalar>(
+    _ tensor: Tensor<T>,
+    description: String,
+    labels: [String],
+    displayName: String
+  ) -> StringTensor {
+    _RawTFEager.tensorSummary(
+      tensor, description: description, labels: labels, displayName: displayName)
+  }
+
+  /// Outputs a `Summary` protocol buffer with a tensor and per-plugin data.
+  ///
+  /// - Parameters:
+  ///   - tag: A string attached to this summary. Used for organization in TensorBoard.
+  ///   - tensor: A tensor to serialize.
+  ///   - serialized_summary_metadata: A serialized SummaryMetadata proto. Contains plugin
+  ///     data.
+  @inlinable @inline(__always)
+  public static func tensorSummaryV2<T: TensorFlowScalar>(
+    tag: StringTensor,
+    _ tensor: Tensor<T>,
+    serializedSummaryMetadata: StringTensor
+  ) -> StringTensor {
+    _RawTFEager.tensorSummaryV2(
+      tag: tag, tensor, serializedSummaryMetadata: serializedSummaryMetadata)
+  }
+
+  @inlinable @inline(__always)
+  public static func testAttr<T: FloatingPoint & TensorFlowScalar>() -> Tensor<T> {
+    _RawTFEager.testAttr()
+  }
+
+  @inlinable @inline(__always)
+  public static func testStringOutput(
+    _ input: Tensor<Float>
+  ) -> (output1: Tensor<Float>, output2: StringTensor) {
+    _RawTFEager.testStringOutput(input)
+  }
+
+  /// Creates a dataset that emits the lines of one or more text files.
+  ///
+  /// - Parameters:
+  ///   - filenames: A scalar or a vector containing the name(s) of the file(s) to be
+  ///     read.
+  ///   - compression_type: A scalar containing either (i) the empty string (no
+  ///     compression), (ii) "ZLIB", or (iii) "GZIP".
+  ///   - buffer_size: A scalar containing the number of bytes to buffer.
+  @inlinable @inline(__always)
+  public static func textLineDataset(
+    filenames: StringTensor,
+    compressionType: StringTensor,
+    bufferSize: Tensor<Int64>
+  ) -> VariantHandle {
+    _RawTFEager.textLineDataset(
+      filenames: filenames, compressionType: compressionType, bufferSize: bufferSize)
+  }
+
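Editor's note: a minimal usage sketch for `textLineDataset` above (the file path and buffer size are placeholder values, not from the patch):

```swift
import TensorFlow

// Build a variant dataset handle that emits the lines of one local file,
// uncompressed, with a 256 KiB read buffer.
let lines = _Raw.textLineDataset(
  filenames: StringTensor("/tmp/input.txt"),
  compressionType: StringTensor(""),
  bufferSize: Tensor<Int64>(262144))
```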
+  /// A Reader that outputs the lines of a file delimited by '\n'.
+  ///
+  /// - Attrs:
+  ///   - skip_header_lines: Number of lines to skip from the beginning of every file.
+  ///   - container: If non-empty, this reader is placed in the given container.
+  ///     Otherwise, a default container is used.
+  ///   - shared_name: If non-empty, this reader is named in the given bucket
+  ///     with this shared_name. Otherwise, the node name is used instead.
+  ///
+  /// - Output reader_handle: The handle to reference the Reader.
+  @inlinable @inline(__always)
+  public static func textLineReaderV2(
+    skipHeaderLines: Int64 = 0,
+    container: String,
+    sharedName: String
+  ) -> ResourceHandle {
+    _RawTFEager.textLineReaderV2(
+      skipHeaderLines: skipHeaderLines, container: container, sharedName: sharedName)
+  }
+
+  /// Creates a dataset that uses a custom thread pool to compute `input_dataset`.
+  ///
+  /// - Parameter thread_pool: A resource produced by the ThreadPoolHandle op.
+  @inlinable @inline(__always)
+  public static func threadPoolDataset(
+    inputDataset: VariantHandle,
+    threadPool: ResourceHandle,
+    outputTypes: [TensorDataType],
+    outputShapes: [TensorShape?]
+  ) -> VariantHandle {
+    _RawTFEager.threadPoolDataset(
+      inputDataset: inputDataset, threadPool: threadPool, outputTypes: outputTypes,
+      outputShapes: outputShapes)
+  }
+
+  /// Creates a dataset that uses a custom thread pool to compute `input_dataset`.
+  ///
+  /// - Attrs:
+  ///   - num_threads: The number of threads in the thread pool.
+  ///   - max_intra_op_parallelism: The maximum degree of parallelism to use within operations that execute on this
+  ///     threadpool.
+  ///   - display_name: A human-readable name for the threads that may be visible in some
+  ///     visualizations.
+  ///
+  /// - Output handle: A resource that can be consumed by one or more ExperimentalThreadPoolDataset
+  ///   ops.
+  @inlinable @inline(__always)
+  public static func threadPoolHandle(
+    numThreads: Int64,
+    maxIntraOpParallelism: Int64 = 1,
+    displayName: String,
+    container: String,
+    sharedName: String
+  ) -> ResourceHandle {
+    _RawTFEager.threadPoolHandle(
+      numThreads: numThreads, maxIntraOpParallelism: maxIntraOpParallelism,
+      displayName: displayName, container: container, sharedName: sharedName)
+  }
+
+  /// Generates labels for candidate sampling with a learned unigram distribution.
+  ///
+  /// See explanations of candidate sampling and the data formats at
+  /// go/candidate-sampling.
+  ///
+  /// For each batch, this op picks a single set of sampled candidate labels.
+  ///
+  /// The advantages of sampling candidates per-batch are simplicity and the
+  /// possibility of efficient dense matrix multiplication. The disadvantage is that
+  /// the sampled candidates must be chosen independently of the context and of the
+  /// true labels.
+  ///
+  /// - Parameter true_classes: A batch_size * num_true matrix, in which each row contains the
+  ///   IDs of the num_true target_classes in the corresponding original label.
+  ///
+  /// - Attrs:
+  ///   - num_true: Number of true labels per context.
+  ///   - num_sampled: Number of candidates to randomly sample.
+  ///   - unique: If unique is true, we sample with rejection, so that all sampled
+  ///     candidates in a batch are unique. This requires some approximation to
+  ///     estimate the post-rejection sampling probabilities.
+  ///   - range_max: The sampler will sample integers from the interval [0, range_max).
+  ///   - seed: If either seed or seed2 are set to be non-zero, the random number
+  ///     generator is seeded by the given seed. Otherwise, it is seeded by a
+  ///     random seed.
+  ///   - seed2: A second seed to avoid seed collision.
+  ///
+  /// - Outputs:
+  ///   - sampled_candidates: A vector of length num_sampled, in which each element is
+  ///     the ID of a sampled candidate.
+  ///   - true_expected_count: A batch_size * num_true matrix, representing
+  ///     the number of times each candidate is expected to occur in a batch
+  ///     of sampled candidates. If unique=true, then this is a probability.
+  ///   - sampled_expected_count: A vector of length num_sampled, for each sampled
+  ///     candidate representing the number of times the candidate is expected
+  ///     to occur in a batch of sampled candidates. If unique=true, then this is a
+  ///     probability.
+  @inlinable @inline(__always)
+  public static func threadUnsafeUnigramCandidateSampler(
+    trueClasses: Tensor<Int64>,
+    numTrue: Int64,
+    numSampled: Int64,
+    unique: Bool,
+    rangeMax: Int64,
+    seed: Int64 = 0,
+    seed2: Int64 = 0
+  ) -> (
+    sampledCandidates: Tensor<Int64>, trueExpectedCount: Tensor<Float>,
+    sampledExpectedCount: Tensor<Float>
+  ) {
+    _RawTFEager.threadUnsafeUnigramCandidateSampler(
+      trueClasses: trueClasses, numTrue: numTrue, numSampled: numSampled, unique: unique,
+      rangeMax: rangeMax, seed: seed, seed2: seed2)
+  }
+
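Editor's note: a hedged sketch of calling the candidate sampler above, with illustrative shapes only (two examples in the batch, one true label each, eight candidates from [0, 1000)):

```swift
import TensorFlow

// Sample 8 unique candidate class IDs per batch, plus the expected-count
// estimates described in the doc comment.
let trueClasses = Tensor<Int64>([[42], [7]])  // shape [batch_size=2, num_true=1]
let (sampled, trueExpected, sampledExpected) = _Raw.threadUnsafeUnigramCandidateSampler(
  trueClasses: trueClasses, numTrue: 1, numSampled: 8, unique: true, rangeMax: 1000)
// `sampled` holds 8 candidate IDs; with unique=true, the two expected-count
// outputs are (approximate) probabilities.
print(sampled, trueExpected, sampledExpected)
```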
+  /// Constructs a tensor by tiling a given tensor.
+  ///
+  /// This operation creates a new tensor by replicating `input` `multiples` times.
+  /// The output tensor's i'th dimension has `input.dims(i) * multiples[i]` elements,
+  /// and the values of `input` are replicated `multiples[i]` times along the 'i'th
+  /// dimension. For example, tiling `[a b c d]` by `[2]` produces
+  /// `[a b c d a b c d]`.
+  ///
+  /// >>> a = tf.constant([[1,2,3],[4,5,6]], tf.int32)
+  /// >>> b = tf.constant([1,2], tf.int32)
+  /// >>> tf.tile(a, b)
+  /// <tf.Tensor: shape=(2, 6), dtype=int32, numpy=
+  /// array([[1, 2, 3, 1, 2, 3],
+  ///        [4, 5, 6, 4, 5, 6]], dtype=int32)>
+  /// >>> c = tf.constant([2,1], tf.int32)
+  /// >>> tf.tile(a, c)
+  /// <tf.Tensor: shape=(4, 3), dtype=int32, numpy=
+  /// array([[1, 2, 3],
+  ///        [4, 5, 6],
+  ///        [1, 2, 3],
+  ///        [4, 5, 6]], dtype=int32)>
+  /// >>> d = tf.constant([2,2], tf.int32)
+  /// >>> tf.tile(a, d)
+  /// <tf.Tensor: shape=(4, 6), dtype=int32, numpy=
+  /// array([[1, 2, 3, 1, 2, 3],
+  ///        [4, 5, 6, 4, 5, 6],
+  ///        [1, 2, 3, 1, 2, 3],
+  ///        [4, 5, 6, 4, 5, 6]], dtype=int32)>
+  ///
+  /// - Parameters:
+  ///   - input: 1-D or higher.
+  ///   - multiples: 1-D. Length must be the same as the number of dimensions in `input`
+  @inlinable @inline(__always)
+  public static func tile<
+    T: TensorFlowScalar,
+    Tmultiples: TensorFlowIndex
+  >(
+    _ input: Tensor<T>,
+    multiples: Tensor<Tmultiples>
+  ) -> Tensor<T> {
+    switch commonBackend(input.handle.backend, multiples.handle.backend) {
+    case .XLA:
+      return _RawXLA.tile(input, multiples: multiples)
+    case .TF_EAGER:
+      return _RawTFEager.tile(input, multiples: multiples)
     }
-    @inlinable @inline(__always)
-    public static func nInTwice(
-      _ a: [Tensor<Int32>],
-      _ b: [StringTensor]
-    ) {
-      _RawTFEager.nInTwice(a, b)
+  }
+
+  /// Returns the gradient of `Tile`.
+  ///
+  /// Since `Tile` takes an input and repeats the input `multiples` times
+  /// along each dimension, `TileGrad` takes in `multiples` and aggregates
+  /// each repeated tile of `input` into `output`.
+  @inlinable @inline(__always)
+  public static func tileGrad<T: TensorFlowScalar>(
+    _ input: Tensor<T>,
+    multiples: Tensor<Int32>
+  ) -> Tensor<T> {
+    switch commonBackend(input.handle.backend, multiples.handle.backend) {
+    case .XLA:
+      let output_device = multiples.device
+      let input = Tensor(copying: input, to: .defaultTFEager)
+      let multiples = Tensor(copying: multiples, to: .defaultTFEager)
+      return Tensor(
+        copying: _RawTFEager.tileGrad(input, multiples: multiples), to: output_device)
+    case .TF_EAGER:
+      return _RawTFEager.tileGrad(input, multiples: multiples)
     }
-    @inlinable @inline(__always)
-    public static func nInTwoTypeVariables<
-      S: TensorFlowScalar,
-      T: TensorFlowScalar
-    >(
-      _ a: [Tensor<S>],
-      _ b: [Tensor<T>]
-    ) {
-      _RawTFEager.nInTwoTypeVariables(a, b)
+  }
+
+  /// Provides the time since epoch in seconds.
+ /// + /// Returns the timestamp as a `float64` for seconds since the Unix epoch. + /// + /// Note: the timestamp is computed when the op is executed, not when it is added + /// to the graph. + @inlinable @inline(__always) + public static func timestamp() -> Tensor { + _RawTFEager.timestamp() + } + + /// Finds values and indices of the `k` largest elements for the last dimension. + /// + /// If the input is a vector (rank-1), finds the `k` largest entries in the vector + /// and outputs their values and indices as vectors. Thus `values[j]` is the + /// `j`-th largest entry in `input`, and its index is `indices[j]`. + /// + /// For matrices (resp. higher rank input), computes the top `k` entries in each + /// row (resp. vector along the last dimension). Thus, + /// + /// values.shape = indices.shape = input.shape[:-1] + [k] + /// + /// If two elements are equal, the lower-index element appears first. + /// + /// If `k` varies dynamically, use `TopKV2` below. + /// + /// - Parameter input: 1-D or higher with last dimension at least `k`. + /// + /// - Attrs: + /// - k: Number of top elements to look for along the last dimension (along each + /// row for matrices). + /// - sorted: If true the resulting `k` elements will be sorted by the values in + /// descending order. + /// + /// - Outputs: + /// - values: The `k` largest elements along each last dimensional slice. + /// - indices: The indices of `values` within the last dimension of `input`. + @inlinable @inline(__always) + public static func topK( + _ input: Tensor, + k: Int64, + sorted: Bool = true + ) -> (values: Tensor, indices: Tensor) { + _RawTFEager.topK(input, k: k, sorted: sorted) + } + + /// Finds values and indices of the `k` largest elements for the last dimension. + /// + /// If the input is a vector (rank-1), finds the `k` largest entries in the vector + /// and outputs their values and indices as vectors. Thus `values[j]` is the + /// `j`-th largest entry in `input`, and its index is `indices[j]`. + /// + /// For matrices (resp. higher rank input), computes the top `k` entries in each + /// row (resp. vector along the last dimension). Thus, + /// + /// values.shape = indices.shape = input.shape[:-1] + [k] + /// + /// If two elements are equal, the lower-index element appears first. + /// + /// - Parameters: + /// - input: 1-D or higher with last dimension at least `k`. + /// - k: 0-D. Number of top elements to look for along the last dimension (along each + /// row for matrices). + /// + /// - Attr sorted: If true the resulting `k` elements will be sorted by the values in + /// descending order. + /// + /// - Outputs: + /// - values: The `k` largest elements along each last dimensional slice. + /// - indices: The indices of `values` within the last dimension of `input`. + @inlinable @inline(__always) + public static func topKV2( + _ input: Tensor, + k: Tensor, + sorted: Bool = true + ) -> (values: Tensor, indices: Tensor) { + _RawTFEager.topKV2(input, k: k, sorted: sorted) + } + + /// Shuffle dimensions of x according to a permutation. + /// + /// The output `y` has the same rank as `x`. 
The shapes of `x` and `y` satisfy: + /// `y.shape[i] == x.shape[perm[i]] for i in [0, 1, ..., rank(x) - 1]` + @inlinable @inline(__always) + public static func transpose< + T: TensorFlowScalar, + Tperm: TensorFlowIndex + >( + _ x: Tensor, + perm: Tensor + ) -> Tensor { + switch commonBackend(x.handle.backend, perm.handle.backend) { + case .XLA: + return _RawXLA.transpose(x, perm: perm) + case .TF_EAGER: + return _RawTFEager.transpose(x, perm: perm) } - @inlinable @inline(__always) - public static func nIntsIn( - _ a: [Tensor] - ) { - _RawTFEager.nIntsIn(a) + } + + /// Calculate product with tridiagonal matrix. + /// + /// Calculates product of two matrices, where left matrix is a tridiagonal matrix. + /// + /// - Parameters: + /// - superdiag: Tensor of shape `[..., 1, M]`, representing superdiagonals of + /// tri-diagonal matrices to the left of multiplication. Last element is ignored. + /// - maindiag: Tensor of shape `[..., 1, M]`, representing main diagonals of tri-diagonal + /// matrices to the left of multiplication. + /// - subdiag: Tensor of shape `[..., 1, M]`, representing subdiagonals of tri-diagonal + /// matrices to the left of multiplication. First element is ignored. + /// - rhs: Tensor of shape `[..., M, N]`, representing MxN matrices to the right of + /// multiplication. + /// + /// - Output output: Tensor of shape `[..., M, N]` containing the product. + @inlinable @inline(__always) + public static func tridiagonalMatMul( + superdiag: Tensor, + maindiag: Tensor, + subdiag: Tensor, + rhs: Tensor + ) -> Tensor { + switch commonBackend( + commonBackend( + commonBackend(superdiag.handle.backend, maindiag.handle.backend), subdiag.handle.backend), + rhs.handle.backend) + { + case .XLA: + let output_device = rhs.device + let superdiag = Tensor(copying: superdiag, to: .defaultTFEager) + let maindiag = Tensor(copying: maindiag, to: .defaultTFEager) + let subdiag = Tensor(copying: subdiag, to: .defaultTFEager) + let rhs = Tensor(copying: rhs, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.tridiagonalMatMul( + superdiag: superdiag, maindiag: maindiag, subdiag: subdiag, rhs: rhs), to: output_device + ) + case .TF_EAGER: + return _RawTFEager.tridiagonalMatMul( + superdiag: superdiag, maindiag: maindiag, subdiag: subdiag, rhs: rhs) } - @inlinable @inline(__always) - public static func nIntsOut( - n: Int64 - ) -> [Tensor] { - _RawTFEager.nIntsOut(n: n) + } + + /// Solves tridiagonal systems of equations. + /// + /// Solves tridiagonal systems of equations. + /// Supports batch dimensions and multiple right-hand sides per each left-hand + /// side. + /// On CPU, solution is computed via Gaussian elimination with or without partial + /// pivoting, depending on `partial_pivoting` attribute. On GPU, Nvidia's cuSPARSE + /// library is used: https://docs.nvidia.com/cuda/cusparse/index.html#gtsv + /// + /// - Parameters: + /// - diagonals: Tensor of shape `[..., 3, M]` whose innermost 2 dimensions represent the + /// tridiagonal matrices with three rows being the superdiagonal, diagonals, and + /// subdiagonals, in order. The last element of the superdiagonal and the first + /// element of the subdiagonal is ignored. + /// - rhs: Tensor of shape `[..., M, K]`, representing K right-hand sides per each + /// left-hand side. + /// + /// - Attr partial_pivoting: Whether to apply partial pivoting. Partial pivoting makes the procedure more + /// stable, but slower. 
+ /// + /// - Output output: Tensor of shape `[..., M, K]` containing the solutions + @inlinable @inline(__always) + public static func tridiagonalSolve( + diagonals: Tensor, + rhs: Tensor, + partialPivoting: Bool = true + ) -> Tensor { + switch commonBackend(diagonals.handle.backend, rhs.handle.backend) { + case .XLA: + let output_device = rhs.device + let diagonals = Tensor(copying: diagonals, to: .defaultTFEager) + let rhs = Tensor(copying: rhs, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.tridiagonalSolve( + diagonals: diagonals, rhs: rhs, partialPivoting: partialPivoting), to: output_device) + case .TF_EAGER: + return _RawTFEager.tridiagonalSolve( + diagonals: diagonals, rhs: rhs, partialPivoting: partialPivoting) } - @inlinable @inline(__always) - public static func nIntsOutDefault( - n: Int64 = 3 - ) -> [Tensor] { - _RawTFEager.nIntsOutDefault(n: n) + } + + /// Returns x / y element-wise for integer types. + /// + /// Truncation designates that negative numbers will round fractional quantities + /// toward zero. I.e. -7 / 5 = -1. This matches C semantics but it is different + /// than Python semantics. See `FloorDiv` for a division function that matches + /// Python Semantics. + /// + /// *NOTE*: `TruncateDiv` supports broadcasting. More about broadcasting + /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + @inlinable @inline(__always) + public static func truncateDiv( + _ x: Tensor, + _ y: Tensor + ) -> Tensor { + switch commonBackend(x.handle.backend, y.handle.backend) { + case .XLA: + let output_device = y.device + let x = Tensor(copying: x, to: .defaultTFEager) + let y = Tensor(copying: y, to: .defaultTFEager) + return Tensor(copying: _RawTFEager.truncateDiv(x, y), to: output_device) + case .TF_EAGER: + return _RawTFEager.truncateDiv(x, y) } - @inlinable @inline(__always) - public static func nPolymorphicIn( - _ a: [Tensor] - ) { - _RawTFEager.nPolymorphicIn(a) + } + + /// Returns element-wise remainder of division. This emulates C semantics in that + /// + /// the result here is consistent with a truncating divide. E.g. `truncate(x / y) * + /// y + truncate_mod(x, y) = x`. + /// + /// *NOTE*: `TruncateMod` supports broadcasting. More about broadcasting + /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + @inlinable @inline(__always) + public static func truncateMod( + _ x: Tensor, + _ y: Tensor + ) -> Tensor { + switch commonBackend(x.handle.backend, y.handle.backend) { + case .XLA: + let output_device = y.device + let x = Tensor(copying: x, to: .defaultTFEager) + let y = Tensor(copying: y, to: .defaultTFEager) + return Tensor(copying: _RawTFEager.truncateMod(x, y), to: output_device) + case .TF_EAGER: + return _RawTFEager.truncateMod(x, y) } - @inlinable @inline(__always) - public static func nPolymorphicOut( - n: Int64 - ) -> [Tensor] { - _RawTFEager.nPolymorphicOut(n: n) + } + + /// Outputs random values from a truncated normal distribution. + /// + /// The generated values follow a normal distribution with mean 0 and standard + /// deviation 1, except that values whose magnitude is more than 2 standard + /// deviations from the mean are dropped and re-picked. + /// + /// - Parameter shape: The shape of the output tensor. + /// + /// - Attrs: + /// - seed: If either `seed` or `seed2` are set to be non-zero, the random number + /// generator is seeded by the given seed. Otherwise, it is seeded by a + /// random seed. + /// - seed2: A second seed to avoid seed collision. + /// - dtype: The type of the output. 
+ /// + /// - Output output: A tensor of the specified shape filled with random truncated normal + /// values. + @inlinable @inline(__always) + public static func truncatedNormal< + Dtype: FloatingPoint & TensorFlowScalar, + T: TensorFlowIndex + >( + shape: Tensor, + seed: Int64 = 0, + seed2: Int64 = 0 + ) -> Tensor { + switch shape.handle.backend { + case .XLA: + let output_device = shape.device + let shape = Tensor(copying: shape, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.truncatedNormal(shape: shape, seed: seed, seed2: seed2), + to: output_device) + case .TF_EAGER: + return _RawTFEager.truncatedNormal(shape: shape, seed: seed, seed2: seed2) } - @inlinable @inline(__always) - public static func nPolymorphicOutDefault( - n: Int64 = 2 - ) -> [Tensor] { - _RawTFEager.nPolymorphicOutDefault(n: n) + } + + /// Perform batches of RPC requests. + /// + /// This op asynchronously performs either a single RPC request, or a batch + /// of requests. RPC requests are defined by three main parameters: + /// + /// - `address` (the host+port or BNS address of the request) + /// - `method` (the method name for the request) + /// - `request` (the serialized proto string, or vector of strings, + /// of the RPC request argument). + /// + /// For example, if you have an RPC service running on port localhost:2345, + /// and its interface is configured with the following proto declaration: + /// + /// ``` + /// service MyService { + /// rpc MyMethod(MyRequestProto) returns (MyResponseProto) { + /// } + /// }; + /// ``` + /// + /// then call this op with arguments: + /// + /// ``` + /// address = "localhost:2345" + /// method = "MyService/MyMethod" + /// ``` + /// + /// The `request` tensor is a string tensor representing serialized `MyRequestProto` + /// strings; and the output string tensor `response` will have the same shape + /// and contain (upon successful completion) corresponding serialized + /// `MyResponseProto` strings. + /// + /// For example, to send a single, empty, `MyRequestProto`, call + /// this op with `request = ""`. To send 5 **parallel** empty requests, + /// call this op with `request = ["", "", "", "", ""]`. + /// + /// More generally, one can create a batch of `MyRequestProto` serialized protos + /// from regular batched tensors using the `encode_proto` op, and convert + /// the response `MyResponseProto` serialized protos to batched tensors + /// using the `decode_proto` op. + /// + /// **NOTE** Working with serialized proto strings is faster than instantiating + /// actual proto objects in memory, so no performance degradation is expected + /// compared to writing custom kernels for this workflow. + /// + /// Unlike the standard `Rpc` op, if the connection fails or the remote worker + /// returns an error status, this op does **not** reraise the exception. + /// Instead, the `status_code` and `status_message` entry for the corresponding RPC + /// call is set with the error returned from the RPC call. The `response` tensor + /// will contain valid response values for those minibatch entries whose RPCs did + /// not fail; the rest of the entries will have empty strings. + /// + /// - Parameters: + /// - address: `0-D` or `1-D`. The address (i.e. host_name:port) of the RPC server. + /// If this tensor has more than 1 element, then multiple parallel rpc requests + /// are sent. This argument broadcasts with `method` and `request`. + /// - method: `0-D` or `1-D`. The method address on the RPC server. 
+ /// If this tensor has more than 1 element, then multiple parallel rpc requests + /// are sent. This argument broadcasts with `address` and `request`. + /// - request: `0-D` or `1-D`. Serialized proto strings: the rpc request argument. + /// If this tensor has more than 1 element, then multiple parallel rpc requests + /// are sent. This argument broadcasts with `address` and `method`. + /// + /// - Attrs: + /// - protocol: RPC protocol to use. Empty string means use the default protocol. + /// Options include 'grpc'. + /// - fail_fast: `boolean`. If `true` (default), then failures to connect + /// (i.e., the server does not immediately respond) cause an RPC failure. + /// - timeout_in_ms: `int`. If `0` (default), then the kernel will run the RPC + /// request and only time out if the RPC deadline passes or the session times out. + /// If this value is greater than `0`, then the op will raise an exception if + /// the RPC takes longer than `timeout_in_ms`. + /// + /// - Outputs: + /// - response: Same shape as `request`. Serialized proto strings: the rpc responses. + /// - status_code: Same shape as `request`. Values correspond to tensorflow Status enum codes. + /// - status_message: Same shape as `request`. Values correspond to Status messages + /// returned from the RPC calls. + @inlinable @inline(__always) + public static func tryRpc( + address: StringTensor, + method: StringTensor, + request: StringTensor, + protocol_: String, + failFast: Bool = true, + timeoutInMs: Int64 = 0 + ) -> (response: StringTensor, statusCode: Tensor, statusMessage: StringTensor) { + _RawTFEager.tryRpc( + address: address, method: method, request: request, protocol_: protocol_, + failFast: failFast, timeoutInMs: timeoutInMs) + } + + @inlinable @inline(__always) + public static func twoFloatInputs( + _ a: Tensor, + _ b: Tensor + ) { + _RawTFEager.twoFloatInputs(a, b) + } + + @inlinable @inline(__always) + public static func twoFloatInputsFloatOutput( + _ a: Tensor, + _ b: Tensor + ) -> Tensor { + switch commonBackend(a.handle.backend, b.handle.backend) { + case .XLA: + let output_device = b.device + let a = Tensor(copying: a, to: .defaultTFEager) + let b = Tensor(copying: b, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.twoFloatInputsFloatOutput(a, b), to: output_device) + case .TF_EAGER: + return _RawTFEager.twoFloatInputsFloatOutput(a, b) } - @inlinable @inline(__always) - public static func nPolymorphicRestrictIn( - _ a: [Tensor] - ) { - _RawTFEager.nPolymorphicRestrictIn(a) + } + + @inlinable @inline(__always) + public static func twoFloatInputsIntOutput( + _ a: Tensor, + _ b: Tensor + ) -> Tensor { + switch commonBackend(a.handle.backend, b.handle.backend) { + case .XLA: + let output_device = b.device + let a = Tensor(copying: a, to: .defaultTFEager) + let b = Tensor(copying: b, to: .defaultTFEager) + return Tensor(copying: _RawTFEager.twoFloatInputsIntOutput(a, b), to: output_device) + case .TF_EAGER: + return _RawTFEager.twoFloatInputsIntOutput(a, b) } - @inlinable @inline(__always) - public static func nPolymorphicRestrictIn( - _ a: [StringTensor] - ) { - _RawTFEager.nPolymorphicRestrictIn(a) - } - - @inlinable @inline(__always) - public static func nPolymorphicRestrictOut( - n: Int64 - ) -> [Tensor] { - _RawTFEager.nPolymorphicRestrictOut(n: n) - } - - @inlinable @inline(__always) - public static func nPolymorphicRestrictOut( - n: Int64 - ) -> [StringTensor] { - _RawTFEager.nPolymorphicRestrictOut(n: n) - } - - @inlinable @inline(__always) - public static func namespaceTestStringOutput( 
-      _ input: Tensor<Float>
-    ) -> (output1: Tensor<Float>, output2: StringTensor) {
-      _RawTFEager.namespaceTestStringOutput(input)
-    }
-
-    /// Outputs a tensor containing the reduction across all input tensors.
-    ///
-    /// Outputs a tensor containing the reduction across all input tensors passed to ops
-    /// within the same `shared_name`.
-    ///
-    /// The graph should be constructed so if one op runs with shared_name value `c`,
-    /// then `num_devices` ops will run with shared_name value `c`. Failure to do so
-    /// will cause the graph execution to fail to complete.
-    ///
-    /// input: the input to the reduction
-    /// data: the value of the reduction across all `num_devices` devices.
-    /// reduction: the reduction operation to perform.
-    /// num_devices: The number of devices participating in this reduction.
-    /// shared_name: Identifier that is shared between ops of the same reduction.
-    @inlinable @inline(__always)
-    public static func ncclAllReduce<T: TensorFlowNumeric>(
-      _ input: Tensor<T>,
-      reduction: Reduction,
-      numDevices: Int64,
-      sharedName: String
-    ) -> Tensor<T> {
-      switch input.handle.backend {
-      case .XLA:
-        let output_device = input.device
-        let input = Tensor(copying: input, to: .defaultTFEager)
-        return Tensor(
-          copying: _RawTFEager.ncclAllReduce(
-            input, reduction: reduction, numDevices: numDevices, sharedName: sharedName),
-          to: output_device)
-      case .TF_EAGER:
-        return _RawTFEager.ncclAllReduce(
-          input, reduction: reduction, numDevices: numDevices, sharedName: sharedName)
-      }
-
-    }
-
-    /// Sends `input` to all devices that are connected to the output.
-    ///
-    /// Sends `input` to all devices that are connected to the output.
-    ///
-    /// The graph should be constructed so that all ops connected to the output have a
-    /// valid device assignment, and the op itself is assigned one of these devices.
-    ///
-    /// input: The input to the broadcast.
-    /// output: The same as input.
-    /// shape: The shape of the input tensor.
-    ///
-    @inlinable @inline(__always)
-    public static func ncclBroadcast<T: TensorFlowNumeric>(
-      _ input: Tensor<T>,
-      shape: TensorShape?
-    ) -> Tensor<T> {
-      switch input.handle.backend {
-      case .XLA:
-        let output_device = input.device
-        let input = Tensor(copying: input, to: .defaultTFEager)
-        return Tensor(copying: _RawTFEager.ncclBroadcast(input, shape: shape), to: output_device)
-      case .TF_EAGER:
-        return _RawTFEager.ncclBroadcast(input, shape: shape)
-      }
-
-    }
-
-    /// Reduces `input` from `num_devices` using `reduction` to a single device.
-    ///
-    /// Reduces `input` from `num_devices` using `reduction` to a single device.
-    ///
-    /// The graph should be constructed so that all inputs have a valid device
-    /// assignment, and the op itself is assigned one of these devices.
-    ///
-    /// input: The input to the reduction.
-    /// data: the value of the reduction across all `num_devices` devices.
-    /// reduction: the reduction operation to perform.
-    @inlinable @inline(__always)
-    public static func ncclReduce<T: TensorFlowNumeric>(
-      _ input: [Tensor<T>],
-      reduction: Reduction
-    ) -> Tensor<T> {
-      _RawTFEager.ncclReduce(input, reduction: reduction)
-    }
-
-    @inlinable @inline(__always)
-    public static func ndtri<T: FloatingPoint & TensorFlowScalar>(
-      _ x: Tensor<T>
-    ) -> Tensor<T> {
-      switch x.handle.backend {
-      case .XLA:
-        let output_device = x.device
-        let x = Tensor(copying: x, to: .defaultTFEager)
-        return Tensor(copying: _RawTFEager.ndtri(x), to: output_device)
-      case .TF_EAGER:
-        return _RawTFEager.ndtri(x)
-      }
-
-    }
-
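Editor's note: `ndtri` above is the inverse of the standard normal CDF, so familiar z-scores fall out of round quantiles. A minimal sketch, not part of the patch:

```swift
import TensorFlow

// ndtri(p) returns z such that P(Z <= z) = p for a standard normal Z.
let p = Tensor<Float>([0.025, 0.5, 0.975])
let z = _Raw.ndtri(p)
print(z)  // approximately [-1.96, 0.0, 1.96]
```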
-    /// Selects the k nearest centers for each point.
-    ///
-    /// Rows of points are assumed to be input points. Rows of centers are assumed to be
-    /// the list of candidate centers. For each point, the k centers that have least L2
-    /// distance to it are computed.
-    ///
-    /// - Parameters:
-    ///   - points: Matrix of shape (n, d). Rows are assumed to be input points.
-    ///   - centers: Matrix of shape (m, d). Rows are assumed to be centers.
-    ///   - k: Number of nearest centers to return for each point. If k is larger than m, then
-    ///     only m centers are returned.
-    ///
-    /// - Outputs:
-    ///   - nearest_center_indices: Matrix of shape (n, min(m, k)). Each row contains the indices of the centers
-    ///     closest to the corresponding point, ordered by increasing distance.
-    ///   - nearest_center_distances: Matrix of shape (n, min(m, k)). Each row contains the squared L2 distance to the
-    ///     corresponding center in nearest_center_indices.
-    @inlinable @inline(__always)
-    public static func nearestNeighbors(
-      points: Tensor<Float>,
-      centers: Tensor<Float>,
-      k: Tensor<Int64>
-    ) -> (nearestCenterIndices: Tensor<Int64>, nearestCenterDistances: Tensor<Float>) {
-      _RawTFEager.nearestNeighbors(points: points, centers: centers, k: k)
-    }
-
-    /// Computes numerical negative value element-wise.
-    ///
-    /// I.e., \\(y = -x\\).
-    @inlinable @inline(__always)
-    public static func neg<T: TensorFlowNumeric>(
-      _ x: Tensor<T>
-    ) -> Tensor<T> {
-      switch x.handle.backend {
-      case .XLA:
-        return _RawXLA.neg(x)
-      case .TF_EAGER:
-        return _RawTFEager.neg(x)
-      }
-
-    }
-
-    /// Returns the next representable value of `x1` in the direction of `x2`, element-wise.
-    ///
-    /// This operation returns the same result as the C++ std::nextafter function.
-    ///
-    /// It can also return a subnormal number.
-    ///
-    /// @compatibility(cpp)
-    /// Equivalent to C++ std::nextafter function.
-    /// @end_compatibility
-    @inlinable @inline(__always)
-    public static func nextAfter<T: FloatingPoint & TensorFlowScalar>(
-      x1: Tensor<T>,
-      x2: Tensor<T>
-    ) -> Tensor<T> {
-      switch commonBackend(x1.handle.backend, x2.handle.backend) {
-      case .XLA:
-        let output_device = x2.device
-        let x1 = Tensor(copying: x1, to: .defaultTFEager)
-        let x2 = Tensor(copying: x2, to: .defaultTFEager)
-        return Tensor(copying: _RawTFEager.nextAfter(x1: x1, x2: x2), to: output_device)
-      case .TF_EAGER:
-        return _RawTFEager.nextAfter(x1: x1, x2: x2)
-      }
-
-    }
-
-    /// Makes its input available to the next iteration.
-    ///
-    /// - Parameter data: The tensor to be made available to the next iteration.
-    ///
-    /// - Output output: The same tensor as `data`.
-    @inlinable @inline(__always)
-    public static func nextIteration<T: TensorFlowScalar>(
-      data: Tensor<T>
-    ) -> Tensor<T> {
-      switch data.handle.backend {
-      case .XLA:
-        let output_device = data.device
-        let data = Tensor(copying: data, to: .defaultTFEager)
-        return Tensor(copying: _RawTFEager.nextIteration(data: data), to: output_device)
-      case .TF_EAGER:
-        return _RawTFEager.nextIteration(data: data)
-      }
-
-    }
-
-    /// Does nothing. Only useful as a placeholder for control edges.
-    @inlinable @inline(__always)
-    public static func noOp() {
-      _RawTFEager.noOp()
-    }
-
-    /// Non-deterministically generates some integers.
-    ///
-    /// This op may use some OS-provided source of non-determinism (e.g. an RNG), so each execution will give different results.
-    ///
-    /// - Parameter shape: The shape of the output tensor.
-    ///
-    /// - Attr dtype: The type of the output.
-    ///
-    /// - Output output: Non-deterministic integer values with specified shape.
- @inlinable @inline(__always) - public static func nonDeterministicInts< - Dtype: TensorFlowScalar, - ShapeDtype: TensorFlowScalar - >( - shape: Tensor - ) -> Tensor { - switch shape.handle.backend { - case .XLA: - let output_device = shape.device - let shape = Tensor(copying: shape, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.nonDeterministicInts(shape: shape), to: output_device) - case .TF_EAGER: - return _RawTFEager.nonDeterministicInts(shape: shape) - } - - } - - /// Greedily selects a subset of bounding boxes in descending order of score, - /// - /// pruning away boxes that have high intersection-over-union (IOU) overlap - /// with previously selected boxes. Bounding boxes are supplied as - /// [y1, x1, y2, x2], where (y1, x1) and (y2, x2) are the coordinates of any - /// diagonal pair of box corners and the coordinates can be provided as normalized - /// (i.e., lying in the interval [0, 1]) or absolute. Note that this algorithm - /// is agnostic to where the origin is in the coordinate system. Note that this - /// algorithm is invariant to orthogonal transformations and translations - /// of the coordinate system; thus translating or reflections of the coordinate - /// system result in the same boxes being selected by the algorithm. - /// The output of this operation is a set of integers indexing into the input - /// collection of bounding boxes representing the selected boxes. The bounding - /// box coordinates corresponding to the selected indices can then be obtained - /// using the `tf.gather operation`. For example: - /// selected_indices = tf.image.non_max_suppression( - /// boxes, scores, max_output_size, iou_threshold) - /// selected_boxes = tf.gather(boxes, selected_indices) - /// - /// - Parameters: - /// - boxes: A 2-D float tensor of shape `[num_boxes, 4]`. - /// - scores: A 1-D float tensor of shape `[num_boxes]` representing a single - /// score corresponding to each box (each row of boxes). - /// - max_output_size: A scalar integer tensor representing the maximum number of - /// boxes to be selected by non max suppression. - /// - /// - Attr iou_threshold: A float representing the threshold for deciding whether boxes - /// overlap too much with respect to IOU. - /// - /// - Output selected_indices: A 1-D integer tensor of shape `[M]` representing the selected - /// indices from the boxes tensor, where `M <= max_output_size`. - @inlinable @inline(__always) - public static func nonMaxSuppression( - boxes: Tensor, - scores: Tensor, - maxOutputSize: Tensor, - iouThreshold: Double = 0.5 - ) -> Tensor { - switch commonBackend( - commonBackend(boxes.handle.backend, scores.handle.backend), maxOutputSize.handle.backend) - { - case .XLA: - let output_device = maxOutputSize.device - let boxes = Tensor(copying: boxes, to: .defaultTFEager) - let scores = Tensor(copying: scores, to: .defaultTFEager) - let maxOutputSize = Tensor(copying: maxOutputSize, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.nonMaxSuppression( - boxes: boxes, scores: scores, maxOutputSize: maxOutputSize, iouThreshold: iouThreshold), - to: output_device) - case .TF_EAGER: - return _RawTFEager.nonMaxSuppression( - boxes: boxes, scores: scores, maxOutputSize: maxOutputSize, iouThreshold: iouThreshold) - } - - } - - /// Greedily selects a subset of bounding boxes in descending order of score, - /// - /// pruning away boxes that have high intersection-over-union (IOU) overlap - /// with previously selected boxes. 
Bounding boxes are supplied as - /// [y1, x1, y2, x2], where (y1, x1) and (y2, x2) are the coordinates of any - /// diagonal pair of box corners and the coordinates can be provided as normalized - /// (i.e., lying in the interval [0, 1]) or absolute. Note that this algorithm - /// is agnostic to where the origin is in the coordinate system. Note that this - /// algorithm is invariant to orthogonal transformations and translations - /// of the coordinate system; thus translating or reflections of the coordinate - /// system result in the same boxes being selected by the algorithm. - /// - /// The output of this operation is a set of integers indexing into the input - /// collection of bounding boxes representing the selected boxes. The bounding - /// box coordinates corresponding to the selected indices can then be obtained - /// using the `tf.gather operation`. For example: - /// - /// selected_indices = tf.image.non_max_suppression_v2( - /// boxes, scores, max_output_size, iou_threshold) - /// selected_boxes = tf.gather(boxes, selected_indices) - /// - /// - Parameters: - /// - boxes: A 2-D float tensor of shape `[num_boxes, 4]`. - /// - scores: A 1-D float tensor of shape `[num_boxes]` representing a single - /// score corresponding to each box (each row of boxes). - /// - max_output_size: A scalar integer tensor representing the maximum number of - /// boxes to be selected by non max suppression. - /// - iou_threshold: A 0-D float tensor representing the threshold for deciding whether - /// boxes overlap too much with respect to IOU. - /// - /// - Output selected_indices: A 1-D integer tensor of shape `[M]` representing the selected - /// indices from the boxes tensor, where `M <= max_output_size`. - @inlinable @inline(__always) - public static func nonMaxSuppressionV2< - T: FloatingPoint & TensorFlowScalar, - TThreshold: FloatingPoint & TensorFlowScalar - >( - boxes: Tensor, - scores: Tensor, - maxOutputSize: Tensor, - iouThreshold: Tensor - ) -> Tensor { - switch commonBackend( - commonBackend( - commonBackend(boxes.handle.backend, scores.handle.backend), maxOutputSize.handle.backend), - iouThreshold.handle.backend) - { - case .XLA: - let output_device = iouThreshold.device - let boxes = Tensor(copying: boxes, to: .defaultTFEager) - let scores = Tensor(copying: scores, to: .defaultTFEager) - let maxOutputSize = Tensor(copying: maxOutputSize, to: .defaultTFEager) - let iouThreshold = Tensor(copying: iouThreshold, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.nonMaxSuppressionV2( - boxes: boxes, scores: scores, maxOutputSize: maxOutputSize, iouThreshold: iouThreshold), - to: output_device) - case .TF_EAGER: - return _RawTFEager.nonMaxSuppressionV2( - boxes: boxes, scores: scores, maxOutputSize: maxOutputSize, iouThreshold: iouThreshold) - } - - } - - /// Greedily selects a subset of bounding boxes in descending order of score, - /// - /// pruning away boxes that have high intersection-over-union (IOU) overlap - /// with previously selected boxes. Bounding boxes with score less than - /// `score_threshold` are removed. Bounding boxes are supplied as - /// [y1, x1, y2, x2], where (y1, x1) and (y2, x2) are the coordinates of any - /// diagonal pair of box corners and the coordinates can be provided as normalized - /// (i.e., lying in the interval [0, 1]) or absolute. 
Note that this algorithm - /// is agnostic to where the origin is in the coordinate system and more - /// generally is invariant to orthogonal transformations and translations - /// of the coordinate system; thus translating or reflections of the coordinate - /// system result in the same boxes being selected by the algorithm. - /// The output of this operation is a set of integers indexing into the input - /// collection of bounding boxes representing the selected boxes. The bounding - /// box coordinates corresponding to the selected indices can then be obtained - /// using the `tf.gather operation`. For example: - /// selected_indices = tf.image.non_max_suppression_v2( - /// boxes, scores, max_output_size, iou_threshold, score_threshold) - /// selected_boxes = tf.gather(boxes, selected_indices) - /// - /// - Parameters: - /// - boxes: A 2-D float tensor of shape `[num_boxes, 4]`. - /// - scores: A 1-D float tensor of shape `[num_boxes]` representing a single - /// score corresponding to each box (each row of boxes). - /// - max_output_size: A scalar integer tensor representing the maximum number of - /// boxes to be selected by non max suppression. - /// - iou_threshold: A 0-D float tensor representing the threshold for deciding whether - /// boxes overlap too much with respect to IOU. - /// - score_threshold: A 0-D float tensor representing the threshold for deciding when to remove - /// boxes based on score. - /// - /// - Output selected_indices: A 1-D integer tensor of shape `[M]` representing the selected - /// indices from the boxes tensor, where `M <= max_output_size`. - @inlinable @inline(__always) - public static func nonMaxSuppressionV3< - T: FloatingPoint & TensorFlowScalar, - TThreshold: FloatingPoint & TensorFlowScalar - >( - boxes: Tensor, - scores: Tensor, - maxOutputSize: Tensor, - iouThreshold: Tensor, - scoreThreshold: Tensor - ) -> Tensor { - switch commonBackend( - commonBackend( - commonBackend( - commonBackend(boxes.handle.backend, scores.handle.backend), maxOutputSize.handle.backend - ), iouThreshold.handle.backend), scoreThreshold.handle.backend) - { - case .XLA: - let output_device = scoreThreshold.device - let boxes = Tensor(copying: boxes, to: .defaultTFEager) - let scores = Tensor(copying: scores, to: .defaultTFEager) - let maxOutputSize = Tensor(copying: maxOutputSize, to: .defaultTFEager) - let iouThreshold = Tensor(copying: iouThreshold, to: .defaultTFEager) - let scoreThreshold = Tensor(copying: scoreThreshold, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.nonMaxSuppressionV3( - boxes: boxes, scores: scores, maxOutputSize: maxOutputSize, iouThreshold: iouThreshold, - scoreThreshold: scoreThreshold), to: output_device) - case .TF_EAGER: - return _RawTFEager.nonMaxSuppressionV3( - boxes: boxes, scores: scores, maxOutputSize: maxOutputSize, iouThreshold: iouThreshold, - scoreThreshold: scoreThreshold) - } - - } - - /// Greedily selects a subset of bounding boxes in descending order of score, - /// - /// pruning away boxes that have high intersection-over-union (IOU) overlap - /// with previously selected boxes. Bounding boxes with score less than - /// `score_threshold` are removed. Bounding boxes are supplied as - /// [y1, x1, y2, x2], where (y1, x1) and (y2, x2) are the coordinates of any - /// diagonal pair of box corners and the coordinates can be provided as normalized - /// (i.e., lying in the interval [0, 1]) or absolute. 
Note that this algorithm - /// is agnostic to where the origin is in the coordinate system and more - /// generally is invariant to orthogonal transformations and translations - /// of the coordinate system; thus translating or reflections of the coordinate - /// system result in the same boxes being selected by the algorithm. - /// The output of this operation is a set of integers indexing into the input - /// collection of bounding boxes representing the selected boxes. The bounding - /// box coordinates corresponding to the selected indices can then be obtained - /// using the `tf.gather operation`. For example: - /// selected_indices = tf.image.non_max_suppression_v2( - /// boxes, scores, max_output_size, iou_threshold, score_threshold) - /// selected_boxes = tf.gather(boxes, selected_indices) - /// - /// - Parameters: - /// - boxes: A 2-D float tensor of shape `[num_boxes, 4]`. - /// - scores: A 1-D float tensor of shape `[num_boxes]` representing a single - /// score corresponding to each box (each row of boxes). - /// - max_output_size: A scalar integer tensor representing the maximum number of - /// boxes to be selected by non max suppression. - /// - iou_threshold: A 0-D float tensor representing the threshold for deciding whether - /// boxes overlap too much with respect to IOU. - /// - score_threshold: A 0-D float tensor representing the threshold for deciding when to remove - /// boxes based on score. - /// - /// - Attr pad_to_max_output_size: If true, the output `selected_indices` is padded to be of length - /// `max_output_size`. Defaults to false. - /// - /// - Outputs: - /// - selected_indices: A 1-D integer tensor of shape `[M]` representing the selected - /// indices from the boxes tensor, where `M <= max_output_size`. - /// - valid_outputs: A 0-D integer tensor representing the number of valid elements in - /// `selected_indices`, with the valid elements appearing first. - @inlinable @inline(__always) - public static func nonMaxSuppressionV4< - T: FloatingPoint & TensorFlowScalar, - TThreshold: FloatingPoint & TensorFlowScalar - >( - boxes: Tensor, - scores: Tensor, - maxOutputSize: Tensor, - iouThreshold: Tensor, - scoreThreshold: Tensor, - padToMaxOutputSize: Bool = false - ) -> (selectedIndices: Tensor, validOutputs: Tensor) { - _RawTFEager.nonMaxSuppressionV4( - boxes: boxes, scores: scores, maxOutputSize: maxOutputSize, iouThreshold: iouThreshold, - scoreThreshold: scoreThreshold, padToMaxOutputSize: padToMaxOutputSize) - } - - /// Greedily selects a subset of bounding boxes in descending order of score, - /// - /// pruning away boxes that have high intersection-over-union (IOU) overlap - /// with previously selected boxes. Bounding boxes with score less than - /// `score_threshold` are removed. Bounding boxes are supplied as - /// [y1, x1, y2, x2], where (y1, x1) and (y2, x2) are the coordinates of any - /// diagonal pair of box corners and the coordinates can be provided as normalized - /// (i.e., lying in the interval [0, 1]) or absolute. Note that this algorithm - /// is agnostic to where the origin is in the coordinate system and more - /// generally is invariant to orthogonal transformations and translations - /// of the coordinate system; thus translating or reflections of the coordinate - /// system result in the same boxes being selected by the algorithm. - /// The output of this operation is a set of integers indexing into the input - /// collection of bounding boxes representing the selected boxes. 
The bounding - /// box coordinates corresponding to the selected indices can then be obtained - /// using the `tf.gather operation`. For example: - /// selected_indices = tf.image.non_max_suppression_v2( - /// boxes, scores, max_output_size, iou_threshold, score_threshold) - /// selected_boxes = tf.gather(boxes, selected_indices) - /// This op also supports a Soft-NMS (with Gaussian weighting) mode (c.f. - /// Bodla et al, https://arxiv.org/abs/1704.04503) where boxes reduce the score - /// of other overlapping boxes instead of directly causing them to be pruned. - /// To enable this Soft-NMS mode, set the `soft_nms_sigma` parameter to be - /// larger than 0. - /// - /// - Parameters: - /// - boxes: A 2-D float tensor of shape `[num_boxes, 4]`. - /// - scores: A 1-D float tensor of shape `[num_boxes]` representing a single - /// score corresponding to each box (each row of boxes). - /// - max_output_size: A scalar integer tensor representing the maximum number of - /// boxes to be selected by non max suppression. - /// - iou_threshold: A 0-D float tensor representing the threshold for deciding whether - /// boxes overlap too much with respect to IOU. - /// - score_threshold: A 0-D float tensor representing the threshold for deciding when to remove - /// boxes based on score. - /// - soft_nms_sigma: A 0-D float tensor representing the sigma parameter for Soft NMS; see Bodla et - /// al (c.f. https://arxiv.org/abs/1704.04503). When `soft_nms_sigma=0.0` (which - /// is default), we fall back to standard (hard) NMS. - /// - /// - Attr pad_to_max_output_size: If true, the output `selected_indices` is padded to be of length - /// `max_output_size`. Defaults to false. - /// - /// - Outputs: - /// - selected_indices: A 1-D integer tensor of shape `[M]` representing the selected - /// indices from the boxes tensor, where `M <= max_output_size`. - /// - selected_scores: A 1-D float tensor of shape `[M]` representing the corresponding - /// scores for each selected box, where `M <= max_output_size`. Scores only differ - /// from corresponding input scores when using Soft NMS (i.e. when - /// `soft_nms_sigma>0`) - /// - valid_outputs: A 0-D integer tensor representing the number of valid elements in - /// `selected_indices`, with the valid elements appearing first. - @inlinable @inline(__always) - public static func nonMaxSuppressionV5( - boxes: Tensor, - scores: Tensor, - maxOutputSize: Tensor, - iouThreshold: Tensor, - scoreThreshold: Tensor, - softNmsSigma: Tensor, - padToMaxOutputSize: Bool = false - ) -> (selectedIndices: Tensor, selectedScores: Tensor, validOutputs: Tensor) { - _RawTFEager.nonMaxSuppressionV5( - boxes: boxes, scores: scores, maxOutputSize: maxOutputSize, iouThreshold: iouThreshold, - scoreThreshold: scoreThreshold, softNmsSigma: softNmsSigma, - padToMaxOutputSize: padToMaxOutputSize) - } - - /// Greedily selects a subset of bounding boxes in descending order of score, - /// - /// pruning away boxes that have high overlaps - /// with previously selected boxes. Bounding boxes with score less than - /// `score_threshold` are removed. N-by-n overlap values are supplied as square matrix, - /// which allows for defining a custom overlap criterium (eg. intersection over union, - /// intersection over area, etc.). - /// - /// The output of this operation is a set of integers indexing into the input - /// collection of bounding boxes representing the selected boxes. 
The bounding - /// box coordinates corresponding to the selected indices can then be obtained - /// using the `tf.gather operation`. For example: - /// - /// selected_indices = tf.image.non_max_suppression_with_overlaps( - /// overlaps, scores, max_output_size, overlap_threshold, score_threshold) - /// selected_boxes = tf.gather(boxes, selected_indices) - /// - /// - Parameters: - /// - overlaps: A 2-D float tensor of shape `[num_boxes, num_boxes]` representing - /// the n-by-n box overlap values. - /// - scores: A 1-D float tensor of shape `[num_boxes]` representing a single - /// score corresponding to each box (each row of boxes). - /// - max_output_size: A scalar integer tensor representing the maximum number of - /// boxes to be selected by non max suppression. - /// - overlap_threshold: A 0-D float tensor representing the threshold for deciding whether - /// boxes overlap too. - /// - score_threshold: A 0-D float tensor representing the threshold for deciding when to remove - /// boxes based on score. - /// - /// - Output selected_indices: A 1-D integer tensor of shape `[M]` representing the selected - /// indices from the boxes tensor, where `M <= max_output_size`. - @inlinable @inline(__always) - public static func nonMaxSuppressionWithOverlaps( - overlaps: Tensor, - scores: Tensor, - maxOutputSize: Tensor, - overlapThreshold: Tensor, - scoreThreshold: Tensor - ) -> Tensor { - switch commonBackend( - commonBackend( - commonBackend( - commonBackend(overlaps.handle.backend, scores.handle.backend), - maxOutputSize.handle.backend), overlapThreshold.handle.backend), - scoreThreshold.handle.backend) - { - case .XLA: - let output_device = scoreThreshold.device - let overlaps = Tensor(copying: overlaps, to: .defaultTFEager) - let scores = Tensor(copying: scores, to: .defaultTFEager) - let maxOutputSize = Tensor(copying: maxOutputSize, to: .defaultTFEager) - let overlapThreshold = Tensor(copying: overlapThreshold, to: .defaultTFEager) - let scoreThreshold = Tensor(copying: scoreThreshold, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.nonMaxSuppressionWithOverlaps( - overlaps: overlaps, scores: scores, maxOutputSize: maxOutputSize, - overlapThreshold: overlapThreshold, scoreThreshold: scoreThreshold), to: output_device) - case .TF_EAGER: - return _RawTFEager.nonMaxSuppressionWithOverlaps( - overlaps: overlaps, scores: scores, maxOutputSize: maxOutputSize, - overlapThreshold: overlapThreshold, scoreThreshold: scoreThreshold) - } - - } - - @inlinable @inline(__always) - public static func nonSerializableDataset( - inputDataset: VariantHandle, - outputTypes: [TensorDataType], - outputShapes: [TensorShape?] - ) -> VariantHandle { - _RawTFEager.nonSerializableDataset( - inputDataset: inputDataset, outputTypes: outputTypes, outputShapes: outputShapes) - } - - @inlinable @inline(__always) - public static func none() { - _RawTFEager.none() - } - - /// Returns the truth value of (x != y) element-wise. - /// - /// *NOTE*: `NotEqual` supports broadcasting. 
More about broadcasting - /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) - @inlinable @inline(__always) - public static func notEqual( - _ x: Tensor, - _ y: Tensor, - incompatibleShapeError: Bool = true - ) -> Tensor { - switch commonBackend(x.handle.backend, y.handle.backend) { - case .XLA: - return _RawXLA.notEqual(x, y, incompatibleShapeError: incompatibleShapeError) - case .TF_EAGER: - return _RawTFEager.notEqual(x, y, incompatibleShapeError: incompatibleShapeError) - } - - } - - /// Returns the truth value of (x != y) element-wise. - /// - /// *NOTE*: `NotEqual` supports broadcasting. More about broadcasting - /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) - @inlinable @inline(__always) - public static func notEqual( - _ x: StringTensor, - _ y: StringTensor, - incompatibleShapeError: Bool = true - ) -> Tensor { - _RawTFEager.notEqual(x, y, incompatibleShapeError: incompatibleShapeError) - } - - /// Finds values of the `n`-th order statistic for the last dimension. - /// - /// If the input is a vector (rank-1), finds the entries which is the nth-smallest - /// value in the vector and outputs their values as scalar tensor. - /// - /// For matrices (resp. higher rank input), computes the entries which is the - /// nth-smallest value in each row (resp. vector along the last dimension). Thus, - /// - /// values.shape = input.shape[:-1] - /// - /// - Parameters: - /// - input: 1-D or higher with last dimension at least `n+1`. - /// - n: 0-D. Position of sorted vector to select along the last dimension (along - /// each row for matrices). Valid range of n is `[0, input.shape[:-1])` - /// - /// - Attr reverse: When set to True, find the nth-largest value in the vector and vice - /// versa. - /// - /// - Output values: The `n`-th order statistic along each last dimensional slice. - @inlinable @inline(__always) - public static func nthElement( - _ input: Tensor, - n: Tensor, - reverse: Bool = false - ) -> Tensor { - switch commonBackend(input.handle.backend, n.handle.backend) { - case .XLA: - let output_device = n.device - let input = Tensor(copying: input, to: .defaultTFEager) - let n = Tensor(copying: n, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.nthElement(input, n: n, reverse: reverse), to: output_device) - case .TF_EAGER: - return _RawTFEager.nthElement(input, n: n, reverse: reverse) - } - - } - - @inlinable @inline(__always) - public static func old() { - _RawTFEager.old() - } - - /// Returns a one-hot tensor. - /// - /// The locations represented by indices in `indices` take value `on_value`, - /// while all other locations take value `off_value`. - /// - /// If the input `indices` is rank `N`, the output will have rank `N+1`, - /// The new axis is created at dimension `axis` (default: the new axis is - /// appended at the end). - /// - /// If `indices` is a scalar the output shape will be a vector of length `depth`. 
- /// - /// If `indices` is a vector of length `features`, the output shape will be: - /// ``` - /// features x depth if axis == -1 - /// depth x features if axis == 0 - /// ``` - /// - /// If `indices` is a matrix (batch) with shape `[batch, features]`, - /// the output shape will be: - /// ``` - /// batch x features x depth if axis == -1 - /// batch x depth x features if axis == 1 - /// depth x batch x features if axis == 0 - /// ``` - /// - /// - /// Examples - /// ========= - /// - /// Suppose that - /// ``` - /// indices = [0, 2, -1, 1] - /// depth = 3 - /// on_value = 5.0 - /// off_value = 0.0 - /// axis = -1 - /// ``` - /// - /// Then output is `[4 x 3]`: - /// ``` - /// output = - /// [5.0 0.0 0.0] // one_hot(0) - /// [0.0 0.0 5.0] // one_hot(2) - /// [0.0 0.0 0.0] // one_hot(-1) - /// [0.0 5.0 0.0] // one_hot(1) - /// ``` - /// - /// Suppose that - /// ``` - /// indices = [0, 2, -1, 1] - /// depth = 3 - /// on_value = 0.0 - /// off_value = 3.0 - /// axis = 0 - /// ``` - /// - /// Then output is `[3 x 4]`: - /// ``` - /// output = - /// [0.0 3.0 3.0 3.0] - /// [3.0 3.0 3.0 0.0] - /// [3.0 3.0 3.0 3.0] - /// [3.0 0.0 3.0 3.0] - /// // ^ one_hot(0) - /// // ^ one_hot(2) - /// // ^ one_hot(-1) - /// // ^ one_hot(1) - /// ``` - /// - /// Suppose that - /// ``` - /// indices = [[0, 2], [1, -1]] - /// depth = 3 - /// on_value = 1.0 - /// off_value = 0.0 - /// axis = -1 - /// ``` - /// - /// Then output is `[2 x 2 x 3]`: - /// ``` - /// output = - /// [ - /// [1.0, 0.0, 0.0] // one_hot(0) - /// [0.0, 0.0, 1.0] // one_hot(2) - /// ][ - /// [0.0, 1.0, 0.0] // one_hot(1) - /// [0.0, 0.0, 0.0] // one_hot(-1) - /// ] - /// ``` - /// - /// - Parameters: - /// - indices: A tensor of indices. - /// - depth: A scalar defining the depth of the one hot dimension. - /// - on_value: A scalar defining the value to fill in output when `indices[j] = i`. - /// - off_value: A scalar defining the value to fill in output when `indices[j] != i`. - /// - /// - Attr axis: The axis to fill (default: -1, a new inner-most axis). - /// - /// - Output output: The one-hot tensor. - @inlinable @inline(__always) - public static func oneHot< - T: TensorFlowScalar, - Ti: TensorFlowInteger - >( - indices: Tensor, - depth: Tensor, - onValue: Tensor, - offValue: Tensor, - axis: Int64 = -1 - ) -> Tensor { - switch commonBackend( - commonBackend( - commonBackend(indices.handle.backend, depth.handle.backend), onValue.handle.backend), - offValue.handle.backend) - { - case .XLA: - return _RawXLA.oneHot( - indices: indices, depth: depth, onValue: onValue, offValue: offValue, axis: axis) - case .TF_EAGER: - return _RawTFEager.oneHot( - indices: indices, depth: depth, onValue: onValue, offValue: offValue, axis: axis) - } - - } - - /// Makes a "one-shot" iterator that can be iterated only once. - /// - /// A one-shot iterator bundles the logic for defining the dataset and - /// the state of the iterator in a single op, which allows simple input - /// pipelines to be defined without an additional initialization - /// ("MakeIterator") step. - /// - /// One-shot iterators have the following limitations: - /// - /// * They do not support parameterization: all logic for creating the underlying - /// dataset must be bundled in the `dataset_factory` function. - /// * They are not resettable. Once a one-shot iterator reaches the end of its - /// underlying dataset, subsequent "IteratorGetNext" operations on that - /// iterator will always produce an `OutOfRange` error. 
- /// - /// For greater flexibility, use "Iterator" and "MakeIterator" to define - /// an iterator using an arbitrary subgraph, which may capture tensors - /// (including fed values) as parameters, and which may be reset multiple - /// times by rerunning "MakeIterator". - /// - /// - Attr dataset_factory: A function of type `() -> DT_VARIANT`, where the returned - /// DT_VARIANT is a dataset. - /// - /// - Output handle: A handle to the iterator that can be passed to an "IteratorGetNext" - /// op. - @inlinable @inline(__always) - public static func oneShotIterator< - DatasetfactoryIn: TensorGroup, - DatasetfactoryOut: TensorGroup - >( - datasetFactory: (DatasetfactoryIn) -> DatasetfactoryOut, - outputTypes: [TensorDataType], - outputShapes: [TensorShape?], - container: String, - sharedName: String - ) -> ResourceHandle { - _RawTFEager.oneShotIterator( - datasetFactory: datasetFactory, outputTypes: outputTypes, outputShapes: outputShapes, + } + + @inlinable @inline(__always) + public static func twoFloatOutputs() -> (a: Tensor, b: Tensor) { + _RawTFEager.twoFloatOutputs() + } + + @inlinable @inline(__always) + public static func twoIntInputs( + _ a: Tensor, + _ b: Tensor + ) { + _RawTFEager.twoIntInputs(a, b) + } + + @inlinable @inline(__always) + public static func twoIntOutputs() -> (a: Tensor, b: Tensor) { + _RawTFEager.twoIntOutputs() + } + + @inlinable @inline(__always) + public static func typeList( + _ a: T + ) { + _RawTFEager.typeList(a) + } + + @inlinable @inline(__always) + public static func typeListRestrict( + _ a: T + ) { + _RawTFEager.typeListRestrict(a) + } + + @inlinable @inline(__always) + public static func typeListTwice( + _ a: T, + _ b: T + ) { + _RawTFEager.typeListTwice(a, b) + } + + @inlinable @inline(__always) + public static func unary( + _ a: Tensor + ) -> Tensor { + switch a.handle.backend { + case .XLA: + let output_device = a.device + let a = Tensor(copying: a, to: .defaultTFEager) + return Tensor(copying: _RawTFEager.unary(a), to: output_device) + case .TF_EAGER: + return _RawTFEager.unary(a) + } + + } + + /// Reverses the operation of Batch for a single output Tensor. + /// + /// An instance of Unbatch either receives an empty batched_tensor, in which case it + /// asynchronously waits until the values become available from a concurrently + /// running instance of Unbatch with the same container and shared_name, or receives + /// a non-empty batched_tensor in which case it finalizes all other concurrently + /// running instances and outputs its own element from the batch. + /// + /// batched_tensor: The possibly transformed output of Batch. The size of the first + /// dimension should remain unchanged by the transformations for the operation to + /// work. + /// batch_index: The matching batch_index obtained from Batch. + /// id: The id scalar emitted by Batch. + /// unbatched_tensor: The Tensor corresponding to this execution. + /// timeout_micros: Maximum amount of time (in microseconds) to wait to receive the + /// batched input tensor associated with a given invocation of the op. + /// container: Container to control resource sharing. + /// shared_name: Instances of Unbatch with the same container and shared_name are + /// assumed to possibly belong to the same batch. If left empty, the op name will + /// be used as the shared name. 
+ @inlinable @inline(__always) + public static func unbatch( + batchedTensor: Tensor, + batchIndex: Tensor, + id: Tensor, + timeoutMicros: Int64, + container: String, + sharedName: String + ) -> Tensor { + switch commonBackend( + commonBackend(batchedTensor.handle.backend, batchIndex.handle.backend), id.handle.backend) + { + case .XLA: + let output_device = id.device + let batchedTensor = Tensor(copying: batchedTensor, to: .defaultTFEager) + let batchIndex = Tensor(copying: batchIndex, to: .defaultTFEager) + let id = Tensor(copying: id, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.unbatch( + batchedTensor: batchedTensor, batchIndex: batchIndex, id: id, + timeoutMicros: timeoutMicros, container: container, sharedName: sharedName), + to: output_device) + case .TF_EAGER: + return _RawTFEager.unbatch( + batchedTensor: batchedTensor, batchIndex: batchIndex, id: id, + timeoutMicros: timeoutMicros, container: container, sharedName: sharedName) + } + + } + + /// A dataset that splits the elements of its input into multiple elements. + @inlinable @inline(__always) + public static func unbatchDataset( + inputDataset: VariantHandle, + outputTypes: [TensorDataType], + outputShapes: [TensorShape?] + ) -> VariantHandle { + _RawTFEager.unbatchDataset( + inputDataset: inputDataset, outputTypes: outputTypes, outputShapes: outputShapes) + } + + /// Gradient of Unbatch. + /// + /// Acts like Batch but using the given batch_index index of batching things as they + /// become available. This ensures that the gradients are propagated back in the + /// same session which did the forward pass. + /// + /// original_input: The input to the Unbatch operation this is the gradient of. + /// batch_index: The batch_index given to the Unbatch operation this is the gradient + /// of. + /// grad: The downstream gradient. + /// id: The id scalar emitted by Batch. + /// batched_grad: The return value, either an empty tensor or the batched gradient. + /// container: Container to control resource sharing. + /// shared_name: Instances of UnbatchGrad with the same container and shared_name + /// are assumed to possibly belong to the same batch. If left empty, the op name + /// will be used as the shared name. + @inlinable @inline(__always) + public static func unbatchGrad( + originalInput: Tensor, + batchIndex: Tensor, + grad: Tensor, + id: Tensor, + container: String, + sharedName: String + ) -> Tensor { + switch commonBackend( + commonBackend( + commonBackend(originalInput.handle.backend, batchIndex.handle.backend), + grad.handle.backend), id.handle.backend) + { + case .XLA: + let output_device = id.device + let originalInput = Tensor(copying: originalInput, to: .defaultTFEager) + let batchIndex = Tensor(copying: batchIndex, to: .defaultTFEager) + let grad = Tensor(copying: grad, to: .defaultTFEager) + let id = Tensor(copying: id, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.unbatchGrad( + originalInput: originalInput, batchIndex: batchIndex, grad: grad, id: id, + container: container, sharedName: sharedName), to: output_device) + case .TF_EAGER: + return _RawTFEager.unbatchGrad( + originalInput: originalInput, batchIndex: batchIndex, grad: grad, id: id, container: container, sharedName: sharedName) } - /// Returns a tensor of ones with the same shape and type as x. - /// - /// - Parameter x: a tensor of type T. - /// - /// - Output y: a tensor of the same shape and type as x but filled with ones. 
- @inlinable @inline(__always) - public static func onesLike( - _ x: Tensor - ) -> Tensor { - switch x.handle.backend { - case .XLA: - return _RawXLA.onesLike(x) - case .TF_EAGER: - return _RawTFEager.onesLike(x) - } - - } - - @inlinable @inline(__always) - public static func opWithDefaultAttr( - defaultFloat: Double = 123 - ) -> Tensor { - _RawTFEager.opWithDefaultAttr(defaultFloat: defaultFloat) - } - - @inlinable @inline(__always) - public static func opWithFutureDefaultAttr() { - _RawTFEager.opWithFutureDefaultAttr() - } - - /// Creates a dataset by applying optimizations to `input_dataset`. - /// - /// Creates a dataset by applying optimizations to `input_dataset`. - /// - /// - Parameters: - /// - input_dataset: A variant tensor representing the input dataset. - /// - optimizations: A `tf.string` vector `tf.Tensor` identifying optimizations to use. - @inlinable @inline(__always) - public static func optimizeDataset( - inputDataset: VariantHandle, - optimizations: StringTensor, - outputTypes: [TensorDataType], - outputShapes: [TensorShape?], - optimizationConfigs: [String] - ) -> VariantHandle { - _RawTFEager.optimizeDataset( - inputDataset: inputDataset, optimizations: optimizations, outputTypes: outputTypes, - outputShapes: outputShapes, optimizationConfigs: optimizationConfigs) - } - - /// Constructs an Optional variant from a tuple of tensors. - @inlinable @inline(__always) - public static func optionalFromValue( - components: ToutputTypes - ) -> VariantHandle { - _RawTFEager.optionalFromValue(components: components) - } - - /// Returns the value stored in an Optional variant or raises an error if none exists. - @inlinable @inline(__always) - public static func optionalGetValue( - optional: VariantHandle, - outputShapes: [TensorShape?] - ) -> OutputTypes { - _RawTFEager.optionalGetValue(optional: optional, outputShapes: outputShapes) - } - - /// Returns true if and only if the given Optional variant has a value. - @inlinable @inline(__always) - public static func optionalHasValue( - optional: VariantHandle - ) -> Tensor { - _RawTFEager.optionalHasValue(optional: optional) - } - - /// Creates an Optional variant with no value. - @inlinable @inline(__always) - public static func optionalNone() -> VariantHandle { - _RawTFEager.optionalNone() - } - - /// Op removes all elements in the underlying container. - @inlinable @inline(__always) - public static func orderedMapClear( - capacity: Int64 = 0, - memoryLimit: Int64 = 0, - dtypes: [TensorDataType], - container: String, - sharedName: String - ) { - _RawTFEager.orderedMapClear( - capacity: capacity, memoryLimit: memoryLimit, dtypes: dtypes, container: container, - sharedName: sharedName) - } - - /// Op returns the number of incomplete elements in the underlying container. - @inlinable @inline(__always) - public static func orderedMapIncompleteSize( - capacity: Int64 = 0, - memoryLimit: Int64 = 0, - dtypes: [TensorDataType], - container: String, - sharedName: String - ) -> Tensor { - _RawTFEager.orderedMapIncompleteSize( - capacity: capacity, memoryLimit: memoryLimit, dtypes: dtypes, container: container, - sharedName: sharedName) - } - - /// Op peeks at the values at the specified key. If the - /// - /// underlying container does not contain this key - /// this op will block until it does. This Op is optimized for - /// performance. 
- @inlinable @inline(__always)
- public static func orderedMapPeek(
- key: Tensor,
- indices: Tensor,
- capacity: Int64 = 0,
- memoryLimit: Int64 = 0,
- container: String,
- sharedName: String
- ) -> Dtypes {
- _RawTFEager.orderedMapPeek(
- key: key, indices: indices, capacity: capacity, memoryLimit: memoryLimit,
- container: container, sharedName: sharedName)
+ }
+
+ /// Decodes each string in `input` into a sequence of Unicode code points.
+ ///
+ /// The character codepoints for all strings are returned using a single vector
+ /// `char_values`, with strings expanded to characters in row-major order.
+ ///
+ /// The `row_splits` tensor indicates where the codepoints for
+ /// each input string begin and end within the `char_values` tensor.
+ /// In particular, the values for the `i`th
+ /// string (in row-major order) are stored in the slice
+ /// `[row_splits[i]:row_splits[i+1]]`. Thus:
+ ///
+ /// * `char_values[row_splits[i]+j]` is the Unicode codepoint for the `j`th
+ /// character in the `i`th string (in row-major order).
+ /// * `row_splits[i+1] - row_splits[i]` is the number of characters in the `i`th
+ /// string (in row-major order).
+ ///
+ /// - Parameter input: The text to be decoded. Can have any shape. Note that the output is flattened
+ /// to a vector of char values.
+ ///
+ /// - Attrs:
+ /// - input_encoding: Text encoding of the input strings. This is any of the encodings supported
+ /// by ICU ucnv algorithmic converters. Examples: `"UTF-16", "US ASCII", "UTF-8"`.
+ /// - errors: Error handling policy when there is invalid formatting found in the input.
+ /// The value of 'strict' will cause the operation to produce an `InvalidArgument`
+ /// error on any invalid input formatting. A value of 'replace' (the default) will
+ /// cause the operation to replace any invalid formatting in the input with the
+ /// `replacement_char` codepoint. A value of 'ignore' will cause the operation to
+ /// skip any invalid formatting in the input and produce no corresponding output
+ /// character.
+ /// - replacement_char: The replacement character codepoint to be used in place of any invalid
+ /// formatting in the input when `errors='replace'`. Any valid unicode codepoint may
+ /// be used. The default value is the default Unicode replacement character,
+ /// 0xFFFD (decimal 65533).
+ /// - replace_control_characters: Whether to replace the C0 control characters (00-1F) with the
+ /// `replacement_char`. Default is false.
+ ///
+ /// - Outputs:
+ /// - row_splits: A 1D int32 tensor containing the row splits.
+ /// - char_values: A 1D int32 Tensor containing the decoded codepoints.
+ @inlinable @inline(__always)
+ public static func unicodeDecode(
+ _ input: StringTensor,
+ inputEncoding: String,
+ errors: Errors = .replace,
+ replacementChar: Int64 = 65533,
+ replaceControlCharacters: Bool = false
+ ) -> (rowSplits: Tensor, charValues: Tensor) {
+ _RawTFEager.unicodeDecode(
+ input, inputEncoding: inputEncoding, errors: errors, replacementChar: replacementChar,
+ replaceControlCharacters: replaceControlCharacters)
+ }
+
+ /// Decodes each string in `input` into a sequence of Unicode code points.
+ ///
+ /// The character codepoints for all strings are returned using a single vector
+ /// `char_values`, with strings expanded to characters in row-major order.
+ /// Similarly, the character start byte offsets are returned using a single vector
+ /// `char_to_byte_starts`, with strings expanded in row-major order.
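The `row_splits` layout described above is easy to state outside the op layer. Below is a minimal plain-Swift sketch of the same contract (standard library only; the helper name is invented, and the input is already-decoded `String`s rather than an encoded byte tensor, so the `errors`/`replacement_char` machinery does not come into play):

```swift
// Hypothetical helper illustrating unicodeDecode's output layout:
// codepoints flattened in row-major order, plus boundary offsets.
func decodeToCodepoints(_ strings: [String]) -> (rowSplits: [Int32], charValues: [Int32]) {
  var rowSplits: [Int32] = [0]
  var charValues: [Int32] = []
  for s in strings {
    charValues.append(contentsOf: s.unicodeScalars.map { Int32($0.value) })
    rowSplits.append(Int32(charValues.count))  // end offset of string i
  }
  return (rowSplits, charValues)
}

let (splits, values) = decodeToCodepoints(["Hi", "!"])
// splits == [0, 2, 3]; values == [72, 105, 33]
// The codepoints of string i live in values[Int(splits[i])..<Int(splits[i + 1])].
```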
+ ///
+ /// The `row_splits` tensor indicates where the codepoints and start offsets for
+ /// each input string begin and end within the `char_values` and
+ /// `char_to_byte_starts` tensors. In particular, the values for the `i`th
+ /// string (in row-major order) are stored in the slice
+ /// `[row_splits[i]:row_splits[i+1]]`. Thus:
+ ///
+ /// * `char_values[row_splits[i]+j]` is the Unicode codepoint for the `j`th
+ /// character in the `i`th string (in row-major order).
+ /// * `char_to_byte_starts[row_splits[i]+j]` is the start byte offset for the `j`th
+ /// character in the `i`th string (in row-major order).
+ /// * `row_splits[i+1] - row_splits[i]` is the number of characters in the `i`th
+ /// string (in row-major order).
+ ///
+ /// - Parameter input: The text to be decoded. Can have any shape. Note that the output is flattened
+ /// to a vector of char values.
+ ///
+ /// - Attrs:
+ /// - input_encoding: Text encoding of the input strings. This is any of the encodings supported
+ /// by ICU ucnv algorithmic converters. Examples: `"UTF-16", "US ASCII", "UTF-8"`.
+ /// - errors: Error handling policy when there is invalid formatting found in the input.
+ /// The value of 'strict' will cause the operation to produce an `InvalidArgument`
+ /// error on any invalid input formatting. A value of 'replace' (the default) will
+ /// cause the operation to replace any invalid formatting in the input with the
+ /// `replacement_char` codepoint. A value of 'ignore' will cause the operation to
+ /// skip any invalid formatting in the input and produce no corresponding output
+ /// character.
+ /// - replacement_char: The replacement character codepoint to be used in place of any invalid
+ /// formatting in the input when `errors='replace'`. Any valid unicode codepoint may
+ /// be used. The default value is the default Unicode replacement character,
+ /// 0xFFFD (decimal 65533).
+ /// - replace_control_characters: Whether to replace the C0 control characters (00-1F) with the
+ /// `replacement_char`. Default is false.
+ ///
+ /// - Outputs:
+ /// - row_splits: A 1D int32 tensor containing the row splits.
+ /// - char_values: A 1D int32 Tensor containing the decoded codepoints.
+ /// - char_to_byte_starts: A 1D int32 Tensor containing the byte index in the input string where each
+ /// character in `char_values` starts.
+ @inlinable @inline(__always)
+ public static func unicodeDecodeWithOffsets(
+ _ input: StringTensor,
+ inputEncoding: String,
+ errors: Errors = .replace,
+ replacementChar: Int64 = 65533,
+ replaceControlCharacters: Bool = false
+ ) -> (rowSplits: Tensor, charValues: Tensor, charToByteStarts: Tensor) {
+ _RawTFEager.unicodeDecodeWithOffsets(
+ input, inputEncoding: inputEncoding, errors: errors, replacementChar: replacementChar,
+ replaceControlCharacters: replaceControlCharacters)
+ }
+
+ /// Encode a tensor of ints into unicode strings.
+ ///
+ /// Returns a vector of strings, where `output[i]` is constructed by encoding the
+ /// Unicode codepoints in `input_values[input_splits[i]:input_splits[i+1]]`
+ /// using `output_encoding`.
+ ///
+ /// ---
+ ///
+ /// Example:
+ ///
+ /// ```
+ /// input_values = [72, 101, 108, 108, 111, 87, 111, 114, 108, 100]
+ /// input_splits = [0, 5, 10]
+ /// output_encoding = 'UTF-8'
+ ///
+ /// output = ['Hello', 'World']
+ /// ```
+ ///
+ /// - Parameters:
+ /// - input_values: A 1D tensor containing the unicode codepoints that should be encoded.
+ /// - input_splits: A 1D tensor specifying how the unicode codepoints should be split into strings.
+ /// In particular, `output[i]` is constructed by encoding the codepoints in the
+ /// slice `input_values[input_splits[i]:input_splits[i+1]]`.
+ ///
+ /// - Attrs:
+ /// - errors: Error handling policy when there is invalid formatting found in the input.
+ /// The value of 'strict' will cause the operation to produce an `InvalidArgument`
+ /// error on any invalid input formatting. A value of 'replace' (the default) will
+ /// cause the operation to replace any invalid formatting in the input with the
+ /// `replacement_char` codepoint. A value of 'ignore' will cause the operation to
+ /// skip any invalid formatting in the input and produce no corresponding output
+ /// character.
+ /// - output_encoding: Unicode encoding of the output strings. Valid encodings are: `"UTF-8",
+ /// "UTF-16-BE", and "UTF-32-BE"`.
+ /// - replacement_char: The replacement character codepoint to be used in place of any invalid
+ /// formatting in the input when `errors='replace'`. Any valid unicode codepoint may
+ /// be used. The default value is the default Unicode replacement character,
+ /// 0xFFFD (decimal 65533).
+ ///
+ /// - Output output: The 1-D Tensor of strings encoded from the provided unicode codepoints.
+ @inlinable @inline(__always)
+ public static func unicodeEncode(
+ inputValues: Tensor,
+ inputSplits: Tensor,
+ errors: Errors = .replace,
+ outputEncoding: OutputEncoding,
+ replacementChar: Int64 = 65533
+ ) -> StringTensor {
+ _RawTFEager.unicodeEncode(
+ inputValues: inputValues, inputSplits: inputSplits, errors: errors,
+ outputEncoding: outputEncoding, replacementChar: replacementChar)
+ }
+
+ /// Determine the script codes of a given tensor of Unicode integer code points.
+ ///
+ /// This operation converts Unicode code points to script codes corresponding to
+ /// each code point. Script codes correspond to International Components for
+ /// Unicode (ICU) UScriptCode values. See http://icu-project.org/apiref/icu4c/uscript_8h.html.
+ /// Returns -1 (USCRIPT_INVALID_CODE) for invalid codepoints. Output shape will
+ /// match input shape.
+ ///
+ /// Examples:
+ ///
+ /// >>> tf.strings.unicode_script([1, 31, 38])
+ ///
+ ///
+ /// - Parameter input: A Tensor of int32 Unicode code points.
+ ///
+ /// - Output output: A Tensor of int32 script codes corresponding to each input code point.
+ @inlinable @inline(__always)
+ public static func unicodeScript(
+ _ input: Tensor
+ ) -> Tensor {
+ switch input.handle.backend {
+ case .XLA:
+ let output_device = input.device
+ let input = Tensor(copying: input, to: .defaultTFEager)
+ return Tensor(copying: _RawTFEager.unicodeScript(input), to: output_device)
+ case .TF_EAGER:
+ return _RawTFEager.unicodeScript(input)
+ }
+
+ }
+
+ /// Transcode the input text from a source encoding to a destination encoding.
+ ///
+ /// The input is a string tensor of any shape. The output is a string tensor of
+ /// the same shape containing the transcoded strings. Output strings are always
+ /// valid unicode. If the input contains invalid encoding positions, the
+ /// `errors` attribute sets the policy for how to deal with them. If the default
+ /// error-handling policy is used, invalid formatting will be substituted in the
+ /// output by the `replacement_char`. If the errors policy is to `ignore`, any
+ /// invalid encoding positions in the input are skipped and not included in the
+ /// output. If it is set to `strict` then any invalid formatting will result in an
+ /// `InvalidArgument` error.
+ ///
+ /// This operation can be used with `output_encoding = input_encoding` to enforce
+ /// correct formatting for inputs even if they are already in the desired encoding.
+ ///
+ /// If the input is prefixed by a Byte Order Mark needed to determine encoding
+ /// (e.g. if the encoding is UTF-16 and the BOM indicates big-endian), then that
+ /// BOM will be consumed and not emitted into the output. If the input encoding
+ /// is marked with an explicit endianness (e.g. UTF-16-BE), then the BOM is
+ /// interpreted as a non-breaking-space and is preserved in the output (including
+ /// always for UTF-8).
+ ///
+ /// The end result is that if the input is marked as an explicit endianness the
+ /// transcoding is faithful to all codepoints in the source. If it is not marked
+ /// with an explicit endianness, the BOM is not considered part of the string itself
+ /// but as metadata, and so is not preserved in the output.
+ ///
+ /// Examples:
+ ///
+ /// >>> tf.strings.unicode_transcode(["Hello", "TensorFlow", "2.x"], "UTF-8", "UTF-16-BE")
+ ///
+ /// >>> tf.strings.unicode_transcode(["A", "B", "C"], "US ASCII", "UTF-8").numpy()
+ /// array([b'A', b'B', b'C'], dtype=object)
+ ///
+ /// - Parameter input: The text to be processed. Can have any shape.
+ ///
+ /// - Attrs:
+ /// - input_encoding: Text encoding of the input strings. This is any of the encodings supported
+ /// by ICU ucnv algorithmic converters. Examples: `"UTF-16", "US ASCII", "UTF-8"`.
+ /// - output_encoding: The unicode encoding to use in the output. Must be one of
+ /// `"UTF-8", "UTF-16-BE", "UTF-32-BE"`. Multi-byte encodings will be big-endian.
+ /// - errors: Error handling policy when there is invalid formatting found in the input.
+ /// The value of 'strict' will cause the operation to produce an `InvalidArgument`
+ /// error on any invalid input formatting. A value of 'replace' (the default) will
+ /// cause the operation to replace any invalid formatting in the input with the
+ /// `replacement_char` codepoint. A value of 'ignore' will cause the operation to
+ /// skip any invalid formatting in the input and produce no corresponding output
+ /// character.
+ /// - replacement_char: The replacement character codepoint to be used in place of any invalid
+ /// formatting in the input when `errors='replace'`. Any valid unicode codepoint may
+ /// be used. The default value is the default Unicode replacement character,
+ /// 0xFFFD (decimal 65533).
+ ///
+ /// Note that for UTF-8, passing a replacement character expressible in 1 byte, such
+ /// as ' ', will preserve string alignment to the source since invalid bytes will be
+ /// replaced with a 1-byte replacement. For UTF-16-BE and UTF-16-LE, any 1 or 2 byte
+ /// replacement character will preserve byte alignment to the source.
+ /// - replace_control_characters: Whether to replace the C0 control characters (00-1F) with the
+ /// `replacement_char`. Default is false.
+ ///
+ /// - Output output: A string tensor containing unicode text encoded using `output_encoding`.
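The `input_splits` contract of `unicodeEncode` above is the inverse of the decode layout. A plain-Swift sketch under the same caveats (invented names; it assumes non-negative codepoints, and invalid scalar values fall back to U+FFFD, loosely mirroring `errors='replace'`):

```swift
// Hypothetical inverse of the decode sketch: rebuild output[i] from
// input_values[input_splits[i]..<input_splits[i+1]].
func encodeCodepoints(values: [Int32], splits: [Int]) -> [String] {
  (0..<(splits.count - 1)).map { i -> String in
    var scalars = String.UnicodeScalarView()
    for v in values[splits[i]..<splits[i + 1]] {
      // Values that are not valid scalars (e.g. surrogates) become U+FFFD.
      scalars.append(Unicode.Scalar(UInt32(v)) ?? "\u{FFFD}")
    }
    return String(scalars)
  }
}

let words = encodeCodepoints(
  values: [72, 101, 108, 108, 111, 87, 111, 114, 108, 100], splits: [0, 5, 10])
// words == ["Hello", "World"], matching the docstring example.
```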
+ @inlinable @inline(__always)
+ public static func unicodeTranscode(
+ _ input: StringTensor,
+ inputEncoding: String,
+ outputEncoding: OutputEncoding,
+ errors: Errors = .replace,
+ replacementChar: Int64 = 65533,
+ replaceControlCharacters: Bool = false
+ ) -> StringTensor {
+ _RawTFEager.unicodeTranscode(
+ input, inputEncoding: inputEncoding, outputEncoding: outputEncoding, errors: errors,
+ replacementChar: replacementChar, replaceControlCharacters: replaceControlCharacters)
+ }
+
+ /// Generates labels for candidate sampling with a uniform distribution.
+ ///
+ /// See explanations of candidate sampling and the data formats at
+ /// go/candidate-sampling.
+ ///
+ /// For each batch, this op picks a single set of sampled candidate labels.
+ ///
+ /// The advantages of sampling candidates per-batch are simplicity and the
+ /// possibility of efficient dense matrix multiplication. The disadvantage is that
+ /// the sampled candidates must be chosen independently of the context and of the
+ /// true labels.
+ ///
+ /// - Parameter true_classes: A batch_size * num_true matrix, in which each row contains the
+ /// IDs of the num_true target_classes in the corresponding original label.
+ ///
+ /// - Attrs:
+ /// - num_true: Number of true labels per context.
+ /// - num_sampled: Number of candidates to randomly sample.
+ /// - unique: If unique is true, we sample with rejection, so that all sampled
+ /// candidates in a batch are unique. This requires some approximation to
+ /// estimate the post-rejection sampling probabilities.
+ /// - range_max: The sampler will sample integers from the interval [0, range_max).
+ /// - seed: If either seed or seed2 is set to be non-zero, the random number
+ /// generator is seeded by the given seed. Otherwise, it is seeded by a
+ /// random seed.
+ /// - seed2: A second seed to avoid seed collision.
+ ///
+ /// - Outputs:
+ /// - sampled_candidates: A vector of length num_sampled, in which each element is
+ /// the ID of a sampled candidate.
+ /// - true_expected_count: A batch_size * num_true matrix, representing
+ /// the number of times each candidate is expected to occur in a batch
+ /// of sampled candidates. If unique=true, then this is a probability.
+ /// - sampled_expected_count: A vector of length num_sampled, for each sampled
+ /// candidate representing the number of times the candidate is expected
+ /// to occur in a batch of sampled candidates. If unique=true, then this is a
+ /// probability.
+ @inlinable @inline(__always)
+ public static func uniformCandidateSampler(
+ trueClasses: Tensor,
+ numTrue: Int64,
+ numSampled: Int64,
+ unique: Bool,
+ rangeMax: Int64,
+ seed: Int64 = 0,
+ seed2: Int64 = 0
+ ) -> (
+ sampledCandidates: Tensor, trueExpectedCount: Tensor,
+ sampledExpectedCount: Tensor
+ ) {
+ _RawTFEager.uniformCandidateSampler(
+ trueClasses: trueClasses, numTrue: numTrue, numSampled: numSampled, unique: unique,
+ rangeMax: rangeMax, seed: seed, seed2: seed2)
+ }
+
+ /// Finds unique elements in a 1-D tensor.
+ ///
+ /// This operation returns a tensor `y` containing all of the unique elements of `x`
+ /// sorted in the same order that they occur in `x`; `x` does not need to be sorted.
+ /// This operation also returns a tensor `idx` the same size as `x` that contains
+ /// the index of each value of `x` in the unique output `y`. In other words:
+ ///
+ /// `y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]`
+ ///
+ /// Examples:
+ ///
+ /// ```
+ /// # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8]
+ /// y, idx = unique(x)
+ /// y ==> [1, 2, 4, 7, 8]
+ /// idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4]
+ /// ```
+ ///
+ /// ```
+ /// # tensor 'x' is [4, 5, 1, 2, 3, 3, 4, 5]
+ /// y, idx = unique(x)
+ /// y ==> [4, 5, 1, 2, 3]
+ /// idx ==> [0, 1, 2, 3, 4, 4, 0, 1]
+ /// ```
+ ///
+ /// - Parameter x: 1-D.
+ ///
+ /// - Outputs:
+ /// - y: 1-D.
+ /// - idx: 1-D.
+ @inlinable @inline(__always)
+ public static func unique<
+ T: TensorFlowScalar,
+ OutIdx: TensorFlowIndex
+ >(
+ _ x: Tensor
+ ) -> (y: Tensor, idx: Tensor) {
+ _RawTFEager.unique(x)
+ }
+
+ /// Creates a dataset that contains the unique elements of `input_dataset`.
+ @inlinable @inline(__always)
+ public static func uniqueDataset(
+ inputDataset: VariantHandle,
+ outputTypes: [TensorDataType],
+ outputShapes: [TensorShape?]
+ ) -> VariantHandle {
+ _RawTFEager.uniqueDataset(
+ inputDataset: inputDataset, outputTypes: outputTypes, outputShapes: outputShapes)
+ }
+
+ /// Finds unique elements along an axis of a tensor.
+ ///
+ /// This operation returns a tensor `y` containing the unique elements
+ /// along the `axis` of a tensor. The returned unique elements are sorted
+ /// in the same order as they occur along `axis` in `x`.
+ /// This operation also returns a tensor `idx` that is the same size as
+ /// the number of elements in `x` along the `axis` dimension. It
+ /// contains the index of each element of `x` in the unique output `y`.
+ /// In other words, for a `1-D` tensor `x` with `axis = None`:
+ ///
+ /// `y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]`
+ ///
+ /// For example:
+ ///
+ /// ```
+ /// # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8]
+ /// y, idx = unique(x)
+ /// y ==> [1, 2, 4, 7, 8]
+ /// idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4]
+ /// ```
+ ///
+ /// For a `2-D` tensor `x` with `axis = 0`:
+ ///
+ /// ```
+ /// # tensor 'x' is [[1, 0, 0],
+ /// # [1, 0, 0],
+ /// # [2, 0, 0]]
+ /// y, idx = unique(x, axis=0)
+ /// y ==> [[1, 0, 0],
+ /// [2, 0, 0]]
+ /// idx ==> [0, 0, 1]
+ /// ```
+ ///
+ /// For a `2-D` tensor `x` with `axis = 1`:
+ ///
+ /// ```
+ /// # tensor 'x' is [[1, 0, 0],
+ /// # [1, 0, 0],
+ /// # [2, 0, 0]]
+ /// y, idx = unique(x, axis=1)
+ /// y ==> [[1, 0],
+ /// [1, 0],
+ /// [2, 0]]
+ /// idx ==> [0, 1, 1]
+ /// ```
+ ///
+ /// - Parameters:
+ /// - x: A `Tensor`.
+ /// - axis: A `Tensor` of type `int32` (default: None). The axis of the Tensor to
+ /// find the unique elements.
+ ///
+ /// - Outputs:
+ /// - y: A `Tensor`. Unique elements along the `axis` of `Tensor` x.
+ /// - idx: A 1-D Tensor. Contains the index of each
+ /// value of x in the output y.
+ @inlinable @inline(__always)
+ public static func uniqueV2<
+ T: TensorFlowScalar,
+ Taxis: TensorFlowIndex,
+ OutIdx: TensorFlowIndex
+ >(
+ _ x: Tensor,
+ axis: Tensor
+ ) -> (y: Tensor, idx: Tensor) {
+ _RawTFEager.uniqueV2(x, axis: axis)
+ }
+
+ /// Finds unique elements in a 1-D tensor.
+ ///
+ /// This operation returns a tensor `y` containing all of the unique elements of `x`
+ /// sorted in the same order that they occur in `x`. This operation also returns a
+ /// tensor `idx` the same size as `x` that contains the index of each value of `x`
+ /// in the unique output `y`. Finally, it returns a third tensor `count` that
+ /// contains the count of each element of `y` in `x`. In other words:
+ ///
+ /// `y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]`
+ ///
+ /// For example:
+ ///
+ /// ```
+ /// # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8]
+ /// y, idx, count = unique_with_counts(x)
+ /// y ==> [1, 2, 4, 7, 8]
+ /// idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4]
+ /// count ==> [2, 1, 3, 1, 2]
+ /// ```
+ ///
+ /// - Parameter x: 1-D.
+ ///
+ /// - Outputs:
+ /// - y: 1-D.
+ /// - idx: 1-D.
+ /// - count: 1-D.
+ @inlinable @inline(__always)
+ public static func uniqueWithCounts<
+ T: TensorFlowScalar,
+ OutIdx: TensorFlowIndex
+ >(
+ _ x: Tensor
+ ) -> (y: Tensor, idx: Tensor, count: Tensor) {
+ _RawTFEager.uniqueWithCounts(x)
+ }
+
+ /// Finds unique elements along an axis of a tensor.
+ ///
+ /// This operation returns a tensor `y` containing the unique elements
+ /// along the `axis` of a tensor. The returned unique elements are sorted
+ /// in the same order as they occur along `axis` in `x`.
+ /// This operation also returns a tensor `idx` and a tensor `count`
+ /// that are the same size as the number of elements in `x` along the
+ /// `axis` dimension. `idx` contains the index of each element of `x`
+ /// in the unique output `y`, and `count` contains the count of each
+ /// unique element in `x`.
+ /// In other words, for a `1-D` tensor `x` with `axis = None`:
+ ///
+ /// `y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]`
+ ///
+ /// For example:
+ ///
+ /// ```
+ /// # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8]
+ /// y, idx, count = unique_with_counts(x)
+ /// y ==> [1, 2, 4, 7, 8]
+ /// idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4]
+ /// count ==> [2, 1, 3, 1, 2]
+ /// ```
+ ///
+ /// For a `2-D` tensor `x` with `axis = 0`:
+ ///
+ /// ```
+ /// # tensor 'x' is [[1, 0, 0],
+ /// # [1, 0, 0],
+ /// # [2, 0, 0]]
+ /// y, idx, count = unique_with_counts(x, axis=0)
+ /// y ==> [[1, 0, 0],
+ /// [2, 0, 0]]
+ /// idx ==> [0, 0, 1]
+ /// count ==> [2, 1]
+ /// ```
+ ///
+ /// For a `2-D` tensor `x` with `axis = 1`:
+ ///
+ /// ```
+ /// # tensor 'x' is [[1, 0, 0],
+ /// # [1, 0, 0],
+ /// # [2, 0, 0]]
+ /// y, idx, count = unique_with_counts(x, axis=1)
+ /// y ==> [[1, 0],
+ /// [1, 0],
+ /// [2, 0]]
+ /// idx ==> [0, 1, 1]
+ /// count ==> [1, 2]
+ /// ```
+ ///
+ /// - Parameters:
+ /// - x: A `Tensor`.
+ /// - axis: A `Tensor` of type `int32` (default: None). The axis of the Tensor to
+ /// find the unique elements.
+ ///
+ /// - Outputs:
+ /// - y: A `Tensor`. Unique elements along the `axis` of `Tensor` x.
+ /// - idx: A 1-D Tensor. Contains the index of each
+ /// value of x in the output y.
+ /// - count: A 1-D Tensor. The count of each value of x in the output y.
+ @inlinable @inline(__always)
+ public static func uniqueWithCountsV2<
+ T: TensorFlowScalar,
+ Taxis: TensorFlowIndex,
+ OutIdx: TensorFlowIndex
+ >(
+ _ x: Tensor,
+ axis: Tensor
+ ) -> (y: Tensor, idx: Tensor, count: Tensor) {
+ _RawTFEager.uniqueWithCountsV2(x, axis: axis)
+ }
+
+ /// Unpacks a given dimension of a rank-`R` tensor into `num` rank-`(R-1)` tensors.
+ ///
+ /// Unpacks `num` tensors from `value` by chipping it along the `axis` dimension.
+ /// For example, given a tensor of shape `(A, B, C, D)`:
+ ///
+ /// If `axis == 0` then the i'th tensor in `output` is the slice `value[i, :, :, :]`
+ /// and each tensor in `output` will have shape `(B, C, D)`. (Note that the
+ /// dimension unpacked along is gone, unlike `split`).
+ ///
+ /// If `axis == 1` then the i'th tensor in `output` is the slice `value[:, i, :, :]`
+ /// and each tensor in `output` will have shape `(A, C, D)`.
+ /// Etc.
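The `y`/`idx`/`count` contract shared by the `unique` family above reduces, for the rank-1 case, to a single pass with a first-occurrence table. A plain-Swift sketch (a `Hashable` stand-in for the op's scalar types; names are invented):

```swift
// y keeps first occurrences in input order, idx maps every element of x
// into y, and counts tallies occurrences, so y[idx[i]] == x[i] for all i.
func uniqueWithCounts<T: Hashable>(_ x: [T]) -> (y: [T], idx: [Int], counts: [Int]) {
  var y: [T] = []
  var counts: [Int] = []
  var idx: [Int] = []
  var firstIndex: [T: Int] = [:]  // position of each value in y
  for value in x {
    if let p = firstIndex[value] {
      idx.append(p)
      counts[p] += 1
    } else {
      firstIndex[value] = y.count
      idx.append(y.count)
      y.append(value)
      counts.append(1)
    }
  }
  return (y, idx, counts)
}

let r = uniqueWithCounts([1, 1, 2, 4, 4, 4, 7, 8, 8])
// r.y == [1, 2, 4, 7, 8]; r.idx == [0, 0, 1, 2, 2, 2, 3, 4, 4]; r.counts == [2, 1, 3, 1, 2]
```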
+ ///
+ /// This is the opposite of `pack`.
+ ///
+ /// - Parameter value: 1-D or higher, with `axis` dimension size equal to `num`.
+ ///
+ /// - Attr axis: Dimension along which to unpack. Negative values wrap around, so the
+ /// valid range is `[-R, R)`.
+ ///
+ /// - Output output: The list of tensors unpacked from `value`.
+ @inlinable @inline(__always)
+ public static func unpack(
+ value: Tensor,
+ num: Int64,
+ axis: Int64 = 0
+ ) -> [Tensor] {
+ switch value.handle.backend {
+ case .XLA:
+ return _RawXLA.unpack(value: value, num: num, axis: axis)
+ case .TF_EAGER:
+ return _RawTFEager.unpack(value: value, num: num, axis: axis)
+ }
+
+ }
+
+ /// Converts an array of flat indices into a tuple of coordinate arrays.
+ ///
+ ///
+ /// Example:
+ ///
+ /// ```
+ /// y = tf.unravel_index(indices=[2, 5, 7], dims=[3, 3])
+ /// # 'dims' represent a hypothetical (3, 3) tensor of indices:
+ /// # [[0, 1, *2*],
+ /// # [3, 4, *5*],
+ /// # [6, *7*, 8]]
+ /// # For each entry from 'indices', this operation returns
+ /// # its coordinates (marked with '*'), such as
+ /// # 2 ==> (0, 2)
+ /// # 5 ==> (1, 2)
+ /// # 7 ==> (2, 1)
+ /// y ==> [[0, 1, 2], [2, 2, 1]]
+ /// ```
+ ///
+ /// @compatibility(numpy)
+ /// Equivalent to np.unravel_index
+ /// @end_compatibility
+ ///
+ /// - Parameters:
+ /// - indices: A 0-D or 1-D `int` Tensor whose elements are indices into the
+ /// flattened version of an array of dimensions dims.
+ /// - dims: A 1-D `int` Tensor. The shape of the array to use for unraveling
+ /// indices.
+ ///
+ /// - Output output: A 2-D (or 1-D if indices is 0-D) tensor where each row has the
+ /// same shape as the indices array.
+ @inlinable @inline(__always)
+ public static func unravelIndex(
+ indices: Tensor,
+ dims: Tensor
+ ) -> Tensor {
+ switch commonBackend(indices.handle.backend, dims.handle.backend) {
+ case .XLA:
+ let output_device = dims.device
+ let indices = Tensor(copying: indices, to: .defaultTFEager)
+ let dims = Tensor(copying: dims, to: .defaultTFEager)
+ return Tensor(
+ copying: _RawTFEager.unravelIndex(indices: indices, dims: dims), to: output_device)
+ case .TF_EAGER:
+ return _RawTFEager.unravelIndex(indices: indices, dims: dims)
+ }
+
+ }
+
+ /// Joins the elements of `inputs` based on `segment_ids`.
+ ///
+ /// Computes the string join along segments of a tensor.
+ /// Given `segment_ids` with rank `N` and `data` with rank `N+M`:
+ ///
+ /// `output[i, k1...kM] = strings.join([data[j1...jN, k1...kM]])`
+ ///
+ /// where the join is over all [j1...jN] such that segment_ids[j1...jN] = i.
+ /// Strings are joined in row-major order.
+ ///
+ /// For example:
+ ///
+ /// ```python
+ /// inputs = [['Y', 'q', 'c'], ['Y', '6', '6'], ['p', 'G', 'a']]
+ /// output_array = string_ops.unsorted_segment_join(inputs=inputs,
+ /// segment_ids=[1, 0, 1],
+ /// num_segments=2,
+ /// separator=':')
+ /// # output_array ==> [['Y', '6', '6'], ['Y:p', 'q:G', 'c:a']]
+ ///
+ ///
+ /// inputs = ['this', 'is', 'a', 'test']
+ /// output_array = string_ops.unsorted_segment_join(inputs=inputs,
+ /// segment_ids=[0, 0, 0, 0],
+ /// num_segments=1,
+ /// separator=':')
+ /// # output_array ==> ['this:is:a:test']
+ /// ```
+ ///
+ /// - Parameters:
+ /// - inputs: The input to be joined.
+ /// - segment_ids: A tensor whose shape is a prefix of data.shape. Negative segment ids are not
+ /// supported.
+ /// - num_segments: A scalar.
+ ///
+ /// - Attr separator: The separator to use when joining.
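The arithmetic behind `unravelIndex` above is repeated div/mod over a row-major shape. A plain-Swift sketch (invented names):

```swift
// coordinates[d][k] is the d-th coordinate of indices[k]; one output row
// per dimension, mirroring the op's layout.
func unravelIndex(indices: [Int], dims: [Int]) -> [[Int]] {
  var coordinates = Array(
    repeating: Array(repeating: 0, count: indices.count), count: dims.count)
  for (k, flat) in indices.enumerated() {
    var remainder = flat
    for d in stride(from: dims.count - 1, through: 0, by: -1) {
      coordinates[d][k] = remainder % dims[d]  // coordinate along dimension d
      remainder /= dims[d]
    }
  }
  return coordinates
}

let y = unravelIndex(indices: [2, 5, 7], dims: [3, 3])
// y == [[0, 1, 2], [2, 2, 1]], matching the docstring example.
```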
+ @inlinable @inline(__always) + public static func unsortedSegmentJoin< + Tindices: TensorFlowIndex, + Tnumsegments: TensorFlowIndex + >( + inputs: StringTensor, + segmentIds: Tensor, + numSegments: Tensor, + separator: String + ) -> StringTensor { + _RawTFEager.unsortedSegmentJoin( + inputs: inputs, segmentIds: segmentIds, numSegments: numSegments, separator: separator) + } + + /// Computes the maximum along segments of a tensor. + /// + /// Read + /// [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) + /// for an explanation of segments. + /// + /// This operator is similar to the unsorted segment sum operator found + /// [(here)](../../../api_docs/python/math_ops.md#UnsortedSegmentSum). + /// Instead of computing the sum over segments, it computes the maximum such that: + /// + /// \\(output_i = \max_{j...} data[j...]\\) where max is over tuples `j...` such + /// that `segment_ids[j...] == i`. + /// + /// If the maximum is empty for a given segment ID `i`, it outputs the smallest + /// possible value for the specific numeric type, + /// `output[i] = numeric_limits::lowest()`. + /// + /// If the given segment ID `i` is negative, then the corresponding value is + /// dropped, and will not be included in the result. + /// + ///
+ /// For example:
+ ///
+ /// ``` python
+ /// c = tf.constant([[1,2,3,4], [5,6,7,8], [4,3,2,1]])
+ /// tf.unsorted_segment_max(c, tf.constant([0, 1, 0]), num_segments=2)
+ /// # ==> [[ 4, 3, 3, 4],
+ /// # [5, 6, 7, 8]]
+ /// ```
+ ///
+ /// - Parameter segment_ids: A tensor whose shape is a prefix of `data.shape`.
+ ///
+ /// - Output output: Has same shape as data, except for the first `segment_ids.rank`
+ /// dimensions, which are replaced with a single dimension which has size
+ /// `num_segments`.
+ @inlinable @inline(__always)
+ public static func unsortedSegmentMax<
+ T: TensorFlowNumeric,
+ Tindices: TensorFlowIndex,
+ Tnumsegments: TensorFlowIndex
+ >(
+ data: Tensor,
+ segmentIds: Tensor,
+ numSegments: Tensor
+ ) -> Tensor {
+ switch commonBackend(
+ commonBackend(data.handle.backend, segmentIds.handle.backend), numSegments.handle.backend)
+ {
+ case .XLA:
+ let output_device = numSegments.device
+ let data = Tensor(copying: data, to: .defaultTFEager)
+ let segmentIds = Tensor(copying: segmentIds, to: .defaultTFEager)
+ let numSegments = Tensor(copying: numSegments, to: .defaultTFEager)
+ return Tensor(
+ copying: _RawTFEager.unsortedSegmentMax(
+ data: data, segmentIds: segmentIds, numSegments: numSegments), to: output_device)
+ case .TF_EAGER:
+ return _RawTFEager.unsortedSegmentMax(
+ data: data, segmentIds: segmentIds, numSegments: numSegments)
+ }
+
+ }
+
+ /// Computes the minimum along segments of a tensor.
+ ///
+ /// Read
+ /// [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation)
+ /// for an explanation of segments.
+ ///
+ /// This operator is similar to the unsorted segment sum operator found
+ /// [(here)](../../../api_docs/python/math_ops.md#UnsortedSegmentSum).
+ /// Instead of computing the sum over segments, it computes the minimum such that:
+ ///
+ /// \\(output_i = \min_{j...} data[j...]\\) where min is over tuples `j...` such
+ /// that `segment_ids[j...] == i`.
+ ///
+ /// If the minimum is empty for a given segment ID `i`, it outputs the largest
+ /// possible value for the specific numeric type,
+ /// `output[i] = numeric_limits::max()`.
+ ///
+ /// For example:
+ ///
+ /// ``` python
+ /// c = tf.constant([[1,2,3,4], [5,6,7,8], [4,3,2,1]])
+ /// tf.unsorted_segment_min(c, tf.constant([0, 1, 0]), num_segments=2)
+ /// # ==> [[ 1, 2, 2, 1],
+ /// # [5, 6, 7, 8]]
+ /// ```
+ ///
+ /// If the given segment ID `i` is negative, then the corresponding value is
+ /// dropped, and will not be included in the result.
+ ///
+ /// - Parameter segment_ids: A tensor whose shape is a prefix of `data.shape`.
+ ///
+ /// - Output output: Has same shape as data, except for the first `segment_ids.rank`
+ /// dimensions, which are replaced with a single dimension which has size
+ /// `num_segments`.
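The unsorted-segment reductions documented here differ only in the combining function and the identity used for empty segments (lowest value for max, largest for min, 1 for prod, 0 for sum). A plain-Swift sketch of the max variant for rank-2 data (invented names, `Int` specialization):

```swift
// Rows of `data` sharing a segment ID are reduced elementwise; negative or
// out-of-range IDs are dropped; empty segments keep the identity (Int.min).
func unsortedSegmentMax(data: [[Int]], segmentIds: [Int], numSegments: Int) -> [[Int]] {
  let width = data.first?.count ?? 0
  var output = Array(repeating: Array(repeating: Int.min, count: width), count: numSegments)
  for (row, id) in zip(data, segmentIds) where id >= 0 && id < numSegments {
    for (j, value) in row.enumerated() {
      output[id][j] = max(output[id][j], value)
    }
  }
  return output
}

let m = unsortedSegmentMax(
  data: [[1, 2, 3, 4], [5, 6, 7, 8], [4, 3, 2, 1]], segmentIds: [0, 1, 0], numSegments: 2)
// m == [[4, 3, 3, 4], [5, 6, 7, 8]], matching the example above.
```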
+ @inlinable @inline(__always) + public static func unsortedSegmentMin< + T: TensorFlowNumeric, + Tindices: TensorFlowIndex, + Tnumsegments: TensorFlowIndex + >( + data: Tensor, + segmentIds: Tensor, + numSegments: Tensor + ) -> Tensor { + switch commonBackend( + commonBackend(data.handle.backend, segmentIds.handle.backend), numSegments.handle.backend) + { + case .XLA: + let output_device = numSegments.device + let data = Tensor(copying: data, to: .defaultTFEager) + let segmentIds = Tensor(copying: segmentIds, to: .defaultTFEager) + let numSegments = Tensor(copying: numSegments, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.unsortedSegmentMin( + data: data, segmentIds: segmentIds, numSegments: numSegments), to: output_device) + case .TF_EAGER: + return _RawTFEager.unsortedSegmentMin( + data: data, segmentIds: segmentIds, numSegments: numSegments) + } + + } + + /// Computes the product along segments of a tensor. + /// + /// Read + /// [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) + /// for an explanation of segments. + /// + /// This operator is similar to the unsorted segment sum operator found + /// [(here)](../../../api_docs/python/math_ops.md#UnsortedSegmentSum). + /// Instead of computing the sum over segments, it computes the product of all + /// entries belonging to a segment such that: + /// + /// \\(output_i = \prod_{j...} data[j...]\\) where the product is over tuples + /// `j...` such that `segment_ids[j...] == i`. + /// + /// For example: + /// + /// ``` python + /// c = tf.constant([[1,2,3,4], [5,6,7,8], [4,3,2,1]]) + /// tf.unsorted_segment_prod(c, tf.constant([0, 1, 0]), num_segments=2) + /// # ==> [[ 4, 6, 6, 4], + /// # [5, 6, 7, 8]] + /// ``` + /// + /// If there is no entry for a given segment ID `i`, it outputs 1. + /// + /// If the given segment ID `i` is negative, then the corresponding value is + /// dropped, and will not be included in the result. + /// + /// - Parameter segment_ids: A tensor whose shape is a prefix of `data.shape`. + /// + /// - Output output: Has same shape as data, except for the first `segment_ids.rank` + /// dimensions, which are replaced with a single dimension which has size + /// `num_segments`. + @inlinable @inline(__always) + public static func unsortedSegmentProd< + T: TensorFlowNumeric, + Tindices: TensorFlowIndex, + Tnumsegments: TensorFlowIndex + >( + data: Tensor, + segmentIds: Tensor, + numSegments: Tensor + ) -> Tensor { + switch commonBackend( + commonBackend(data.handle.backend, segmentIds.handle.backend), numSegments.handle.backend) + { + case .XLA: + let output_device = numSegments.device + let data = Tensor(copying: data, to: .defaultTFEager) + let segmentIds = Tensor(copying: segmentIds, to: .defaultTFEager) + let numSegments = Tensor(copying: numSegments, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.unsortedSegmentProd( + data: data, segmentIds: segmentIds, numSegments: numSegments), to: output_device) + case .TF_EAGER: + return _RawTFEager.unsortedSegmentProd( + data: data, segmentIds: segmentIds, numSegments: numSegments) + } + + } + + /// Computes the sum along segments of a tensor. + /// + /// Read + /// [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) + /// for an explanation of segments. + /// + /// Computes a tensor such that + /// \\(output[i] = \sum_{j...} data[j...]\\) where the sum is over tuples `j...` such + /// that `segment_ids[j...] == i`. 
+  /// Unlike `SegmentSum`, `segment_ids`
+  /// need not be sorted and need not cover all values in the full
+  /// range of valid values.
+  ///
+  /// If the sum is empty for a given segment ID `i`, `output[i] = 0`.
+  /// If the given segment ID `i` is negative, the value is dropped and will not be
+  /// added to the sum of the segment.
+  ///
+  /// `num_segments` should equal the number of distinct segment IDs.
+  ///
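+  /// For example (first a Swift sketch mirroring the Python example below;
+  /// illustrative only — segment 0 sums rows 0 and 2 of `c`):
+  ///
+  /// ``` swift
+  /// let c: Tensor<Int32> = [[1, 2, 3, 4], [5, 6, 7, 8], [4, 3, 2, 1]]
+  /// let result: Tensor<Int32> = _Raw.unsortedSegmentSum(
+  ///   data: c, segmentIds: Tensor<Int32>([0, 1, 0]), numSegments: Tensor<Int32>(2))
+  /// // result == [[5, 5, 5, 5], [5, 6, 7, 8]]
+  /// ```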
+  ///
+  /// ``` python
+  /// c = tf.constant([[1,2,3,4], [5,6,7,8], [4,3,2,1]])
+  /// tf.unsorted_segment_sum(c, tf.constant([0, 1, 0]), num_segments=2)
+  /// # ==> [[ 5, 5, 5, 5],
+  /// #      [5, 6, 7, 8]]
+  /// ```
+  ///
+  /// - Parameter segment_ids: A tensor whose shape is a prefix of `data.shape`.
+  ///
+  /// - Output output: Has same shape as data, except for the first `segment_ids.rank`
+  ///   dimensions, which are replaced with a single dimension which has size
+  ///   `num_segments`.
+  @inlinable @inline(__always)
+  public static func unsortedSegmentSum<
+    T: TensorFlowNumeric,
+    Tindices: TensorFlowIndex,
+    Tnumsegments: TensorFlowIndex
+  >(
+    data: Tensor<T>,
+    segmentIds: Tensor<Tindices>,
+    numSegments: Tensor<Tnumsegments>
+  ) -> Tensor<T> {
+    switch commonBackend(
+      commonBackend(data.handle.backend, segmentIds.handle.backend), numSegments.handle.backend)
+    {
+    case .XLA:
+      return _RawXLA.unsortedSegmentSum(
+        data: data, segmentIds: segmentIds, numSegments: numSegments)
+    case .TF_EAGER:
+      return _RawTFEager.unsortedSegmentSum(
+        data: data, segmentIds: segmentIds, numSegments: numSegments)
+    }
+
+  }
+
+  /// Op is similar to a lightweight Dequeue.
+  ///
+  /// The basic functionality is similar to dequeue with many fewer
+  /// capabilities and options.  This Op is optimized for performance.
+  @inlinable @inline(__always)
+  public static func unstage<Dtypes: TensorGroup>(
+    capacity: Int64 = 0,
+    memoryLimit: Int64 = 0,
+    container: String,
+    sharedName: String
+  ) -> Dtypes {
+    _RawTFEager.unstage(
+      capacity: capacity, memoryLimit: memoryLimit, container: container, sharedName: sharedName)
+  }
+
+  @inlinable @inline(__always)
+  public static func unwrapDatasetVariant(
+    inputHandle: VariantHandle
+  ) -> VariantHandle {
+    _RawTFEager.unwrapDatasetVariant(inputHandle: inputHandle)
+  }
+
+  /// Applies upper_bound(sorted_search_values, values) along each row.
+  ///
+  /// Each set of rows with the same index in (sorted_inputs, values) is treated
+  /// independently.  The resulting row is the equivalent of calling
+  /// `np.searchsorted(sorted_inputs, values, side='right')`.
+  ///
+  /// The result is not a global index to the entire
+  /// `Tensor`, but rather just the index in the last dimension.
+  ///
+  /// A 2-D example:
+  ///   sorted_sequence = [[0, 3, 9, 9, 10],
+  ///                      [1, 2, 3, 4, 5]]
+  ///   values = [[2, 4, 9],
+  ///             [0, 2, 6]]
+  ///
+  ///   result = UpperBound(sorted_sequence, values)
+  ///
+  ///   result == [[1, 2, 4],
+  ///              [0, 2, 5]]
+  ///
+  /// - Parameters:
+  ///   - sorted_inputs: 2-D Tensor where each row is ordered.
+  ///   - values: 2-D Tensor with the same number of rows as `sorted_search_values`. Contains
+  ///     the values that will be searched for in `sorted_search_values`.
+  ///
+  /// - Output output: A `Tensor` with the same shape as `values`.  It contains the last scalar index
+  ///   into the last dimension where values can be inserted without changing the
+  ///   ordered property.
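+  ///
+  /// A Swift sketch of the 2-D example above (the values and the expected
+  /// result are taken from that example; illustrative only):
+  ///
+  /// ``` swift
+  /// let sortedInputs: Tensor<Int32> = [[0, 3, 9, 9, 10], [1, 2, 3, 4, 5]]
+  /// let values: Tensor<Int32> = [[2, 4, 9], [0, 2, 6]]
+  /// let result: Tensor<Int32> = _Raw.upperBound(sortedInputs: sortedInputs, values)
+  /// // result == [[1, 2, 4], [0, 2, 5]]
+  /// ```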
+  @inlinable @inline(__always)
+  public static func upperBound<
+    T: TensorFlowScalar,
+    OutType: TensorFlowIndex
+  >(
+    sortedInputs: Tensor<T>,
+    _ values: Tensor<T>
+  ) -> Tensor<OutType> {
+    switch commonBackend(sortedInputs.handle.backend, values.handle.backend) {
+    case .XLA:
+      let output_device = values.device
+      let sortedInputs = Tensor(copying: sortedInputs, to: .defaultTFEager)
+      let values = Tensor(copying: values, to: .defaultTFEager)
+      return Tensor(
+        copying: _RawTFEager.upperBound(sortedInputs: sortedInputs, values), to: output_device)
+    case .TF_EAGER:
+      return _RawTFEager.upperBound(sortedInputs: sortedInputs, values)
+    }
+
+  }
+
+  /// Creates a handle to a Variable resource.
+  ///
+  /// - Attrs:
+  ///   - container: the container this variable is placed in.
+  ///   - shared_name: the name by which this variable is referred to.
+  ///   - dtype: the type of this variable. Must agree with the dtypes
+  ///     of all ops using this variable.
+  ///   - shape: The (possibly partially specified) shape of this variable.
+  @inlinable @inline(__always)
+  public static func varHandleOp(
+    container: String,
+    sharedName: String,
+    dtype: TensorDataType,
+    shape: TensorShape?
+  ) -> ResourceHandle {
+    _RawTFEager.varHandleOp(
+      container: container, sharedName: sharedName, dtype: dtype, shape: shape)
+  }
+
+  /// Checks whether a resource handle-based variable has been initialized.
+  ///
+  /// - Parameter resource: the input resource handle.
+  ///
+  /// - Output is_initialized: a scalar boolean which is true if the variable has been
+  ///   initialized.
+  @inlinable @inline(__always)
+  public static func varIsInitializedOp(
+    resource: ResourceHandle
+  ) -> Tensor<Bool> {
+    _RawTFEager.varIsInitializedOp(resource: resource)
+  }
+
+  /// Returns the shape of the variable pointed to by `resource`.
+  ///
+  /// This operation returns a 1-D integer tensor representing the shape of `input`.
+  ///
+  /// For example:
+  ///
+  /// ```
+  /// # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
+  /// shape(t) ==> [2, 2, 3]
+  /// ```
+  @inlinable @inline(__always)
+  public static func variableShape<OutType: TensorFlowIndex>(
+    _ input: ResourceHandle
+  ) -> Tensor<OutType> {
+    _RawTFEager.variableShape(input)
+  }
+
+  /// Returns locations of nonzero / true values in a tensor.
+  ///
+  /// This operation returns the coordinates of true elements in `condition`. The
+  /// coordinates are returned in a 2-D tensor where the first dimension (rows)
+  /// represents the number of true elements, and the second dimension (columns)
+  /// represents the coordinates of the true elements. Keep in mind, the shape of
+  /// the output tensor can vary depending on how many true values there are in
+  /// `condition`. Indices are output in row-major order.
+  ///
+  /// For example:
+  ///
+  /// ```
+  /// # 'input' tensor is [[True, False]
+  /// #                    [True, False]]
+  /// # 'input' has two true values, so output has two coordinates.
+  /// # 'input' has rank of 2, so coordinates have two indices.
+  /// where(input) ==> [[0, 0],
+  ///                   [1, 0]]
+  ///
+  /// # `condition` tensor is [[[True, False]
+  /// #                         [True, False]]
+  /// #                        [[False, True]
+  /// #                         [False, True]]
+  /// #                        [[False, False]
+  /// #                         [False, True]]]
+  /// # 'input' has 5 true values, so output has 5 coordinates.
+  /// # 'input' has rank of 3, so coordinates have three indices.
+  /// where(input) ==> [[0, 0, 0],
+  ///                   [0, 1, 0],
+  ///                   [1, 0, 1],
+  ///                   [1, 1, 1],
+  ///                   [2, 1, 1]]
+  ///
+  /// # `condition` tensor is [[[1.5, 0.0]
+  /// #                         [-0.5, 0.0]]
+  /// #                        [[0.0, 0.25]
+  /// #                         [0.0, 0.75]]
+  /// #                        [[0.0, 0.0]
+  /// #                         [0.0, 0.01]]]
+  /// # 'input' has 5 nonzero values, so output has 5 coordinates.
+  /// # 'input' has rank of 3, so coordinates have three indices.
+  /// where(input) ==> [[0, 0, 0],
+  ///                   [0, 1, 0],
+  ///                   [1, 0, 1],
+  ///                   [1, 1, 1],
+  ///                   [2, 1, 1]]
+  ///
+  /// # `condition` tensor is [[[1.5 + 0.0j, 0.0 + 0.0j]
+  /// #                         [0.0 + 0.5j, 0.0 + 0.0j]]
+  /// #                        [[0.0 + 0.0j, 0.25 + 1.5j]
+  /// #                         [0.0 + 0.0j, 0.75 + 0.0j]]
+  /// #                        [[0.0 + 0.0j, 0.0 + 0.0j]
+  /// #                         [0.0 + 0.0j, 0.01 + 0.0j]]]
+  /// # 'input' has 5 nonzero magnitude values, so output has 5 coordinates.
+  /// # 'input' has rank of 3, so coordinates have three indices.
+  /// where(input) ==> [[0, 0, 0],
+  ///                   [0, 1, 0],
+  ///                   [1, 0, 1],
+  ///                   [1, 1, 1],
+  ///                   [2, 1, 1]]
+  /// ```
+  @inlinable @inline(__always)
+  public static func where_<T: TensorFlowScalar>(
+    _ input: Tensor<T>
+  ) -> Tensor<Int64> {
+    switch input.handle.backend {
+    case .XLA:
+      let output_device = input.device
+      let input = Tensor(copying: input, to: .defaultTFEager)
+      return Tensor(copying: _RawTFEager.where_(input), to: output_device)
+    case .TF_EAGER:
+      return _RawTFEager.where_(input)
+    }
+
+  }
+
+  /// output = input; While (Cond(output)) { output = Body(output) }
+  ///
+  /// - Parameter input: A list of input tensors whose types are T.
+  ///
+  /// - Attrs:
+  ///   - T: dtype in use.
+  ///   - cond: A function takes 'input' and returns a tensor.  If the tensor is
+  ///     a scalar of non-boolean, the scalar is converted to a boolean
+  ///     according to the following rule: if the scalar is a numerical
+  ///     value, non-zero means True and zero means False; if the scalar is
+  ///     a string, non-empty means True and empty means False. If the
+  ///     tensor is not a scalar, non-emptiness means True and False
+  ///     otherwise.
+  ///   - body: A function that takes a list of tensors and returns another
+  ///     list of tensors. Both lists have the same types as specified
+  ///     by T.
+  ///
+  /// - Output output: A list of output tensors whose types are T.
+  @inlinable @inline(__always)
+  public static func while_<
+    T: TensorArrayProtocol,
+    CondIn: TensorGroup,
+    CondOut: TensorGroup,
+    BodyIn: TensorGroup,
+    BodyOut: TensorGroup
+  >(
+    _ input: T,
+    cond: (CondIn) -> CondOut,
+    body: (BodyIn) -> BodyOut,
+    outputShapes: [TensorShape?],
+    parallelIterations: Int64 = 10
+  ) -> T {
+    _RawTFEager.while_(
+      input, cond: cond, body: body, outputShapes: outputShapes,
+      parallelIterations: parallelIterations)
+  }
+
+  /// A Reader that outputs the entire contents of a file as a value.
+  ///
+  /// To use, enqueue filenames in a Queue.  The output of ReaderRead will
+  /// be a filename (key) and the contents of that file (value).
+  ///
+  /// - Attrs:
+  ///   - container: If non-empty, this reader is placed in the given container.
+  ///     Otherwise, a default container is used.
+  ///   - shared_name: If non-empty, this reader is named in the given bucket
+  ///     with this shared_name. Otherwise, the node name is used instead.
+  ///
+  /// - Output reader_handle: The handle to reference the Reader.
+  @inlinable @inline(__always)
+  public static func wholeFileReaderV2(
+    container: String,
+    sharedName: String
+  ) -> ResourceHandle {
+    _RawTFEager.wholeFileReaderV2(container: container, sharedName: sharedName)
+  }
+
+  /// Combines (nests of) input elements into a dataset of (nests of) windows.
+  ///
+  /// A "window" is a finite dataset of flat elements of size `size` (or possibly
+  /// fewer if there are not enough input elements to fill the window and
+  /// `drop_remainder` evaluates to false).
+  ///
+  /// The `shift` argument determines the number of input elements by which
+  /// the window moves on each iteration.  The first element in the `k`th window
+  /// will be element
+  ///
+  /// ```
+  /// 1 + (k-1) * shift
+  /// ```
+  ///
+  /// of the input dataset. In particular, the first element of the first window
+  /// will always be the first element of the input dataset.
+  ///
+  /// If the `stride` parameter is greater than 1, then each window will skip
+  /// `(stride - 1)` input elements between each element that appears in the
+  /// window. Output windows will still contain `size` elements regardless of
+  /// the value of `stride`.
+  ///
+  /// The `stride` argument determines the stride of the input elements, and the
+  /// `shift` argument determines the shift of the window.
+  ///
+  /// For example, letting `{...}` represent a Dataset:
+  ///
+  /// - `tf.data.Dataset.range(7).window(2)` produces
+  ///   `{{0, 1}, {2, 3}, {4, 5}, {6}}`
+  /// - `tf.data.Dataset.range(7).window(3, 2, 1, True)` produces
+  ///   `{{0, 1, 2}, {2, 3, 4}, {4, 5, 6}}`
+  /// - `tf.data.Dataset.range(7).window(3, 1, 2, True)` produces
+  ///   `{{0, 2, 4}, {1, 3, 5}, {2, 4, 6}}`
+  ///
+  /// Note that when the `window` transformation is applied to a dataset of
+  /// nested elements, it produces a dataset of nested windows.
+  ///
+  /// For example:
+  ///
+  /// - `tf.data.Dataset.from_tensor_slices((range(4), range(4))).window(2)`
+  ///   produces `{({0, 1}, {0, 1}), ({2, 3}, {2, 3})}`
+  /// - `tf.data.Dataset.from_tensor_slices({"a": range(4)}).window(2)`
+  ///   produces `{{"a": {0, 1}}, {"a": {2, 3}}}`
+  ///
+  /// - Parameters:
+  ///   - size: An integer scalar, representing the number of elements
+  ///     of the input dataset to combine into a window. Must be positive.
+  ///   - shift: An integer scalar, representing the number of input elements
+  ///     by which the window moves in each iteration.  Defaults to `size`.
+  ///     Must be positive.
+  ///   - stride: An integer scalar, representing the stride of the input elements
+  ///     in the sliding window. Must be positive. The default value of 1 means
+  ///     "retain every input element".
+  ///   - drop_remainder: A Boolean scalar, representing whether the last window should be
+  ///     dropped if its size is smaller than `window_size`.
+  @inlinable @inline(__always)
+  public static func windowDataset(
+    inputDataset: VariantHandle,
+    size: Tensor<Int64>,
+    shift: Tensor<Int64>,
+    stride: Tensor<Int64>,
+    dropRemainder: Tensor<Bool>,
+    outputTypes: [TensorDataType],
+    outputShapes: [TensorShape?]
+  ) -> VariantHandle {
+    _RawTFEager.windowDataset(
+      inputDataset: inputDataset, size: size, shift: shift, stride: stride,
+      dropRemainder: dropRemainder, outputTypes: outputTypes, outputShapes: outputShapes)
+  }
+
+  /// Worker heartbeat op.
+  ///
+  /// Heartbeats may be sent periodically to indicate the coordinator is still active,
+  /// to retrieve the current worker status and to expedite shutdown when necessary.
+  ///
+  /// - Parameter request: A string tensor containing a serialized WorkerHeartbeatRequest
+  ///
+  /// - Output response: A string tensor containing a serialized WorkerHeartbeatResponse
+  @inlinable @inline(__always)
+  public static func workerHeartbeat(
+    request: StringTensor
+  ) -> StringTensor {
+    _RawTFEager.workerHeartbeat(request: request)
+  }
+
+  @inlinable @inline(__always)
+  public static func wrapDatasetVariant(
+    inputHandle: VariantHandle
+  ) -> VariantHandle {
+    _RawTFEager.wrapDatasetVariant(inputHandle: inputHandle)
+  }
+
+  @inlinable @inline(__always)
+  public static func writeAudioSummary(
+    writer: ResourceHandle,
+    step: Tensor<Int64>,
+    tag: StringTensor,
+    _ tensor: Tensor<Float>,
+    sampleRate: Tensor<Float>,
+    maxOutputs: Int64 = 3
+  ) {
+    _RawTFEager.writeAudioSummary(
+      writer: writer, step: step, tag: tag, tensor, sampleRate: sampleRate, maxOutputs: maxOutputs
+    )
+  }
+
+  /// Writes contents to the file at input filename.
+  ///
+  /// Creates the file and recursively creates the directory if it does not exist.
+  ///
+  /// - Parameters:
+  ///   - filename: scalar. The name of the file to which we write the contents.
+  ///   - contents: scalar. The content to be written to the output file.
+  @inlinable @inline(__always)
+  public static func writeFile(
+    filename: StringTensor,
+    contents: StringTensor
+  ) {
+    _RawTFEager.writeFile(filename: filename, contents: contents)
+  }
+
+  @inlinable @inline(__always)
+  public static func writeGraphSummary(
+    writer: ResourceHandle,
+    step: Tensor<Int64>,
+    _ tensor: StringTensor
+  ) {
+    _RawTFEager.writeGraphSummary(writer: writer, step: step, tensor)
+  }
+
+  @inlinable @inline(__always)
+  public static func writeHistogramSummary<T: TensorFlowNumeric>(
+    writer: ResourceHandle,
+    step: Tensor<Int64>,
+    tag: StringTensor,
+    _ values: Tensor<T>
+  ) {
+    _RawTFEager.writeHistogramSummary(writer: writer, step: step, tag: tag, values)
+  }
+
+  @inlinable @inline(__always)
+  public static func writeImageSummary<T: TensorFlowNumeric>(
+    writer: ResourceHandle,
+    step: Tensor<Int64>,
+    tag: StringTensor,
+    _ tensor: Tensor<T>,
+    badColor: Tensor<UInt8>,
+    maxImages: Int64 = 3
+  ) {
+    _RawTFEager.writeImageSummary(
+      writer: writer, step: step, tag: tag, tensor, badColor: badColor, maxImages: maxImages)
+  }
+
+  @inlinable @inline(__always)
+  public static func writeRawProtoSummary(
+    writer: ResourceHandle,
+    step: Tensor<Int64>,
+    _ tensor: StringTensor
+  ) {
+    _RawTFEager.writeRawProtoSummary(writer: writer, step: step, tensor)
+  }
+
+  @inlinable @inline(__always)
+  public static func writeScalarSummary<T: TensorFlowNumeric>(
+    writer: ResourceHandle,
+    step: Tensor<Int64>,
+    tag: StringTensor,
+    value: Tensor<T>
+  ) {
+    _RawTFEager.writeScalarSummary(writer: writer, step: step, tag: tag, value: value)
+  }
+
+  @inlinable @inline(__always)
+  public static func writeSummary<T: TensorFlowScalar>(
+    writer: ResourceHandle,
+    step: Tensor<Int64>,
+    _ tensor: Tensor<T>,
+    tag: StringTensor,
+    summaryMetadata: StringTensor
+  ) {
+    _RawTFEager.writeSummary(
+      writer: writer, step: step, tensor, tag: tag, summaryMetadata: summaryMetadata)
+  }
+
+  /// Returns 0 if x == 0, and x / y otherwise, elementwise.
+  @inlinable @inline(__always)
+  public static func xdivy<T: FloatingPoint & TensorFlowScalar>(
+    _ x: Tensor<T>,
+    _ y: Tensor<T>
+  ) -> Tensor<T> {
+    switch commonBackend(x.handle.backend, y.handle.backend) {
+    case .XLA:
+      return _RawXLA.xdivy(x, y)
+    case .TF_EAGER:
+      return _RawTFEager.xdivy(x, y)
+    }
- /// Op returns the number of elements in the underlying container.
- @inlinable @inline(__always) - public static func orderedMapSize( - capacity: Int64 = 0, - memoryLimit: Int64 = 0, - dtypes: [TensorDataType], - container: String, - sharedName: String - ) -> Tensor { - _RawTFEager.orderedMapSize( - capacity: capacity, memoryLimit: memoryLimit, dtypes: dtypes, container: container, - sharedName: sharedName) - } - - /// Stage (key, values) in the underlying container which behaves like a ordered - /// - /// associative container. Elements are ordered by key. - /// - /// - Parameters: - /// - key: int64 - /// - values: a list of tensors - /// dtypes A list of data types that inserted values should adhere to. - /// - /// - Attrs: - /// - capacity: Maximum number of elements in the Staging Area. If > 0, inserts - /// on the container will block when the capacity is reached. - /// - container: If non-empty, this queue is placed in the given container. Otherwise, - /// a default container is used. - /// - shared_name: It is necessary to match this name to the matching Unstage Op. - @inlinable @inline(__always) - public static func orderedMapStage( - key: Tensor, - indices: Tensor, - _ values: FakeDtypes, - capacity: Int64 = 0, - memoryLimit: Int64 = 0, - dtypes: [TensorDataType], - container: String, - sharedName: String - ) { - _RawTFEager.orderedMapStage( - key: key, indices: indices, values, capacity: capacity, memoryLimit: memoryLimit, - dtypes: dtypes, container: container, sharedName: sharedName) - } - - /// Op removes and returns the values associated with the key - /// - /// from the underlying container. If the underlying container - /// does not contain this key, the op will block until it does. - @inlinable @inline(__always) - public static func orderedMapUnstage( - key: Tensor, - indices: Tensor, - capacity: Int64 = 0, - memoryLimit: Int64 = 0, - container: String, - sharedName: String - ) -> Dtypes { - _RawTFEager.orderedMapUnstage( - key: key, indices: indices, capacity: capacity, memoryLimit: memoryLimit, - container: container, sharedName: sharedName) - } + } - /// Op removes and returns the (key, value) element with the smallest - /// - /// key from the underlying container. If the underlying container - /// does not contain elements, the op will block until it does. - @inlinable @inline(__always) - public static func orderedMapUnstageNoKey( - indices: Tensor, - capacity: Int64 = 0, - memoryLimit: Int64 = 0, - container: String, - sharedName: String - ) -> (key: Tensor, values: Dtypes) { - _RawTFEager.orderedMapUnstageNoKey( - indices: indices, capacity: capacity, memoryLimit: memoryLimit, container: container, - sharedName: sharedName) - } - - @inlinable @inline(__always) - public static func outT() -> Tensor { - _RawTFEager.outT() - } - - @inlinable @inline(__always) - public static func outTypeList() -> T { - _RawTFEager.outTypeList() - } - - @inlinable @inline(__always) - public static func outTypeListRestrict() -> T { - _RawTFEager.outTypeListRestrict() - } - - /// Retrieves a single tensor from the computation outfeed. - /// - /// This operation will block indefinitely until data is available. - /// - /// - Attrs: - /// - dtype: The type of elements in the tensor. - /// - shape: The shape of the tensor. - /// - device_ordinal: The TPU device to use. This should be -1 when the Op - /// is running on a TPU device, and >= 0 when the Op is running on the CPU - /// device. - /// - /// - Output output: A tensor that will be read from the device outfeed. 
- @inlinable @inline(__always) - public static func outfeedDequeue( - shape: TensorShape?, - deviceOrdinal: Int64 = -1 - ) -> Tensor { - _RawTFEager.outfeedDequeue(shape: shape, deviceOrdinal: deviceOrdinal) - } - - /// Retrieve multiple values from the computation outfeed. - /// - /// This operation will block indefinitely until data is available. Output `i` - /// corresponds to XLA tuple element `i`. - /// - /// - Attrs: - /// - dtypes: The element types of each element in `outputs`. - /// - shapes: The shapes of each tensor in `outputs`. - /// - device_ordinal: The TPU device to use. This should be -1 when the Op - /// is running on a TPU device, and >= 0 when the Op is running on the CPU - /// device. - /// - /// - Output outputs: A list of tensors that will be read from the outfeed. - @inlinable @inline(__always) - public static func outfeedDequeueTuple( - shapes: [TensorShape?], - deviceOrdinal: Int64 = -1 - ) -> Dtypes { - _RawTFEager.outfeedDequeueTuple(shapes: shapes, deviceOrdinal: deviceOrdinal) - } - - /// Enqueue a Tensor on the computation outfeed. - /// - /// - Parameter input: A tensor that will be inserted into the outfeed queue. - @inlinable @inline(__always) - public static func outfeedEnqueue( - _ input: Tensor - ) { - _RawTFEager.outfeedEnqueue(input) + /// Helper operator for performing XLA-style broadcasts + /// + /// Broadcasts `lhs` and `rhs` to the same rank, by adding size 1 dimensions to + /// whichever of `lhs` and `rhs` has the lower rank, using XLA's broadcasting rules + /// for binary operators. + /// + /// - Parameters: + /// - lhs: the LHS input tensor + /// - rhs: the RHS input tensor + /// - broadcast_dims: an XLA-style broadcast dimension specification + /// + /// - Outputs: + /// - lhs_output: the broadcasted LHS tensor + /// - rhs_output: the broadcasted RHS tensor + @inlinable @inline(__always) + public static func xlaBroadcastHelper< + T: TensorFlowNumeric, + Tindices: TensorFlowIndex + >( + lhs: Tensor, + rhs: Tensor, + broadcastDims: Tensor + ) -> (lhsOutput: Tensor, rhsOutput: Tensor) { + _RawTFEager.xlaBroadcastHelper(lhs: lhs, rhs: rhs, broadcastDims: broadcastDims) + } + + /// Operator that connects the output of an XLA computation to other consumer graph nodes. + @inlinable @inline(__always) + public static func xlaClusterOutput( + _ input: Tensor + ) -> Tensor { + switch input.handle.backend { + case .XLA: + let output_device = input.device + let input = Tensor(copying: input, to: .defaultTFEager) + return Tensor(copying: _RawTFEager.xlaClusterOutput(input), to: output_device) + case .TF_EAGER: + return _RawTFEager.xlaClusterOutput(input) } - /// Enqueue multiple Tensor values on the computation outfeed. - /// - /// - Parameter inputs: A list of tensors that will be inserted into the outfeed queue as an - /// XLA tuple. - @inlinable @inline(__always) - public static func outfeedEnqueueTuple( - inputs: Dtypes - ) { - _RawTFEager.outfeedEnqueueTuple(inputs: inputs) - } - - /// Packs a list of `N` rank-`R` tensors into one rank-`(R+1)` tensor. - /// - /// Packs the `N` tensors in `values` into a tensor with rank one higher than each - /// tensor in `values`, by packing them along the `axis` dimension. - /// Given a list of tensors of shape `(A, B, C)`; - /// - /// if `axis == 0` then the `output` tensor will have the shape `(N, A, B, C)`. - /// if `axis == 1` then the `output` tensor will have the shape `(A, N, B, C)`. - /// Etc. 
- /// - /// For example: - /// - /// ``` - /// # 'x' is [1, 4] - /// # 'y' is [2, 5] - /// # 'z' is [3, 6] - /// pack([x, y, z]) => [[1, 4], [2, 5], [3, 6]] # Pack along first dim. - /// pack([x, y, z], axis=1) => [[1, 2, 3], [4, 5, 6]] - /// ``` - /// - /// This is the opposite of `unpack`. - /// - /// - Parameter values: Must be of same shape and type. - /// - /// - Attr axis: Dimension along which to pack. Negative values wrap around, so the - /// valid range is `[-(R+1), R+1)`. - /// - /// - Output output: The packed tensor. - @inlinable @inline(__always) - public static func pack( - _ values: [Tensor], - axis: Int64 = 0 - ) -> Tensor { - switch commonBackend(values) { - case .XLA: - return _RawXLA.pack(values, axis: axis) - case .TF_EAGER: - return _RawTFEager.pack(values, axis: axis) - } - - } - - /// Pads a tensor with zeros. - /// - /// This operation pads a `input` with zeros according to the `paddings` you - /// specify. `paddings` is an integer tensor with shape `[Dn, 2]`, where n is the - /// rank of `input`. For each dimension D of `input`, `paddings[D, 0]` indicates - /// how many zeros to add before the contents of `input` in that dimension, and - /// `paddings[D, 1]` indicates how many zeros to add after the contents of `input` - /// in that dimension. - /// - /// The padded size of each dimension D of the output is: - /// - /// `paddings(D, 0) + input.dim_size(D) + paddings(D, 1)` - /// - /// For example: - /// - /// ``` - /// # 't' is [[1, 1], [2, 2]] - /// # 'paddings' is [[1, 1], [2, 2]] - /// # rank of 't' is 2 - /// pad(t, paddings) ==> [[0, 0, 0, 0, 0, 0] - /// [0, 0, 1, 1, 0, 0] - /// [0, 0, 2, 2, 0, 0] - /// [0, 0, 0, 0, 0, 0]] - /// ``` - /// - @inlinable @inline(__always) - public static func pad< - T: TensorFlowScalar, - Tpaddings: TensorFlowIndex - >( - _ input: Tensor, - paddings: Tensor - ) -> Tensor { - switch commonBackend(input.handle.backend, paddings.handle.backend) { - case .XLA: - return _RawXLA.pad(input, paddings: paddings) - case .TF_EAGER: - return _RawTFEager.pad(input, paddings: paddings) - } - - } - - /// Pads a tensor. - /// - /// This operation pads `input` according to the `paddings` and `constant_values` - /// you specify. `paddings` is an integer tensor with shape `[Dn, 2]`, where n is - /// the rank of `input`. For each dimension D of `input`, `paddings[D, 0]` indicates - /// how many padding values to add before the contents of `input` in that dimension, - /// and `paddings[D, 1]` indicates how many padding values to add after the contents - /// of `input` in that dimension. `constant_values` is a scalar tensor of the same - /// type as `input` that indicates the value to use for padding `input`. 
- /// - /// The padded size of each dimension D of the output is: - /// - /// `paddings(D, 0) + input.dim_size(D) + paddings(D, 1)` - /// - /// For example: - /// - /// ``` - /// # 't' is [[1, 1], [2, 2]] - /// # 'paddings' is [[1, 1], [2, 2]] - /// # 'constant_values' is 0 - /// # rank of 't' is 2 - /// pad(t, paddings) ==> [[0, 0, 0, 0, 0, 0] - /// [0, 0, 1, 1, 0, 0] - /// [0, 0, 2, 2, 0, 0] - /// [0, 0, 0, 0, 0, 0]] - /// ``` - @inlinable @inline(__always) - public static func padV2< - T: TensorFlowScalar, - Tpaddings: TensorFlowIndex - >( - _ input: Tensor, - paddings: Tensor, - constantValues: Tensor - ) -> Tensor { - switch commonBackend( - commonBackend(input.handle.backend, paddings.handle.backend), constantValues.handle.backend) - { - case .XLA: - return _RawXLA.padV2(input, paddings: paddings, constantValues: constantValues) - case .TF_EAGER: - return _RawTFEager.padV2(input, paddings: paddings, constantValues: constantValues) - } - - } - - /// Creates a dataset that batches and pads `batch_size` elements from the input. - /// - /// - Parameters: - /// - batch_size: A scalar representing the number of elements to accumulate in a - /// batch. - /// - padded_shapes: A list of int64 tensors representing the desired padded shapes - /// of the corresponding output components. These shapes may be partially - /// specified, using `-1` to indicate that a particular dimension should be - /// padded to the maximum size of all batch elements. - /// - padding_values: A list of scalars containing the padding value to use for - /// each of the outputs. - @inlinable @inline(__always) - public static func paddedBatchDataset( - inputDataset: VariantHandle, - batchSize: Tensor, - paddedShapes: [Tensor], - paddingValues: ToutputTypes, - outputShapes: [TensorShape?] - ) -> VariantHandle { - _RawTFEager.paddedBatchDataset( - inputDataset: inputDataset, batchSize: batchSize, paddedShapes: paddedShapes, - paddingValues: paddingValues, outputShapes: outputShapes) - } - - /// Creates a dataset that batches and pads `batch_size` elements from the input. - /// - /// - Parameters: - /// - batch_size: A scalar representing the number of elements to accumulate in a - /// batch. - /// - padded_shapes: A list of int64 tensors representing the desired padded shapes - /// of the corresponding output components. These shapes may be partially - /// specified, using `-1` to indicate that a particular dimension should be - /// padded to the maximum size of all batch elements. - /// - padding_values: A list of scalars containing the padding value to use for - /// each of the outputs. - /// - drop_remainder: A scalar representing whether the last batch should be dropped in case its size - /// is smaller than desired. - @inlinable @inline(__always) - public static func paddedBatchDatasetV2( - inputDataset: VariantHandle, - batchSize: Tensor, - paddedShapes: [Tensor], - paddingValues: ToutputTypes, - dropRemainder: Tensor, - parallelCopy: Bool = false, - outputShapes: [TensorShape?] - ) -> VariantHandle { - _RawTFEager.paddedBatchDatasetV2( - inputDataset: inputDataset, batchSize: batchSize, paddedShapes: paddedShapes, - paddingValues: paddingValues, dropRemainder: dropRemainder, parallelCopy: parallelCopy, - outputShapes: outputShapes) - } - - /// A queue that produces elements in first-in first-out order. - /// - /// Variable-size shapes are allowed by setting the corresponding shape dimensions - /// to 0 in the shape attr. 
In this case DequeueMany will pad up to the maximum - /// size of any given element in the minibatch. See below for details. - /// - /// - Attrs: - /// - component_types: The type of each component in a value. - /// - shapes: The shape of each component in a value. The length of this attr must - /// be either 0 or the same as the length of component_types. - /// Shapes of fixed rank but variable size are allowed by setting - /// any shape dimension to -1. In this case, the inputs' shape may vary along - /// the given dimension, and DequeueMany will pad the given dimension with - /// zeros up to the maximum shape of all elements in the given batch. - /// If the length of this attr is 0, different queue elements may have - /// different ranks and shapes, but only one element may be dequeued at a time. - /// - capacity: The upper bound on the number of elements in this queue. - /// Negative numbers mean no limit. - /// - container: If non-empty, this queue is placed in the given container. - /// Otherwise, a default container is used. - /// - shared_name: If non-empty, this queue will be shared under the given name - /// across multiple sessions. - /// - /// - Output handle: The handle to the queue. - @inlinable @inline(__always) - public static func paddingFIFOQueueV2( - componentTypes: [TensorDataType], - shapes: [TensorShape?], - capacity: Int64 = -1, - container: String, - sharedName: String - ) -> ResourceHandle { - _RawTFEager.paddingFIFOQueueV2( - componentTypes: componentTypes, shapes: shapes, capacity: capacity, container: container, - sharedName: sharedName) - } - - /// Concatenates a list of `N` tensors along the first dimension. - /// - /// The input tensors are all required to have size 1 in the first dimension. - /// - /// For example: - /// - /// ``` - /// # 'x' is [[1, 4]] - /// # 'y' is [[2, 5]] - /// # 'z' is [[3, 6]] - /// parallel_concat([x, y, z]) => [[1, 4], [2, 5], [3, 6]] # Pack along first dim. - /// ``` - /// - /// The difference between concat and parallel_concat is that concat requires all - /// of the inputs be computed before the operation will begin but doesn't require - /// that the input shapes be known during graph construction. Parallel concat - /// will copy pieces of the input into the output as they become available, in - /// some situations this can provide a performance benefit. - /// - /// - Parameter values: Tensors to be concatenated. All must have size 1 in the first dimension - /// and same shape. - /// - /// - Attr shape: the final shape of the result; should be equal to the shapes of any input - /// but with the number of input values in the first dimension. - /// - /// - Output output: The concatenated tensor. - @inlinable @inline(__always) - public static func parallelConcat( - _ values: [Tensor], - shape: TensorShape? - ) -> Tensor { - _RawTFEager.parallelConcat(values, shape: shape) - } - - /// Interleave the values from the `data` tensors into a single tensor. - /// - /// Builds a merged tensor such that - /// - /// ```python - /// merged[indices[m][i, ..., j], ...] = data[m][i, ..., j, ...] - /// ``` - /// - /// For example, if each `indices[m]` is scalar or vector, we have - /// - /// ```python - /// # Scalar indices: - /// merged[indices[m], ...] = data[m][...] - /// - /// # Vector indices: - /// merged[indices[m][i], ...] = data[m][i, ...] - /// ``` - /// - /// Each `data[i].shape` must start with the corresponding `indices[i].shape`, - /// and the rest of `data[i].shape` must be constant w.r.t. `i`. 
That is, we - /// must have `data[i].shape = indices[i].shape + constant`. In terms of this - /// `constant`, the output shape is - /// - /// merged.shape = [max(indices)] + constant - /// - /// Values may be merged in parallel, so if an index appears in both `indices[m][i]` - /// and `indices[n][j]`, the result may be invalid. This differs from the normal - /// DynamicStitch operator that defines the behavior in that case. - /// - /// For example: - /// - /// ```python - /// indices[0] = 6 - /// indices[1] = [4, 1] - /// indices[2] = [[5, 2], [0, 3]] - /// data[0] = [61, 62] - /// data[1] = [[41, 42], [11, 12]] - /// data[2] = [[[51, 52], [21, 22]], [[1, 2], [31, 32]]] - /// merged = [[1, 2], [11, 12], [21, 22], [31, 32], [41, 42], - /// [51, 52], [61, 62]] - /// ``` - /// - /// This method can be used to merge partitions created by `dynamic_partition` - /// as illustrated on the following example: - /// - /// ```python - /// # Apply function (increments x_i) on elements for which a certain condition - /// # apply (x_i != -1 in this example). - /// x=tf.constant([0.1, -1., 5.2, 4.3, -1., 7.4]) - /// condition_mask=tf.not_equal(x,tf.constant(-1.)) - /// partitioned_data = tf.dynamic_partition( - /// x, tf.cast(condition_mask, tf.int32) , 2) - /// partitioned_data[1] = partitioned_data[1] + 1.0 - /// condition_indices = tf.dynamic_partition( - /// tf.range(tf.shape(x)[0]), tf.cast(condition_mask, tf.int32) , 2) - /// x = tf.dynamic_stitch(condition_indices, partitioned_data) - /// # Here x=[1.1, -1., 6.2, 5.3, -1, 8.4], the -1. values remain - /// # unchanged. - /// ``` - /// - ///
- @inlinable @inline(__always) - public static func parallelDynamicStitch( - indices: [Tensor], - data: [Tensor] - ) -> Tensor { - _RawTFEager.parallelDynamicStitch(indices: indices, data: data) - } - - /// Creates a dataset that applies `f` to the outputs of `input_dataset`. - /// - /// The resulting dataset is similar to the `InterleaveDataset`, with the exception - /// that if retrieving the next value from a dataset would cause the requester to - /// block, it will skip that input dataset. This dataset is especially useful - /// when loading data from a variable-latency datastores (e.g. HDFS, GCS), as it - /// allows the training step to proceed so long as some data is available. - /// - /// !! WARNING !! If the `sloppy` parameter is set to `True`, the operation of this - /// dataset will not be deterministic! - /// - /// This dataset has been superseded by `ParallelInterleaveDatasetV2`. New code - /// should use `ParallelInterleaveDatasetV2`. - /// - /// The Python API `tf.data.experimental.parallel_interleave` creates instances of - /// this op. `tf.data.experimental.parallel_interleave` is a deprecated API. - /// - /// - Parameters: - /// - input_dataset: Dataset that produces a stream of arguments for the function `f`. - /// - other_arguments: Additional arguments to pass to `f` beyond those produced by `input_dataset`. - /// Evaluated once when the dataset is instantiated. - /// - cycle_length: Number of datasets (each created by applying `f` to the elements of - /// `input_dataset`) among which the `ParallelInterleaveDataset` will cycle in a - /// round-robin fashion. - /// - block_length: Number of elements at a time to produce from each interleaved invocation of a - /// dataset returned by `f`. - /// - sloppy: If `True`, return elements as they become available, even if that means returning - /// these elements in a non-deterministic order. Sloppy operation may result in better - /// performance in the presence of stragglers, but the dataset will still block if - /// all of its open streams are blocked. - /// If `False`, always return elements in a deterministic order. - /// - buffer_output_elements: The number of elements each iterator being interleaved should buffer (similar - /// to the `.prefetch()` transformation for each interleaved iterator). - /// - prefetch_input_elements: Determines the number of iterators to prefetch, allowing buffers to warm up and - /// data to be pre-fetched without blocking the main thread. - /// - /// - Attrs: - /// - f: A function mapping elements of `input_dataset`, concatenated with - /// `other_arguments`, to a Dataset variant that contains elements matching - /// `output_types` and `output_shapes`. - /// - Targuments: Types of the elements of `other_arguments`. - @inlinable @inline(__always) - public static func parallelInterleaveDataset< - FIn: TensorGroup, - FOut: TensorGroup, - Targuments: TensorArrayProtocol - >( - inputDataset: VariantHandle, - otherArguments: Targuments, - cycleLength: Tensor, - blockLength: Tensor, - sloppy: Tensor, - bufferOutputElements: Tensor, - prefetchInputElements: Tensor, - f: (FIn) -> FOut, - outputTypes: [TensorDataType], - outputShapes: [TensorShape?] 
- ) -> VariantHandle { - _RawTFEager.parallelInterleaveDataset( - inputDataset: inputDataset, otherArguments: otherArguments, cycleLength: cycleLength, - blockLength: blockLength, sloppy: sloppy, bufferOutputElements: bufferOutputElements, - prefetchInputElements: prefetchInputElements, f: f, outputTypes: outputTypes, - outputShapes: outputShapes) - } - - /// Creates a dataset that applies `f` to the outputs of `input_dataset`. - /// - /// The resulting dataset is similar to the `InterleaveDataset`, except that the - /// dataset will fetch records from the interleaved datasets in parallel. - /// - /// The `tf.data` Python API creates instances of this op from - /// `Dataset.interleave()` when the `num_parallel_calls` parameter of that method - /// is set to any value other than `None`. - /// - /// By default, the output of this dataset will be deterministic, which may result - /// in the dataset blocking if the next data item to be returned isn't available. - /// In order to avoid head-of-line blocking, one can set the - /// `experimental_deterministic` parameter of `tf.data.Options` to `False`, - /// which can improve performance at the expense of non-determinism. - /// - /// - Parameters: - /// - input_dataset: Dataset that produces a stream of arguments for the function `f`. - /// - other_arguments: Additional arguments to pass to `f` beyond those produced by `input_dataset`. - /// Evaluated once when the dataset is instantiated. - /// - cycle_length: Number of datasets (each created by applying `f` to the elements of - /// `input_dataset`) among which the `ParallelInterleaveDatasetV2` will cycle in a - /// round-robin fashion. - /// - block_length: Number of elements at a time to produce from each interleaved invocation of a - /// dataset returned by `f`. - /// - num_parallel_calls: Determines the number of threads that should be used for fetching data from - /// input datasets in parallel. The Python API `tf.data.experimental.AUTOTUNE` - /// constant can be used to indicate that the level of parallelism should be autotuned. - /// - /// - Attrs: - /// - f: A function mapping elements of `input_dataset`, concatenated with - /// `other_arguments`, to a Dataset variant that contains elements matching - /// `output_types` and `output_shapes`. - /// - Targuments: Types of the elements of `other_arguments`. - @inlinable @inline(__always) - public static func parallelInterleaveDatasetV2< - FIn: TensorGroup, - FOut: TensorGroup, - Targuments: TensorArrayProtocol - >( - inputDataset: VariantHandle, - otherArguments: Targuments, - cycleLength: Tensor, - blockLength: Tensor, - numParallelCalls: Tensor, - f: (FIn) -> FOut, - outputTypes: [TensorDataType], - outputShapes: [TensorShape?], - sloppy: Bool = false - ) -> VariantHandle { - _RawTFEager.parallelInterleaveDatasetV2( - inputDataset: inputDataset, otherArguments: otherArguments, cycleLength: cycleLength, - blockLength: blockLength, numParallelCalls: numParallelCalls, f: f, - outputTypes: outputTypes, outputShapes: outputShapes, sloppy: sloppy) - } - - /// Creates a dataset that applies `f` to the outputs of `input_dataset`. - /// - /// Unlike a "MapDataset", which applies `f` sequentially, this dataset invokes up - /// to `num_parallel_calls` copies of `f` in parallel. - /// - /// - Parameter num_parallel_calls: The number of concurrent invocations of `f` that process - /// elements from `input_dataset` in parallel. 
- @inlinable @inline(__always) - public static func parallelMapDataset< - FIn: TensorGroup, - FOut: TensorGroup, - Targuments: TensorArrayProtocol - >( - inputDataset: VariantHandle, - otherArguments: Targuments, - numParallelCalls: Tensor, - f: (FIn) -> FOut, - outputTypes: [TensorDataType], - outputShapes: [TensorShape?], - useInterOpParallelism: Bool = true, - sloppy: Bool = false, - preserveCardinality: Bool = false - ) -> VariantHandle { - _RawTFEager.parallelMapDataset( - inputDataset: inputDataset, otherArguments: otherArguments, - numParallelCalls: numParallelCalls, f: f, outputTypes: outputTypes, - outputShapes: outputShapes, useInterOpParallelism: useInterOpParallelism, sloppy: sloppy, - preserveCardinality: preserveCardinality) - } - - /// Outputs random values from a normal distribution. The parameters may each be a - /// - /// scalar which applies to the entire output, or a vector of length shape[0] which - /// stores the parameters for each batch. - /// - /// - Parameters: - /// - shape: The shape of the output tensor. Batches are indexed by the 0th dimension. - /// - means: The mean parameter of each batch. - /// - stdevs: The standard deviation parameter of each batch. Must be greater than 0. - /// - minvals: The minimum cutoff. May be -infinity. - /// - maxvals: The maximum cutoff. May be +infinity, and must be more than the minval - /// for each batch. - /// - /// - Attrs: - /// - seed: If either `seed` or `seed2` are set to be non-zero, the random number - /// generator is seeded by the given seed. Otherwise, it is seeded by a - /// random seed. - /// - seed2: A second seed to avoid seed collision. - /// - dtype: The type of the output. - /// - /// - Output output: A matrix of shape num_batches x samples_per_batch, filled with random - /// truncated normal values using the parameters for each row. - @inlinable @inline(__always) - public static func parameterizedTruncatedNormal< - Dtype: FloatingPoint & TensorFlowScalar, - T: TensorFlowIndex - >( - shape: Tensor, - means: Tensor, - stdevs: Tensor, - minvals: Tensor, - maxvals: Tensor, - seed: Int64 = 0, - seed2: Int64 = 0 - ) -> Tensor { - switch commonBackend( + } + + /// Wraps the XLA ConvGeneralDilated operator, documented at + /// + /// https://www.tensorflow.org/performance/xla/operation_semantics#conv_convolution + /// . + /// + /// - Parameters: + /// - lhs: the input tensor + /// - rhs: the kernel tensor + /// - window_strides: the inter-window strides + /// - padding: the padding to apply at the start and end of each input dimensions + /// - lhs_dilation: dilation to apply between input elements + /// - rhs_dilation: dilation to apply between kernel elements + /// - feature_group_count: number of feature groups for grouped convolution. + /// + /// - Attrs: + /// - dimension_numbers: a serialized xla::ConvolutionDimensionNumbers proto. + /// - precision_config: a serialized xla::PrecisionConfig proto. 
+  @inlinable @inline(__always)
+  public static func xlaConv<
+    T: TensorFlowNumeric,
+    Tindices: TensorFlowIndex
+  >(
+    lhs: Tensor<T>,
+    rhs: Tensor<T>,
+    windowStrides: Tensor<Tindices>,
+    padding: Tensor<Tindices>,
+    lhsDilation: Tensor<Tindices>,
+    rhsDilation: Tensor<Tindices>,
+    featureGroupCount: Tensor<Tindices>,
+    dimensionNumbers: String,
+    precisionConfig: String
+  ) -> Tensor<T> {
+    switch commonBackend(
+      commonBackend(
+        commonBackend(
+          commonBackend(
- /// If dense_shapes[j] == (D0, D1, ..., DN) then the shape of output - /// Tensor dense_values[j] will be (|serialized|, D0, D1, ..., DN): - /// The dense outputs are just the inputs row-stacked by batch. - /// This works for dense_shapes[j] = (-1, D1, ..., DN). In this case - /// the shape of the output Tensor dense_values[j] will be - /// (|serialized|, M, D1, .., DN), where M is the maximum number of blocks - /// of elements of length D1 * .... * DN, across all minibatch entries - /// in the input. Any minibatch entry with less than M blocks of elements of - /// length D1 * ... * DN will be padded with the corresponding default_value - /// scalar element along the second dimension. - @inlinable @inline(__always) - public static func parseExample< - SparseTypes: TensorGroup, - Tdense: TensorArrayProtocol - >( - serialized: StringTensor, - names: StringTensor, - sparseKeys: [StringTensor], - denseKeys: [StringTensor], - denseDefaults: Tdense, - denseShapes: [TensorShape?] - ) -> ( - sparseIndices: [Tensor], sparseValues: SparseTypes, sparseShapes: [Tensor], - denseValues: Tdense - ) { - _RawTFEager.parseExample( - serialized: serialized, names: names, sparseKeys: sparseKeys, denseKeys: denseKeys, - denseDefaults: denseDefaults, denseShapes: denseShapes) - } - - /// Transforms `input_dataset` containing `Example` protos as vectors of DT_STRING into a dataset of `Tensor` or `SparseTensor` objects representing the parsed features. - /// - /// - Parameter dense_defaults: A dict mapping string keys to `Tensor`s. - /// The keys of the dict must match the dense_keys of the feature. - /// - /// - Attrs: - /// - sparse_keys: A list of string keys in the examples features. - /// The results for these keys will be returned as `SparseTensor` objects. - /// - dense_keys: A list of Ndense string Tensors (scalars). - /// The keys expected in the Examples features associated with dense values. - /// - sparse_types: A list of `DTypes` of the same length as `sparse_keys`. - /// Only `tf.float32` (`FloatList`), `tf.int64` (`Int64List`), - /// and `tf.string` (`BytesList`) are supported. - /// - Tdense: A list of DTypes of the same length as `dense_keys`. - /// Only `tf.float32` (`FloatList`), `tf.int64` (`Int64List`), - /// and `tf.string` (`BytesList`) are supported. - /// - /// - dense_shapes: List of tuples with the same length as `dense_keys`. - /// The shape of the data for each dense feature referenced by `dense_keys`. - /// Required for any input tensors identified by `dense_keys`. Must be - /// either fully defined, or may contain an unknown first dimension. - /// An unknown first dimension means the feature is treated as having - /// a variable number of blocks, and the output shape along this dimension - /// is considered unknown at graph build time. Padding is applied for - /// minibatch elements smaller than the maximum number of blocks for the - /// given feature along this dimension. - /// - output_types: The type list for the return values. - /// - output_shapes: The list of shapes being produced. 
- @inlinable @inline(__always) - public static func parseExampleDataset( - inputDataset: VariantHandle, - numParallelCalls: Tensor, - denseDefaults: Tdense, - sparseKeys: [String], - denseKeys: [String], - sparseTypes: [TensorDataType], - denseShapes: [TensorShape?], - outputTypes: [TensorDataType], - outputShapes: [TensorShape?], - sloppy: Bool = false, - raggedKeys: [String], - raggedValueTypes: [TensorDataType], - raggedSplitTypes: [TensorDataType] - ) -> VariantHandle { - _RawTFEager.parseExampleDataset( - inputDataset: inputDataset, numParallelCalls: numParallelCalls, - denseDefaults: denseDefaults, sparseKeys: sparseKeys, denseKeys: denseKeys, - sparseTypes: sparseTypes, denseShapes: denseShapes, outputTypes: outputTypes, - outputShapes: outputShapes, sloppy: sloppy, raggedKeys: raggedKeys, - raggedValueTypes: raggedValueTypes, raggedSplitTypes: raggedSplitTypes) - } - - /// Transforms a vector of tf.Example protos (as strings) into typed tensors. - /// - /// - Parameters: - /// - serialized: A scalar or vector containing binary serialized Example protos. - /// - names: A tensor containing the names of the serialized protos. - /// Corresponds 1:1 with the `serialized` tensor. - /// May contain, for example, table key (descriptive) names for the - /// corresponding serialized protos. These are purely useful for debugging - /// purposes, and the presence of values here has no effect on the output. - /// May also be an empty vector if no names are available. - /// If non-empty, this tensor must have the same shape as "serialized". - /// - sparse_keys: Vector of strings. - /// The keys expected in the Examples' features associated with sparse values. - /// - dense_keys: Vector of strings. - /// The keys expected in the Examples' features associated with dense values. - /// - ragged_keys: Vector of strings. - /// The keys expected in the Examples' features associated with ragged values. - /// - dense_defaults: A list of Tensors (some may be empty). Corresponds 1:1 with `dense_keys`. - /// dense_defaults[j] provides default values - /// when the example's feature_map lacks dense_key[j]. If an empty Tensor is - /// provided for dense_defaults[j], then the Feature dense_keys[j] is required. - /// The input type is inferred from dense_defaults[j], even when it's empty. - /// If dense_defaults[j] is not empty, and dense_shapes[j] is fully defined, - /// then the shape of dense_defaults[j] must match that of dense_shapes[j]. - /// If dense_shapes[j] has an undefined major dimension (variable strides dense - /// feature), dense_defaults[j] must contain a single element: - /// the padding element. - /// - /// - Attrs: - /// - num_sparse: The number of sparse keys. - /// - sparse_types: A list of `num_sparse` types; the data types of data in each Feature - /// given in sparse_keys. - /// Currently the ParseExample supports DT_FLOAT (FloatList), - /// DT_INT64 (Int64List), and DT_STRING (BytesList). - /// - ragged_value_types: A list of `num_ragged` types; the data types of data in each Feature - /// given in ragged_keys (where `num_ragged = sparse_keys.size()`). - /// Currently the ParseExample supports DT_FLOAT (FloatList), - /// DT_INT64 (Int64List), and DT_STRING (BytesList). - /// - ragged_split_types: A list of `num_ragged` types; the data types of row_splits in each Feature - /// given in ragged_keys (where `num_ragged = sparse_keys.size()`). - /// May be DT_INT32 or DT_INT64. 
- /// - dense_shapes: A list of `num_dense` shapes; the shapes of data in each Feature - /// given in dense_keys (where `num_dense = dense_keys.size()`). - /// The number of elements in the Feature corresponding to dense_key[j] - /// must always equal dense_shapes[j].NumEntries(). - /// If dense_shapes[j] == (D0, D1, ..., DN) then the shape of output - /// Tensor dense_values[j] will be (|serialized|, D0, D1, ..., DN): - /// The dense outputs are just the inputs row-stacked by batch. - /// This works for dense_shapes[j] = (-1, D1, ..., DN). In this case - /// the shape of the output Tensor dense_values[j] will be - /// (|serialized|, M, D1, .., DN), where M is the maximum number of blocks - /// of elements of length D1 * .... * DN, across all minibatch entries - /// in the input. Any minibatch entry with less than M blocks of elements of - /// length D1 * ... * DN will be padded with the corresponding default_value - /// scalar element along the second dimension. - @inlinable @inline(__always) - public static func parseExampleV2< - Tdense: TensorArrayProtocol, - SparseTypes: TensorGroup, - RaggedValueTypes: TensorGroup, - RaggedSplitTypes: TensorGroup - >( - serialized: StringTensor, - names: StringTensor, - sparseKeys: StringTensor, - denseKeys: StringTensor, - raggedKeys: StringTensor, - denseDefaults: Tdense, - numSparse: Int64, - denseShapes: [TensorShape?] - ) -> ( - sparseIndices: [Tensor], sparseValues: SparseTypes, sparseShapes: [Tensor], - denseValues: Tdense, raggedValues: RaggedValueTypes, raggedRowSplits: RaggedSplitTypes - ) { - _RawTFEager.parseExampleV2( - serialized: serialized, names: names, sparseKeys: sparseKeys, denseKeys: denseKeys, - raggedKeys: raggedKeys, denseDefaults: denseDefaults, numSparse: numSparse, - denseShapes: denseShapes) - } - - /// Transforms a vector of brain.SequenceExample protos (as strings) into typed tensors. - /// - /// - Parameters: - /// - serialized: A vector containing binary serialized SequenceExample protos. - /// - debug_name: A vector containing the names of the serialized protos. - /// May contain, for example, table key (descriptive) name for the - /// corresponding serialized proto. This is purely useful for debugging - /// purposes, and the presence of values here has no effect on the output. - /// May also be an empty vector if no name is available. - /// - context_dense_defaults: A list of Ncontext_dense Tensors (some may be empty). - /// context_dense_defaults[j] provides default values - /// when the SequenceExample's context map lacks context_dense_key[j]. - /// If an empty Tensor is provided for context_dense_defaults[j], - /// then the Feature context_dense_keys[j] is required. - /// The input type is inferred from context_dense_defaults[j], even when it's - /// empty. If context_dense_defaults[j] is not empty, its shape must match - /// context_dense_shapes[j]. - /// - /// - Attrs: - /// - feature_list_dense_missing_assumed_empty: A vector listing the - /// FeatureList keys which may be missing from the SequenceExamples. If the - /// associated FeatureList is missing, it is treated as empty. By default, - /// any FeatureList not listed in this vector must exist in the SequenceExamples. - /// - context_sparse_keys: A list of Ncontext_sparse string Tensors (scalars). - /// The keys expected in the Examples' features associated with context_sparse - /// values. - /// - context_dense_keys: A list of Ncontext_dense string Tensors (scalars). 
- /// The keys expected in the SequenceExamples' context features associated with - /// dense values. - /// - feature_list_sparse_keys: A list of Nfeature_list_sparse string Tensors - /// (scalars). The keys expected in the FeatureLists associated with sparse - /// values. - /// - feature_list_dense_keys: A list of Nfeature_list_dense string Tensors (scalars). - /// The keys expected in the SequenceExamples' feature_lists associated - /// with lists of dense values. - /// - context_sparse_types: A list of Ncontext_sparse types; the data types of data in - /// each context Feature given in context_sparse_keys. - /// Currently the ParseSingleSequenceExample supports DT_FLOAT (FloatList), - /// DT_INT64 (Int64List), and DT_STRING (BytesList). - /// - context_dense_shapes: A list of Ncontext_dense shapes; the shapes of data in - /// each context Feature given in context_dense_keys. - /// The number of elements in the Feature corresponding to context_dense_key[j] - /// must always equal context_dense_shapes[j].NumEntries(). - /// The shape of context_dense_values[j] will match context_dense_shapes[j]. - /// - feature_list_sparse_types: A list of Nfeature_list_sparse types; the data types - /// of data in each FeatureList given in feature_list_sparse_keys. - /// Currently the ParseSingleSequenceExample supports DT_FLOAT (FloatList), - /// DT_INT64 (Int64List), and DT_STRING (BytesList). - /// - feature_list_dense_shapes: A list of Nfeature_list_dense shapes; the shapes of - /// data in each FeatureList given in feature_list_dense_keys. - /// The shape of each Feature in the FeatureList corresponding to - /// feature_list_dense_key[j] must always equal - /// feature_list_dense_shapes[j].NumEntries(). - @inlinable @inline(__always) - public static func parseSequenceExample< - ContextSparseTypes: TensorGroup, - TcontextDense: TensorArrayProtocol, - FeatureListDenseTypes: TensorGroup, - FeatureListSparseTypes: TensorGroup - >( - serialized: StringTensor, - debugName: StringTensor, - contextDenseDefaults: TcontextDense, - featureListDenseMissingAssumedEmpty: [String], - contextSparseKeys: [String], - contextDenseKeys: [String], - featureListSparseKeys: [String], - featureListDenseKeys: [String], - ncontextSparse: Int64 = 0, - ncontextDense: Int64 = 0, - nfeatureListSparse: Int64 = 0, - nfeatureListDense: Int64 = 0, - contextDenseShapes: [TensorShape?], - featureListDenseShapes: [TensorShape?] - ) -> ( - contextSparseIndices: [Tensor], contextSparseValues: ContextSparseTypes, - contextSparseShapes: [Tensor], contextDenseValues: TcontextDense, - featureListSparseIndices: [Tensor], featureListSparseValues: FeatureListSparseTypes, - featureListSparseShapes: [Tensor], featureListDenseValues: FeatureListDenseTypes, - featureListDenseLengths: [Tensor] - ) { - _RawTFEager.parseSequenceExample( - serialized: serialized, debugName: debugName, contextDenseDefaults: contextDenseDefaults, - featureListDenseMissingAssumedEmpty: featureListDenseMissingAssumedEmpty, - contextSparseKeys: contextSparseKeys, contextDenseKeys: contextDenseKeys, - featureListSparseKeys: featureListSparseKeys, featureListDenseKeys: featureListDenseKeys, - ncontextSparse: ncontextSparse, ncontextDense: ncontextDense, - nfeatureListSparse: nfeatureListSparse, nfeatureListDense: nfeatureListDense, - contextDenseShapes: contextDenseShapes, featureListDenseShapes: featureListDenseShapes) - } - - /// Transforms a vector of tf.io.SequenceExample protos (as strings) into - /// typed tensors. 
- ///
- /// - Parameters:
- /// - serialized: A scalar or vector containing binary serialized SequenceExample protos.
- /// - debug_name: A scalar or vector containing the names of the serialized protos.
- /// May contain, for example, table key (descriptive) name for the
- /// corresponding serialized proto. This is purely useful for debugging
- /// purposes, and the presence of values here has no effect on the output.
- /// May also be an empty vector if no name is available.
- /// - context_sparse_keys: The keys expected in the Examples' features associated with context_sparse
- /// values.
- /// - context_dense_keys: The keys expected in the SequenceExamples' context features associated with
- /// dense values.
- /// - context_ragged_keys: The keys expected in the Examples' features associated with context_ragged
- /// values.
- /// - feature_list_sparse_keys: The keys expected in the FeatureLists associated with sparse values.
- /// - feature_list_dense_keys: The keys expected in the SequenceExamples' feature_lists associated
- /// with lists of dense values.
- /// - feature_list_ragged_keys: The keys expected in the FeatureLists associated with ragged values.
- /// - feature_list_dense_missing_assumed_empty: A vector corresponding 1:1 with feature_list_dense_keys, indicating which
- /// features may be missing from the SequenceExamples. If the associated
- /// FeatureList is missing, it is treated as empty.
- /// - context_dense_defaults: A list of Ncontext_dense Tensors (some may be empty).
- /// context_dense_defaults[j] provides default values
- /// when the SequenceExample's context map lacks context_dense_key[j].
- /// If an empty Tensor is provided for context_dense_defaults[j],
- /// then the Feature context_dense_keys[j] is required.
- /// The input type is inferred from context_dense_defaults[j], even when it's
- /// empty. If context_dense_defaults[j] is not empty, its shape must match
- /// context_dense_shapes[j].
- ///
- /// - Attrs:
- /// - context_sparse_types: A list of Ncontext_sparse types; the data types of data in
- /// each context Feature given in context_sparse_keys.
- /// Currently the ParseSingleSequenceExample supports DT_FLOAT (FloatList),
- /// DT_INT64 (Int64List), and DT_STRING (BytesList).
- /// - context_ragged_value_types: RaggedTensor.value dtypes for the ragged context features.
- /// - context_ragged_split_types: RaggedTensor.row_split dtypes for the ragged context features.
- /// - context_dense_shapes: A list of Ncontext_dense shapes; the shapes of data in
- /// each context Feature given in context_dense_keys.
- /// The number of elements in the Feature corresponding to context_dense_key[j]
- /// must always equal context_dense_shapes[j].NumEntries().
- /// The shape of context_dense_values[j] will match context_dense_shapes[j].
- /// - feature_list_sparse_types: A list of Nfeature_list_sparse types; the data types
- /// of data in each FeatureList given in feature_list_sparse_keys.
- /// Currently the ParseSingleSequenceExample supports DT_FLOAT (FloatList),
- /// DT_INT64 (Int64List), and DT_STRING (BytesList).
- /// - feature_list_ragged_value_types: RaggedTensor.value dtypes for the ragged FeatureList features.
- /// - feature_list_ragged_split_types: RaggedTensor.row_split dtypes for the ragged FeatureList features.
- /// - feature_list_dense_shapes: A list of Nfeature_list_dense shapes; the shapes of
- /// data in each FeatureList given in feature_list_dense_keys.
- /// The shape of each Feature in the FeatureList corresponding to - /// feature_list_dense_key[j] must always equal - /// feature_list_dense_shapes[j].NumEntries(). - @inlinable @inline(__always) - public static func parseSequenceExampleV2< - TcontextDense: TensorArrayProtocol, - ContextSparseTypes: TensorGroup, - ContextRaggedValueTypes: TensorGroup, - ContextRaggedSplitTypes: TensorGroup, - FeatureListDenseTypes: TensorGroup, - FeatureListSparseTypes: TensorGroup, - FeatureListRaggedValueTypes: TensorGroup, - FeatureListRaggedSplitTypes: TensorGroup - >( - serialized: StringTensor, - debugName: StringTensor, - contextSparseKeys: StringTensor, - contextDenseKeys: StringTensor, - contextRaggedKeys: StringTensor, - featureListSparseKeys: StringTensor, - featureListDenseKeys: StringTensor, - featureListRaggedKeys: StringTensor, - featureListDenseMissingAssumedEmpty: Tensor, - contextDenseDefaults: TcontextDense, - ncontextSparse: Int64 = 0, - contextDenseShapes: [TensorShape?], - nfeatureListSparse: Int64 = 0, - nfeatureListDense: Int64 = 0, - featureListDenseShapes: [TensorShape?] - ) -> ( - contextSparseIndices: [Tensor], contextSparseValues: ContextSparseTypes, - contextSparseShapes: [Tensor], contextDenseValues: TcontextDense, - contextRaggedValues: ContextRaggedValueTypes, contextRaggedRowSplits: ContextRaggedSplitTypes, - featureListSparseIndices: [Tensor], featureListSparseValues: FeatureListSparseTypes, - featureListSparseShapes: [Tensor], featureListDenseValues: FeatureListDenseTypes, - featureListDenseLengths: [Tensor], - featureListRaggedValues: FeatureListRaggedValueTypes, - featureListRaggedOuterSplits: FeatureListRaggedSplitTypes, - featureListRaggedInnerSplits: FeatureListRaggedSplitTypes - ) { - _RawTFEager.parseSequenceExampleV2( - serialized: serialized, debugName: debugName, contextSparseKeys: contextSparseKeys, - contextDenseKeys: contextDenseKeys, contextRaggedKeys: contextRaggedKeys, - featureListSparseKeys: featureListSparseKeys, featureListDenseKeys: featureListDenseKeys, - featureListRaggedKeys: featureListRaggedKeys, - featureListDenseMissingAssumedEmpty: featureListDenseMissingAssumedEmpty, - contextDenseDefaults: contextDenseDefaults, ncontextSparse: ncontextSparse, - contextDenseShapes: contextDenseShapes, nfeatureListSparse: nfeatureListSparse, - nfeatureListDense: nfeatureListDense, featureListDenseShapes: featureListDenseShapes) - } - - /// Transforms a tf.Example proto (as a string) into typed tensors. - /// - /// - Parameters: - /// - serialized: A vector containing a batch of binary serialized Example protos. - /// - dense_defaults: A list of Tensors (some may be empty), whose length matches - /// the length of `dense_keys`. dense_defaults[j] provides default values - /// when the example's feature_map lacks dense_key[j]. If an empty Tensor is - /// provided for dense_defaults[j], then the Feature dense_keys[j] is required. - /// The input type is inferred from dense_defaults[j], even when it's empty. - /// If dense_defaults[j] is not empty, and dense_shapes[j] is fully defined, - /// then the shape of dense_defaults[j] must match that of dense_shapes[j]. - /// If dense_shapes[j] has an undefined major dimension (variable strides dense - /// feature), dense_defaults[j] must contain a single element: - /// the padding element. - /// - /// - Attrs: - /// - num_sparse: The number of sparse features to be parsed from the example. This - /// must match the lengths of `sparse_keys` and `sparse_types`. - /// - sparse_keys: A list of `num_sparse` strings. 
- /// The keys expected in the Examples' features associated with sparse values.
- /// - dense_keys: The keys expected in the Examples' features associated with dense
- /// values.
- /// - sparse_types: A list of `num_sparse` types; the data types of data in each
- /// Feature given in sparse_keys.
- /// Currently the ParseSingleExample op supports DT_FLOAT (FloatList),
- /// DT_INT64 (Int64List), and DT_STRING (BytesList).
- /// - Tdense: The data types of data in each Feature given in dense_keys.
- /// The length of this list must match the length of `dense_keys`.
- /// Currently the ParseSingleExample op supports DT_FLOAT (FloatList),
- /// DT_INT64 (Int64List), and DT_STRING (BytesList).
- /// - dense_shapes: The shapes of data in each Feature given in dense_keys.
- /// The length of this list must match the length of `dense_keys`. The
- /// number of elements in the Feature corresponding to dense_key[j] must
- /// always equal dense_shapes[j].NumEntries(). If dense_shapes[j] ==
- /// (D0, D1, ..., DN) then the shape of output Tensor dense_values[j]
- /// will be (D0, D1, ..., DN): In the case dense_shapes[j] = (-1, D1,
- /// ..., DN), the shape of the output Tensor dense_values[j] will be (M,
- /// D1, ..., DN), where M is the number of blocks of elements of length
- /// D1 * ... * DN, in the input.
- @inlinable @inline(__always)
- public static func parseSingleExample<
- SparseTypes: TensorGroup,
- Tdense: TensorArrayProtocol
- >(
- serialized: StringTensor,
- denseDefaults: Tdense,
- numSparse: Int64,
- sparseKeys: [String],
- denseKeys: [String],
- denseShapes: [TensorShape?]
- ) -> (
- sparseIndices: [Tensor], sparseValues: SparseTypes, sparseShapes: [Tensor],
- denseValues: Tdense
- ) {
- _RawTFEager.parseSingleExample(
- serialized: serialized, denseDefaults: denseDefaults, numSparse: numSparse,
- sparseKeys: sparseKeys, denseKeys: denseKeys, denseShapes: denseShapes)
- }
-
- /// Transforms a scalar brain.SequenceExample proto (as a string) into typed tensors.
- ///
- /// - Parameters:
- /// - serialized: A scalar containing a binary serialized SequenceExample proto.
- /// - feature_list_dense_missing_assumed_empty: A vector listing the
- /// FeatureList keys which may be missing from the SequenceExample. If the
- /// associated FeatureList is missing, it is treated as empty. By default,
- /// any FeatureList not listed in this vector must exist in the SequenceExample.
- /// - context_sparse_keys: A list of Ncontext_sparse string Tensors (scalars).
- /// The keys expected in the Examples' features associated with context_sparse
- /// values.
- /// - context_dense_keys: A list of Ncontext_dense string Tensors (scalars).
- /// The keys expected in the SequenceExamples' context features associated with
- /// dense values.
- /// - feature_list_sparse_keys: A list of Nfeature_list_sparse string Tensors
- /// (scalars). The keys expected in the FeatureLists associated with sparse
- /// values.
- /// - feature_list_dense_keys: A list of Nfeature_list_dense string Tensors (scalars).
- /// The keys expected in the SequenceExamples' feature_lists associated
- /// with lists of dense values.
- /// - context_dense_defaults: A list of Ncontext_dense Tensors (some may be empty).
- /// context_dense_defaults[j] provides default values
- /// when the SequenceExample's context map lacks context_dense_key[j].
- /// If an empty Tensor is provided for context_dense_defaults[j],
- /// then the Feature context_dense_keys[j] is required.
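The sentence above captures a convention shared by every parse op in this family: whether a dense feature is required is encoded entirely by its default tensor rather than by a separate flag (the context_dense_defaults note continues below). A minimal sketch of that convention in Swift, using hypothetical feature values; an empty default marks the feature as required:

```swift
// Hypothetical dense defaults for two features; an empty default marks the
// corresponding feature as required rather than defaulted.
let denseDefaults: [Tensor<Float>] = [
  Tensor<Float>([]),     // "age": empty, so the feature must be present
  Tensor<Float>([0.0]),  // "weight": 0.0 is substituted when the feature is absent
]
let isRequired = denseDefaults.map { $0.scalarCount == 0 }  // [true, false]
```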
- /// The input type is inferred from context_dense_defaults[j], even when it's - /// empty. If context_dense_defaults[j] is not empty, its shape must match - /// context_dense_shapes[j]. - /// - debug_name: A scalar containing the name of the serialized proto. - /// May contain, for example, table key (descriptive) name for the - /// corresponding serialized proto. This is purely useful for debugging - /// purposes, and the presence of values here has no effect on the output. - /// May also be an empty scalar if no name is available. - /// - /// - Attrs: - /// - context_sparse_types: A list of Ncontext_sparse types; the data types of data in - /// each context Feature given in context_sparse_keys. - /// Currently the ParseSingleSequenceExample supports DT_FLOAT (FloatList), - /// DT_INT64 (Int64List), and DT_STRING (BytesList). - /// - context_dense_shapes: A list of Ncontext_dense shapes; the shapes of data in - /// each context Feature given in context_dense_keys. - /// The number of elements in the Feature corresponding to context_dense_key[j] - /// must always equal context_dense_shapes[j].NumEntries(). - /// The shape of context_dense_values[j] will match context_dense_shapes[j]. - /// - feature_list_sparse_types: A list of Nfeature_list_sparse types; the data types - /// of data in each FeatureList given in feature_list_sparse_keys. - /// Currently the ParseSingleSequenceExample supports DT_FLOAT (FloatList), - /// DT_INT64 (Int64List), and DT_STRING (BytesList). - /// - feature_list_dense_shapes: A list of Nfeature_list_dense shapes; the shapes of - /// data in each FeatureList given in feature_list_dense_keys. - /// The shape of each Feature in the FeatureList corresponding to - /// feature_list_dense_key[j] must always equal - /// feature_list_dense_shapes[j].NumEntries(). - @inlinable @inline(__always) - public static func parseSingleSequenceExample< - ContextSparseTypes: TensorGroup, - TcontextDense: TensorArrayProtocol, - FeatureListDenseTypes: TensorGroup, - FeatureListSparseTypes: TensorGroup - >( - serialized: StringTensor, - featureListDenseMissingAssumedEmpty: StringTensor, - contextSparseKeys: [StringTensor], - contextDenseKeys: [StringTensor], - featureListSparseKeys: [StringTensor], - featureListDenseKeys: [StringTensor], - contextDenseDefaults: TcontextDense, - debugName: StringTensor, - contextDenseShapes: [TensorShape?], - featureListDenseShapes: [TensorShape?] - ) -> ( - contextSparseIndices: [Tensor], contextSparseValues: ContextSparseTypes, - contextSparseShapes: [Tensor], contextDenseValues: TcontextDense, - featureListSparseIndices: [Tensor], featureListSparseValues: FeatureListSparseTypes, - featureListSparseShapes: [Tensor], featureListDenseValues: FeatureListDenseTypes - ) { - _RawTFEager.parseSingleSequenceExample( - serialized: serialized, - featureListDenseMissingAssumedEmpty: featureListDenseMissingAssumedEmpty, - contextSparseKeys: contextSparseKeys, contextDenseKeys: contextDenseKeys, - featureListSparseKeys: featureListSparseKeys, featureListDenseKeys: featureListDenseKeys, - contextDenseDefaults: contextDenseDefaults, debugName: debugName, - contextDenseShapes: contextDenseShapes, featureListDenseShapes: featureListDenseShapes) - } - - /// Transforms a serialized tensorflow.TensorProto proto into a Tensor. - /// - /// - Parameter serialized: A scalar string containing a serialized TensorProto proto. - /// - /// - Attr out_type: The type of the serialized tensor. 
The provided type must match the - /// type of the serialized tensor and no implicit conversion will take place. - /// - /// - Output output: A Tensor of type `out_type`. - @inlinable @inline(__always) - public static func parseTensor( - serialized: StringTensor - ) -> Tensor { - _RawTFEager.parseTensor(serialized: serialized) - } - - /// returns `f(inputs)`, where `f`'s body is placed and partitioned. - /// - /// - Parameter args: A list of input tensors. - /// - /// - Attrs: - /// - Tin: A list of input types. - /// - Tout: A list of output types. - /// - f: A function that takes 'args', a list of tensors, and returns 'output', - /// another list of tensors. Input and output types are specified by 'Tin' - /// and 'Tout'. The function body of f will be placed and partitioned across - /// devices, setting this op apart from the regular Call op. - /// - /// - Output output: A list of return values. - @inlinable @inline(__always) - public static func partitionedCall< - Tin: TensorArrayProtocol, - Tout: TensorGroup, - FIn: TensorGroup, - FOut: TensorGroup - >( - args: Tin, - f: (FIn) -> FOut, - config: String, - configProto: String, - executorType: String - ) -> Tout { - _RawTFEager.partitionedCall( - args: args, f: f, config: config, configProto: configProto, executorType: executorType) - } - - /// A placeholder op for a value that will be fed into the computation. - /// - /// N.B. This operation will fail with an error if it is executed. It is - /// intended as a way to represent a value that will always be fed, and to - /// provide attrs that enable the fed value to be checked at runtime. - /// - /// - Attrs: - /// - dtype: The type of elements in the tensor. - /// - shape: (Optional) The shape of the tensor. If the shape has 0 dimensions, the - /// shape is unconstrained. - /// - /// - Output output: A placeholder tensor that must be replaced using the feed mechanism. - @inlinable @inline(__always) - public static func placeholder( - shape: TensorShape? - ) -> Tensor { - _RawTFEager.placeholder(shape: shape) - } - - /// A placeholder op for a value that will be fed into the computation. - /// - /// N.B. This operation will fail with an error if it is executed. It is - /// intended as a way to represent a value that will always be fed, and to - /// provide attrs that enable the fed value to be checked at runtime. - /// - /// - Attrs: - /// - dtype: The type of elements in the tensor. - /// - shape: The shape of the tensor. The shape can be any partially-specified - /// shape. To be unconstrained, pass in a shape with unknown rank. - /// - /// - Output output: A placeholder tensor that must be replaced using the feed mechanism. - @inlinable @inline(__always) - public static func placeholderV2( - shape: TensorShape? - ) -> Tensor { - _RawTFEager.placeholderV2(shape: shape) - } - - /// A placeholder op that passes through `input` when its output is not fed. - /// - /// - Parameter input: The default value to produce when `output` is not fed. - /// - /// - Attrs: - /// - dtype: The type of elements in the tensor. - /// - shape: The (possibly partial) shape of the tensor. - /// - /// - Output output: A placeholder tensor that defaults to `input` if it is not fed. - @inlinable @inline(__always) - public static func placeholderWithDefault( - _ input: Tensor, - shape: TensorShape? 
- ) -> Tensor {
- switch input.handle.backend {
- case .XLA:
- let output_device = input.device
- let input = Tensor(copying: input, to: .defaultTFEager)
- return Tensor(
- copying: _RawTFEager.placeholderWithDefault(input, shape: shape), to: output_device)
- case .TF_EAGER:
- return _RawTFEager.placeholderWithDefault(input, shape: shape)
- }
-
- }
-
- /// Compute the polygamma function \\(\psi^{(n)}(x)\\).
- ///
- /// The polygamma function is defined as:
- ///
- ///
- /// \\(\psi^{(a)}(x) = \frac{d^a}{dx^a} \psi(x)\\)
- ///
- /// where \\(\psi(x)\\) is the digamma function.
- /// The polygamma function is defined only for non-negative integer orders \\(a\\).
- @inlinable @inline(__always)
- public static func polygamma(
- _ a: Tensor,
- _ x: Tensor
- ) -> Tensor {
- switch commonBackend(a.handle.backend, x.handle.backend) {
- case .XLA:
- let output_device = x.device
- let a = Tensor(copying: a, to: .defaultTFEager)
- let x = Tensor(copying: x, to: .defaultTFEager)
- return Tensor(copying: _RawTFEager.polygamma(a, x), to: output_device)
- case .TF_EAGER:
- return _RawTFEager.polygamma(a, x)
- }
-
- }
-
- @inlinable @inline(__always)
- public static func polymorphic(
- _ a: Tensor
- ) -> Tensor {
- switch a.handle.backend {
- case .XLA:
- let output_device = a.device
- let a = Tensor(copying: a, to: .defaultTFEager)
- return Tensor(copying: _RawTFEager.polymorphic(a), to: output_device)
- case .TF_EAGER:
- return _RawTFEager.polymorphic(a)
- }
-
- }
-
- @inlinable @inline(__always)
- public static func polymorphicDefaultOut() -> Tensor {
- _RawTFEager.polymorphicDefaultOut()
- }
-
- @inlinable @inline(__always)
- public static func polymorphicOut() -> Tensor {
- _RawTFEager.polymorphicOut()
- }
-
- /// Computes element-wise population count (a.k.a. popcount, bitsum, bitcount).
- ///
- /// For each entry in `x`, calculates the number of `1` (on) bits in the binary
- /// representation of that entry.
- ///
- /// **NOTE**: It is more efficient to first `tf.bitcast` your tensors into
- /// `int32` or `int64` and perform the bitcount on the result, than to feed in
- /// 8- or 16-bit inputs and then aggregate the resulting counts.
- @inlinable @inline(__always)
- public static func populationCount(
- _ x: Tensor
- ) -> Tensor {
- switch x.handle.backend {
- case .XLA:
- let output_device = x.device
- let x = Tensor(copying: x, to: .defaultTFEager)
- return Tensor(copying: _RawTFEager.populationCount(x), to: output_device)
- case .TF_EAGER:
- return _RawTFEager.populationCount(x)
- }
-
- }
-
- /// Computes the power of one value to another.
- ///
- /// Given a tensor `x` and a tensor `y`, this operation computes \\(x^y\\) for
- /// corresponding elements in `x` and `y`. For example:
- ///
- /// ```
- /// # tensor 'x' is [[2, 2], [3, 3]]
- /// # tensor 'y' is [[8, 16], [2, 3]]
- /// tf.pow(x, y) ==> [[256, 65536], [9, 27]]
- /// ```
- @inlinable @inline(__always)
- public static func pow(
- _ x: Tensor,
- _ y: Tensor
- ) -> Tensor {
- switch commonBackend(x.handle.backend, y.handle.backend) {
- case .XLA:
- return _RawXLA.pow(x, y)
- case .TF_EAGER:
- return _RawTFEager.pow(x, y)
- }
-
- }
-
- /// Creates a dataset that asynchronously prefetches elements from `input_dataset`.
- ///
- /// - Parameter buffer_size: The maximum number of elements to buffer in an iterator over
- /// this dataset.
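The eager-only wrappers above all share one fallback shape: an XLA-resident tensor is copied to the eager runtime, the TF kernel runs there, and the result is copied back to the original device. A minimal sketch of that pattern factored into a helper; `liftToEager` is a hypothetical name, not part of the generated bindings (the `prefetchDataset` declaration continues below):

```swift
// Hypothetical helper illustrating the copy-to-eager fallback used by the
// wrappers above; not part of the generated bindings.
func liftToEager<T: TensorFlowScalar, R: TensorFlowScalar>(
  _ input: Tensor<T>, _ eagerOp: (Tensor<T>) -> Tensor<R>
) -> Tensor<R> {
  switch input.handle.backend {
  case .XLA:
    // Copy to the eager runtime, run the TF kernel there, copy the result back.
    let outputDevice = input.device
    let eagerInput = Tensor(copying: input, to: .defaultTFEager)
    return Tensor(copying: eagerOp(eagerInput), to: outputDevice)
  case .TF_EAGER:
    return eagerOp(input)
  }
}

// e.g. the populationCount body above is equivalent to:
// liftToEager(x) { _RawTFEager.populationCount($0) }
```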
- @inlinable @inline(__always) - public static func prefetchDataset( - inputDataset: VariantHandle, - bufferSize: Tensor, - outputTypes: [TensorDataType], - outputShapes: [TensorShape?], - slackPeriod: Int64 = 0, - legacyAutotune: Bool = true - ) -> VariantHandle { - _RawTFEager.prefetchDataset( - inputDataset: inputDataset, bufferSize: bufferSize, outputTypes: outputTypes, - outputShapes: outputShapes, slackPeriod: slackPeriod, legacyAutotune: legacyAutotune) - } - - /// An op which linearizes one Tensor value to an opaque variant tensor. - /// - /// - Parameter input: A tensor that will be linearized. - /// - /// - Attrs: - /// - dtype: The type of elements in the tensor. - /// - shape: The shape of the tensor. - /// - layout: A vector holding the requested layout in minor-to-major sequence. If a layout - /// attribute is passed but its values are all -1 the layout will be computed by - /// the infeed operation. - @inlinable @inline(__always) - public static func prelinearize( - _ input: Tensor, - shape: TensorShape?, - layout: [Int32] - ) -> VariantHandle { - _RawTFEager.prelinearize(input, shape: shape, layout: layout) - } - - /// An op which linearizes multiple Tensor values to an opaque variant tensor. - /// - /// - Parameter inputs: A list of tensors that will be provided using the infeed mechanism. - /// - /// - Attrs: - /// - dtypes: The element types of each element in `inputs`. - /// - shapes: The shapes of each tensor in `inputs`. - /// - layouts: A vector holding the requested layout in minor-to-major sequence for all the - /// tuple shapes in the order the shapes appear in the "shapes" input. The layout - /// elements for a sub-shape can be set to -1 in which case the corresponding layout - /// will be computed by the infeed operation. - @inlinable @inline(__always) - public static func prelinearizeTuple( - inputs: Dtypes, - shapes: [TensorShape?], - layouts: [Int32] - ) -> VariantHandle { - _RawTFEager.prelinearizeTuple(inputs: inputs, shapes: shapes, layouts: layouts) - } - - /// An identity op that triggers an error if a gradient is requested. - /// - /// When executed in a graph, this op outputs its input tensor as-is. - /// - /// When building ops to compute gradients, the TensorFlow gradient system - /// will return an error when trying to lookup the gradient of this op, - /// because no gradient must ever be registered for this function. This - /// op exists to prevent subtle bugs from silently returning unimplemented - /// gradients in some corner cases. - /// - /// - Parameter input: any tensor. - /// - /// - Attr message: Will be printed in the error when anyone tries to differentiate - /// this operation. - /// - /// - Output output: the same input tensor. - @inlinable @inline(__always) - public static func preventGradient( - _ input: Tensor, - message: String - ) -> Tensor { - switch input.handle.backend { - case .XLA: - let output_device = input.device - let input = Tensor(copying: input, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.preventGradient(input, message: message), to: output_device) - case .TF_EAGER: - return _RawTFEager.preventGradient(input, message: message) - } - - } - - /// Prints a list of tensors. - /// - /// Passes `input` through to `output` and prints `data` when evaluating. - /// - /// - Parameters: - /// - input: The tensor passed to `output` - /// - data: A list of tensors to print out when op is evaluated. - /// - /// - Attrs: - /// - message: A string, prefix of the error message. 
- /// - first_n: Only log `first_n` number of times. -1 disables logging. - /// - summarize: Only print this many entries of each tensor. - /// - /// - Output output: = The unmodified `input` tensor - @inlinable @inline(__always) - public static func print< - T: TensorFlowScalar, - U: TensorArrayProtocol - >( - _ input: Tensor, - data: U, - message: String, - firstN: Int64 = -1, - summarize: Int64 = 3 - ) -> Tensor { - switch input.handle.backend { - case .XLA: - let output_device = input.device - let input = Tensor(copying: input, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.print( - input, data: data, message: message, firstN: firstN, summarize: summarize), - to: output_device) - case .TF_EAGER: - return _RawTFEager.print( - input, data: data, message: message, firstN: firstN, summarize: summarize) - } - - } - - /// Prints a string scalar. - /// - /// Prints a string scalar to the desired output_stream. - /// - /// - Parameter input: The string scalar to print. - /// - /// - Attr output_stream: A string specifying the output stream or logging level to print to. - @inlinable @inline(__always) - public static func printV2( - _ input: StringTensor, - outputStream: String = "stderr", - end: String = "\n" - ) { - _RawTFEager.printV2(input, outputStream: outputStream, end: end) - } - - /// A queue that produces elements sorted by the first component value. - /// - /// Note that the PriorityQueue requires the first component of any element - /// to be a scalar int64, in addition to the other elements declared by - /// component_types. Therefore calls to Enqueue and EnqueueMany (resp. Dequeue - /// and DequeueMany) on a PriorityQueue will all require (resp. output) one extra - /// entry in their input (resp. output) lists. - /// - /// - Attrs: - /// - component_types: The type of each component in a value. - /// - shapes: The shape of each component in a value. The length of this attr must - /// be either 0 or the same as the length of component_types. If the length of - /// this attr is 0, the shapes of queue elements are not constrained, and - /// only one element may be dequeued at a time. - /// - capacity: The upper bound on the number of elements in this queue. - /// Negative numbers mean no limit. - /// - container: If non-empty, this queue is placed in the given container. - /// Otherwise, a default container is used. - /// - shared_name: If non-empty, this queue will be shared under the given name - /// across multiple sessions. - /// - /// - Output handle: The handle to the queue. - @inlinable @inline(__always) - public static func priorityQueueV2( - componentTypes: [TensorDataType], - shapes: [TensorShape?], - capacity: Int64 = -1, - container: String, - sharedName: String - ) -> ResourceHandle { - _RawTFEager.priorityQueueV2( - componentTypes: componentTypes, shapes: shapes, capacity: capacity, container: container, - sharedName: sharedName) - } - - /// Creates a dataset that uses a custom thread pool to compute `input_dataset`. - /// - /// - Parameter num_threads: Identifies the number of threads to use for the private threadpool. - @inlinable @inline(__always) - public static func privateThreadPoolDataset( - inputDataset: VariantHandle, - numThreads: Tensor, - outputTypes: [TensorDataType], - outputShapes: [TensorShape?] - ) -> VariantHandle { - _RawTFEager.privateThreadPoolDataset( - inputDataset: inputDataset, numThreads: numThreads, outputTypes: outputTypes, - outputShapes: outputShapes) - } - - /// Computes the product of elements across dimensions of a tensor. 
- ///
- /// Reduces `input` along the dimensions given in `axis`. Unless
- /// `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
- /// `axis`. If `keep_dims` is true, the reduced dimensions are
- /// retained with length 1.
- ///
- /// - Parameters:
- /// - input: The tensor to reduce.
- /// - reduction_indices: The dimensions to reduce. Must be in the range
- /// `[-rank(input), rank(input))`.
- ///
- /// - Attr keep_dims: If true, retain reduced dimensions with length 1.
- ///
- /// - Output output: The reduced tensor.
- @inlinable @inline(__always)
- public static func prod<
- T: TensorFlowNumeric,
- Tidx: TensorFlowIndex
- >(
- _ input: Tensor,
- reductionIndices: Tensor,
- keepDims: Bool = false
- ) -> Tensor {
- switch commonBackend(input.handle.backend, reductionIndices.handle.backend) {
- case .XLA:
- return _RawXLA.prod(input, reductionIndices: reductionIndices, keepDims: keepDims)
- case .TF_EAGER:
- return _RawTFEager.prod(input, reductionIndices: reductionIndices, keepDims: keepDims)
- }
-
- }
-
- /// Invokes a Python function to compute func(input)->output.
- ///
- /// This operation is considered stateful. For a stateless version, see
- /// PyFuncStateless.
- ///
- /// - Parameter input: List of Tensors that will provide input to the Op.
- ///
- /// - Attrs:
- /// - token: A token representing a registered Python function in this address space.
- /// - Tin: Data types of the inputs to the op.
- /// - Tout: Data types of the outputs from the op.
- /// The length of the list specifies the number of outputs.
- ///
- /// - Output output: The outputs from the Op.
- @inlinable @inline(__always)
- public static func pyFunc<
- Tin: TensorArrayProtocol,
- Tout: TensorGroup
- >(
- _ input: Tin,
- token: String
- ) -> Tout {
- _RawTFEager.pyFunc(input, token: token)
- }
-
- /// A stateless version of PyFunc.
- @inlinable @inline(__always)
- public static func pyFuncStateless<
- Tin: TensorArrayProtocol,
- Tout: TensorGroup
- >(
- _ input: Tin,
- token: String
- ) -> Tout {
- _RawTFEager.pyFuncStateless(input, token: token)
- }
-
- /// Computes the QR decompositions of one or more matrices.
- ///
- /// Computes the QR decomposition of each inner matrix in `tensor` such that
- /// `tensor[..., :, :] = q[..., :, :] * r[..., :, :]`
- ///
- /// ```python
- /// # a is a tensor.
- /// # q is a tensor of orthonormal matrices.
- /// # r is a tensor of upper triangular matrices.
- /// q, r = qr(a)
- /// q_full, r_full = qr(a, full_matrices=True)
- /// ```
- ///
- /// - Parameter input: A tensor of shape `[..., M, N]` whose inner-most 2 dimensions
- /// form matrices of size `[M, N]`. Let `P` be the minimum of `M` and `N`.
- ///
- /// - Attr full_matrices: If true, compute full-sized `q` and `r`. If false
- /// (the default), compute only the leading `P` columns of `q`.
- ///
- /// - Outputs:
- /// - q: Orthonormal basis for range of `a`. If `full_matrices` is `False` then
- /// shape is `[..., M, P]`; if `full_matrices` is `True` then shape is
- /// `[..., M, M]`.
- /// - r: Triangular factor. If `full_matrices` is `False` then shape is
- /// `[..., P, N]`. If `full_matrices` is `True` then shape is `[..., M, N]`.
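A brief usage sketch for the `qr` entry documented above (its declaration follows below); `matMul` is assumed to be available from the same generated bindings:

```swift
// With fullMatrices = false (the default), a [3, 2] input gives q: [3, 2], r: [2, 2].
let a = Tensor<Float>(shape: [3, 2], scalars: [1, 2, 3, 4, 5, 6])
let (q, r) = _Raw.qr(a)
let reconstructed = _Raw.matMul(q, r)  // ≈ a, up to floating-point error
```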
- @inlinable @inline(__always)
- public static func qr(
- _ input: Tensor,
- fullMatrices: Bool = false
- ) -> (q: Tensor, r: Tensor) {
- switch input.handle.backend {
- case .XLA:
- return _RawXLA.qr(input, fullMatrices: fullMatrices)
- case .TF_EAGER:
- return _RawTFEager.qr(input, fullMatrices: fullMatrices)
- }
-
- }
-
- /// Use QuantizeAndDequantizeV2 instead.
- @inlinable @inline(__always)
- public static func quantizeAndDequantize(
- _ input: Tensor,
- signedInput: Bool = true,
- numBits: Int64 = 8,
- rangeGiven: Bool = false,
- inputMin: Double = 0,
- inputMax: Double = 0
- ) -> Tensor {
- switch input.handle.backend {
- case .XLA:
- let output_device = input.device
- let input = Tensor(copying: input, to: .defaultTFEager)
- return Tensor(
- copying: _RawTFEager.quantizeAndDequantize(
- input, signedInput: signedInput, numBits: numBits, rangeGiven: rangeGiven,
- inputMin: inputMin, inputMax: inputMax), to: output_device)
- case .TF_EAGER:
- return _RawTFEager.quantizeAndDequantize(
- input, signedInput: signedInput, numBits: numBits, rangeGiven: rangeGiven,
- inputMin: inputMin, inputMax: inputMax)
- }
-
- }
-
- /// Quantizes then dequantizes a tensor.
- ///
- /// This op simulates the precision loss from the quantized forward pass by:
- ///
- /// 1. Quantizing the tensor to fixed point numbers, which should match the target
- /// quantization method when it is used in inference.
- /// 2. Dequantizing it back to floating point numbers for the following ops, most
- /// likely matmul.
- ///
- /// There are different ways to quantize. This version uses only scaling, so 0.0
- /// maps to 0.
- ///
- /// From the specified 'num_bits' in the quantized output type, it determines
- /// minimum and maximum representable quantized values.
- ///
- /// e.g.
- ///
- /// * [-128, 127] for signed, num_bits = 8, or
- /// * [0, 255] for unsigned, num_bits = 8.
- ///
- /// If range_given == False, the initial input_min, input_max will be determined
- /// automatically as the minimum and maximum values in the input tensor, otherwise
- /// the specified values of input_min, input_max are used.
- ///
- /// Note: If the input_min, input_max are specified, they do not need to equal the
- /// actual minimum and maximum values in the tensor. e.g. in some cases it may be
- /// beneficial to specify these values such that the low probability extremes of the
- /// input distribution are clipped.
- ///
- /// This op determines the maximum scale_factor that would map the initial
- /// [input_min, input_max] range to a range that lies within the representable
- /// quantized range.
- ///
- /// It determines the scale from one of input_min and input_max, then updates the
- /// other one to maximize the representable range.
- ///
- /// e.g.
- ///
- /// * if the output is signed, num_bits = 8, [input_min, input_max] = [-10.0,
- /// 5.0]: it would use a scale_factor of -128 / -10.0 = 12.8. In this case, it
- /// would update input_max to be 127 / 12.8 = 9.921875.
- /// * if the output is signed, num_bits = 8, [input_min, input_max] = [-10.0,
- /// 10.0]: it would use a scale_factor of 127 / 10.0 = 12.7. In this case, it
- /// would update input_min to be -128.0 / 12.7 = -10.07874.
- /// * if the output is unsigned, input_min is forced to be 0, and only the
- /// specified input_max is used.
- ///
- /// After determining the scale_factor and updating the input range, it applies the
- /// following to each value in the 'input' tensor.
- /// - /// output = round(clamp(value, input_min, input_max) * scale_factor) / scale_factor. - /// - /// The above round function rounds the value based on the given round_mode. - /// - /// - /// - Parameters: - /// - input: Tensor to quantize and then dequantize. - /// - input_min: If `range_given == True`, this specifies the minimum input value that needs to - /// be represented, otherwise it is determined from the min value of the `input` - /// tensor. - /// - input_max: If `range_given == True`, this specifies the maximum input value that needs to - /// be represented, otherwise it is determined from the max value of the `input` - /// tensor. - /// - /// - Attrs: - /// - signed_input: Whether the quantization is signed or unsigned. (actually this parameter should - /// have been called `signed_output`) - /// - num_bits: The bitwidth of the quantization. - /// - range_given: Whether the range is given or should be determined from the `input` tensor. - /// - round_mode: The 'round_mode' attribute controls which rounding tie-breaking algorithm is - /// used when rounding float values to their quantized equivalents. The following - /// rounding modes are currently supported: - /// - /// * HALF_TO_EVEN: this is the default round_mode. - /// * HALF_UP: round towards positive. In this mode 7.5 rounds up to 8 and -7.5 - /// rounds up to -7. - /// - /// - narrow_range: If True, then the absolute value of the quantized minimum value is the same as - /// the quantized maximum value, instead of 1 greater. - /// i.e. for 8 bit quantization, the minimum value is -127 instead of -128. - /// - axis: If specified, this axis is treated as a channel or slice axis, and a separate - /// quantization range is used for each channel or slice along this axis. - @inlinable @inline(__always) - public static func quantizeAndDequantizeV2( - _ input: Tensor, - inputMin: Tensor, - inputMax: Tensor, - signedInput: Bool = true, - numBits: Int64 = 8, - rangeGiven: Bool = false, - roundMode: RoundMode = .halfToEven, - narrowRange: Bool = false, - axis: Int64 = -1 - ) -> Tensor { - switch commonBackend( - commonBackend(input.handle.backend, inputMin.handle.backend), inputMax.handle.backend) - { - case .XLA: - let output_device = inputMax.device - let input = Tensor(copying: input, to: .defaultTFEager) - let inputMin = Tensor(copying: inputMin, to: .defaultTFEager) - let inputMax = Tensor(copying: inputMax, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.quantizeAndDequantizeV2( - input, inputMin: inputMin, inputMax: inputMax, signedInput: signedInput, - numBits: numBits, rangeGiven: rangeGiven, roundMode: roundMode, - narrowRange: narrowRange, axis: axis), to: output_device) - case .TF_EAGER: - return _RawTFEager.quantizeAndDequantizeV2( - input, inputMin: inputMin, inputMax: inputMax, signedInput: signedInput, numBits: numBits, - rangeGiven: rangeGiven, roundMode: roundMode, narrowRange: narrowRange, axis: axis) - } - - } - - /// Quantizes then dequantizes a tensor. - /// - /// This is almost identical to QuantizeAndDequantizeV2, except that num_bits is a - /// tensor, so its value can change during training. 
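To make the V2 range adjustment concrete before the V3 declaration below, here is the documentation's signed 8-bit example worked through as plain Swift arithmetic (all values are taken from the text above):

```swift
// The doc's signed 8-bit example, worked numerically (num_bits = 8).
let inputMin = -10.0
var inputMax = 5.0
let scaleFactor = min(-128.0 / inputMin, 127.0 / inputMax)  // min(12.8, 25.4) = 12.8
inputMax = 127.0 / scaleFactor                              // widened to 9.921875
// A value then round-trips through the quantized grid:
let value = 3.3
let roundTripped = (min(inputMax, max(inputMin, value)) * scaleFactor)
  .rounded() / scaleFactor                                  // 42 / 12.8 = 3.28125
// (Tie-breaking per round_mode is ignored here; 42.24 is not a tie.)
```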
- @inlinable @inline(__always) - public static func quantizeAndDequantizeV3( - _ input: Tensor, - inputMin: Tensor, - inputMax: Tensor, - numBits: Tensor, - signedInput: Bool = true, - rangeGiven: Bool = true, - narrowRange: Bool = false, - axis: Int64 = -1 - ) -> Tensor { - switch commonBackend( - commonBackend( - commonBackend(input.handle.backend, inputMin.handle.backend), inputMax.handle.backend), - numBits.handle.backend) - { - case .XLA: - let output_device = numBits.device - let input = Tensor(copying: input, to: .defaultTFEager) - let inputMin = Tensor(copying: inputMin, to: .defaultTFEager) - let inputMax = Tensor(copying: inputMax, to: .defaultTFEager) - let numBits = Tensor(copying: numBits, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.quantizeAndDequantizeV3( - input, inputMin: inputMin, inputMax: inputMax, numBits: numBits, - signedInput: signedInput, rangeGiven: rangeGiven, narrowRange: narrowRange, axis: axis), - to: output_device) - case .TF_EAGER: - return _RawTFEager.quantizeAndDequantizeV3( - input, inputMin: inputMin, inputMax: inputMax, numBits: numBits, signedInput: signedInput, - rangeGiven: rangeGiven, narrowRange: narrowRange, axis: axis) - } - - } - - /// Convert the quantized 'input' tensor into a lower-precision 'output', using the - /// - /// actual distribution of the values to maximize the usage of the lower bit depth - /// and adjusting the output min and max ranges accordingly. - /// - /// [input_min, input_max] are scalar floats that specify the range for the float - /// interpretation of the 'input' data. For example, if input_min is -1.0f and - /// input_max is 1.0f, and we are dealing with quint16 quantized data, then a 0 - /// value in the 16-bit data should be interpreted as -1.0f, and a 65535 means 1.0f. - /// - /// This operator tries to squeeze as much precision as possible into an output with - /// a lower bit depth by calculating the actual min and max values found in the - /// data. For example, maybe that quint16 input has no values lower than 16,384 and - /// none higher than 49,152. That means only half the range is actually needed, all - /// the float interpretations are between -0.5f and 0.5f, so if we want to compress - /// the data into a quint8 output, we can use that range rather than the theoretical - /// -1.0f to 1.0f that is suggested by the input min and max. - /// - /// In practice, this is most useful for taking output from operations like - /// QuantizedMatMul that can produce higher bit-depth outputs than their inputs and - /// may have large potential output ranges, but in practice have a distribution of - /// input values that only uses a small fraction of the possible range. By feeding - /// that output into this operator, we can reduce it from 32 bits down to 8 with - /// minimal loss of accuracy. - /// - /// - Parameters: - /// - input_min: The float value that the minimum quantized input value represents. - /// - input_max: The float value that the maximum quantized input value represents. - /// - /// - Attrs: - /// - Tinput: The type of the input. - /// - out_type: The type of the output. Should be a lower bit depth than Tinput. - /// - /// - Outputs: - /// - output_min: The float value that the minimum quantized output value represents. - /// - output_max: The float value that the maximum quantized output value represents. 
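A small sketch of the arithmetic behind the quint16-to-quint8 narrative above, using the numbers from the doc comment (the declaration follows below):

```swift
// Illustrative arithmetic for the quint16 -> quint8 example above, assuming the
// doc's numbers: a [-1.0, 1.0] float range and observed values in [16384, 49152].
let (inputMin, inputMax) = (-1.0, 1.0)
let toFloat = { (q: Double) in inputMin + q / 65535.0 * (inputMax - inputMin) }
let actualMin = toFloat(16384.0)  // ≈ -0.5
let actualMax = toFloat(49152.0)  // ≈  0.5
// The op emits a quint8 tensor rescaled to [actualMin, actualMax], so all 8 output
// bits cover only the range the data actually occupies.
```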
- @inlinable @inline(__always)
- public static func quantizeDownAndShrinkRange<
- Tinput: TensorFlowScalar,
- OutType: TensorFlowScalar
- >(
- _ input: Tensor,
- inputMin: Tensor,
- inputMax: Tensor
- ) -> (output: Tensor, outputMin: Tensor, outputMax: Tensor) {
- _RawTFEager.quantizeDownAndShrinkRange(input, inputMin: inputMin, inputMax: inputMax)
- }
-
- /// Quantize the 'input' tensor of type float to 'output' tensor of type 'T'.
- ///
- /// [min_range, max_range] are scalar floats that specify the range for
- /// the 'input' data. The 'mode' attribute controls exactly which calculations are
- /// used to convert the float values to their quantized equivalents. The
- /// 'round_mode' attribute controls which rounding tie-breaking algorithm is used
- /// when rounding float values to their quantized equivalents.
- ///
- /// In 'MIN_COMBINED' mode, each value of the tensor will undergo the following:
- ///
- /// ```
- /// out[i] = (in[i] - min_range) * range(T) / (max_range - min_range)
- /// if T == qint8: out[i] -= (range(T) + 1) / 2.0
- /// ```
- ///
- /// here `range(T) = numeric_limits<T>::max() - numeric_limits<T>::min()`
- ///
- /// *MIN_COMBINED Mode Example*
- ///
- /// Assume the input is type float and has a possible range of [0.0, 6.0] and the
- /// output type is quint8 ([0, 255]). The min_range and max_range values should be
- /// specified as 0.0 and 6.0. Quantizing from float to quint8 will multiply each
- /// value of the input by 255/6 and cast to quint8.
- ///
- /// If the output type was qint8 ([-128, 127]), the operation will additionally
- /// subtract each value by 128 prior to casting, so that the range of values aligns
- /// with the range of qint8.
- ///
- /// If the mode is 'MIN_FIRST', then this approach is used:
- ///
- /// ```
- /// num_discrete_values = 1 << (# of bits in T)
- /// range_adjust = num_discrete_values / (num_discrete_values - 1)
- /// range = (range_max - range_min) * range_adjust
- /// range_scale = num_discrete_values / range
- /// quantized = round(input * range_scale) - round(range_min * range_scale) +
- /// numeric_limits<T>::min()
- /// quantized = max(quantized, numeric_limits<T>::min())
- /// quantized = min(quantized, numeric_limits<T>::max())
- /// ```
- ///
- /// The biggest difference between this and MIN_COMBINED is that the minimum range
- /// is rounded first, before it's subtracted from the rounded value. With
- /// MIN_COMBINED, a small bias is introduced where repeated iterations of quantizing
- /// and dequantizing will introduce a larger and larger error.
- ///
- /// *SCALED mode Example*
- ///
- /// `SCALED` mode matches the quantization approach used in
- /// `QuantizeAndDequantize{V2|V3}`.
- ///
- /// If the mode is `SCALED`, the quantization is performed by multiplying each
- /// input value by a scaling_factor.
- /// The scaling_factor is determined from `min_range` and `max_range` to be as large
- /// as possible such that the range from `min_range` to `max_range` is representable
- /// within values of type T.
- ///
- /// ```c++
- ///
- /// const int min_T = std::numeric_limits<T>::min();
- /// const int max_T = std::numeric_limits<T>::max();
- /// const float max_float = std::numeric_limits<float>::max();
- ///
- /// const float scale_factor_from_min_side =
- /// (min_T * min_range > 0) ? min_T / min_range : max_float;
- /// const float scale_factor_from_max_side =
- /// (max_T * max_range > 0) ? max_T / max_range : max_float;
- ///
- /// const float scale_factor = std::min(scale_factor_from_min_side,
- /// scale_factor_from_max_side);
- /// ```
- ///
- /// We next use the scale_factor to adjust min_range and max_range as follows:
- ///
- /// ```c++
- /// min_range = min_T / scale_factor;
- /// max_range = max_T / scale_factor;
- /// ```
- ///
- ///
- /// e.g. if T = qint8, and initially min_range = -10, and max_range = 9, we would
- /// compare -128/-10.0 = 12.8 to 127/9.0 = 14.11, and set scaling_factor = 12.8.
- /// In this case, min_range would remain -10, but max_range would be adjusted to
- /// 127 / 12.8 = 9.921875.
- ///
- /// So we will quantize input values in the range (-10, 9.921875) to (-128, 127).
- ///
- /// The input tensor can now be quantized by clipping values to the range
- /// `min_range` to `max_range`, then multiplying by scale_factor as follows:
- ///
- /// ```c++
- /// result = round(min(max_range, max(min_range, input)) * scale_factor)
- /// ```
- ///
- /// The adjusted `min_range` and `max_range` are returned as outputs 2 and 3 of
- /// this operation. These outputs should be used as the range for any further
- /// calculations.
- ///
- ///
- /// *narrow_range (bool) attribute*
- ///
- /// If true, we do not use the minimum quantized value.
- /// i.e. for an int8 quantized output, it would be restricted to the range
- /// -127..127 instead of the full -128..127 range.
- /// This is provided for compatibility with certain inference backends.
- /// (Only applies to SCALED mode)
- ///
- ///
- /// *axis (int) attribute*
- ///
- /// An optional `axis` attribute can specify a dimension index of the input tensor,
- /// such that quantization ranges will be calculated and applied separately for each
- /// slice of the tensor along that dimension. This is useful for per-channel
- /// quantization.
- ///
- /// If `axis` is specified, min_range and max_range must be 1-D tensors whose size
- /// matches the `axis` dimension of the input;
- ///
- /// if `axis`=None, per-tensor quantization is performed as normal.
- ///
- ///
- /// *ensure_minimum_range (float) attribute*
- ///
- /// Ensures the minimum quantization range is at least this value.
- /// The legacy default value for this is 0.01, but it is strongly suggested to
- /// set it to 0 for new uses.
- ///
- ///
- /// - Parameters:
- /// - min_range: The minimum value of the quantization range. This value may be adjusted by the
- /// op depending on other parameters. The adjusted value is written to `output_min`.
- /// If the `axis` attribute is specified, this must be a 1-D tensor whose size
- /// matches the `axis` dimension of the input and output tensors.
- /// - max_range: The maximum value of the quantization range. This value may be adjusted by the
- /// op depending on other parameters. The adjusted value is written to `output_max`.
- /// If the `axis` attribute is specified, this must be a 1-D tensor whose size
- /// matches the `axis` dimension of the input and output tensors.
- ///
- /// - Outputs:
- /// - output: The quantized data produced from the float input.
- /// - output_min: The final quantization range minimum, used to clip input values before scaling
- /// and rounding them to quantized values.
- /// If the `axis` attribute is specified, this will be a 1-D tensor whose size
- /// matches the `axis` dimension of the input and output tensors.
- /// - output_max: The final quantization range maximum, used to clip input values before scaling
- /// and rounding them to quantized values.
- /// If the `axis` attribute is specified, this will be a 1-D tensor whose size - /// matches the `axis` dimension of the input and output tensors. - @inlinable @inline(__always) - public static func quantizeV2( - _ input: Tensor, - minRange: Tensor, - maxRange: Tensor, - mode: Mode = .minCombined, - roundMode: RoundMode1 = .halfAwayFromZero, - narrowRange: Bool = false, - axis: Int64 = -1, - ensureMinimumRange: Double = 0.01 - ) -> (output: Tensor, outputMin: Tensor, outputMax: Tensor) { - _RawTFEager.quantizeV2( - input, minRange: minRange, maxRange: maxRange, mode: mode, roundMode: roundMode, - narrowRange: narrowRange, axis: axis, ensureMinimumRange: ensureMinimumRange) - } - - /// Returns x + y element-wise, working on quantized buffers. - /// - /// - Parameters: - /// - min_x: The float value that the lowest quantized `x` value represents. - /// - max_x: The float value that the highest quantized `x` value represents. - /// - min_y: The float value that the lowest quantized `y` value represents. - /// - max_y: The float value that the highest quantized `y` value represents. - /// - /// - Outputs: - /// - min_z: The float value that the lowest quantized output value represents. - /// - max_z: The float value that the highest quantized output value represents. - /// - /// *NOTE*: `QuantizedAdd` supports limited forms of broadcasting. More about - /// broadcasting [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) - @inlinable @inline(__always) - public static func quantizedAdd< - T1: TensorFlowScalar, - T2: TensorFlowScalar, - Toutput: TensorFlowScalar - >( - _ x: Tensor, - _ y: Tensor, - minX: Tensor, - maxX: Tensor, - minY: Tensor, - maxY: Tensor - ) -> (z: Tensor, minZ: Tensor, maxZ: Tensor) { - _RawTFEager.quantizedAdd(x, y, minX: minX, maxX: maxX, minY: minY, maxY: maxY) - } - - /// Produces the average pool of the input tensor for quantized types. - /// - /// - Parameters: - /// - input: 4-D with shape `[batch, height, width, channels]`. - /// - min_input: The float value that the lowest quantized input value represents. - /// - max_input: The float value that the highest quantized input value represents. - /// - /// - Attrs: - /// - ksize: The size of the window for each dimension of the input tensor. - /// The length must be 4 to match the number of dimensions of the input. - /// - strides: The stride of the sliding window for each dimension of the input - /// tensor. The length must be 4 to match the number of dimensions of the input. - /// - padding: The type of padding algorithm to use. - /// - /// - Outputs: - /// - min_output: The float value that the lowest quantized output value represents. - /// - max_output: The float value that the highest quantized output value represents. 
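Common to `quantizeV2`, `quantizedAdd`, and `quantizedAvgPool` above: the integer tensor only has meaning together with its accompanying `min`/`max` floats. A hypothetical helper showing the MIN_COMBINED interpretation for quint8 (the `quantizedAvgPool` declaration follows below):

```swift
// Hypothetical helper: maps a quint8 value and its (min, max) floats back to a real.
func dequantize(_ q: UInt8, min: Float, max: Float) -> Float {
  // quint8 spans [0, 255]; 0 maps to `min` and 255 maps to `max`.
  min + Float(q) / 255.0 * (max - min)
}
// dequantize(0, min: 0.0, max: 6.0) == 0.0; dequantize(255, min: 0.0, max: 6.0) == 6.0
```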
- @inlinable @inline(__always) - public static func quantizedAvgPool( - _ input: Tensor, - minInput: Tensor, - maxInput: Tensor, - ksize: [Int32], - strides: [Int32], - padding: Padding - ) -> (output: Tensor, minOutput: Tensor, maxOutput: Tensor) { - _RawTFEager.quantizedAvgPool( - input, minInput: minInput, maxInput: maxInput, ksize: ksize, strides: strides, - padding: padding) + commonBackend( + commonBackend(lhs.handle.backend, rhs.handle.backend), windowStrides.handle.backend), + padding.handle.backend), lhsDilation.handle.backend), rhsDilation.handle.backend), + featureGroupCount.handle.backend) + { + case .XLA: + let output_device = featureGroupCount.device + let lhs = Tensor(copying: lhs, to: .defaultTFEager) + let rhs = Tensor(copying: rhs, to: .defaultTFEager) + let windowStrides = Tensor(copying: windowStrides, to: .defaultTFEager) + let padding = Tensor(copying: padding, to: .defaultTFEager) + let lhsDilation = Tensor(copying: lhsDilation, to: .defaultTFEager) + let rhsDilation = Tensor(copying: rhsDilation, to: .defaultTFEager) + let featureGroupCount = Tensor(copying: featureGroupCount, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.xlaConv( + lhs: lhs, rhs: rhs, windowStrides: windowStrides, padding: padding, + lhsDilation: lhsDilation, rhsDilation: rhsDilation, + featureGroupCount: featureGroupCount, dimensionNumbers: dimensionNumbers, + precisionConfig: precisionConfig), to: output_device) + case .TF_EAGER: + return _RawTFEager.xlaConv( + lhs: lhs, rhs: rhs, windowStrides: windowStrides, padding: padding, + lhsDilation: lhsDilation, rhsDilation: rhsDilation, featureGroupCount: featureGroupCount, + dimensionNumbers: dimensionNumbers, precisionConfig: precisionConfig) } - /// Quantized Batch normalization. - /// - /// This op is deprecated and will be removed in the future. Prefer - /// `tf.nn.batch_normalization`. - /// - /// - Parameters: - /// - t: A 4D input Tensor. - /// - t_min: The value represented by the lowest quantized input. - /// - t_max: The value represented by the highest quantized input. - /// - m: A 1D mean Tensor with size matching the last dimension of t. - /// This is the first output from tf.nn.moments, - /// or a saved moving average thereof. - /// - m_min: The value represented by the lowest quantized mean. - /// - m_max: The value represented by the highest quantized mean. - /// - v: A 1D variance Tensor with size matching the last dimension of t. - /// This is the second output from tf.nn.moments, - /// or a saved moving average thereof. - /// - v_min: The value represented by the lowest quantized variance. - /// - v_max: The value represented by the highest quantized variance. - /// - beta: A 1D beta Tensor with size matching the last dimension of t. - /// An offset to be added to the normalized tensor. - /// - beta_min: The value represented by the lowest quantized offset. - /// - beta_max: The value represented by the highest quantized offset. - /// - gamma: A 1D gamma Tensor with size matching the last dimension of t. - /// If "scale_after_normalization" is true, this tensor will be multiplied - /// with the normalized tensor. - /// - gamma_min: The value represented by the lowest quantized gamma. - /// - gamma_max: The value represented by the highest quantized gamma. - /// - /// - Attrs: - /// - variance_epsilon: A small float number to avoid dividing by 0. - /// - scale_after_normalization: A bool indicating whether the resulted tensor - /// needs to be multiplied with gamma. 
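For reference alongside the deprecated op documented above, this is the global-normalization formula it implements, written out per element in plain Swift (illustrative only; the quantization ranges are omitted):

```swift
// Per-element global batch normalization, per the parameter docs above.
func batchNormGlobal(_ t: Float, mean m: Float, variance v: Float,
                     beta: Float, gamma: Float,
                     varianceEpsilon: Float, scaleAfterNormalization: Bool) -> Float {
  let normalized = (t - m) / (v + varianceEpsilon).squareRoot()
  return scaleAfterNormalization ? normalized * gamma + beta : normalized + beta
}
```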
- @inlinable @inline(__always) - public static func quantizedBatchNormWithGlobalNormalization< - Tinput: TensorFlowScalar, - OutType: TensorFlowScalar - >( - t: Tensor, - tMin: Tensor, - tMax: Tensor, - m: Tensor, - mMin: Tensor, - mMax: Tensor, - v: Tensor, - vMin: Tensor, - vMax: Tensor, - beta: Tensor, - betaMin: Tensor, - betaMax: Tensor, - gamma: Tensor, - gammaMin: Tensor, - gammaMax: Tensor, - varianceEpsilon: Double, - scaleAfterNormalization: Bool - ) -> (result: Tensor, resultMin: Tensor, resultMax: Tensor) { - _RawTFEager.quantizedBatchNormWithGlobalNormalization( - t: t, tMin: tMin, tMax: tMax, m: m, mMin: mMin, mMax: mMax, v: v, vMin: vMin, vMax: vMax, - beta: beta, betaMin: betaMin, betaMax: betaMax, gamma: gamma, gammaMin: gammaMin, - gammaMax: gammaMax, varianceEpsilon: varianceEpsilon, - scaleAfterNormalization: scaleAfterNormalization) - } + } - /// Adds Tensor 'bias' to Tensor 'input' for Quantized types. - /// - /// Broadcasts the values of bias on dimensions 0..N-2 of 'input'. - /// - /// - Parameters: - /// - bias: A 1D bias Tensor with size matching the last dimension of 'input'. - /// - min_input: The float value that the lowest quantized input value represents. - /// - max_input: The float value that the highest quantized input value represents. - /// - min_bias: The float value that the lowest quantized bias value represents. - /// - max_bias: The float value that the highest quantized bias value represents. - /// - /// - Outputs: - /// - min_out: The float value that the lowest quantized output value represents. - /// - max_out: The float value that the highest quantized output value represents. - @inlinable @inline(__always) - public static func quantizedBiasAdd< - T1: TensorFlowScalar, - T2: TensorFlowScalar, - OutType: TensorFlowScalar - >( - _ input: Tensor, - bias: Tensor, - minInput: Tensor, - maxInput: Tensor, - minBias: Tensor, - maxBias: Tensor - ) -> (output: Tensor, minOut: Tensor, maxOut: Tensor) { - _RawTFEager.quantizedBiasAdd( - input, bias: bias, minInput: minInput, maxInput: maxInput, minBias: minBias, - maxBias: maxBias) - } - - /// Concatenates quantized tensors along one dimension. - /// - /// - Parameters: - /// - concat_dim: 0-D. The dimension along which to concatenate. Must be in the - /// range [0, rank(values)). - /// - values: The `N` Tensors to concatenate. Their ranks and types must match, - /// and their sizes must match in all dimensions except `concat_dim`. - /// - input_mins: The minimum scalar values for each of the input tensors. - /// - input_maxes: The maximum scalar values for each of the input tensors. - /// - /// - Outputs: - /// - output: A `Tensor` with the concatenation of values stacked along the - /// `concat_dim` dimension. This tensor's shape matches that of `values` except - /// in `concat_dim` where it has the sum of the sizes. - /// - output_min: The float value that the minimum quantized output value represents. - /// - output_max: The float value that the maximum quantized output value represents. - @inlinable @inline(__always) - public static func quantizedConcat( - concatDim: Tensor, - _ values: [Tensor], - inputMins: [Tensor], - inputMaxes: [Tensor] - ) -> (output: Tensor, outputMin: Tensor, outputMax: Tensor) { - _RawTFEager.quantizedConcat( - concatDim: concatDim, values, inputMins: inputMins, inputMaxes: inputMaxes) - } - - /// Computes a 2D convolution given quantized 4D input and filter tensors. 
- /// - /// The inputs are quantized tensors where the lowest value represents the real - /// number of the associated minimum, and the highest represents the maximum. - /// This means that you can only interpret the quantized output in the same way, by - /// taking the returned minimum and maximum values into account. - /// - /// - Parameters: - /// - filter: filter's input_depth dimension must match input's depth dimensions. - /// - min_input: The float value that the lowest quantized input value represents. - /// - max_input: The float value that the highest quantized input value represents. - /// - min_filter: The float value that the lowest quantized filter value represents. - /// - max_filter: The float value that the highest quantized filter value represents. - /// - /// - Attrs: - /// - strides: The stride of the sliding window for each dimension of the input - /// tensor. - /// - padding: The type of padding algorithm to use. - /// - dilations: 1-D tensor of length 4. The dilation factor for each dimension of - /// `input`. If set to k > 1, there will be k-1 skipped cells between each - /// filter element on that dimension. The dimension order is determined by the - /// value of `data_format`, see above for details. Dilations in the batch and - /// depth dimensions must be 1. - /// - /// - Outputs: - /// - min_output: The float value that the lowest quantized output value represents. - /// - max_output: The float value that the highest quantized output value represents. - @inlinable @inline(__always) - public static func quantizedConv2D< - Tinput: TensorFlowScalar, - Tfilter: TensorFlowScalar, - OutType: TensorFlowScalar - >( - _ input: Tensor, - filter: Tensor, - minInput: Tensor, - maxInput: Tensor, - minFilter: Tensor, - maxFilter: Tensor, - strides: [Int32], - padding: Padding, - dilations: [Int32] = [1, 1, 1, 1] - ) -> (output: Tensor, minOutput: Tensor, maxOutput: Tensor) { - _RawTFEager.quantizedConv2D( - input, filter: filter, minInput: minInput, maxInput: maxInput, minFilter: minFilter, - maxFilter: maxFilter, strides: strides, padding: padding, dilations: dilations) - } - - @inlinable @inline(__always) - public static func quantizedConv2DAndRelu< - Tinput: TensorFlowScalar, - Tfilter: TensorFlowScalar, - OutType: TensorFlowScalar - >( - _ input: Tensor, - filter: Tensor, - minInput: Tensor, - maxInput: Tensor, - minFilter: Tensor, - maxFilter: Tensor, - strides: [Int32], - padding: Padding, - dilations: [Int32] = [1, 1, 1, 1], - paddingList: [Int32] - ) -> (output: Tensor, minOutput: Tensor, maxOutput: Tensor) { - _RawTFEager.quantizedConv2DAndRelu( - input, filter: filter, minInput: minInput, maxInput: maxInput, minFilter: minFilter, - maxFilter: maxFilter, strides: strides, padding: padding, dilations: dilations, - paddingList: paddingList) - } - - @inlinable @inline(__always) - public static func quantizedConv2DAndReluAndRequantize< - Tinput: TensorFlowScalar, - Tfilter: TensorFlowScalar, - OutType: TensorFlowScalar - >( - _ input: Tensor, - filter: Tensor, - minInput: Tensor, - maxInput: Tensor, - minFilter: Tensor, - maxFilter: Tensor, - minFreezedOutput: Tensor, - maxFreezedOutput: Tensor, - strides: [Int32], - padding: Padding, - dilations: [Int32] = [1, 1, 1, 1], - paddingList: [Int32] - ) -> (output: Tensor, minOutput: Tensor, maxOutput: Tensor) { - _RawTFEager.quantizedConv2DAndReluAndRequantize( - input, filter: filter, minInput: minInput, maxInput: maxInput, minFilter: minFilter, - maxFilter: maxFilter, minFreezedOutput: minFreezedOutput, - maxFreezedOutput: 
maxFreezedOutput, strides: strides, padding: padding, - dilations: dilations, paddingList: paddingList) - } - - @inlinable @inline(__always) - public static func quantizedConv2DAndRequantize< - Tinput: TensorFlowScalar, - Tfilter: TensorFlowScalar, - OutType: TensorFlowScalar - >( - _ input: Tensor, - filter: Tensor, - minInput: Tensor, - maxInput: Tensor, - minFilter: Tensor, - maxFilter: Tensor, - minFreezedOutput: Tensor, - maxFreezedOutput: Tensor, - strides: [Int32], - padding: Padding, - dilations: [Int32] = [1, 1, 1, 1], - paddingList: [Int32] - ) -> (output: Tensor, minOutput: Tensor, maxOutput: Tensor) { - _RawTFEager.quantizedConv2DAndRequantize( - input, filter: filter, minInput: minInput, maxInput: maxInput, minFilter: minFilter, - maxFilter: maxFilter, minFreezedOutput: minFreezedOutput, - maxFreezedOutput: maxFreezedOutput, strides: strides, padding: padding, - dilations: dilations, paddingList: paddingList) - } - - /// Computes QuantizedConv2D per channel. - /// - /// - Parameters: - /// - input: The original input tensor. - /// - filter: The original filter tensor. - /// - min_input: The minimum value of the input tensor - /// - max_input: The maximum value of the input tensor. - /// - min_filter: The minimum value of the filter tensor. - /// - max_filter: The maximum value of the filter tensor. - /// - /// - Attrs: - /// - Tinput: The quantized type of input tensor that needs to be converted. - /// - Tfilter: The quantized type of filter tensor that needs to be converted. - /// - out_type: The quantized type of output tensor that needs to be converted. - /// - strides: list of stride values. - /// - dilations: list of dilation values. - /// - /// - Outputs: - /// - output: The output tensor. - /// - min_output: The minimum value of the final output tensor. - /// - max_output: The maximum value of the final output tensor. 
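// A usage sketch for the plain `quantizedConv2D` wrapper shown earlier in
// this hunk. Shapes and ranges are made up; at runtime the eager kernel
// expects TensorFlow's quantized dtypes, for which `UInt8`/`Int32` stand in.
import TensorFlow

let image = Tensor<UInt8>(zeros: [1, 8, 8, 3])   // NHWC input
let kernel = Tensor<UInt8>(zeros: [3, 3, 3, 4])  // HWIO filter
let (conv, convMin, convMax): (Tensor<Int32>, Tensor<Float>, Tensor<Float>) =
  _Raw.quantizedConv2D(
    image, filter: kernel,
    minInput: Tensor<Float>(0), maxInput: Tensor<Float>(255),
    minFilter: Tensor<Float>(-1), maxFilter: Tensor<Float>(1),
    strides: [1, 1, 1, 1], padding: .same)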
- @inlinable @inline(__always) - public static func quantizedConv2DPerChannel< - Tinput: TensorFlowScalar, - Tfilter: TensorFlowScalar, - OutType: TensorFlowScalar - >( - _ input: Tensor, - filter: Tensor, - minInput: Tensor, - maxInput: Tensor, - minFilter: Tensor, - maxFilter: Tensor, - strides: [Int32], - padding: Padding, - dilations: [Int32] = [1, 1, 1, 1] - ) -> (output: Tensor, minOutput: Tensor, maxOutput: Tensor) { - _RawTFEager.quantizedConv2DPerChannel( - input, filter: filter, minInput: minInput, maxInput: maxInput, minFilter: minFilter, - maxFilter: maxFilter, strides: strides, padding: padding, dilations: dilations) - } - - @inlinable @inline(__always) - public static func quantizedConv2DWithBias< - Tinput: TensorFlowScalar, - Tfilter: TensorFlowScalar, - OutType: TensorFlowScalar - >( - _ input: Tensor, - filter: Tensor, - bias: Tensor, - minInput: Tensor, - maxInput: Tensor, - minFilter: Tensor, - maxFilter: Tensor, - strides: [Int32], - padding: Padding, - dilations: [Int32] = [1, 1, 1, 1], - paddingList: [Int32] - ) -> (output: Tensor, minOutput: Tensor, maxOutput: Tensor) { - _RawTFEager.quantizedConv2DWithBias( - input, filter: filter, bias: bias, minInput: minInput, maxInput: maxInput, - minFilter: minFilter, maxFilter: maxFilter, strides: strides, padding: padding, - dilations: dilations, paddingList: paddingList) - } - - @inlinable @inline(__always) - public static func quantizedConv2DWithBiasAndRelu< - Tinput: TensorFlowScalar, - Tfilter: TensorFlowScalar, - OutType: TensorFlowScalar - >( - _ input: Tensor, - filter: Tensor, - bias: Tensor, - minInput: Tensor, - maxInput: Tensor, - minFilter: Tensor, - maxFilter: Tensor, - strides: [Int32], - padding: Padding, - dilations: [Int32] = [1, 1, 1, 1], - paddingList: [Int32] - ) -> (output: Tensor, minOutput: Tensor, maxOutput: Tensor) { - _RawTFEager.quantizedConv2DWithBiasAndRelu( - input, filter: filter, bias: bias, minInput: minInput, maxInput: maxInput, - minFilter: minFilter, maxFilter: maxFilter, strides: strides, padding: padding, - dilations: dilations, paddingList: paddingList) - } - - @inlinable @inline(__always) - public static func quantizedConv2DWithBiasAndReluAndRequantize< - Tinput: TensorFlowScalar, - Tfilter: TensorFlowScalar, - Tbias: FloatingPoint & TensorFlowScalar, - OutType: TensorFlowScalar - >( - _ input: Tensor, - filter: Tensor, - bias: Tensor, - minInput: Tensor, - maxInput: Tensor, - minFilter: Tensor, - maxFilter: Tensor, - minFreezedOutput: Tensor, - maxFreezedOutput: Tensor, - strides: [Int32], - padding: Padding, - dilations: [Int32] = [1, 1, 1, 1], - paddingList: [Int32] - ) -> (output: Tensor, minOutput: Tensor, maxOutput: Tensor) { - _RawTFEager.quantizedConv2DWithBiasAndReluAndRequantize( - input, filter: filter, bias: bias, minInput: minInput, maxInput: maxInput, - minFilter: minFilter, maxFilter: maxFilter, minFreezedOutput: minFreezedOutput, - maxFreezedOutput: maxFreezedOutput, strides: strides, padding: padding, - dilations: dilations, paddingList: paddingList) - } - - @inlinable @inline(__always) - public static func quantizedConv2DWithBiasAndRequantize< - Tinput: TensorFlowScalar, - Tfilter: TensorFlowScalar, - Tbias: FloatingPoint & TensorFlowScalar, - OutType: TensorFlowScalar - >( - _ input: Tensor, - filter: Tensor, - bias: Tensor, - minInput: Tensor, - maxInput: Tensor, - minFilter: Tensor, - maxFilter: Tensor, - minFreezedOutput: Tensor, - maxFreezedOutput: Tensor, - strides: [Int32], - padding: Padding, - dilations: [Int32] = [1, 1, 1, 1], - paddingList: [Int32] - ) -> 
(output: Tensor, minOutput: Tensor, maxOutput: Tensor) { - _RawTFEager.quantizedConv2DWithBiasAndRequantize( - input, filter: filter, bias: bias, minInput: minInput, maxInput: maxInput, - minFilter: minFilter, maxFilter: maxFilter, minFreezedOutput: minFreezedOutput, - maxFreezedOutput: maxFreezedOutput, strides: strides, padding: padding, - dilations: dilations, paddingList: paddingList) - } - - @inlinable @inline(__always) - public static func quantizedConv2DWithBiasSignedSumAndReluAndRequantize< - Tinput: TensorFlowScalar, - Tfilter: TensorFlowScalar, - Tbias: FloatingPoint & TensorFlowScalar, - Tsummand: TensorFlowScalar, - OutType: TensorFlowScalar - >( - _ input: Tensor, - filter: Tensor, - bias: Tensor, - minInput: Tensor, - maxInput: Tensor, - minFilter: Tensor, - maxFilter: Tensor, - minFreezedOutput: Tensor, - maxFreezedOutput: Tensor, - summand: Tensor, - minSummand: Tensor, - maxSummand: Tensor, - strides: [Int32], - padding: Padding, - dilations: [Int32] = [1, 1, 1, 1], - paddingList: [Int32] - ) -> (output: Tensor, minOutput: Tensor, maxOutput: Tensor) { - _RawTFEager.quantizedConv2DWithBiasSignedSumAndReluAndRequantize( - input, filter: filter, bias: bias, minInput: minInput, maxInput: maxInput, - minFilter: minFilter, maxFilter: maxFilter, minFreezedOutput: minFreezedOutput, - maxFreezedOutput: maxFreezedOutput, summand: summand, minSummand: minSummand, - maxSummand: maxSummand, strides: strides, padding: padding, dilations: dilations, - paddingList: paddingList) - } - - @inlinable @inline(__always) - public static func quantizedConv2DWithBiasSumAndRelu< - Tinput: TensorFlowScalar, - Tfilter: TensorFlowScalar, - OutType: TensorFlowScalar - >( - _ input: Tensor, - filter: Tensor, - bias: Tensor, - minInput: Tensor, - maxInput: Tensor, - minFilter: Tensor, - maxFilter: Tensor, - summand: Tensor, - strides: [Int32], - padding: Padding, - dilations: [Int32] = [1, 1, 1, 1], - paddingList: [Int32] - ) -> (output: Tensor, minOutput: Tensor, maxOutput: Tensor) { - _RawTFEager.quantizedConv2DWithBiasSumAndRelu( - input, filter: filter, bias: bias, minInput: minInput, maxInput: maxInput, - minFilter: minFilter, maxFilter: maxFilter, summand: summand, strides: strides, - padding: padding, dilations: dilations, paddingList: paddingList) - } - - @inlinable @inline(__always) - public static func quantizedConv2DWithBiasSumAndReluAndRequantize< - Tinput: TensorFlowScalar, - Tfilter: TensorFlowScalar, - Tbias: FloatingPoint & TensorFlowScalar, - Tsummand: TensorFlowScalar, - OutType: TensorFlowScalar - >( - _ input: Tensor, - filter: Tensor, - bias: Tensor, - minInput: Tensor, - maxInput: Tensor, - minFilter: Tensor, - maxFilter: Tensor, - minFreezedOutput: Tensor, - maxFreezedOutput: Tensor, - summand: Tensor, - minSummand: Tensor, - maxSummand: Tensor, - strides: [Int32], - padding: Padding, - dilations: [Int32] = [1, 1, 1, 1], - paddingList: [Int32] - ) -> (output: Tensor, minOutput: Tensor, maxOutput: Tensor) { - _RawTFEager.quantizedConv2DWithBiasSumAndReluAndRequantize( - input, filter: filter, bias: bias, minInput: minInput, maxInput: maxInput, - minFilter: minFilter, maxFilter: maxFilter, minFreezedOutput: minFreezedOutput, - maxFreezedOutput: maxFreezedOutput, summand: summand, minSummand: minSummand, - maxSummand: maxSummand, strides: strides, padding: padding, dilations: dilations, - paddingList: paddingList) - } - - /// Computes quantized depthwise Conv2D. - /// - /// - Parameters: - /// - input: The original input tensor. - /// - filter: The original filter tensor. 
- /// - min_input: The float value that the minimum quantized input value represents. - /// - max_input: The float value that the maximum quantized input value represents. - /// - min_filter: The float value that the minimum quantized filter value represents. - /// - max_filter: The float value that the maximum quantized filter value represents. - /// - /// - Attrs: - /// - Tinput: The type of the input. - /// - Tfilter: The type of the filter. - /// - out_type: The type of the output. - /// - strides: List of stride values. - /// - dilations: List of dilation values. - /// - /// - Outputs: - /// - output: The output tensor. - /// - min_output: The float value that the minimum quantized output value represents. - /// - max_output: The float value that the maximum quantized output value represents. - @inlinable @inline(__always) - public static func quantizedDepthwiseConv2D< - Tinput: TensorFlowScalar, - Tfilter: TensorFlowScalar, - OutType: TensorFlowScalar - >( - _ input: Tensor, - filter: Tensor, - minInput: Tensor, - maxInput: Tensor, - minFilter: Tensor, - maxFilter: Tensor, - strides: [Int32], - padding: Padding, - dilations: [Int32] = [1, 1, 1, 1] - ) -> (output: Tensor, minOutput: Tensor, maxOutput: Tensor) { - _RawTFEager.quantizedDepthwiseConv2D( - input, filter: filter, minInput: minInput, maxInput: maxInput, minFilter: minFilter, - maxFilter: maxFilter, strides: strides, padding: padding, dilations: dilations) - } - - /// Computes quantized depthwise Conv2D with Bias. - /// - /// - Parameters: - /// - input: The original input tensor. - /// - filter: The original filter tensor. - /// - bias: The original bias tensor. - /// - min_input: The float value that the minimum quantized input value represents. - /// - max_input: The float value that the maximum quantized input value represents. - /// - min_filter: The float value that the minimum quantized filter value represents. - /// - max_filter: The float value that the maximum quantized filter value represents. - /// - /// - Attrs: - /// - Tinput: The type of the input. - /// - Tfilter: The type of the filter. - /// - out_type: The type of the output. - /// - strides: List of stride values. - /// - dilations: List of dilation values. - /// - /// - Outputs: - /// - output: The output tensor. - /// - min_output: The float value that the minimum quantized output value represents. - /// - max_output: The float value that the maximum quantized output value represents. - @inlinable @inline(__always) - public static func quantizedDepthwiseConv2DWithBias< - Tinput: TensorFlowScalar, - Tfilter: TensorFlowScalar, - OutType: TensorFlowScalar - >( - _ input: Tensor, - filter: Tensor, - bias: Tensor, - minInput: Tensor, - maxInput: Tensor, - minFilter: Tensor, - maxFilter: Tensor, - strides: [Int32], - padding: Padding, - dilations: [Int32] = [1, 1, 1, 1] - ) -> (output: Tensor, minOutput: Tensor, maxOutput: Tensor) { - _RawTFEager.quantizedDepthwiseConv2DWithBias( - input, filter: filter, bias: bias, minInput: minInput, maxInput: maxInput, - minFilter: minFilter, maxFilter: maxFilter, strides: strides, padding: padding, - dilations: dilations) + /// Wraps the XLA DotGeneral operator, documented at + /// + /// https://www.tensorflow.org/performance/xla/operation_semantics#dotgeneral + /// . + /// + /// - Parameters: + /// - lhs: the LHS tensor + /// - rhs: the RHS tensor + /// + /// - Attrs: + /// - dimension_numbers: a serialized xla::DotDimensionNumbers proto. + /// - precision_config: a serialized xla::PrecisionConfig proto. 
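// A sketch of calling the `xlaDot` wrapper declared next. The two attribute
// strings must be *serialized* protos; the empty strings below are
// placeholders, not working values.
import TensorFlow

let lhs = Tensor<Float>(shape: [2, 2], scalars: [1, 2, 3, 4])
let rhs = Tensor<Float>(shape: [2, 2], scalars: [5, 6, 7, 8])
let product = _Raw.xlaDot(
  lhs: lhs, rhs: rhs,
  dimensionNumbers: "",  // placeholder for a serialized xla::DotDimensionNumbers
  precisionConfig: "")   // placeholder for a serialized xla::PrecisionConfig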
+ @inlinable @inline(__always)
+ public static func xlaDot<T: TensorFlowNumeric>(
+ lhs: Tensor<T>,
+ rhs: Tensor<T>,
+ dimensionNumbers: String,
+ precisionConfig: String
+ ) -> Tensor<T> {
+ switch commonBackend(lhs.handle.backend, rhs.handle.backend) {
+ case .XLA:
+ let output_device = rhs.device
+ let lhs = Tensor(copying: lhs, to: .defaultTFEager)
+ let rhs = Tensor(copying: rhs, to: .defaultTFEager)
+ return Tensor(
+ copying: _RawTFEager.xlaDot(
+ lhs: lhs, rhs: rhs, dimensionNumbers: dimensionNumbers, precisionConfig: precisionConfig
+ ), to: output_device)
+ case .TF_EAGER:
+ return _RawTFEager.xlaDot(
+ lhs: lhs, rhs: rhs, dimensionNumbers: dimensionNumbers, precisionConfig: precisionConfig)
}
- /// Computes quantized depthwise Conv2D with Bias and Relu.
- ///
- /// - Parameters:
- /// - input: The original input tensor.
- /// - filter: The original filter tensor.
- /// - bias: The original bias tensor.
- /// - min_input: The float value that the minimum quantized input value represents.
- /// - max_input: The float value that the maximum quantized input value represents.
- /// - min_filter: The float value that the minimum quantized filter value represents.
- /// - max_filter: The float value that the maximum quantized filter value represents.
- ///
- /// - Attrs:
- /// - Tinput: The type of the input.
- /// - Tfilter: The type of the filter.
- /// - out_type: The type of the output.
- /// - strides: List of stride values.
- /// - dilations: List of dilation values.
- ///
- /// - Outputs:
- /// - output: The output tensor.
- /// - min_output: The float value that the minimum quantized output value represents.
- /// - max_output: The float value that the maximum quantized output value represents.
- @inlinable @inline(__always)
- public static func quantizedDepthwiseConv2DWithBiasAndRelu<
- Tinput: TensorFlowScalar,
- Tfilter: TensorFlowScalar,
- OutType: TensorFlowScalar
- >(
- _ input: Tensor<Tinput>,
- filter: Tensor<Tfilter>,
- bias: Tensor<Float>,
- minInput: Tensor<Float>,
- maxInput: Tensor<Float>,
- minFilter: Tensor<Float>,
- maxFilter: Tensor<Float>,
- strides: [Int32],
- padding: Padding,
- dilations: [Int32] = [1, 1, 1, 1]
- ) -> (output: Tensor<OutType>, minOutput: Tensor<Float>, maxOutput: Tensor<Float>) {
- _RawTFEager.quantizedDepthwiseConv2DWithBiasAndRelu(
- input, filter: filter, bias: bias, minInput: minInput, maxInput: maxInput,
- minFilter: minFilter, maxFilter: maxFilter, strides: strides, padding: padding,
- dilations: dilations)
- }
+ }
- /// Computes quantized depthwise Conv2D with Bias, Relu and Requantize.
- ///
- /// - Parameters:
- /// - input: The original input tensor.
- /// - filter: The original filter tensor.
- /// - bias: The original bias tensor.
- /// - min_input: The float value that the minimum quantized input value represents.
- /// - max_input: The float value that the maximum quantized input value represents.
- /// - min_filter: The float value that the minimum quantized filter value represents.
- /// - max_filter: The float value that the maximum quantized filter value represents.
- /// - min_freezed_output: The minimum float value of the output tensor.
- /// - max_freezed_output: The maximum float value of the output tensor.
- ///
- /// - Attrs:
- /// - Tinput: The type of the input.
- /// - Tfilter: The type of the filter.
- /// - Tbias: The type of the bias.
- /// - out_type: The type of the output.
- /// - strides: List of stride values.
- /// - dilations: List of dilation values.
- ///
- /// - Outputs:
- /// - output: The output tensor.
- /// - min_output: The float value that the minimum quantized output value represents.
- /// - max_output: The float value that the maximum quantized output value represents.
- @inlinable @inline(__always)
- public static func quantizedDepthwiseConv2DWithBiasAndReluAndRequantize<
- Tinput: TensorFlowScalar,
- Tfilter: TensorFlowScalar,
- Tbias: FloatingPoint & TensorFlowScalar,
- OutType: TensorFlowScalar
- >(
- _ input: Tensor<Tinput>,
- filter: Tensor<Tfilter>,
- bias: Tensor<Tbias>,
- minInput: Tensor<Float>,
- maxInput: Tensor<Float>,
- minFilter: Tensor<Float>,
- maxFilter: Tensor<Float>,
- minFreezedOutput: Tensor<Float>,
- maxFreezedOutput: Tensor<Float>,
- strides: [Int32],
- padding: Padding,
- dilations: [Int32] = [1, 1, 1, 1]
- ) -> (output: Tensor<OutType>, minOutput: Tensor<Float>, maxOutput: Tensor<Float>) {
- _RawTFEager.quantizedDepthwiseConv2DWithBiasAndReluAndRequantize(
- input, filter: filter, bias: bias, minInput: minInput, maxInput: maxInput,
- minFilter: minFilter, maxFilter: maxFilter, minFreezedOutput: minFreezedOutput,
- maxFreezedOutput: maxFreezedOutput, strides: strides, padding: padding, dilations: dilations
- )
+ /// Wraps the XLA DynamicSlice operator, documented at
+ ///
+ /// https://www.tensorflow.org/performance/xla/operation_semantics#dynamicslice
+ /// .
+ ///
+ /// DynamicSlice extracts a sub-array from the input array at dynamic
+ /// start_indices. The size of the slice in each dimension is passed in
+ /// size_indices, which specify the end point of exclusive slice intervals in each
+ /// dimension -- [start, start + size). The shape of start_indices must have rank 1,
+ /// with dimension size equal to the rank of operand.
+ ///
+ /// - Parameters:
+ /// - input: A `Tensor` of type T.
+ /// - start_indices: List of N integers containing the slice size for each
+ /// dimension. Each value must be strictly greater than zero, and start + size
+ /// must be less than or equal to the size of the dimension to avoid
+ /// implementation defined behavior.
+ @inlinable @inline(__always)
+ public static func xlaDynamicSlice<
+ T: TensorFlowScalar,
+ Tindices: TensorFlowIndex
+ >(
+ _ input: Tensor<T>,
+ startIndices: Tensor<Tindices>,
+ sizeIndices: Tensor<Tindices>
+ ) -> Tensor<T> {
+ switch commonBackend(
+ commonBackend(input.handle.backend, startIndices.handle.backend), sizeIndices.handle.backend
+ ) {
+ case .XLA:
+ let output_device = sizeIndices.device
+ let input = Tensor(copying: input, to: .defaultTFEager)
+ let startIndices = Tensor(copying: startIndices, to: .defaultTFEager)
+ let sizeIndices = Tensor(copying: sizeIndices, to: .defaultTFEager)
+ return Tensor(
+ copying: _RawTFEager.xlaDynamicSlice(
+ input, startIndices: startIndices, sizeIndices: sizeIndices), to: output_device)
+ case .TF_EAGER:
+ return _RawTFEager.xlaDynamicSlice(
+ input, startIndices: startIndices, sizeIndices: sizeIndices)
}
- /// Quantized Instance normalization.
- ///
- /// - Parameters:
- /// - x: A 4D input Tensor.
- /// - x_min: The value represented by the lowest quantized input.
- /// - x_max: The value represented by the highest quantized input.
- ///
- /// - Attrs:
- /// - output_range_given: If True, `given_y_min`
- /// and `given_y_max` are used as the output range. Otherwise,
- /// the implementation computes the output range.
- /// - given_y_min: Output in `y_min` if `output_range_given` is True.
- /// - given_y_max: Output in `y_max` if `output_range_given` is True.
- /// - variance_epsilon: A small float number to avoid dividing by 0.
- /// - min_separation: Minimum value of `y_max - y_min`
- ///
- /// - Outputs:
- /// - y: A 4D Tensor.
- /// - y_min: The value represented by the lowest quantized output.
- /// - y_max: The value represented by the highest quantized output. - @inlinable @inline(__always) - public static func quantizedInstanceNorm( - _ x: Tensor, - xMin: Tensor, - xMax: Tensor, - outputRangeGiven: Bool = false, - givenYMin: Double = 0, - givenYMax: Double = 0, - varianceEpsilon: Double = 1e-05, - minSeparation: Double = 0.001 - ) -> (y: Tensor, yMin: Tensor, yMax: Tensor) { - _RawTFEager.quantizedInstanceNorm( - x, xMin: xMin, xMax: xMax, outputRangeGiven: outputRangeGiven, givenYMin: givenYMin, - givenYMax: givenYMax, varianceEpsilon: varianceEpsilon, minSeparation: minSeparation) - } - - /// Perform a quantized matrix multiplication of `a` by the matrix `b`. - /// - /// The inputs must be two-dimensional matrices and the inner dimension of - /// `a` (after being transposed if `transpose_a` is non-zero) must match the - /// outer dimension of `b` (after being transposed if `transposed_b` is - /// non-zero). - /// - /// - Parameters: - /// - a: Must be a two-dimensional tensor. - /// - b: Must be a two-dimensional tensor. - /// - min_a: The float value that the lowest quantized `a` value represents. - /// - max_a: The float value that the highest quantized `a` value represents. - /// - min_b: The float value that the lowest quantized `b` value represents. - /// - max_b: The float value that the highest quantized `b` value represents. - /// - /// - Attrs: - /// - transpose_a: If true, `a` is transposed before multiplication. - /// - transpose_b: If true, `b` is transposed before multiplication. - /// - Tactivation: The type of output produced by activation function - /// following this operation. - /// - /// - Outputs: - /// - min_out: The float value that the lowest quantized output value represents. - /// - max_out: The float value that the highest quantized output value represents. - @inlinable @inline(__always) - public static func quantizedMatMul< - T1: TensorFlowScalar, - T2: TensorFlowScalar, - Toutput: TensorFlowScalar - >( - _ a: Tensor, - _ b: Tensor, - minA: Tensor, - maxA: Tensor, - minB: Tensor, - maxB: Tensor, - transposeA: Bool = false, - transposeB: Bool = false, - tactivation: TensorDataType - ) -> (out: Tensor, minOut: Tensor, maxOut: Tensor) { - _RawTFEager.quantizedMatMul( - a, b, minA: minA, maxA: maxA, minB: minB, maxB: maxB, transposeA: transposeA, - transposeB: transposeB, tactivation: tactivation) - } - - /// Performs a quantized matrix multiplication of `a` by the matrix `b` with bias - /// add. - /// - /// The inputs must be two-dimensional matrices and 1D bias vector. And the inner - /// dimension of `a` (after being transposed if `transpose_a` is non-zero) must - /// match the outer dimension of `b` (after being transposed if `transposed_b` is - /// non-zero). Then do broadcast add operation with bias values on the matrix - /// mulplication result. The bias size must match inner dimension of `b`. - /// - /// - Parameters: - /// - a: A matrix to be multiplied. Must be a two-dimensional tensor of type `quint8`. - /// - b: A matrix to be multiplied and must be a two-dimensional tensor of type `qint8`. - /// - bias: A 1D bias tensor with size matching inner dimension of `b` (after being - /// transposed if `transposed_b` is non-zero). - /// - min_a: The float value that the lowest quantized `a` value represents. - /// - max_a: The float value that the highest quantized `a` value represents. - /// - min_b: The float value that the lowest quantized `b` value represents. - /// - max_b: The float value that the highest quantized `b` value represents. 
- /// - /// - Attrs: - /// - transpose_a: If true, `a` is transposed before multiplication. - /// - transpose_b: If true, `b` is transposed before multiplication. - /// - input_quant_mode: Input data quantization mode. Either MIN_FIRST(default) or SCALED. - /// - /// - Outputs: - /// - min_out: The float value that the lowest quantized output value represents. - /// - max_out: The float value that the highest quantized output value represents. - @inlinable @inline(__always) - public static func quantizedMatMulWithBias< - T1: TensorFlowScalar, - T2: TensorFlowScalar, - Tbias: FloatingPoint & TensorFlowScalar, - Toutput: TensorFlowScalar - >( - _ a: Tensor, - _ b: Tensor, - bias: Tensor, - minA: Tensor, - maxA: Tensor, - minB: Tensor, - maxB: Tensor, - transposeA: Bool = false, - transposeB: Bool = false, - inputQuantMode: InputQuantMode = .minFirst - ) -> (out: Tensor, minOut: Tensor, maxOut: Tensor) { - _RawTFEager.quantizedMatMulWithBias( - a, b, bias: bias, minA: minA, maxA: maxA, minB: minB, maxB: maxB, transposeA: transposeA, - transposeB: transposeB, inputQuantMode: inputQuantMode) - } - - /// Perform a quantized matrix multiplication of `a` by the matrix `b` with bias - /// add and relu fusion. - /// - /// The inputs must be two-dimensional matrices and 1D bias vector. And the inner - /// dimension of `a` (after being transposed if `transpose_a` is non-zero) must - /// match the outer dimension of `b` (after being transposed if `transposed_b` is - /// non-zero). Then do broadcast add operation with bias values on the matrix - /// mulplication result. The bias size must match inner dimension of `b`. Then do - /// relu activation to get non-negative result. - /// - /// - Parameters: - /// - a: A matrix to be multiplied. Must be a two-dimensional tensor of type `quint8`. - /// - b: A matrix to be multiplied and must be a two-dimensional tensor of type `qint8`. - /// - bias: A 1D bias tensor with size matching with inner dimension of `b` (after being - /// transposed if `transposed_b` is non-zero). - /// - min_a: The float value that the lowest quantized `a` value represents. - /// - max_a: The float value that the highest quantized `a` value represents. - /// - min_b: The float value that the lowest quantized `b` value represents. - /// - max_b: The float value that the highest quantized `b` value represents. - /// - /// - Attrs: - /// - transpose_a: If true, `a` is transposed before multiplication. - /// - transpose_b: If true, `b` is transposed before multiplication. - /// - input_quant_mode: Input data quantization mode. Either MIN_FIRST(default) or SCALED. - /// - /// - Outputs: - /// - min_out: The float value that the lowest quantized output value represents. - /// - max_out: The float value that the highest quantized output value represents. 
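// A sketch of the plain `quantizedMatMul` wrapper from this hunk, alongside
// its fused bias/relu variants. Ranges are arbitrary, `UInt8`/`Int32` stand
// in for quantized dtypes, and `tactivation` names the activation output type.
import TensorFlow

let a = Tensor<UInt8>(zeros: [2, 3])
let b = Tensor<UInt8>(zeros: [3, 4])
let (mmOut, mmMin, mmMax): (Tensor<Int32>, Tensor<Float>, Tensor<Float>) =
  _Raw.quantizedMatMul(
    a, b,
    minA: Tensor<Float>(0), maxA: Tensor<Float>(255),
    minB: Tensor<Float>(0), maxB: Tensor<Float>(255),
    tactivation: Int32.tensorFlowDataType)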
- @inlinable @inline(__always)
- public static func quantizedMatMulWithBiasAndRelu<
- T1: TensorFlowScalar,
- T2: TensorFlowScalar,
- Toutput: TensorFlowScalar
- >(
- _ a: Tensor<T1>,
- _ b: Tensor<T2>,
- bias: Tensor<Float>,
- minA: Tensor<Float>,
- maxA: Tensor<Float>,
- minB: Tensor<Float>,
- maxB: Tensor<Float>,
- transposeA: Bool = false,
- transposeB: Bool = false,
- inputQuantMode: InputQuantMode = .minFirst
- ) -> (out: Tensor<Toutput>, minOut: Tensor<Float>, maxOut: Tensor<Float>) {
- _RawTFEager.quantizedMatMulWithBiasAndRelu(
- a, b, bias: bias, minA: minA, maxA: maxA, minB: minB, maxB: maxB, transposeA: transposeA,
- transposeB: transposeB, inputQuantMode: inputQuantMode)
- }
-
- /// Perform a quantized matrix multiplication of `a` by the matrix `b` with bias
- /// add and relu and requantize fusion.
- ///
- /// The inputs must be two-dimensional matrices and 1D bias vector. And the inner
- /// dimension of `a` (after being transposed if `transpose_a` is non-zero) must
- /// match the outer dimension of `b` (after being transposed if `transposed_b` is
- /// non-zero). Then do broadcast add operation with bias values on the matrix
- /// multiplication result. The bias size must match inner dimension of `b`. Then do
- /// relu activation to get non-negative result. Then do requantize operation to get
- /// final uint8 result.
- ///
- /// - Parameters:
- /// - a: A matrix to be multiplied. Must be a two-dimensional tensor of type `quint8`.
- /// - b: A matrix to be multiplied and must be a two-dimensional tensor of type `qint8`.
- /// - bias: A 1D bias tensor with size matching with inner dimension of `b` (after being
- /// transposed if `transposed_b` is non-zero).
- /// - min_a: The float value that the lowest quantized `a` value represents.
- /// - max_a: The float value that the highest quantized `a` value represents.
- /// - min_b: The float value that the lowest quantized `b` value represents.
- /// - max_b: The float value that the highest quantized `b` value represents.
- /// - min_freezed_output: The float value that the lowest quantized output value after requantize represents.
- ///
- /// - Attrs:
- /// - transpose_a: If true, `a` is transposed before multiplication.
- /// - transpose_b: If true, `b` is transposed before multiplication.
- /// - input_quant_mode: Input data quantization mode. Either MIN_FIRST(default) or SCALED.
- ///
- /// - Outputs:
- /// - min_out: The float value that the lowest quantized output value represents.
- /// - max_out: The float value that the highest quantized output value represents.
- @inlinable @inline(__always)
- public static func quantizedMatMulWithBiasAndReluAndRequantize<
- T1: TensorFlowScalar,
- T2: TensorFlowScalar,
- Tbias: FloatingPoint & TensorFlowScalar,
- Toutput: TensorFlowScalar
- >(
- _ a: Tensor<T1>,
- _ b: Tensor<T2>,
- bias: Tensor<Tbias>,
- minA: Tensor<Float>,
- maxA: Tensor<Float>,
- minB: Tensor<Float>,
- maxB: Tensor<Float>,
- minFreezedOutput: Tensor<Float>,
- maxFreezedOutput: Tensor<Float>,
- transposeA: Bool = false,
- transposeB: Bool = false,
- inputQuantMode: InputQuantMode = .minFirst
- ) -> (out: Tensor<Toutput>, minOut: Tensor<Float>, maxOut: Tensor<Float>) {
- _RawTFEager.quantizedMatMulWithBiasAndReluAndRequantize(
- a, b, bias: bias, minA: minA, maxA: maxA, minB: minB, maxB: maxB,
- minFreezedOutput: minFreezedOutput, maxFreezedOutput: maxFreezedOutput,
- transposeA: transposeA, transposeB: transposeB, inputQuantMode: inputQuantMode)
- }
-
- /// Produces the max pool of the input tensor for quantized types.
- ///
- /// - Parameters:
- /// - input: The 4D (batch x rows x cols x depth) Tensor to MaxReduce over.
- /// - min_input: The float value that the lowest quantized input value represents. - /// - max_input: The float value that the highest quantized input value represents. - /// - /// - Attrs: - /// - ksize: The size of the window for each dimension of the input tensor. - /// The length must be 4 to match the number of dimensions of the input. - /// - strides: The stride of the sliding window for each dimension of the input - /// tensor. The length must be 4 to match the number of dimensions of the input. - /// - padding: The type of padding algorithm to use. - /// - /// - Outputs: - /// - min_output: The float value that the lowest quantized output value represents. - /// - max_output: The float value that the highest quantized output value represents. - @inlinable @inline(__always) - public static func quantizedMaxPool( - _ input: Tensor, - minInput: Tensor, - maxInput: Tensor, - ksize: [Int32], - strides: [Int32], - padding: Padding - ) -> (output: Tensor, minOutput: Tensor, maxOutput: Tensor) { - _RawTFEager.quantizedMaxPool( - input, minInput: minInput, maxInput: maxInput, ksize: ksize, strides: strides, - padding: padding) - } + } - /// Returns x * y element-wise, working on quantized buffers. - /// - /// - Parameters: - /// - min_x: The float value that the lowest quantized `x` value represents. - /// - max_x: The float value that the highest quantized `x` value represents. - /// - min_y: The float value that the lowest quantized `y` value represents. - /// - max_y: The float value that the highest quantized `y` value represents. - /// - /// - Outputs: - /// - min_z: The float value that the lowest quantized output value represents. - /// - max_z: The float value that the highest quantized output value represents. - /// - /// *NOTE*: `QuantizedMul` supports limited forms of broadcasting. More about - /// broadcasting [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) - @inlinable @inline(__always) - public static func quantizedMul< - T1: TensorFlowScalar, - T2: TensorFlowScalar, - Toutput: TensorFlowScalar - >( - _ x: Tensor, - _ y: Tensor, - minX: Tensor, - maxX: Tensor, - minY: Tensor, - maxY: Tensor - ) -> (z: Tensor, minZ: Tensor, maxZ: Tensor) { - _RawTFEager.quantizedMul(x, y, minX: minX, maxX: maxX, minY: minY, maxY: maxY) - } - - /// Computes Quantized Rectified Linear: `max(features, 0)` - /// - /// - Parameters: - /// - min_features: The float value that the lowest quantized value represents. - /// - max_features: The float value that the highest quantized value represents. - /// - /// - Outputs: - /// - activations: Has the same output shape as "features". - /// - min_activations: The float value that the lowest quantized value represents. - /// - max_activations: The float value that the highest quantized value represents. - @inlinable @inline(__always) - public static func quantizedRelu< - Tinput: TensorFlowScalar, - OutType: TensorFlowScalar - >( - features: Tensor, - minFeatures: Tensor, - maxFeatures: Tensor - ) -> ( - activations: Tensor, minActivations: Tensor, maxActivations: Tensor - ) { - _RawTFEager.quantizedRelu( - features: features, minFeatures: minFeatures, maxFeatures: maxFeatures) - } - - /// Computes Quantized Rectified Linear 6: `min(max(features, 0), 6)` - /// - /// - Parameters: - /// - min_features: The float value that the lowest quantized value represents. - /// - max_features: The float value that the highest quantized value represents. - /// - /// - Outputs: - /// - activations: Has the same output shape as "features". 
- /// - min_activations: The float value that the lowest quantized value represents. - /// - max_activations: The float value that the highest quantized value represents. - @inlinable @inline(__always) - public static func quantizedRelu6< - Tinput: TensorFlowScalar, - OutType: TensorFlowScalar - >( - features: Tensor, - minFeatures: Tensor, - maxFeatures: Tensor - ) -> ( - activations: Tensor, minActivations: Tensor, maxActivations: Tensor - ) { - _RawTFEager.quantizedRelu6( - features: features, minFeatures: minFeatures, maxFeatures: maxFeatures) - } - - /// Computes Quantized Rectified Linear X: `min(max(features, 0), max_value)` - /// - /// - Parameters: - /// - min_features: The float value that the lowest quantized value represents. - /// - max_features: The float value that the highest quantized value represents. - /// - /// - Outputs: - /// - activations: Has the same output shape as "features". - /// - min_activations: The float value that the lowest quantized value represents. - /// - max_activations: The float value that the highest quantized value represents. - @inlinable @inline(__always) - public static func quantizedReluX< - Tinput: TensorFlowScalar, - OutType: TensorFlowScalar - >( - features: Tensor, - maxValue: Tensor, - minFeatures: Tensor, - maxFeatures: Tensor - ) -> ( - activations: Tensor, minActivations: Tensor, maxActivations: Tensor - ) { - _RawTFEager.quantizedReluX( - features: features, maxValue: maxValue, minFeatures: minFeatures, maxFeatures: maxFeatures) - } - - /// Reshapes a quantized tensor as per the Reshape op. - /// - /// ``` - /// - /// - Parameters: - /// - shape: Defines the shape of the output tensor. - /// - input_min: The minimum value of the input. - /// - input_max: The maximum value of the input. - /// - /// - Outputs: - /// - output_min: This value is copied from input_min. - /// - output_max: This value is copied from input_max. - @inlinable @inline(__always) - public static func quantizedReshape< - T: TensorFlowScalar, - Tshape: TensorFlowIndex - >( - _ tensor: Tensor, - shape: Tensor, - inputMin: Tensor, - inputMax: Tensor - ) -> (output: Tensor, outputMin: Tensor, outputMax: Tensor) { - _RawTFEager.quantizedReshape(tensor, shape: shape, inputMin: inputMin, inputMax: inputMax) - } - - /// Resize quantized `images` to `size` using quantized bilinear interpolation. - /// - /// Input images and output images must be quantized types. - /// - /// - Parameters: - /// - images: 4-D with shape `[batch, height, width, channels]`. - /// - size: = A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The - /// new size for the images. - /// - /// - Attr align_corners: If true, the centers of the 4 corner pixels of the input and output tensors are - /// aligned, preserving the values at the corner pixels. Defaults to false. - /// - /// - Output resized_images: 4-D with shape - /// `[batch, new_height, new_width, channels]`. - @inlinable @inline(__always) - public static func quantizedResizeBilinear( - images: Tensor, - size: Tensor, - min: Tensor, - max: Tensor, - alignCorners: Bool = false, - halfPixelCenters: Bool = false - ) -> (resizedImages: Tensor, outMin: Tensor, outMax: Tensor) { - _RawTFEager.quantizedResizeBilinear( - images: images, size: size, min: min, max: max, alignCorners: alignCorners, - halfPixelCenters: halfPixelCenters) + /// Wraps the XLA DynamicUpdateSlice operator, documented at + /// + /// https://www.tensorflow.org/performance/xla/operation_semantics#dynamicupdateslice + /// . 
+ ///
+ /// XlaDynamicUpdateSlice generates a result which is the value of the `input`
+ /// operand, with a slice update overwritten at `indices`. The shape of `update`
+ /// determines the shape of the sub-array of the result which is updated. The shape
+ /// of indices must be rank == 1, with dimension size equal to the rank of `input`.
+ ///
+ /// Handling of out-of-bounds slice indices is implementation-defined.
+ ///
+ /// - Parameters:
+ /// - input: A `Tensor` of type T.
+ /// - update: A `Tensor` of type T. Same rank as `input`.
+ /// - indices: A vector of indices into `input`. Must have length equal to the rank of
+ /// `input`.
+ ///
+ /// - Output output: A `Tensor` of type T.
+ @inlinable @inline(__always)
+ public static func xlaDynamicUpdateSlice<
+ T: TensorFlowScalar,
+ Tindices: TensorFlowIndex
+ >(
+ _ input: Tensor<T>,
+ update: Tensor<T>,
+ indices: Tensor<Tindices>
+ ) -> Tensor<T> {
+ switch commonBackend(
+ commonBackend(input.handle.backend, update.handle.backend), indices.handle.backend)
+ {
+ case .XLA:
+ let output_device = indices.device
+ let input = Tensor(copying: input, to: .defaultTFEager)
+ let update = Tensor(copying: update, to: .defaultTFEager)
+ let indices = Tensor(copying: indices, to: .defaultTFEager)
+ return Tensor(
+ copying: _RawTFEager.xlaDynamicUpdateSlice(input, update: update, indices: indices),
+ to: output_device)
+ case .TF_EAGER:
+ return _RawTFEager.xlaDynamicUpdateSlice(input, update: update, indices: indices)
}
- /// Closes the given queue.
- ///
- /// This operation signals that no more elements will be enqueued in the
- /// given queue. Subsequent Enqueue(Many) operations will fail.
- /// Subsequent Dequeue(Many) operations will continue to succeed if
- /// sufficient elements remain in the queue. Subsequent Dequeue(Many)
- /// operations that would block will fail immediately.
- ///
- /// - Parameter handle: The handle to a queue.
- ///
- /// - Attr cancel_pending_enqueues: If true, all pending enqueue requests that are
- /// blocked on the given queue will be canceled.
- @inlinable @inline(__always)
- public static func queueCloseV2(
- handle: ResourceHandle,
- cancelPendingEnqueues: Bool = false
- ) {
- _RawTFEager.queueCloseV2(handle: handle, cancelPendingEnqueues: cancelPendingEnqueues)
- }
-
- /// Dequeues `n` tuples of one or more tensors from the given queue.
- ///
- /// If the queue is closed and there are fewer than `n` elements, then an
- /// OutOfRange error is returned.
- ///
- /// This operation concatenates queue-element component tensors along the
- /// 0th dimension to make a single component tensor. All of the components
- /// in the dequeued tuple will have size `n` in the 0th dimension.
- ///
- /// This operation has `k` outputs, where `k` is the number of components in
- /// the tuples stored in the given queue, and output `i` is the ith
- /// component of the dequeued tuple.
- ///
- /// N.B. If the queue is empty, this operation will block until `n` elements
- /// have been dequeued (or 'timeout_ms' elapses, if specified).
- ///
- /// - Parameters:
- /// - handle: The handle to a queue.
- /// - n: The number of tuples to dequeue.
- ///
- /// - Attrs:
- /// - component_types: The type of each component in a tuple.
- /// - timeout_ms: If the queue has fewer than n elements, this operation
- /// will block for up to timeout_ms milliseconds.
- /// Note: This option is not supported yet.
- ///
- /// - Output components: One or more tensors that were dequeued as a tuple.
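// A usage sketch for the `xlaDynamicUpdateSlice` wrapper completed above:
// overwrite a 1x2 patch inside a 3x3 tensor starting at row 1, column 0.
// Values are illustrative.
import TensorFlow

let base = Tensor<Float>(zeros: [3, 3])
let patch = Tensor<Float>(shape: [1, 2], scalars: [1, 2])
let start = Tensor<Int32>([1, 0])  // one start index per dimension of `base`
let updated = _Raw.xlaDynamicUpdateSlice(base, update: patch, indices: start)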
- @inlinable @inline(__always) - public static func queueDequeueManyV2( - handle: ResourceHandle, - n: Tensor, - timeoutMs: Int64 = -1 - ) -> ComponentTypes { - _RawTFEager.queueDequeueManyV2(handle: handle, n: n, timeoutMs: timeoutMs) - } - - /// Dequeues `n` tuples of one or more tensors from the given queue. - /// - /// This operation is not supported by all queues. If a queue does not support - /// DequeueUpTo, then an Unimplemented error is returned. - /// - /// If the queue is closed and there are more than 0 but less than `n` - /// elements remaining, then instead of returning an OutOfRange error like - /// QueueDequeueMany, less than `n` elements are returned immediately. If - /// the queue is closed and there are 0 elements left in the queue, then - /// an OutOfRange error is returned just like in QueueDequeueMany. - /// Otherwise the behavior is identical to QueueDequeueMany: - /// - /// This operation concatenates queue-element component tensors along the - /// 0th dimension to make a single component tensor. All of the components - /// in the dequeued tuple will have size n in the 0th dimension. - /// - /// This operation has `k` outputs, where `k` is the number of components in - /// the tuples stored in the given queue, and output `i` is the ith - /// component of the dequeued tuple. - /// - /// - Parameters: - /// - handle: The handle to a queue. - /// - n: The number of tuples to dequeue. - /// - /// - Attrs: - /// - component_types: The type of each component in a tuple. - /// - timeout_ms: If the queue has fewer than n elements, this operation - /// will block for up to timeout_ms milliseconds. - /// Note: This option is not supported yet. - /// - /// - Output components: One or more tensors that were dequeued as a tuple. - @inlinable @inline(__always) - public static func queueDequeueUpToV2( - handle: ResourceHandle, - n: Tensor, - timeoutMs: Int64 = -1 - ) -> ComponentTypes { - _RawTFEager.queueDequeueUpToV2(handle: handle, n: n, timeoutMs: timeoutMs) - } - - /// Dequeues a tuple of one or more tensors from the given queue. - /// - /// This operation has k outputs, where k is the number of components - /// in the tuples stored in the given queue, and output i is the ith - /// component of the dequeued tuple. - /// - /// N.B. If the queue is empty, this operation will block until an element - /// has been dequeued (or 'timeout_ms' elapses, if specified). - /// - /// - Parameter handle: The handle to a queue. - /// - /// - Attrs: - /// - component_types: The type of each component in a tuple. - /// - timeout_ms: If the queue is empty, this operation will block for up to - /// timeout_ms milliseconds. - /// Note: This option is not supported yet. - /// - /// - Output components: One or more tensors that were dequeued as a tuple. - @inlinable @inline(__always) - public static func queueDequeueV2( - handle: ResourceHandle, - timeoutMs: Int64 = -1 - ) -> ComponentTypes { - _RawTFEager.queueDequeueV2(handle: handle, timeoutMs: timeoutMs) - } - - /// Enqueues zero or more tuples of one or more tensors in the given queue. - /// - /// This operation slices each component tensor along the 0th dimension to - /// make multiple queue elements. All of the tuple components must have the - /// same size in the 0th dimension. - /// - /// The components input has k elements, which correspond to the components of - /// tuples stored in the given queue. - /// - /// N.B. 
If the queue is full, this operation will block until the given - /// elements have been enqueued (or 'timeout_ms' elapses, if specified). - /// - /// - Parameters: - /// - handle: The handle to a queue. - /// - components: One or more tensors from which the enqueued tensors should - /// be taken. - /// - /// - Attr timeout_ms: If the queue is too full, this operation will block for up - /// to timeout_ms milliseconds. - /// Note: This option is not supported yet. - @inlinable @inline(__always) - public static func queueEnqueueManyV2( - handle: ResourceHandle, - components: Tcomponents, - timeoutMs: Int64 = -1 - ) { - _RawTFEager.queueEnqueueManyV2(handle: handle, components: components, timeoutMs: timeoutMs) - } - - /// Enqueues a tuple of one or more tensors in the given queue. - /// - /// The components input has k elements, which correspond to the components of - /// tuples stored in the given queue. - /// - /// N.B. If the queue is full, this operation will block until the given - /// element has been enqueued (or 'timeout_ms' elapses, if specified). - /// - /// - Parameters: - /// - handle: The handle to a queue. - /// - components: One or more tensors from which the enqueued tensors should be taken. - /// - /// - Attr timeout_ms: If the queue is full, this operation will block for up to - /// timeout_ms milliseconds. - /// Note: This option is not supported yet. - @inlinable @inline(__always) - public static func queueEnqueueV2( - handle: ResourceHandle, - components: Tcomponents, - timeoutMs: Int64 = -1 - ) { - _RawTFEager.queueEnqueueV2(handle: handle, components: components, timeoutMs: timeoutMs) - } - - /// Returns true if queue is closed. - /// - /// This operation returns true if the queue is closed and false if the queue - /// is open. - /// - /// - Parameter handle: The handle to a queue. - @inlinable @inline(__always) - public static func queueIsClosedV2( - handle: ResourceHandle - ) -> Tensor { - _RawTFEager.queueIsClosedV2(handle: handle) - } - - /// Computes the number of elements in the given queue. - /// - /// - Parameter handle: The handle to a queue. - /// - /// - Output size: The number of elements in the given queue. - @inlinable @inline(__always) - public static func queueSizeV2( - handle: ResourceHandle - ) -> Tensor { - _RawTFEager.queueSizeV2(handle: handle) - } - - /// Real-valued fast Fourier transform. - /// - /// Computes the 1-dimensional discrete Fourier transform of a real-valued signal - /// over the inner-most dimension of `input`. - /// - /// Since the DFT of a real signal is Hermitian-symmetric, `RFFT` only returns the - /// `fft_length / 2 + 1` unique components of the FFT: the zero-frequency term, - /// followed by the `fft_length / 2` positive-frequency terms. - /// - /// Along the axis `RFFT` is computed on, if `fft_length` is smaller than the - /// corresponding dimension of `input`, the dimension is cropped. If it is larger, - /// the dimension is padded with zeros. - /// - /// - Parameters: - /// - input: A float32 tensor. - /// - fft_length: An int32 tensor of shape [1]. The FFT length. - /// - /// - Output output: A complex64 tensor of the same rank as `input`. The inner-most - /// dimension of `input` is replaced with the `fft_length / 2 + 1` unique - /// frequency components of its 1D Fourier transform. 
- /// - /// @compatibility(numpy) - /// Equivalent to np.fft.rfft - /// @end_compatibility - @inlinable @inline(__always) - public static func rFFT< - Treal: FloatingPoint & TensorFlowScalar, - Tcomplex: TensorFlowScalar - >( - _ input: Tensor, - fftLength: Tensor - ) -> Tensor { - switch commonBackend(input.handle.backend, fftLength.handle.backend) { - case .XLA: - let output_device = fftLength.device - let input = Tensor(copying: input, to: .defaultTFEager) - let fftLength = Tensor(copying: fftLength, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.rFFT(input, fftLength: fftLength), to: output_device) - case .TF_EAGER: - return _RawTFEager.rFFT(input, fftLength: fftLength) - } - - } - - /// 2D real-valued fast Fourier transform. - /// - /// Computes the 2-dimensional discrete Fourier transform of a real-valued signal - /// over the inner-most 2 dimensions of `input`. - /// - /// Since the DFT of a real signal is Hermitian-symmetric, `RFFT2D` only returns the - /// `fft_length / 2 + 1` unique components of the FFT for the inner-most dimension - /// of `output`: the zero-frequency term, followed by the `fft_length / 2` - /// positive-frequency terms. - /// - /// Along each axis `RFFT2D` is computed on, if `fft_length` is smaller than the - /// corresponding dimension of `input`, the dimension is cropped. If it is larger, - /// the dimension is padded with zeros. - /// - /// - Parameters: - /// - input: A float32 tensor. - /// - fft_length: An int32 tensor of shape [2]. The FFT length for each dimension. - /// - /// - Output output: A complex64 tensor of the same rank as `input`. The inner-most 2 - /// dimensions of `input` are replaced with their 2D Fourier transform. The - /// inner-most dimension contains `fft_length / 2 + 1` unique frequency - /// components. - /// - /// @compatibility(numpy) - /// Equivalent to np.fft.rfft2 - /// @end_compatibility - @inlinable @inline(__always) - public static func rFFT2D< - Treal: FloatingPoint & TensorFlowScalar, - Tcomplex: TensorFlowScalar - >( - _ input: Tensor, - fftLength: Tensor - ) -> Tensor { - switch commonBackend(input.handle.backend, fftLength.handle.backend) { - case .XLA: - let output_device = fftLength.device - let input = Tensor(copying: input, to: .defaultTFEager) - let fftLength = Tensor(copying: fftLength, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.rFFT2D(input, fftLength: fftLength), to: output_device) - case .TF_EAGER: - return _RawTFEager.rFFT2D(input, fftLength: fftLength) - } - - } - - /// 3D real-valued fast Fourier transform. - /// - /// Computes the 3-dimensional discrete Fourier transform of a real-valued signal - /// over the inner-most 3 dimensions of `input`. - /// - /// Since the DFT of a real signal is Hermitian-symmetric, `RFFT3D` only returns the - /// `fft_length / 2 + 1` unique components of the FFT for the inner-most dimension - /// of `output`: the zero-frequency term, followed by the `fft_length / 2` - /// positive-frequency terms. - /// - /// Along each axis `RFFT3D` is computed on, if `fft_length` is smaller than the - /// corresponding dimension of `input`, the dimension is cropped. If it is larger, - /// the dimension is padded with zeros. - /// - /// - Parameters: - /// - input: A float32 tensor. - /// - fft_length: An int32 tensor of shape [3]. The FFT length for each dimension. - /// - /// - Output output: A complex64 tensor of the same rank as `input`. The inner-most 3 - /// dimensions of `input` are replaced with the their 3D Fourier transform. 
- ///   inner-most dimension contains `fft_length / 2 + 1` unique frequency
- ///   components.
- ///
- /// @compatibility(numpy)
- /// Equivalent to np.fft.rfftn with 3 dimensions.
- /// @end_compatibility
- @inlinable @inline(__always)
- public static func rFFT3D<
-   Treal: FloatingPoint & TensorFlowScalar,
-   Tcomplex: TensorFlowScalar
- >(
-   _ input: Tensor,
-   fftLength: Tensor
- ) -> Tensor {
-   switch commonBackend(input.handle.backend, fftLength.handle.backend) {
-   case .XLA:
-     let output_device = fftLength.device
-     let input = Tensor(copying: input, to: .defaultTFEager)
-     let fftLength = Tensor(copying: fftLength, to: .defaultTFEager)
-     return Tensor(
-       copying: _RawTFEager.rFFT3D(input, fftLength: fftLength), to: output_device)
-   case .TF_EAGER:
-     return _RawTFEager.rFFT3D(input, fftLength: fftLength)
-   }
-
- }
-
- /// Converts one or more images from RGB to HSV.
- ///
- /// Outputs a tensor of the same shape as the `images` tensor, containing the HSV
- /// value of the pixels. The output is only well defined if the values in `images`
- /// are in `[0,1]`.
- ///
- /// `output[..., 0]` contains hue, `output[..., 1]` contains saturation, and
- /// `output[..., 2]` contains value. All HSV values are in `[0,1]`. A hue of 0
- /// corresponds to pure red, hue 1/3 is pure green, and 2/3 is pure blue.
- ///
- /// Usage Example:
- ///
- /// >>> blue_image = tf.stack([
- /// ...    tf.zeros([5,5]),
- /// ...    tf.zeros([5,5]),
- /// ...    tf.ones([5,5])],
- /// ...    axis=-1)
- /// >>> blue_hsv_image = tf.image.rgb_to_hsv(blue_image)
- /// >>> blue_hsv_image[0,0].numpy()
- /// array([0.6666667, 1. , 1. ], dtype=float32)
- ///
- ///
- /// - Parameter images: 1-D or higher rank. RGB data to convert. Last dimension must be size 3.
- ///
- /// - Output output: `images` converted to HSV.
- @inlinable @inline(__always)
- public static func rGBToHSV(
-   images: Tensor
- ) -> Tensor {
-   switch images.handle.backend {
-   case .XLA:
-     let output_device = images.device
-     let images = Tensor(copying: images, to: .defaultTFEager)
-     return Tensor(copying: _RawTFEager.rGBToHSV(images: images), to: output_device)
-   case .TF_EAGER:
-     return _RawTFEager.rGBToHSV(images: images)
-   }
-
- }
-
- /// Gather ragged slices from `params` axis `0` according to `indices`.
- ///
- /// Outputs a `RaggedTensor` output composed from `output_dense_values` and
- /// `output_nested_splits`, such that:
- ///
- /// ```python
- /// output.shape = indices.shape + params.shape[1:]
- /// output.ragged_rank = indices.shape.ndims + params.ragged_rank
- /// output[i...j, d0...dn] = params[indices[i...j], d0...dn]
- /// ```
- ///
- /// where
- ///
- /// * `params =
- ///    ragged.from_nested_row_splits(params_dense_values, params_nested_splits)`
- ///    provides the values that should be gathered.
- /// * `indices` is a dense tensor with dtype `int32` or `int64`, indicating which
- ///    values should be gathered.
- /// * `output =
- ///    ragged.from_nested_row_splits(output_dense_values, output_nested_splits)`
- ///    is the output tensor.
- ///
- /// (Note: This c++ op is used to implement the higher-level python
- /// `tf.ragged.gather` op, which also supports ragged indices.)
- ///
- ///
- /// - Parameters:
- ///   - params_nested_splits: The `nested_row_splits` tensors that define the row-partitioning for the
- ///     `params` RaggedTensor input.
- ///   - params_dense_values: The `flat_values` for the `params` RaggedTensor. There was a terminology change
- ///     at the python level from dense_values to flat_values, so dense_values is the
- ///     deprecated name.
- /// - indices: Indices in the outermost dimension of `params` of the values that should be - /// gathered. - /// - /// - Attrs: - /// - PARAMS_RAGGED_RANK: The ragged rank of the `params` RaggedTensor. `params_nested_splits` should - /// contain this number of `row_splits` tensors. This value should equal - /// `params.ragged_rank`. - /// - OUTPUT_RAGGED_RANK: The ragged rank of the output RaggedTensor. `output_nested_splits` will contain - /// this number of `row_splits` tensors. This value should equal - /// `indices.shape.ndims + params.ragged_rank - 1`. - /// - /// - Outputs: - /// - output_nested_splits: The `nested_row_splits` tensors that define the row-partitioning for the - /// returned RaggedTensor. - /// - output_dense_values: The `flat_values` for the returned RaggedTensor. - @inlinable @inline(__always) - public static func raggedGather< - Tvalues: TensorFlowScalar, - Tindices: TensorFlowIndex, - Tsplits: TensorFlowIndex - >( - paramsNestedSplits: [Tensor], - paramsDenseValues: Tensor, - indices: Tensor, - oUTPUTRAGGEDRANK: Int64 - ) -> (outputNestedSplits: [Tensor], outputDenseValues: Tensor) { - _RawTFEager.raggedGather( - paramsNestedSplits: paramsNestedSplits, paramsDenseValues: paramsDenseValues, - indices: indices, oUTPUTRAGGEDRANK: oUTPUTRAGGEDRANK) - } - - /// Returns a `RaggedTensor` containing the specified sequences of numbers. - /// - /// - /// Returns a `RaggedTensor` `result` composed from `rt_dense_values` and - /// `rt_nested_splits`, such that - /// `result[i] = range(starts[i], limits[i], deltas[i])`. - /// - /// ```python - /// (rt_nested_splits, rt_dense_values) = ragged_range( - /// starts=[2, 5, 8], limits=[3, 5, 12], deltas=1) - /// result = tf.ragged.from_row_splits(rt_dense_values, rt_nested_splits) - /// print(result) - /// - /// ``` - /// - /// The input tensors `starts`, `limits`, and `deltas` may be scalars or vectors. - /// The vector inputs must all have the same size. Scalar inputs are broadcast - /// to match the size of the vector inputs. - /// - /// - Parameters: - /// - starts: The starts of each range. - /// - limits: The limits of each range. - /// - deltas: The deltas of each range. - /// - /// - Outputs: - /// - rt_nested_splits: The `row_splits` for the returned `RaggedTensor`. - /// - rt_dense_values: The `flat_values` for the returned `RaggedTensor`. - @inlinable @inline(__always) - public static func raggedRange< - T: TensorFlowNumeric, - Tsplits: TensorFlowIndex - >( - starts: Tensor, - limits: Tensor, - deltas: Tensor - ) -> (rtNestedSplits: Tensor, rtDenseValues: Tensor) { - _RawTFEager.raggedRange(starts: starts, limits: limits, deltas: deltas) - } - - /// Decodes a `variant` Tensor into a `RaggedTensor`. - /// - /// Decodes the given `variant` Tensor and returns a `RaggedTensor`. The input - /// could be a scalar, meaning it encodes a single `RaggedTensor` with ragged_rank - /// `output_ragged_rank`. It could also have an arbitrary rank, in which case each - /// element is decoded into a `RaggedTensor` with ragged_rank `input_ragged_rank` - /// and these are then stacked according to the input shape to output a single - /// `RaggedTensor` with ragged_rank `output_ragged_rank`. Each `variant` element in - /// the input Tensor is decoded by retrieving from the element a 1-D `variant` - /// Tensor with `input_ragged_rank + 1` Tensors, corresponding to the splits and - /// values of the decoded `RaggedTensor`. If `input_ragged_rank` is -1, then it is - /// inferred as `output_ragged_rank` - `rank(encoded_ragged)`. 
See
- /// `RaggedTensorToVariant` for the corresponding encoding logic.
- ///
- ///
- /// - Parameter encoded_ragged: A `variant` Tensor containing encoded `RaggedTensor`s.
- ///
- /// - Attrs:
- ///   - input_ragged_rank: The ragged rank of each encoded `RaggedTensor` component in the input. If set to
- ///     -1, this is inferred as `output_ragged_rank` - `rank(encoded_ragged)`
- ///   - output_ragged_rank: The expected ragged rank of the output `RaggedTensor`. The following must hold:
- ///     `output_ragged_rank = rank(encoded_ragged) + input_ragged_rank`.
- ///
- /// - Outputs:
- ///   - output_nested_splits: A list of one or more Tensors representing the splits of the output
- ///     `RaggedTensor`.
- ///   - output_dense_values: A Tensor representing the values of the output `RaggedTensor`.
- @inlinable @inline(__always)
- public static func raggedTensorFromVariant<
-   Tvalues: TensorFlowScalar,
-   Tsplits: TensorFlowIndex
- >(
-   encodedRagged: VariantHandle,
-   inputRaggedRank: Int64,
-   outputRaggedRank: Int64
- ) -> (outputNestedSplits: [Tensor], outputDenseValues: Tensor) {
-   _RawTFEager.raggedTensorFromVariant(
-     encodedRagged: encodedRagged, inputRaggedRank: inputRaggedRank,
-     outputRaggedRank: outputRaggedRank)
- }
-
- /// Converts a `RaggedTensor` into a `SparseTensor` with the same values.
- ///
- /// input=ragged.from_nested_row_splits(rt_dense_values, rt_nested_splits)
- /// output=SparseTensor(indices=sparse_indices, values=sparse_values,
- ///                     dense_shape=sparse_dense_shape)
- ///
- /// - Parameters:
- ///   - rt_nested_splits: The `row_splits` for the `RaggedTensor`.
- ///   - rt_dense_values: The `flat_values` for the `RaggedTensor`.
- ///
- /// - Attr RAGGED_RANK: The ragged rank of the input RaggedTensor. `rt_nested_splits` should contain
- ///   this number of ragged-splits tensors. This value should equal
- ///   `input.ragged_rank`.
- ///
- /// - Outputs:
- ///   - sparse_indices: The indices for the `SparseTensor`.
- ///   - sparse_values: The values of the `SparseTensor`.
- ///   - sparse_dense_shape: `sparse_dense_shape` is a tight bounding box of the input `RaggedTensor`.
- @inlinable @inline(__always)
- public static func raggedTensorToSparse<
-   T: TensorFlowScalar,
-   Tsplits: TensorFlowIndex
- >(
-   rtNestedSplits: [Tensor],
-   rtDenseValues: Tensor
- ) -> (sparseIndices: Tensor, sparseValues: Tensor, sparseDenseShape: Tensor) {
-   _RawTFEager.raggedTensorToSparse(rtNestedSplits: rtNestedSplits, rtDenseValues: rtDenseValues)
- }
-
- /// Create a dense tensor from a ragged tensor, possibly altering its shape.
- ///
- /// The `ragged_to_dense` op creates a dense tensor from a list of row partition
- /// tensors, a value vector, and default values. If the shape is unspecified, the
- /// minimal shape required to contain all the elements in the ragged tensor (the
- /// natural shape) will be used. If some dimensions are left unspecified, then the
- /// size of the natural shape is used in that dimension.
- ///
- /// The default_value will be broadcast to the output shape. After that, the values
- /// from the ragged tensor overwrite the default values. Note that the default_value
- /// must have fewer dimensions than the value.
- ///
- /// The row partition tensors are in the order of the dimensions.
- /// At present, the types can be:
- /// * "ROW_SPLITS": the row_splits tensor from the ragged tensor.
- /// * "VALUE_ROWIDS": the value_rowids tensor from the ragged tensor.
- /// * "FIRST_DIM_SIZE": if value_rowids is used for the first dimension, then it
- ///   is preceded by "FIRST_DIM_SIZE".
- ///
- /// - Parameters:
- ///   - shape: The desired shape of the output tensor. If left unspecified (empty),
- ///     the minimal shape required to contain all the elements in the ragged tensor
- ///     (the natural shape) will be used. If some dimensions are left unspecified, then
- ///     the size of the natural shape is used in that dimension.
- ///
- ///     Note that dense dimensions cannot be modified by the shape argument. Trying to
- ///     change the size of a dense dimension will cause the op to fail.
- ///     Examples:
- ///       natural shape: [4, 5, 6]
- ///       shape: -1
- ///       output shape: [4, 5, 6]
- ///
- ///       natural shape: [4, 5, 6]
- ///       shape: [3, -1, 2]
- ///       output shape: [3, 5, 2]
- ///
- ///       natural shape: [4, 5, 6]
- ///       shape: [3, 7, 2]
- ///       output shape: [3, 7, 2]
- ///
- ///   - values: A 1D tensor representing the values of the ragged tensor.
- ///   - default_value: The default_value when the shape is larger than the ragged tensor. The
- ///     default_value is broadcast until it is the shape of the output tensor, and
- ///     then overwritten by values in the ragged tensor. The default value must be
- ///     compatible with this broadcast operation, and must have fewer dimensions than
- ///     the value tensor.
- ///
- /// - Attr row_partition_types: The types of the row partition tensors. At present, these can be:
- ///   * "ROW_SPLITS": the row_splits tensor from the ragged tensor.
- ///   * "VALUE_ROWIDS": the value_rowids tensor from the ragged tensor.
- ///   * "FIRST_DIM_SIZE": if value_rowids is used for the first dimension, then it
- ///     is preceded by "FIRST_DIM_SIZE".
- ///   The tensors are in the order of the dimensions.
- ///
- /// - Output result: The resulting dense tensor.
- @inlinable @inline(__always)
- public static func raggedTensorToTensor<
-   T: TensorFlowScalar,
-   Tindex: TensorFlowIndex,
-   Tshape: TensorFlowIndex
- >(
-   shape: Tensor,
-   _ values: Tensor,
-   defaultValue: Tensor,
-   rowPartitionTensors: [Tensor],
-   rowPartitionTypes: [String]
- ) -> Tensor {
-   switch commonBackend(
-     commonBackend(
-       commonBackend(shape.handle.backend, values.handle.backend), defaultValue.handle.backend),
-     commonBackend(rowPartitionTensors))
-   {
-   case .XLA:
-     let output_device = defaultValue.device
-     let shape = Tensor(copying: shape, to: .defaultTFEager)
-     let values = Tensor(copying: values, to: .defaultTFEager)
-     let defaultValue = Tensor(copying: defaultValue, to: .defaultTFEager)
-     let rowPartitionTensors = [Tensor](
-       copying: rowPartitionTensors, to: .defaultTFEager)
-     return Tensor(
-       copying: _RawTFEager.raggedTensorToTensor(
-         shape: shape, values, defaultValue: defaultValue,
-         rowPartitionTensors: rowPartitionTensors, rowPartitionTypes: rowPartitionTypes),
-       to: output_device)
-   case .TF_EAGER:
-     return _RawTFEager.raggedTensorToTensor(
-       shape: shape, values, defaultValue: defaultValue,
-       rowPartitionTensors: rowPartitionTensors, rowPartitionTypes: rowPartitionTypes)
-   }
-
- }
-
- /// Encodes a `RaggedTensor` into a `variant` Tensor.
- ///
- ///
- /// Encodes the given `RaggedTensor` and returns a `variant` Tensor. If
- /// `batched_input` is True, then the input `RaggedTensor` is unbatched along the
- /// zero-th dimension, each component `RaggedTensor` is encoded into a scalar
- /// `variant` Tensor, and these are stacked to return a 1-D `variant` Tensor.
- /// If `batched_input` is False, then the input `RaggedTensor` is encoded as is and
- /// a scalar `variant` Tensor is returned.
- /// A `RaggedTensor` is encoded by first
- /// creating a 1-D `variant` Tensor with `ragged_rank + 1` elements, containing the
- /// splits and values Tensors of the `RaggedTensor`. Then the 1-D `variant` Tensor
- /// is wrapped in a scalar `variant` Tensor. See `RaggedTensorFromVariant` for the
- /// corresponding decoding logic.
- ///
- ///
- /// - Parameters:
- ///   - rt_nested_splits: A list of one or more Tensors representing the splits of the input
- ///     `RaggedTensor`.
- ///   - rt_dense_values: A Tensor representing the values of the input `RaggedTensor`.
- ///
- /// - Attr batched_input: A `bool` denoting whether the input is a batched `RaggedTensor`.
- ///
- /// - Output encoded_ragged: A `variant` Tensor containing the encoded `RaggedTensor`.
- @inlinable @inline(__always)
- public static func raggedTensorToVariant<
-   Tvalues: TensorFlowScalar,
-   Tsplits: TensorFlowIndex
- >(
-   rtNestedSplits: [Tensor],
-   rtDenseValues: Tensor,
-   batchedInput: Bool
- ) -> VariantHandle {
-   _RawTFEager.raggedTensorToVariant(
-     rtNestedSplits: rtNestedSplits, rtDenseValues: rtDenseValues, batchedInput: batchedInput)
- }
-
- /// Randomly crop `image`.
- ///
- /// `size` is a 1-D int64 tensor with 2 elements representing the crop height and
- /// width. The values must be non-negative.
- ///
- /// This Op picks a random location in `image` and crops a `height` by `width`
- /// rectangle from that location. The random location is picked so the cropped
- /// area will fit inside the original image.
- ///
- /// - Parameters:
- ///   - image: 3-D of shape `[height, width, channels]`.
- ///   - size: 1-D of length 2 containing: `crop_height`, `crop_width`.
- ///
- /// - Attrs:
- ///   - seed: If either seed or seed2 are set to be non-zero, the random number
- ///     generator is seeded by the given seed. Otherwise, it is seeded by a
- ///     random seed.
- ///   - seed2: A second seed to avoid seed collision.
- ///
- /// - Output output: 3-D of shape `[crop_height, crop_width, channels]`.
- @inlinable @inline(__always)
- public static func randomCrop(
-   image: Tensor,
-   size: Tensor,
-   seed: Int64 = 0,
-   seed2: Int64 = 0
- ) -> Tensor {
-   switch commonBackend(image.handle.backend, size.handle.backend) {
-   case .XLA:
-     let output_device = size.device
-     let image = Tensor(copying: image, to: .defaultTFEager)
-     let size = Tensor(copying: size, to: .defaultTFEager)
-     return Tensor(
-       copying: _RawTFEager.randomCrop(image: image, size: size, seed: seed, seed2: seed2),
-       to: output_device)
-   case .TF_EAGER:
-     return _RawTFEager.randomCrop(image: image, size: size, seed: seed, seed2: seed2)
-   }
-
- }
-
- /// Creates a Dataset that returns pseudorandom numbers.
- ///
- /// Creates a Dataset that returns a stream of uniformly distributed
- /// pseudorandom 64-bit signed integers.
- ///
- /// In the TensorFlow Python API, you can instantiate this dataset via the
- /// class `tf.data.experimental.RandomDataset`.
- ///
- /// Instances of this dataset are also created as a result of the
- /// `hoist_random_uniform` static optimization. Whether this optimization is
- /// performed is determined by the `experimental_optimization.hoist_random_uniform`
- /// option of `tf.data.Options`.
- ///
- /// - Parameters:
- ///   - seed: A scalar seed for the random number generator. If either seed or
- ///     seed2 is set to be non-zero, the random number generator is seeded
- ///     by the given seed. Otherwise, a random seed is used.
- ///   - seed2: A second scalar seed to avoid seed collision.
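Wrappers like `randomCrop` above share a common eager-fallback shape for ops that have no XLA kernel: copy the operands to the TF eager device, run the eager kernel, and copy the result back. A minimal sketch of that pattern; `frob` is an invented op name used purely for illustration:

// Hypothetical sketch of the eager-fallback dispatch used throughout this
// file. `frob` is not a real op; it stands in for any eager-only kernel.
public static func frob<T: TensorFlowScalar>(_ x: Tensor<T>) -> Tensor<T> {
  switch x.handle.backend {
  case .XLA:
    // No XLA lowering: round-trip through the TF eager backend.
    let outputDevice = x.device
    let x = Tensor<T>(copying: x, to: .defaultTFEager)
    return Tensor<T>(copying: _RawTFEager.frob(x), to: outputDevice)
  case .TF_EAGER:
    return _RawTFEager.frob(x)
  }
}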
- @inlinable @inline(__always) - public static func randomDataset( - seed: Tensor, - seed2: Tensor, - outputTypes: [TensorDataType], - outputShapes: [TensorShape?] - ) -> VariantHandle { - _RawTFEager.randomDataset( - seed: seed, seed2: seed2, outputTypes: outputTypes, outputShapes: outputShapes) - } - - /// Outputs random values from the Gamma distribution(s) described by alpha. - /// - /// This op uses the algorithm by Marsaglia et al. to acquire samples via - /// transformation-rejection from pairs of uniform and normal random variables. - /// See http://dl.acm.org/citation.cfm?id=358414 - /// - /// - Parameters: - /// - shape: 1-D integer tensor. Shape of independent samples to draw from each - /// distribution described by the shape parameters given in alpha. - /// - alpha: A tensor in which each scalar is a "shape" parameter describing the - /// associated gamma distribution. - /// - /// - Attrs: - /// - seed: If either `seed` or `seed2` are set to be non-zero, the random number - /// generator is seeded by the given seed. Otherwise, it is seeded by a - /// random seed. - /// - seed2: A second seed to avoid seed collision. - /// - /// - Output output: A tensor with shape `shape + shape(alpha)`. Each slice - /// `[:, ..., :, i0, i1, ...iN]` contains the samples drawn for - /// `alpha[i0, i1, ...iN]`. The dtype of the output matches the dtype of alpha. - @inlinable @inline(__always) - public static func randomGamma< - S: TensorFlowIndex, - T: FloatingPoint & TensorFlowScalar - >( - shape: Tensor, - alpha: Tensor, - seed: Int64 = 0, - seed2: Int64 = 0 - ) -> Tensor { - switch commonBackend(shape.handle.backend, alpha.handle.backend) { - case .XLA: - let output_device = alpha.device - let shape = Tensor(copying: shape, to: .defaultTFEager) - let alpha = Tensor(copying: alpha, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.randomGamma(shape: shape, alpha: alpha, seed: seed, seed2: seed2), - to: output_device) - case .TF_EAGER: - return _RawTFEager.randomGamma(shape: shape, alpha: alpha, seed: seed, seed2: seed2) - } - - } - - /// Computes the derivative of a Gamma random sample w.r.t. `alpha`. - @inlinable @inline(__always) - public static func randomGammaGrad( - alpha: Tensor, - sample: Tensor - ) -> Tensor { - switch commonBackend(alpha.handle.backend, sample.handle.backend) { - case .XLA: - let output_device = sample.device - let alpha = Tensor(copying: alpha, to: .defaultTFEager) - let sample = Tensor(copying: sample, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.randomGammaGrad(alpha: alpha, sample: sample), to: output_device) - case .TF_EAGER: - return _RawTFEager.randomGammaGrad(alpha: alpha, sample: sample) - } - - } - - /// Use RandomPoissonV2 instead. - @inlinable @inline(__always) - public static func randomPoisson< - S: TensorFlowIndex, - Dtype: FloatingPoint & TensorFlowScalar - >( - shape: Tensor, - rate: Tensor, - seed: Int64 = 0, - seed2: Int64 = 0 - ) -> Tensor { - switch commonBackend(shape.handle.backend, rate.handle.backend) { - case .XLA: - let output_device = rate.device - let shape = Tensor(copying: shape, to: .defaultTFEager) - let rate = Tensor(copying: rate, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.randomPoisson(shape: shape, rate: rate, seed: seed, seed2: seed2), - to: output_device) - case .TF_EAGER: - return _RawTFEager.randomPoisson(shape: shape, rate: rate, seed: seed, seed2: seed2) - } - - } - - /// Outputs random values from the Poisson distribution(s) described by rate. 
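For the gamma sampler above, the `shape + shape(alpha)` output rule in practice; a minimal sketch, assuming the eager backend, with illustrative numbers:

// 1000 draws from each of two gamma distributions (shape parameters
// 2.0 and 3.0) yield a [1000, 2] tensor: shape + shape(alpha).
let alpha = Tensor<Float>([2.0, 3.0])
let gammaSamples: Tensor<Float> = _Raw.randomGamma(
  shape: Tensor<Int32>([1000]), alpha: alpha)
// gammaSamples.shape == [1000, 2]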
- /// - /// This op uses two algorithms, depending on rate. If rate >= 10, then - /// the algorithm by Hormann is used to acquire samples via - /// transformation-rejection. - /// See http://www.sciencedirect.com/science/article/pii/0167668793909974. - /// - /// Otherwise, Knuth's algorithm is used to acquire samples via multiplying uniform - /// random variables. - /// See Donald E. Knuth (1969). Seminumerical Algorithms. The Art of Computer - /// Programming, Volume 2. Addison Wesley - /// - /// - Parameters: - /// - shape: 1-D integer tensor. Shape of independent samples to draw from each - /// distribution described by the shape parameters given in rate. - /// - rate: A tensor in which each scalar is a "rate" parameter describing the - /// associated poisson distribution. - /// - /// - Attrs: - /// - seed: If either `seed` or `seed2` are set to be non-zero, the random number - /// generator is seeded by the given seed. Otherwise, it is seeded by a - /// random seed. - /// - seed2: A second seed to avoid seed collision. - /// - /// - Output output: A tensor with shape `shape + shape(rate)`. Each slice - /// `[:, ..., :, i0, i1, ...iN]` contains the samples drawn for - /// `rate[i0, i1, ...iN]`. - @inlinable @inline(__always) - public static func randomPoissonV2< - S: TensorFlowIndex, - R: TensorFlowNumeric, - Dtype: TensorFlowNumeric - >( - shape: Tensor, - rate: Tensor, - seed: Int64 = 0, - seed2: Int64 = 0 - ) -> Tensor { - switch commonBackend(shape.handle.backend, rate.handle.backend) { - case .XLA: - let output_device = rate.device - let shape = Tensor(copying: shape, to: .defaultTFEager) - let rate = Tensor(copying: rate, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.randomPoissonV2(shape: shape, rate: rate, seed: seed, seed2: seed2), - to: output_device) - case .TF_EAGER: - return _RawTFEager.randomPoissonV2(shape: shape, rate: rate, seed: seed, seed2: seed2) - } - - } - - /// Randomly shuffles a tensor along its first dimension. - /// - /// The tensor is shuffled along dimension 0, such that each `value[j]` is mapped - /// to one and only one `output[i]`. For example, a mapping that might occur for a - /// 3x2 tensor is: - /// - /// ``` - /// [[1, 2], [[5, 6], - /// [3, 4], ==> [1, 2], - /// [5, 6]] [3, 4]] - /// ``` - /// - /// - Parameter value: The tensor to be shuffled. - /// - /// - Attrs: - /// - seed: If either `seed` or `seed2` are set to be non-zero, the random number - /// generator is seeded by the given seed. Otherwise, it is seeded by a - /// random seed. - /// - seed2: A second seed to avoid seed collision. - /// - /// - Output output: A tensor of same shape and type as `value`, shuffled along its first - /// dimension. - @inlinable @inline(__always) - public static func randomShuffle( - value: Tensor, - seed: Int64 = 0, - seed2: Int64 = 0 - ) -> Tensor { - switch value.handle.backend { - case .XLA: - let output_device = value.device - let value = Tensor(copying: value, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.randomShuffle(value: value, seed: seed, seed2: seed2), - to: output_device) - case .TF_EAGER: - return _RawTFEager.randomShuffle(value: value, seed: seed, seed2: seed2) - } - - } - - /// A queue that randomizes the order of elements. - /// - /// - Attrs: - /// - component_types: The type of each component in a value. - /// - shapes: The shape of each component in a value. The length of this attr must - /// be either 0 or the same as the length of component_types. 
If the length of - /// this attr is 0, the shapes of queue elements are not constrained, and - /// only one element may be dequeued at a time. - /// - capacity: The upper bound on the number of elements in this queue. - /// Negative numbers mean no limit. - /// - min_after_dequeue: Dequeue will block unless there would be this - /// many elements after the dequeue or the queue is closed. This - /// ensures a minimum level of mixing of elements. - /// - seed: If either seed or seed2 is set to be non-zero, the random number - /// generator is seeded by the given seed. Otherwise, a random seed is used. - /// - seed2: A second seed to avoid seed collision. - /// - container: If non-empty, this queue is placed in the given container. - /// Otherwise, a default container is used. - /// - shared_name: If non-empty, this queue will be shared under the given name - /// across multiple sessions. - /// - /// - Output handle: The handle to the queue. - @inlinable @inline(__always) - public static func randomShuffleQueueV2( - componentTypes: [TensorDataType], - shapes: [TensorShape?], - capacity: Int64 = -1, - minAfterDequeue: Int64 = 0, - seed: Int64 = 0, - seed2: Int64 = 0, - container: String, - sharedName: String - ) -> ResourceHandle { - _RawTFEager.randomShuffleQueueV2( - componentTypes: componentTypes, shapes: shapes, capacity: capacity, - minAfterDequeue: minAfterDequeue, seed: seed, seed2: seed2, container: container, - sharedName: sharedName) - } - - /// Outputs random values from a normal distribution. - /// - /// The generated values will have mean 0 and standard deviation 1. - /// - /// - Parameter shape: The shape of the output tensor. - /// - /// - Attrs: - /// - seed: If either `seed` or `seed2` are set to be non-zero, the random number - /// generator is seeded by the given seed. Otherwise, it is seeded by a - /// random seed. - /// - seed2: A second seed to avoid seed collision. - /// - dtype: The type of the output. - /// - /// - Output output: A tensor of the specified shape filled with random normal values. - @inlinable @inline(__always) - public static func randomStandardNormal< - Dtype: FloatingPoint & TensorFlowScalar, - T: TensorFlowIndex - >( - shape: Tensor, - seed: Int64 = 0, - seed2: Int64 = 0 - ) -> Tensor { - switch shape.handle.backend { - case .XLA: - let output_device = shape.device - let shape = Tensor(copying: shape, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.randomStandardNormal(shape: shape, seed: seed, seed2: seed2), - to: output_device) - case .TF_EAGER: - return _RawTFEager.randomStandardNormal(shape: shape, seed: seed, seed2: seed2) - } - - } - - /// Outputs random values from a uniform distribution. - /// - /// The generated values follow a uniform distribution in the range `[0, 1)`. The - /// lower bound 0 is included in the range, while the upper bound 1 is excluded. - /// - /// - Parameter shape: The shape of the output tensor. - /// - /// - Attrs: - /// - seed: If either `seed` or `seed2` are set to be non-zero, the random number - /// generator is seeded by the given seed. Otherwise, it is seeded by a - /// random seed. - /// - seed2: A second seed to avoid seed collision. - /// - dtype: The type of the output. - /// - /// - Output output: A tensor of the specified shape filled with uniform random values. 
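A quick usage sketch for the normal and uniform samplers documented here (minimal, assuming the eager backend; the output dtype comes from the annotated result type):

// A [2, 3] standard-normal draw and a [2, 3] uniform draw in [0, 1).
// Leaving seed == seed2 == 0 lets the op pick a random seed, per the
// attribute docs above.
let sampleShape = Tensor<Int32>([2, 3])
let normalDraw: Tensor<Float> = _Raw.randomStandardNormal(shape: sampleShape)
let uniformDraw: Tensor<Float> = _Raw.randomUniform(shape: sampleShape)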
- @inlinable @inline(__always) - public static func randomUniform< - Dtype: FloatingPoint & TensorFlowScalar, - T: TensorFlowIndex - >( - shape: Tensor, - seed: Int64 = 0, - seed2: Int64 = 0 - ) -> Tensor { - switch shape.handle.backend { - case .XLA: - let output_device = shape.device - let shape = Tensor(copying: shape, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.randomUniform(shape: shape, seed: seed, seed2: seed2), - to: output_device) - case .TF_EAGER: - return _RawTFEager.randomUniform(shape: shape, seed: seed, seed2: seed2) - } - - } - - /// Outputs random integers from a uniform distribution. - /// - /// The generated values are uniform integers in the range `[minval, maxval)`. - /// The lower bound `minval` is included in the range, while the upper bound - /// `maxval` is excluded. - /// - /// The random integers are slightly biased unless `maxval - minval` is an exact - /// power of two. The bias is small for values of `maxval - minval` significantly - /// smaller than the range of the output (either `2^32` or `2^64`). - /// - /// - Parameters: - /// - shape: The shape of the output tensor. - /// - minval: 0-D. Inclusive lower bound on the generated integers. - /// - maxval: 0-D. Exclusive upper bound on the generated integers. - /// - /// - Attrs: - /// - seed: If either `seed` or `seed2` are set to be non-zero, the random number - /// generator is seeded by the given seed. Otherwise, it is seeded by a - /// random seed. - /// - seed2: A second seed to avoid seed collision. - /// - /// - Output output: A tensor of the specified shape filled with uniform random integers. - @inlinable @inline(__always) - public static func randomUniformInt< - Tout: TensorFlowIndex, - T: TensorFlowIndex - >( - shape: Tensor, - minval: Tensor, - maxval: Tensor, - seed: Int64 = 0, - seed2: Int64 = 0 - ) -> Tensor { - switch commonBackend( - commonBackend(shape.handle.backend, minval.handle.backend), maxval.handle.backend) - { - case .XLA: - let output_device = maxval.device - let shape = Tensor(copying: shape, to: .defaultTFEager) - let minval = Tensor(copying: minval, to: .defaultTFEager) - let maxval = Tensor(copying: maxval, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.randomUniformInt( - shape: shape, minval: minval, maxval: maxval, seed: seed, seed2: seed2), - to: output_device) - case .TF_EAGER: - return _RawTFEager.randomUniformInt( - shape: shape, minval: minval, maxval: maxval, seed: seed, seed2: seed2) - } - - } - - /// Creates a sequence of numbers. - /// - /// This operation creates a sequence of numbers that begins at `start` and - /// extends by increments of `delta` up to but not including `limit`. - /// - /// For example: - /// - /// ``` - /// # 'start' is 3 - /// # 'limit' is 18 - /// # 'delta' is 3 - /// tf.range(start, limit, delta) ==> [3, 6, 9, 12, 15] - /// ``` - /// - /// - Parameters: - /// - start: 0-D (scalar). First entry in the sequence. - /// - limit: 0-D (scalar). Upper limit of sequence, exclusive. - /// - delta: 0-D (scalar). Optional. Default is 1. Number that increments `start`. - /// - /// - Output output: 1-D. 
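The Python example in the `range` doc above translates directly to the wrapper declared just below; a minimal sketch:

// start 3, limit 18, delta 3, as in the example above.
let sequence: Tensor<Int32> = _Raw.range(
  start: Tensor<Int32>(3), limit: Tensor<Int32>(18), delta: Tensor<Int32>(3))
// sequence == [3, 6, 9, 12, 15]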
- @inlinable @inline(__always) - public static func range( - start: Tensor, - limit: Tensor, - delta: Tensor - ) -> Tensor { - switch commonBackend( - commonBackend(start.handle.backend, limit.handle.backend), delta.handle.backend) - { - case .XLA: - return _RawXLA.range(start: start, limit: limit, delta: delta) - case .TF_EAGER: - return _RawTFEager.range(start: start, limit: limit, delta: delta) - } - - } - - /// Creates a dataset with a range of values. Corresponds to python's xrange. - /// - /// - Parameters: - /// - start: corresponds to start in python's xrange(). - /// - stop: corresponds to stop in python's xrange(). - /// - step: corresponds to step in python's xrange(). - @inlinable @inline(__always) - public static func rangeDataset( - start: Tensor, - stop: Tensor, - step: Tensor, - outputTypes: [TensorDataType], - outputShapes: [TensorShape?] - ) -> VariantHandle { - _RawTFEager.rangeDataset( - start: start, stop: stop, step: step, outputTypes: outputTypes, outputShapes: outputShapes) - } - - /// Returns the rank of a tensor. - /// - /// This operation returns an integer representing the rank of `input`. - /// - /// For example: - /// - /// ``` - /// # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]] - /// # shape of tensor 't' is [2, 2, 3] - /// rank(t) ==> 3 - /// ``` - /// - /// **Note**: The rank of a tensor is not the same as the rank of a matrix. The rank - /// of a tensor is the number of indices required to uniquely select each element - /// of the tensor. Rank is also known as "order", "degree", or "ndims." - @inlinable @inline(__always) - public static func rank( - _ input: Tensor - ) -> Tensor { - switch input.handle.backend { - case .XLA: - return _RawXLA.rank(input) - case .TF_EAGER: - return _RawTFEager.rank(input) - } - - } - - /// Reads and outputs the entire contents of the input filename. - @inlinable @inline(__always) - public static func readFile( - filename: StringTensor - ) -> StringTensor { - _RawTFEager.readFile(filename: filename) - } - - /// Reads the value of a variable. - /// - /// The tensor returned by this operation is immutable. - /// - /// The value returned by this operation is guaranteed to be influenced by all the - /// writes on which this operation depends directly or indirectly, and to not be - /// influenced by any of the writes which depend directly or indirectly on this - /// operation. - /// - /// - Parameter resource: handle to the resource in which to store the variable. - /// - /// - Attr dtype: the dtype of the value. - @inlinable @inline(__always) - public static func readVariableOp( - resource: ResourceHandle - ) -> Tensor { - _RawTFEager.readVariableOp(resource: resource) - } - - /// Returns the number of records this Reader has produced. - /// - /// This is the same as the number of ReaderRead executions that have - /// succeeded. - /// - /// - Parameter reader_handle: Handle to a Reader. - @inlinable @inline(__always) - public static func readerNumRecordsProducedV2( - readerHandle: ResourceHandle - ) -> Tensor { - _RawTFEager.readerNumRecordsProducedV2(readerHandle: readerHandle) - } - - /// Returns the number of work units this Reader has finished processing. - /// - /// - Parameter reader_handle: Handle to a Reader. - @inlinable @inline(__always) - public static func readerNumWorkUnitsCompletedV2( - readerHandle: ResourceHandle - ) -> Tensor { - _RawTFEager.readerNumWorkUnitsCompletedV2(readerHandle: readerHandle) - } - - /// Returns up to `num_records` (key, value) pairs produced by a Reader. 
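For the `rank` wrapper above, note that the result is a scalar `Tensor<Int32>`, not a Swift `Int`; a minimal sketch:

// rank counts indices, so a [2, 2, 3] tensor has rank 3.
let t = Tensor<Float>(zeros: [2, 2, 3])
let r: Tensor<Int32> = _Raw.rank(t)
// r == Tensor(3)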
- /// - /// Will dequeue from the input queue if necessary (e.g. when the - /// Reader needs to start reading from a new file since it has finished - /// with the previous file). - /// It may return less than `num_records` even before the last batch. - /// - /// - Parameters: - /// - reader_handle: Handle to a `Reader`. - /// - queue_handle: Handle to a `Queue`, with string work items. - /// - num_records: number of records to read from `Reader`. - /// - /// - Outputs: - /// - keys: A 1-D tensor. - /// - values: A 1-D tensor. - @inlinable @inline(__always) - public static func readerReadUpToV2( - readerHandle: ResourceHandle, - queueHandle: ResourceHandle, - numRecords: Tensor - ) -> (keys: StringTensor, values: StringTensor) { - _RawTFEager.readerReadUpToV2( - readerHandle: readerHandle, queueHandle: queueHandle, numRecords: numRecords) - } - - /// Returns the next record (key, value pair) produced by a Reader. - /// - /// Will dequeue from the input queue if necessary (e.g. when the - /// Reader needs to start reading from a new file since it has finished - /// with the previous file). - /// - /// - Parameters: - /// - reader_handle: Handle to a Reader. - /// - queue_handle: Handle to a Queue, with string work items. - /// - /// - Outputs: - /// - key: A scalar. - /// - value: A scalar. - @inlinable @inline(__always) - public static func readerReadV2( - readerHandle: ResourceHandle, - queueHandle: ResourceHandle - ) -> (key: StringTensor, value: StringTensor) { - _RawTFEager.readerReadV2(readerHandle: readerHandle, queueHandle: queueHandle) - } - - /// Restore a Reader to its initial clean state. - /// - /// - Parameter reader_handle: Handle to a Reader. - @inlinable @inline(__always) - public static func readerResetV2( - readerHandle: ResourceHandle - ) { - _RawTFEager.readerResetV2(readerHandle: readerHandle) - } - - /// Restore a reader to a previously saved state. - /// - /// Not all Readers support being restored, so this can produce an - /// Unimplemented error. - /// - /// - Parameters: - /// - reader_handle: Handle to a Reader. - /// - state: Result of a ReaderSerializeState of a Reader with type - /// matching reader_handle. - @inlinable @inline(__always) - public static func readerRestoreStateV2( - readerHandle: ResourceHandle, - state: StringTensor - ) { - _RawTFEager.readerRestoreStateV2(readerHandle: readerHandle, state: state) - } - - /// Produce a string tensor that encodes the state of a Reader. - /// - /// Not all Readers support being serialized, so this can produce an - /// Unimplemented error. - /// - /// - Parameter reader_handle: Handle to a Reader. - @inlinable @inline(__always) - public static func readerSerializeStateV2( - readerHandle: ResourceHandle - ) -> StringTensor { - _RawTFEager.readerSerializeStateV2(readerHandle: readerHandle) - } - - /// Returns the real part of a complex number. - /// - /// Given a tensor `input` of complex numbers, this operation returns a tensor of - /// type `float` that is the real part of each element in `input`. All elements in - /// `input` must be complex numbers of the form \\(a + bj\\), where *a* is the real - /// part returned by this operation and *b* is the imaginary part. 
- /// - /// For example: - /// - /// ``` - /// # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] - /// tf.real(input) ==> [-2.25, 3.25] - /// ``` - @inlinable @inline(__always) - public static func real< - T: TensorFlowScalar, - Tout: FloatingPoint & TensorFlowScalar - >( - _ input: Tensor - ) -> Tensor { - switch input.handle.backend { - case .XLA: - let output_device = input.device - let input = Tensor(copying: input, to: .defaultTFEager) - return Tensor(copying: _RawTFEager.real(input), to: output_device) - case .TF_EAGER: - return _RawTFEager.real(input) - } - - } - - /// Returns x / y element-wise for real types. - /// - /// If `x` and `y` are reals, this will return the floating-point division. - /// - /// *NOTE*: `Div` supports broadcasting. More about broadcasting - /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) - @inlinable @inline(__always) - public static func realDiv( - _ x: Tensor, - _ y: Tensor - ) -> Tensor { - switch commonBackend(x.handle.backend, y.handle.backend) { - case .XLA: - let output_device = y.device - let x = Tensor(copying: x, to: .defaultTFEager) - let y = Tensor(copying: y, to: .defaultTFEager) - return Tensor(copying: _RawTFEager.realDiv(x, y), to: output_device) - case .TF_EAGER: - return _RawTFEager.realDiv(x, y) - } - - } - - /// Creates a dataset that changes the batch size. - /// - /// Creates a dataset that changes the batch size of the dataset to current batch - /// size // num_workers. - /// - /// - Parameters: - /// - input_dataset: A variant tensor representing the input dataset. - /// - num_replicas: A scalar representing the number of replicas to distribute this batch across. As - /// a result of this transformation the current batch size would end up being - /// divided by this parameter. - @inlinable @inline(__always) - public static func rebatchDataset( - inputDataset: VariantHandle, - numReplicas: Tensor, - outputTypes: [TensorDataType], - outputShapes: [TensorShape?], - useFallback: Bool = true - ) -> VariantHandle { - _RawTFEager.rebatchDataset( - inputDataset: inputDataset, numReplicas: numReplicas, outputTypes: outputTypes, - outputShapes: outputShapes, useFallback: useFallback) - } - - /// Computes the reciprocal of x element-wise. - /// - /// I.e., \\(y = 1 / x\\). - @inlinable @inline(__always) - public static func reciprocal( - _ x: Tensor - ) -> Tensor { - switch x.handle.backend { - case .XLA: - let output_device = x.device - let x = Tensor(copying: x, to: .defaultTFEager) - return Tensor(copying: _RawTFEager.reciprocal(x), to: output_device) - case .TF_EAGER: - return _RawTFEager.reciprocal(x) - } - - } - - /// Computes the gradient for the inverse of `x` wrt its input. - /// - /// Specifically, `grad = -dy * y*y`, where `y = 1/x`, and `dy` - /// is the corresponding input gradient. - @inlinable @inline(__always) - public static func reciprocalGrad( - _ y: Tensor, - dy: Tensor - ) -> Tensor { - switch commonBackend(y.handle.backend, dy.handle.backend) { - case .XLA: - let output_device = dy.device - let y = Tensor(copying: y, to: .defaultTFEager) - let dy = Tensor(copying: dy, to: .defaultTFEager) - return Tensor(copying: _RawTFEager.reciprocalGrad(y, dy: dy), to: output_device) - case .TF_EAGER: - return _RawTFEager.reciprocalGrad(y, dy: dy) - } - - } - - /// Emits randomized records. - /// - /// - Attrs: - /// - file_pattern: Glob pattern for the data files. - /// - file_random_seed: Random seeds used to produce randomized records. 
- /// - file_shuffle_shift_ratio: Shifts the list of files after the list is randomly - /// shuffled. - /// - file_buffer_size: The randomization shuffling buffer. - /// - file_parallelism: How many sstables are opened and concurrently iterated over. - /// - batch_size: The batch size. - /// - compression_type: The type of compression for the file. Currently ZLIB and - /// GZIP are supported. Defaults to none. - /// - /// - Output records: A tensor of shape [batch_size]. - @inlinable @inline(__always) - public static func recordInput( - filePattern: String, - fileRandomSeed: Int64 = 301, - fileShuffleShiftRatio: Double = 0, - fileBufferSize: Int64 = 10000, - fileParallelism: Int64 = 16, - batchSize: Int64 = 32, - compressionType: String - ) -> StringTensor { - _RawTFEager.recordInput( - filePattern: filePattern, fileRandomSeed: fileRandomSeed, - fileShuffleShiftRatio: fileShuffleShiftRatio, fileBufferSize: fileBufferSize, - fileParallelism: fileParallelism, batchSize: batchSize, compressionType: compressionType) - } - - /// Receives the named tensor from send_device on recv_device. - /// - /// - Attrs: - /// - tensor_name: The name of the tensor to receive. - /// - send_device: The name of the device sending the tensor. - /// - send_device_incarnation: The current incarnation of send_device. - /// - recv_device: The name of the device receiving the tensor. - /// - client_terminated: If set to true, this indicates that the node was added - /// to the graph as a result of a client-side feed or fetch of Tensor data, - /// in which case the corresponding send or recv is expected to be managed - /// locally by the caller. - /// - /// - Output tensor: The tensor to receive. - @inlinable @inline(__always) - public static func recv( - tensorName: String, - sendDevice: String, - sendDeviceIncarnation: Int64, - recvDevice: String, - clientTerminated: Bool = false - ) -> Tensor { - _RawTFEager.recv( - tensorName: tensorName, sendDevice: sendDevice, - sendDeviceIncarnation: sendDeviceIncarnation, recvDevice: recvDevice, - clientTerminated: clientTerminated) - } - - /// An op that receives embedding activations on the TPU. - /// - /// The TPU system performs the embedding lookups and aggregations specified by - /// the arguments to TPUEmbeddingEnqueue(Integer/Sparse/SparseTensor)Batch. The - /// results of these aggregations are visible to the Tensorflow Graph as the - /// outputs of a RecvTPUEmbeddingActivations op. This op returns a list containing - /// one Tensor of activations per table specified in the model. There can be at - /// most one RecvTPUEmbeddingActivations op in the TPU graph. - /// - /// - Attrs: - /// - num_outputs: The number of output activation tensors, equal to the number of - /// embedding tables in the model. - /// - config: Serialized TPUEmbeddingConfiguration proto. - /// - /// - Output outputs: A TensorList of embedding activations containing one Tensor per - /// embedding table in the model. - @inlinable @inline(__always) - public static func recvTPUEmbeddingActivations( - numOutputs: Int64, - config: String - ) -> [Tensor] { - _RawTFEager.recvTPUEmbeddingActivations(numOutputs: numOutputs, config: config) - } - - /// Reduces the input dataset to a singleton using a reduce function. - /// - /// - Parameters: - /// - input_dataset: A variant tensor representing the input dataset. - /// - initial_state: A nested structure of tensors, representing the initial state of the - /// transformation. 
- ///
- /// - Attr f: A function that maps `(old_state, input_element)` to `new_state`. It must take
- ///   two arguments and return a nested structure of tensors. The structure of
- ///   `new_state` must match the structure of `initial_state`.
- @inlinable @inline(__always)
- public static func reduceDataset<
-   FIn: TensorGroup,
-   FOut: TensorGroup,
-   Tstate: TensorArrayProtocol,
-   Targuments: TensorArrayProtocol,
-   OutputTypes: TensorGroup
- >(
-   inputDataset: VariantHandle,
-   initialState: Tstate,
-   otherArguments: Targuments,
-   f: (FIn) -> FOut,
-   outputShapes: [TensorShape?],
-   useInterOpParallelism: Bool = true
- ) -> OutputTypes {
-   _RawTFEager.reduceDataset(
-     inputDataset: inputDataset, initialState: initialState, otherArguments: otherArguments,
-     f: f, outputShapes: outputShapes, useInterOpParallelism: useInterOpParallelism)
- }
-
- /// Joins a string Tensor across the given dimensions.
- ///
- /// Computes the string join across dimensions in the given string Tensor of shape
- /// `[\\(d_0, d_1, ..., d_{n-1}\\)]`. Returns a new Tensor created by joining the input
- /// strings with the given separator (default: empty string). Negative indices are
- /// counted backwards from the end, with `-1` being equivalent to `n - 1`. If
- /// indices are not specified, joins across all dimensions beginning from `n - 1`
- /// through `0`.
- ///
- /// For example:
- ///
- /// ```python
- /// # tensor `a` is [["a", "b"], ["c", "d"]]
- /// tf.reduce_join(a, 0) ==> ["ac", "bd"]
- /// tf.reduce_join(a, 1) ==> ["ab", "cd"]
- /// tf.reduce_join(a, -2) = tf.reduce_join(a, 0) ==> ["ac", "bd"]
- /// tf.reduce_join(a, -1) = tf.reduce_join(a, 1) ==> ["ab", "cd"]
- /// tf.reduce_join(a, 0, keep_dims=True) ==> [["ac", "bd"]]
- /// tf.reduce_join(a, 1, keep_dims=True) ==> [["ab"], ["cd"]]
- /// tf.reduce_join(a, 0, separator=".") ==> ["a.c", "b.d"]
- /// tf.reduce_join(a, [0, 1]) ==> "acbd"
- /// tf.reduce_join(a, [1, 0]) ==> "abcd"
- /// tf.reduce_join(a, []) ==> [["a", "b"], ["c", "d"]]
- /// tf.reduce_join(a) = tf.reduce_join(a, [1, 0]) ==> "abcd"
- /// ```
- ///
- /// - Parameters:
- ///   - inputs: The input to be joined. All reduced indices must have non-zero size.
- ///   - reduction_indices: The dimensions to reduce over. Dimensions are reduced in the
- ///     order specified. Omitting `reduction_indices` is equivalent to passing
- ///     `[n-1, n-2, ..., 0]`. Negative indices from `-n` to `-1` are supported.
- ///
- /// - Attrs:
- ///   - keep_dims: If `True`, retain reduced dimensions with length `1`.
- ///   - separator: The separator to use when joining.
- ///
- /// - Output output: Has shape equal to that of the input with reduced dimensions removed or
- ///   set to `1` depending on `keep_dims`.
- @inlinable @inline(__always)
- public static func reduceJoin(
-   inputs: StringTensor,
-   reductionIndices: Tensor,
-   keepDims: Bool = false,
-   separator: String
- ) -> StringTensor {
-   _RawTFEager.reduceJoin(
-     inputs: inputs, reductionIndices: reductionIndices, keepDims: keepDims, separator: separator
-   )
- }
+ }

- /// Check if the input matches the regex pattern.
- ///
- /// The input is a string tensor of any shape. The pattern is a scalar
- /// string tensor which is applied to every element of the input tensor.
- /// The boolean values (True or False) of the output tensor indicate
- /// if the input matches the regex pattern provided.
- /// - /// The pattern follows the re2 syntax (https://github.com/google/re2/wiki/Syntax) - /// - /// Examples: - /// - /// >>> tf.strings.regex_full_match(["TF lib", "lib TF"], ".*lib$") - /// - /// >>> tf.strings.regex_full_match(["TF lib", "lib TF"], ".*TF$") - /// - /// - /// - Parameters: - /// - input: A string tensor of the text to be processed. - /// - pattern: A scalar string tensor containing the regular expression to match the input. - /// - /// - Output output: A bool tensor with the same shape as `input`. - @inlinable @inline(__always) - public static func regexFullMatch( - _ input: StringTensor, - pattern: StringTensor - ) -> Tensor { - _RawTFEager.regexFullMatch(input, pattern: pattern) - } - - /// Replaces matches of the `pattern` regular expression in `input` with the - /// replacement string provided in `rewrite`. - /// - /// It follows the re2 syntax (https://github.com/google/re2/wiki/Syntax) - /// - /// - Parameters: - /// - input: The text to be processed. - /// - pattern: The regular expression to be matched in the `input` strings. - /// - rewrite: The rewrite string to be substituted for the `pattern` expression where it is - /// matched in the `input` strings. - /// - /// - Attr replace_global: If True, the replacement is global (that is, all matches of the `pattern` regular - /// expression in each input string are rewritten), otherwise the `rewrite` - /// substitution is only made for the first `pattern` match. - /// - /// - Output output: The text after applying pattern match and rewrite substitution. - @inlinable @inline(__always) - public static func regexReplace( - _ input: StringTensor, - pattern: StringTensor, - rewrite: StringTensor, - replaceGlobal: Bool = true - ) -> StringTensor { - _RawTFEager.regexReplace( - input, pattern: pattern, rewrite: rewrite, replaceGlobal: replaceGlobal) - } - - /// Computes rectified linear: `max(features, 0)`. - /// - /// See: https://en.wikipedia.org/wiki/Rectifier_(neural_networks) - /// Example usage: - /// >>> tf.nn.relu([-2., 0., -0., 3.]).numpy() - /// array([ 0., 0., -0., 3.], dtype=float32) - @inlinable @inline(__always) - public static func relu( - features: Tensor - ) -> Tensor { - switch features.handle.backend { - case .XLA: - return _RawXLA.relu(features: features) - case .TF_EAGER: - return _RawTFEager.relu(features: features) - } - - } - - /// Computes rectified linear 6: `min(max(features, 0), 6)`. - @inlinable @inline(__always) - public static func relu6( - features: Tensor - ) -> Tensor { - switch features.handle.backend { - case .XLA: - return _RawXLA.relu6(features: features) - case .TF_EAGER: - return _RawTFEager.relu6(features: features) - } - - } - - /// Computes rectified linear 6 gradients for a Relu6 operation. - /// - /// - Parameters: - /// - gradients: The backpropagated gradients to the corresponding Relu6 operation. - /// - features: The features passed as input to the corresponding Relu6 operation, or - /// its output; using either one produces the same result. - /// - /// - Output backprops: The gradients: - /// `gradients * (features > 0) * (features < 6)`. 
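The Relu6 backprop rule quoted above is a pair of masks; a scalar sketch for intuition (standalone Swift, not part of the bindings):

// The incoming gradient passes through only where 0 < feature < 6.
func relu6GradScalar(gradient: Float, feature: Float) -> Float {
  (feature > 0 && feature < 6) ? gradient : 0
}
// relu6GradScalar(gradient: 1, feature: 3) == 1
// relu6GradScalar(gradient: 1, feature: 7) == 0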
- @inlinable @inline(__always)
- public static func relu6Grad(
-   gradients: Tensor,
-   features: Tensor
- ) -> Tensor {
-   switch commonBackend(gradients.handle.backend, features.handle.backend) {
-   case .XLA:
-     return _RawXLA.relu6Grad(gradients: gradients, features: features)
-   case .TF_EAGER:
-     return _RawTFEager.relu6Grad(gradients: gradients, features: features)
-   }
-
- }
-
- /// Computes rectified linear gradients for a Relu operation.
- ///
- /// - Parameters:
- ///   - gradients: The backpropagated gradients to the corresponding Relu operation.
- ///   - features: The features passed as input to the corresponding Relu operation, OR
- ///     the outputs of that operation (both work equivalently).
- ///
- /// - Output backprops: `gradients * (features > 0)`.
- @inlinable @inline(__always)
- public static func reluGrad(
-   gradients: Tensor,
-   features: Tensor
- ) -> Tensor {
-   switch commonBackend(gradients.handle.backend, features.handle.backend) {
-   case .XLA:
-     return _RawXLA.reluGrad(gradients: gradients, features: features)
-   case .TF_EAGER:
-     return _RawTFEager.reluGrad(gradients: gradients, features: features)
-   }
-
- }
-
- /// Runs function `f` on a remote device indicated by `target`.
- ///
- /// - Parameters:
- ///   - target: A fully specified device name where we want to run the function.
- ///   - args: A list of arguments for the function.
- ///
- /// - Attrs:
- ///   - Tin: The type list for the arguments.
- ///   - Tout: The type list for the return values.
- ///   - f: The function to run remotely.
- ///
- /// - Output output: A list of return values.
- @inlinable @inline(__always)
- public static func remoteCall<
-   Tin: TensorArrayProtocol,
-   Tout: TensorGroup,
-   FIn: TensorGroup,
-   FOut: TensorGroup
- >(
-   target: StringTensor,
-   args: Tin,
-   f: (FIn) -> FOut
- ) -> Tout {
-   _RawTFEager.remoteCall(target: target, args: args, f: f)
- }
-
- /// Execute a sub graph on a remote processor.
- ///
- /// The graph specifications (such as graph itself, input tensors and output names)
- /// are stored as a serialized protocol buffer of RemoteFusedGraphExecuteInfo
- /// as serialized_remote_fused_graph_execute_info.
- /// The specifications will be passed to a dedicated registered
- /// remote fused graph executor. The executor will send the graph specifications
- /// to a remote processor and execute that graph. The execution results
- /// will be passed to consumer nodes as outputs of this node.
- ///
- /// - Parameter inputs: Arbitrary number of tensors with arbitrary data types
- ///
- /// - Attr serialized_remote_fused_graph_execute_info: Serialized protocol buffer
- ///   of RemoteFusedGraphExecuteInfo which contains graph specifications.
- ///
- /// - Output outputs: Arbitrary number of tensors with arbitrary data types
- @inlinable @inline(__always)
- public static func remoteFusedGraphExecute<
-   Tinputs: TensorArrayProtocol,
-   Toutputs: TensorGroup
- >(
-   inputs: Tinputs,
-   serializedRemoteFusedGraphExecuteInfo: String
- ) -> Toutputs {
-   _RawTFEager.remoteFusedGraphExecute(
-     inputs: inputs, serializedRemoteFusedGraphExecuteInfo: serializedRemoteFusedGraphExecuteInfo
-   )
+ }

+ /// An op which supports a basic einsum with 2 inputs and 1 output.
+ ///
+ /// This op has better TPU performance since it doesn't need the explicit reshape and
+ /// transpose operations that tf.einsum does.
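A usage sketch for the einsum wrapper documented above and declared just below; the equation syntax follows tf.einsum, shown here for a plain matrix multiply:

// "ab,bc->ac" contracts the shared index b: an ordinary matrix multiply
// of a [2, 3] operand with a [3, 4] operand.
let lhs = Tensor<Float>(ones: [2, 3])
let rhs = Tensor<Float>(ones: [3, 4])
let product: Tensor<Float> = _Raw.xlaEinsum(lhs, rhs, equation: "ab,bc->ac")
// product.shape == [2, 4]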
+ @inlinable @inline(__always) + public static func xlaEinsum( + _ a: Tensor, + _ b: Tensor, + equation: String + ) -> Tensor { + switch commonBackend(a.handle.backend, b.handle.backend) { + case .XLA: + let output_device = b.device + let a = Tensor(copying: a, to: .defaultTFEager) + let b = Tensor(copying: b, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.xlaEinsum(a, b, equation: equation), to: output_device) + case .TF_EAGER: + return _RawTFEager.xlaEinsum(a, b, equation: equation) } - /// Creates a dataset that emits the outputs of `input_dataset` `count` times. - /// - /// - Parameter count: A scalar representing the number of times that `input_dataset` should - /// be repeated. A value of `-1` indicates that it should be repeated infinitely. - @inlinable @inline(__always) - public static func repeatDataset( - inputDataset: VariantHandle, - count: Tensor, - outputTypes: [TensorDataType], - outputShapes: [TensorShape?] - ) -> VariantHandle { - _RawTFEager.repeatDataset( - inputDataset: inputDataset, count: count, outputTypes: outputTypes, - outputShapes: outputShapes) - } - - /// Computes a range that covers the actual values present in a quantized tensor. - /// - /// Given a quantized tensor described by `(input, input_min, input_max)`, outputs a - /// range that covers the actual values present in that tensor. This op is typically - /// used to produce the `requested_output_min` and `requested_output_max` for - /// `Requantize`. - /// - /// - Parameters: - /// - input_min: The float value that the minimum quantized input value represents. - /// - input_max: The float value that the maximum quantized input value represents. - /// - /// - Attr Tinput: The type of the input. - /// - /// - Outputs: - /// - output_min: The computed min output. - /// - output_max: the computed max output. - @inlinable @inline(__always) - public static func requantizationRange( - _ input: Tensor, - inputMin: Tensor, - inputMax: Tensor - ) -> (outputMin: Tensor, outputMax: Tensor) { - _RawTFEager.requantizationRange(input, inputMin: inputMin, inputMax: inputMax) - } - - /// Computes requantization range per channel. - /// - /// - Parameters: - /// - input: The original input tensor. - /// - input_min: The minimum value of the input tensor - /// - input_max: The maximum value of the input tensor. - /// - /// - Attrs: - /// - T: The quantized type of input tensor that needs to be converted. - /// - clip_value_max: The maximum value of the output that needs to be clipped. - /// Example: set this to 6 for Relu6. - /// - /// - Outputs: - /// - output_min: The minimum value of the final output tensor - /// - output_max: The maximum value of the final output tensor. - @inlinable @inline(__always) - public static func requantizationRangePerChannel( - _ input: Tensor, - inputMin: Tensor, - inputMax: Tensor, - clipValueMax: Double - ) -> (outputMin: Tensor, outputMax: Tensor) { - _RawTFEager.requantizationRangePerChannel( - input, inputMin: inputMin, inputMax: inputMax, clipValueMax: clipValueMax) - } - - /// Converts the quantized `input` tensor into a lower-precision `output`. - /// - /// Converts the quantized `input` tensor into a lower-precision `output`, using the - /// output range specified with `requested_output_min` and `requested_output_max`. - /// - /// `[input_min, input_max]` are scalar floats that specify the range for the float - /// interpretation of the `input` data. 
For example, if `input_min` is -1.0f and - /// `input_max` is 1.0f, and we are dealing with `quint16` quantized data, then a 0 - /// value in the 16-bit data should be interpreted as -1.0f, and a 65535 means 1.0f. - /// - /// - Parameters: - /// - input_min: The float value that the minimum quantized input value represents. - /// - input_max: The float value that the maximum quantized input value represents. - /// - requested_output_min: The float value that the minimum quantized output value represents. - /// - requested_output_max: The float value that the maximum quantized output value represents. - /// - /// - Attrs: - /// - Tinput: The type of the input. - /// - out_type: The type of the output. Should be a lower bit depth than Tinput. - /// - /// - Outputs: - /// - output_min: The requested_output_min value is copied into this output. - /// - output_max: The requested_output_max value is copied into this output. - @inlinable @inline(__always) - public static func requantize< - Tinput: TensorFlowScalar, - OutType: TensorFlowScalar - >( - _ input: Tensor, - inputMin: Tensor, - inputMax: Tensor, - requestedOutputMin: Tensor, - requestedOutputMax: Tensor - ) -> (output: Tensor, outputMin: Tensor, outputMax: Tensor) { - _RawTFEager.requantize( - input, inputMin: inputMin, inputMax: inputMax, requestedOutputMin: requestedOutputMin, - requestedOutputMax: requestedOutputMax) - } - - /// Requantizes input with min and max values known per channel. - /// - /// - Parameters: - /// - input: The original input tensor. - /// - input_min: The minimum value of the input tensor - /// - input_max: The maximum value of the input tensor. - /// - requested_output_min: The minimum value of the output tensor requested. - /// - requested_output_max: The maximum value of the output tensor requested. - /// - /// - Attrs: - /// - T: The quantized type of input tensor that needs to be converted. - /// - out_type: The quantized type of output tensor that needs to be converted. - /// - /// - Outputs: - /// - output: Output tensor. - /// - output_min: The minimum value of the final output tensor - /// - output_max: The maximum value of the final output tensor. - @inlinable @inline(__always) - public static func requantizePerChannel< - T: TensorFlowScalar, - OutType: TensorFlowScalar - >( - _ input: Tensor, - inputMin: Tensor, - inputMax: Tensor, - requestedOutputMin: Tensor, - requestedOutputMax: Tensor - ) -> (output: Tensor, outputMin: Tensor, outputMax: Tensor) { - _RawTFEager.requantizePerChannel( - input, inputMin: inputMin, inputMax: inputMax, requestedOutputMin: requestedOutputMin, - requestedOutputMax: requestedOutputMax) - } - - @inlinable @inline(__always) - public static func requiresOlderGraphVersion() -> Tensor { - _RawTFEager.requiresOlderGraphVersion() - } - - @inlinable @inline(__always) - public static func reservedAttr( - range: Int64 - ) { - _RawTFEager.reservedAttr(range: range) - } + } - @inlinable @inline(__always) - public static func reservedInput( - _ input: Tensor - ) { - _RawTFEager.reservedInput(input) - } - - /// Reshapes a tensor. - /// - /// Given `tensor`, this operation returns a tensor that has the same values - /// as `tensor` with shape `shape`. - /// - /// If one component of 1-D tensor `shape` is the special value -1, the size of that - /// dimension is computed so that the total size remains constant. In particular, a - /// `shape` of `[-1]` flattens into 1-D. At most one component of `shape` may be - /// unknown. 
- /// - /// The `shape` must be 1-D and the operation returns a tensor with shape - /// `shape` filled with the values of `tensor`. In this case, the number of elements - /// implied by `shape` must be the same as the number of elements in `tensor`. - /// - /// It is an error if `shape` is not 1-D. - /// - /// For example: - /// - /// ``` - /// # tensor 't' is [1, 2, 3, 4, 5, 6, 7, 8, 9] - /// # tensor 't' has shape [9] - /// reshape(t, [3, 3]) ==> [[1, 2, 3], - /// [4, 5, 6], - /// [7, 8, 9]] - /// - /// # tensor 't' is [[[1, 1], [2, 2]], - /// # [[3, 3], [4, 4]]] - /// # tensor 't' has shape [2, 2, 2] - /// reshape(t, [2, 4]) ==> [[1, 1, 2, 2], - /// [3, 3, 4, 4]] - /// - /// # tensor 't' is [[[1, 1, 1], - /// # [2, 2, 2]], - /// # [[3, 3, 3], - /// # [4, 4, 4]], - /// # [[5, 5, 5], - /// # [6, 6, 6]]] - /// # tensor 't' has shape [3, 2, 3] - /// # pass '[-1]' to flatten 't' - /// reshape(t, [-1]) ==> [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6] - /// - /// # -1 can also be used to infer the shape - /// - /// # -1 is inferred to be 9: - /// reshape(t, [2, -1]) ==> [[1, 1, 1, 2, 2, 2, 3, 3, 3], - /// [4, 4, 4, 5, 5, 5, 6, 6, 6]] - /// # -1 is inferred to be 2: - /// reshape(t, [-1, 9]) ==> [[1, 1, 1, 2, 2, 2, 3, 3, 3], - /// [4, 4, 4, 5, 5, 5, 6, 6, 6]] - /// # -1 is inferred to be 3: - /// reshape(t, [ 2, -1, 3]) ==> [[[1, 1, 1], - /// [2, 2, 2], - /// [3, 3, 3]], - /// [[4, 4, 4], - /// [5, 5, 5], - /// [6, 6, 6]]] - /// - /// # tensor 't' is [7] - /// # shape `[]` reshapes to a scalar - /// reshape(t, []) ==> 7 - /// ``` - /// - /// - Parameter shape: Defines the shape of the output tensor. - @inlinable @inline(__always) - public static func reshape< - T: TensorFlowScalar, - Tshape: TensorFlowIndex - >( - _ tensor: Tensor, - shape: Tensor - ) -> Tensor { - switch commonBackend(tensor.handle.backend, shape.handle.backend) { - case .XLA: - return _RawXLA.reshape(tensor, shape: shape) - case .TF_EAGER: - return _RawTFEager.reshape(tensor, shape: shape) - } - - } - - /// Resize `images` to `size` using area interpolation. - /// - /// Input images can be of different types but output images are always float. - /// - /// The range of pixel values for the output image might be slightly different - /// from the range for the input image because of limited numerical precision. - /// To guarantee an output range, for example `[0.0, 1.0]`, apply - /// `tf.clip_by_value` to the output. - /// - /// Each output pixel is computed by first transforming the pixel's footprint into - /// the input tensor and then averaging the pixels that intersect the footprint. An - /// input pixel's contribution to the average is weighted by the fraction of its - /// area that intersects the footprint. This is the same as OpenCV's INTER_AREA. - /// - /// - Parameters: - /// - images: 4-D with shape `[batch, height, width, channels]`. - /// - size: = A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The - /// new size for the images. - /// - /// - Attr align_corners: If true, the centers of the 4 corner pixels of the input and output tensors are - /// aligned, preserving the values at the corner pixels. Defaults to false. - /// - /// - Output resized_images: 4-D with shape - /// `[batch, new_height, new_width, channels]`. 
- @inlinable @inline(__always) - public static func resizeArea( - images: Tensor, - size: Tensor, - alignCorners: Bool = false - ) -> Tensor { - switch commonBackend(images.handle.backend, size.handle.backend) { - case .XLA: - let output_device = size.device - let images = Tensor(copying: images, to: .defaultTFEager) - let size = Tensor(copying: size, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.resizeArea(images: images, size: size, alignCorners: alignCorners), - to: output_device) - case .TF_EAGER: - return _RawTFEager.resizeArea(images: images, size: size, alignCorners: alignCorners) - } - - } - - /// Resize `images` to `size` using bicubic interpolation. - /// - /// Input images can be of different types but output images are always float. - /// - /// - Parameters: - /// - images: 4-D with shape `[batch, height, width, channels]`. - /// - size: = A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The - /// new size for the images. - /// - /// - Attr align_corners: If true, the centers of the 4 corner pixels of the input and output tensors are - /// aligned, preserving the values at the corner pixels. Defaults to false. - /// - /// - Output resized_images: 4-D with shape - /// `[batch, new_height, new_width, channels]`. - @inlinable @inline(__always) - public static func resizeBicubic( - images: Tensor, - size: Tensor, - alignCorners: Bool = false, - halfPixelCenters: Bool = false - ) -> Tensor { - switch commonBackend(images.handle.backend, size.handle.backend) { - case .XLA: - let output_device = size.device - let images = Tensor(copying: images, to: .defaultTFEager) - let size = Tensor(copying: size, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.resizeBicubic( - images: images, size: size, alignCorners: alignCorners, - halfPixelCenters: halfPixelCenters), to: output_device) - case .TF_EAGER: - return _RawTFEager.resizeBicubic( - images: images, size: size, alignCorners: alignCorners, halfPixelCenters: halfPixelCenters - ) - } - - } - - /// Computes the gradient of bicubic interpolation. - /// - /// - Parameters: - /// - grads: 4-D with shape `[batch, height, width, channels]`. - /// - original_image: 4-D with shape `[batch, orig_height, orig_width, channels]`, - /// The image tensor that was resized. - /// - /// - Attr align_corners: If true, the centers of the 4 corner pixels of the input and grad tensors are - /// aligned. Defaults to false. - /// - /// - Output output: 4-D with shape `[batch, orig_height, orig_width, channels]`. - /// Gradients with respect to the input image. Input image must have been - /// float or double. - @inlinable @inline(__always) - public static func resizeBicubicGrad( - grads: Tensor, - originalImage: Tensor, - alignCorners: Bool = false, - halfPixelCenters: Bool = false - ) -> Tensor { - switch commonBackend(grads.handle.backend, originalImage.handle.backend) { - case .XLA: - let output_device = originalImage.device - let grads = Tensor(copying: grads, to: .defaultTFEager) - let originalImage = Tensor(copying: originalImage, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.resizeBicubicGrad( - grads: grads, originalImage: originalImage, alignCorners: alignCorners, - halfPixelCenters: halfPixelCenters), to: output_device) - case .TF_EAGER: - return _RawTFEager.resizeBicubicGrad( - grads: grads, originalImage: originalImage, alignCorners: alignCorners, - halfPixelCenters: halfPixelCenters) - } - - } - - /// Resize `images` to `size` using bilinear interpolation. 
- /// - /// Input images can be of different types but output images are always float. - /// - /// - Parameters: - /// - images: 4-D with shape `[batch, height, width, channels]`. - /// - size: = A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The - /// new size for the images. - /// - /// - Attr align_corners: If true, the centers of the 4 corner pixels of the input and output tensors are - /// aligned, preserving the values at the corner pixels. Defaults to false. - /// - /// - Output resized_images: 4-D with shape - /// `[batch, new_height, new_width, channels]`. - @inlinable @inline(__always) - public static func resizeBilinear( - images: Tensor, - size: Tensor, - alignCorners: Bool = false, - halfPixelCenters: Bool = false - ) -> Tensor { - switch commonBackend(images.handle.backend, size.handle.backend) { - case .XLA: - let output_device = size.device - let images = Tensor(copying: images, to: .defaultTFEager) - let size = Tensor(copying: size, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.resizeBilinear( - images: images, size: size, alignCorners: alignCorners, - halfPixelCenters: halfPixelCenters), to: output_device) - case .TF_EAGER: - return _RawTFEager.resizeBilinear( - images: images, size: size, alignCorners: alignCorners, halfPixelCenters: halfPixelCenters - ) - } - - } - - /// Computes the gradient of bilinear interpolation. - /// - /// - Parameters: - /// - grads: 4-D with shape `[batch, height, width, channels]`. - /// - original_image: 4-D with shape `[batch, orig_height, orig_width, channels]`, - /// The image tensor that was resized. - /// - /// - Attr align_corners: If true, the centers of the 4 corner pixels of the input and grad tensors are - /// aligned. Defaults to false. - /// - /// - Output output: 4-D with shape `[batch, orig_height, orig_width, channels]`. - /// Gradients with respect to the input image. Input image must have been - /// float or double. - @inlinable @inline(__always) - public static func resizeBilinearGrad( - grads: Tensor, - originalImage: Tensor, - alignCorners: Bool = false, - halfPixelCenters: Bool = false - ) -> Tensor { - switch commonBackend(grads.handle.backend, originalImage.handle.backend) { - case .XLA: - let output_device = originalImage.device - let grads = Tensor(copying: grads, to: .defaultTFEager) - let originalImage = Tensor(copying: originalImage, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.resizeBilinearGrad( - grads: grads, originalImage: originalImage, alignCorners: alignCorners, - halfPixelCenters: halfPixelCenters), to: output_device) - case .TF_EAGER: - return _RawTFEager.resizeBilinearGrad( - grads: grads, originalImage: originalImage, alignCorners: alignCorners, - halfPixelCenters: halfPixelCenters) - } - - } - - /// Resize `images` to `size` using nearest neighbor interpolation. - /// - /// - Parameters: - /// - images: 4-D with shape `[batch, height, width, channels]`. - /// - size: = A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The - /// new size for the images. - /// - /// - Attr align_corners: If true, the centers of the 4 corner pixels of the input and output tensors are - /// aligned, preserving the values at the corner pixels. Defaults to false. - /// - /// - Output resized_images: 4-D with shape - /// `[batch, new_height, new_width, channels]`. 
- @inlinable @inline(__always) - public static func resizeNearestNeighbor( - images: Tensor, - size: Tensor, - alignCorners: Bool = false, - halfPixelCenters: Bool = false - ) -> Tensor { - switch commonBackend(images.handle.backend, size.handle.backend) { - case .XLA: - let output_device = size.device - let images = Tensor(copying: images, to: .defaultTFEager) - let size = Tensor(copying: size, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.resizeNearestNeighbor( - images: images, size: size, alignCorners: alignCorners, - halfPixelCenters: halfPixelCenters), to: output_device) - case .TF_EAGER: - return _RawTFEager.resizeNearestNeighbor( - images: images, size: size, alignCorners: alignCorners, halfPixelCenters: halfPixelCenters - ) - } - - } - - /// Computes the gradient of nearest neighbor interpolation. - /// - /// - Parameters: - /// - grads: 4-D with shape `[batch, height, width, channels]`. - /// - size: = A 1-D int32 Tensor of 2 elements: `orig_height, orig_width`. The - /// original input size. - /// - /// - Attr align_corners: If true, the centers of the 4 corner pixels of the input and grad tensors are - /// aligned. Defaults to false. - /// - /// - Output output: 4-D with shape `[batch, orig_height, orig_width, channels]`. Gradients - /// with respect to the input image. - @inlinable @inline(__always) - public static func resizeNearestNeighborGrad( - grads: Tensor, - size: Tensor, - alignCorners: Bool = false, - halfPixelCenters: Bool = false - ) -> Tensor { - switch commonBackend(grads.handle.backend, size.handle.backend) { - case .XLA: - let output_device = size.device - let grads = Tensor(copying: grads, to: .defaultTFEager) - let size = Tensor(copying: size, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.resizeNearestNeighborGrad( - grads: grads, size: size, alignCorners: alignCorners, halfPixelCenters: halfPixelCenters - ), to: output_device) - case .TF_EAGER: - return _RawTFEager.resizeNearestNeighborGrad( - grads: grads, size: size, alignCorners: alignCorners, halfPixelCenters: halfPixelCenters) - } - - } - - /// Applies a gradient to a given accumulator. - /// - /// Does not add if local_step is lesser than the accumulator's global_step. - /// - /// - Parameters: - /// - handle: The handle to a accumulator. - /// - local_step: The local_step value at which the gradient was computed. - /// - gradient: A tensor of the gradient to be accumulated. - /// - /// - Attr dtype: The data type of accumulated gradients. Needs to correspond to the type - /// of the accumulator. - @inlinable @inline(__always) - public static func resourceAccumulatorApplyGradient( - handle: ResourceHandle, - localStep: Tensor, - gradient: Tensor - ) { - _RawTFEager.resourceAccumulatorApplyGradient( - handle: handle, localStep: localStep, gradient: gradient) - } - - /// Returns the number of gradients aggregated in the given accumulators. - /// - /// - Parameter handle: The handle to an accumulator. - /// - /// - Output num_accumulated: The number of gradients aggregated in the given accumulator. - @inlinable @inline(__always) - public static func resourceAccumulatorNumAccumulated( - handle: ResourceHandle - ) -> Tensor { - _RawTFEager.resourceAccumulatorNumAccumulated(handle: handle) - } - - /// Updates the accumulator with a new value for global_step. - /// - /// Logs warning if the accumulator's value is already higher than - /// new_global_step. - /// - /// - Parameters: - /// - handle: The handle to an accumulator. 
- /// - new_global_step: The new global_step value to set. - @inlinable @inline(__always) - public static func resourceAccumulatorSetGlobalStep( - handle: ResourceHandle, - newGlobalStep: Tensor - ) { - _RawTFEager.resourceAccumulatorSetGlobalStep(handle: handle, newGlobalStep: newGlobalStep) - } - - /// Extracts the average gradient in the given ConditionalAccumulator. - /// - /// The op blocks until sufficient (i.e., more than num_required) - /// gradients have been accumulated. If the accumulator has already - /// aggregated more than num_required gradients, it returns the average of - /// the accumulated gradients. Also automatically increments the recorded - /// global_step in the accumulator by 1, and resets the aggregate to 0. - /// - /// - Parameters: - /// - handle: The handle to an accumulator. - /// - num_required: Number of gradients required before we return an aggregate. - /// - /// - Attr dtype: The data type of accumulated gradients. Needs to correspond to the type - /// of the accumulator. - /// - /// - Output average: The average of the accumulated gradients. - @inlinable @inline(__always) - public static func resourceAccumulatorTakeGradient( - handle: ResourceHandle, - numRequired: Tensor - ) -> Tensor { - switch numRequired.handle.backend { - case .XLA: - let output_device = numRequired.device - let numRequired = Tensor(copying: numRequired, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.resourceAccumulatorTakeGradient( - handle: handle, numRequired: numRequired), to: output_device) - case .TF_EAGER: - return _RawTFEager.resourceAccumulatorTakeGradient(handle: handle, numRequired: numRequired) - } - - } - - /// Update '*var' according to the AdaMax algorithm. - /// - /// m_t <- beta1 * m_{t-1} + (1 - beta1) * g - /// v_t <- max(beta2 * v_{t-1}, abs(g)) - /// variable <- variable - learning_rate / (1 - beta1^t) * m_t / (v_t + epsilon) - /// - /// - Parameters: - /// - var: Should be from a Variable(). - /// - m: Should be from a Variable(). - /// - v: Should be from a Variable(). - /// - beta1_power: Must be a scalar. - /// - lr: Scaling factor. Must be a scalar. - /// - beta1: Momentum factor. Must be a scalar. - /// - beta2: Momentum factor. Must be a scalar. - /// - epsilon: Ridge term. Must be a scalar. - /// - grad: The gradient. - /// - /// - Attr use_locking: If `True`, updating of the var, m, and v tensors will be protected - /// by a lock; otherwise the behavior is undefined, but may exhibit less - /// contention. - @inlinable @inline(__always) - public static func resourceApplyAdaMax( - var_: ResourceHandle, - m: ResourceHandle, - v: ResourceHandle, - beta1Power: Tensor, - lr: Tensor, - beta1: Tensor, - beta2: Tensor, - epsilon: Tensor, - grad: Tensor, - useLocking: Bool = false - ) { - _RawTFEager.resourceApplyAdaMax( - var_: var_, m: m, v: v, beta1Power: beta1Power, lr: lr, beta1: beta1, beta2: beta2, - epsilon: epsilon, grad: grad, useLocking: useLocking) - } - - /// Update '*var' according to the adadelta scheme. - /// - /// accum = rho() * accum + (1 - rho()) * grad.square(); - /// update = (update_accum + epsilon).sqrt() * (accum + epsilon()).rsqrt() * grad; - /// update_accum = rho() * update_accum + (1 - rho()) * update.square(); - /// var -= update; - /// - /// - Parameters: - /// - var: Should be from a Variable(). - /// - accum: Should be from a Variable(). - /// - accum_update: Should be from a Variable(). - /// - lr: Scaling factor. Must be a scalar. - /// - rho: Decay factor. Must be a scalar. - /// - epsilon: Constant factor. 
Must be a scalar. - /// - grad: The gradient. - /// - /// - Attr use_locking: If True, updating of the var, accum and update_accum tensors will be protected by - /// a lock; otherwise the behavior is undefined, but may exhibit less contention. - @inlinable @inline(__always) - public static func resourceApplyAdadelta( - var_: ResourceHandle, - accum: ResourceHandle, - accumUpdate: ResourceHandle, - lr: Tensor, - rho: Tensor, - epsilon: Tensor, - grad: Tensor, - useLocking: Bool = false - ) { - _RawTFEager.resourceApplyAdadelta( - var_: var_, accum: accum, accumUpdate: accumUpdate, lr: lr, rho: rho, epsilon: epsilon, - grad: grad, useLocking: useLocking) - } - - /// Update '*var' according to the adagrad scheme. - /// - /// accum += grad * grad - /// var -= lr * grad * (1 / sqrt(accum)) - /// - /// - Parameters: - /// - var: Should be from a Variable(). - /// - accum: Should be from a Variable(). - /// - lr: Scaling factor. Must be a scalar. - /// - grad: The gradient. - /// - /// - Attr use_locking: If `True`, updating of the var and accum tensors will be protected - /// by a lock; otherwise the behavior is undefined, but may exhibit less - /// contention. - @inlinable @inline(__always) - public static func resourceApplyAdagrad( - var_: ResourceHandle, - accum: ResourceHandle, - lr: Tensor, - grad: Tensor, - useLocking: Bool = false, - updateSlots: Bool = true - ) { - _RawTFEager.resourceApplyAdagrad( - var_: var_, accum: accum, lr: lr, grad: grad, useLocking: useLocking, - updateSlots: updateSlots) - } - - /// Update '*var' according to the proximal adagrad scheme. - /// - /// - Parameters: - /// - var: Should be from a Variable(). - /// - gradient_accumulator: Should be from a Variable(). - /// - gradient_squared_accumulator: Should be from a Variable(). - /// - grad: The gradient. - /// - lr: Scaling factor. Must be a scalar. - /// - l1: L1 regularization. Must be a scalar. - /// - l2: L2 regularization. Must be a scalar. - /// - global_step: Training step number. Must be a scalar. - /// - /// - Attr use_locking: If True, updating of the var and accum tensors will be protected by - /// a lock; otherwise the behavior is undefined, but may exhibit less contention. - @inlinable @inline(__always) - public static func resourceApplyAdagradDA( - var_: ResourceHandle, - gradientAccumulator: ResourceHandle, - gradientSquaredAccumulator: ResourceHandle, - grad: Tensor, - lr: Tensor, - l1: Tensor, - l2: Tensor, - globalStep: Tensor, - useLocking: Bool = false - ) { - _RawTFEager.resourceApplyAdagradDA( - var_: var_, gradientAccumulator: gradientAccumulator, - gradientSquaredAccumulator: gradientSquaredAccumulator, grad: grad, lr: lr, l1: l1, l2: l2, - globalStep: globalStep, useLocking: useLocking) - } - - /// Update '*var' according to the adagrad scheme. - /// - /// accum += grad * grad - /// var -= lr * grad * (1 / sqrt(accum)) - /// - /// - Parameters: - /// - var: Should be from a Variable(). - /// - accum: Should be from a Variable(). - /// - lr: Scaling factor. Must be a scalar. - /// - epsilon: Constant factor. Must be a scalar. - /// - grad: The gradient. - /// - /// - Attr use_locking: If `True`, updating of the var and accum tensors will be protected - /// by a lock; otherwise the behavior is undefined, but may exhibit less - /// contention. 
- @inlinable @inline(__always) - public static func resourceApplyAdagradV2( - var_: ResourceHandle, - accum: ResourceHandle, - lr: Tensor, - epsilon: Tensor, - grad: Tensor, - useLocking: Bool = false, - updateSlots: Bool = true - ) { - _RawTFEager.resourceApplyAdagradV2( - var_: var_, accum: accum, lr: lr, epsilon: epsilon, grad: grad, useLocking: useLocking, - updateSlots: updateSlots) - } - - /// Update '*var' according to the Adam algorithm. - /// - /// $$\text{lr}_t := \mathrm{learning_rate} * \sqrt{1 - \beta_2^t} / (1 - \beta_1^t)$$ - /// $$m_t := \beta_1 * m_{t-1} + (1 - \beta_1) * g$$ - /// $$v_t := \beta_2 * v_{t-1} + (1 - \beta_2) * g * g$$ - /// $$\text{variable} := \text{variable} - \text{lr}_t * m_t / (\sqrt{v_t} + \epsilon)$$ - /// - /// - Parameters: - /// - var: Should be from a Variable(). - /// - m: Should be from a Variable(). - /// - v: Should be from a Variable(). - /// - beta1_power: Must be a scalar. - /// - beta2_power: Must be a scalar. - /// - lr: Scaling factor. Must be a scalar. - /// - beta1: Momentum factor. Must be a scalar. - /// - beta2: Momentum factor. Must be a scalar. - /// - epsilon: Ridge term. Must be a scalar. - /// - grad: The gradient. - /// - /// - Attrs: - /// - use_locking: If `True`, updating of the var, m, and v tensors will be protected - /// by a lock; otherwise the behavior is undefined, but may exhibit less - /// contention. - /// - use_nesterov: If `True`, uses the nesterov update. - @inlinable @inline(__always) - public static func resourceApplyAdam( - var_: ResourceHandle, - m: ResourceHandle, - v: ResourceHandle, - beta1Power: Tensor, - beta2Power: Tensor, - lr: Tensor, - beta1: Tensor, - beta2: Tensor, - epsilon: Tensor, - grad: Tensor, - useLocking: Bool = false, - useNesterov: Bool = false - ) { - _RawTFEager.resourceApplyAdam( - var_: var_, m: m, v: v, beta1Power: beta1Power, beta2Power: beta2Power, lr: lr, - beta1: beta1, beta2: beta2, epsilon: epsilon, grad: grad, useLocking: useLocking, - useNesterov: useNesterov) - } - - /// Update '*var' according to the Adam algorithm. - /// - /// $$\text{lr}_t := \mathrm{learning_rate} * \sqrt{1 - \beta_2^t} / (1 - \beta_1^t)$$ - /// $$m_t := \beta_1 * m_{t-1} + (1 - \beta_1) * g$$ - /// $$v_t := \beta_2 * v_{t-1} + (1 - \beta_2) * g * g$$ - /// $$\hat{v}_t := max{\hat{v}_{t-1}, v_t}$$ - /// $$\text{variable} := \text{variable} - \text{lr}_t * m_t / (\sqrt{\hat{v}_t} + \epsilon)$$ - /// - /// - Parameters: - /// - var: Should be from a Variable(). - /// - m: Should be from a Variable(). - /// - v: Should be from a Variable(). - /// - vhat: Should be from a Variable(). - /// - beta1_power: Must be a scalar. - /// - beta2_power: Must be a scalar. - /// - lr: Scaling factor. Must be a scalar. - /// - beta1: Momentum factor. Must be a scalar. - /// - beta2: Momentum factor. Must be a scalar. - /// - epsilon: Ridge term. Must be a scalar. - /// - grad: The gradient. - /// - /// - Attr use_locking: If `True`, updating of the var, m, and v tensors will be protected - /// by a lock; otherwise the behavior is undefined, but may exhibit less - /// contention. 
- @inlinable @inline(__always) - public static func resourceApplyAdamWithAmsgrad( - var_: ResourceHandle, - m: ResourceHandle, - v: ResourceHandle, - vhat: ResourceHandle, - beta1Power: Tensor, - beta2Power: Tensor, - lr: Tensor, - beta1: Tensor, - beta2: Tensor, - epsilon: Tensor, - grad: Tensor, - useLocking: Bool = false - ) { - _RawTFEager.resourceApplyAdamWithAmsgrad( - var_: var_, m: m, v: v, vhat: vhat, beta1Power: beta1Power, beta2Power: beta2Power, lr: lr, - beta1: beta1, beta2: beta2, epsilon: epsilon, grad: grad, useLocking: useLocking) - } - - /// Update '*var' according to the AddSign update. - /// - /// m_t <- beta1 * m_{t-1} + (1 - beta1) * g - /// update <- (alpha + sign_decay * sign(g) *sign(m)) * g - /// variable <- variable - lr_t * update - /// - /// - Parameters: - /// - var: Should be from a Variable(). - /// - m: Should be from a Variable(). - /// - lr: Scaling factor. Must be a scalar. - /// - alpha: Must be a scalar. - /// - sign_decay: Must be a scalar. - /// - beta: Must be a scalar. - /// - grad: The gradient. - /// - /// - Attr use_locking: If `True`, updating of the var and m tensors is - /// protected by a lock; otherwise the behavior is undefined, but may exhibit less - /// contention. - @inlinable @inline(__always) - public static func resourceApplyAddSign( - var_: ResourceHandle, - m: ResourceHandle, - lr: Tensor, - alpha: Tensor, - signDecay: Tensor, - beta: Tensor, - grad: Tensor, - useLocking: Bool = false - ) { - _RawTFEager.resourceApplyAddSign( - var_: var_, m: m, lr: lr, alpha: alpha, signDecay: signDecay, beta: beta, grad: grad, - useLocking: useLocking) - } - - /// Update '*var' according to the centered RMSProp algorithm. - /// - /// The centered RMSProp algorithm uses an estimate of the centered second moment - /// (i.e., the variance) for normalization, as opposed to regular RMSProp, which - /// uses the (uncentered) second moment. This often helps with training, but is - /// slightly more expensive in terms of computation and memory. - /// - /// Note that in dense implementation of this algorithm, mg, ms, and mom will - /// update even if the grad is zero, but in this sparse implementation, mg, ms, - /// and mom will not update in iterations during which the grad is zero. - /// - /// mean_square = decay * mean_square + (1-decay) * gradient ** 2 - /// mean_grad = decay * mean_grad + (1-decay) * gradient - /// - /// Delta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2) - /// - /// mg <- rho * mg_{t-1} + (1-rho) * grad - /// ms <- rho * ms_{t-1} + (1-rho) * grad * grad - /// mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms - mg * mg + epsilon) - /// var <- var - mom - /// - /// - Parameters: - /// - var: Should be from a Variable(). - /// - mg: Should be from a Variable(). - /// - ms: Should be from a Variable(). - /// - mom: Should be from a Variable(). - /// - lr: Scaling factor. Must be a scalar. - /// - rho: Decay rate. Must be a scalar. - /// - epsilon: Ridge term. Must be a scalar. - /// - grad: The gradient. - /// - /// - Attr use_locking: If `True`, updating of the var, mg, ms, and mom tensors is - /// protected by a lock; otherwise the behavior is undefined, but may exhibit less - /// contention. 
- @inlinable @inline(__always) - public static func resourceApplyCenteredRMSProp( - var_: ResourceHandle, - mg: ResourceHandle, - ms: ResourceHandle, - mom: ResourceHandle, - lr: Tensor, - rho: Tensor, - momentum: Tensor, - epsilon: Tensor, - grad: Tensor, - useLocking: Bool = false - ) { - _RawTFEager.resourceApplyCenteredRMSProp( - var_: var_, mg: mg, ms: ms, mom: mom, lr: lr, rho: rho, momentum: momentum, - epsilon: epsilon, grad: grad, useLocking: useLocking) - } - - /// Update '*var' according to the Ftrl-proximal scheme. - /// - /// accum_new = accum + grad * grad - /// linear += grad - (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var - /// quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2 - /// var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0 - /// accum = accum_new - /// - /// - Parameters: - /// - var: Should be from a Variable(). - /// - accum: Should be from a Variable(). - /// - linear: Should be from a Variable(). - /// - grad: The gradient. - /// - lr: Scaling factor. Must be a scalar. - /// - l1: L1 regularization. Must be a scalar. - /// - l2: L2 regularization. Must be a scalar. - /// - lr_power: Scaling factor. Must be a scalar. - /// - /// - Attr use_locking: If `True`, updating of the var and accum tensors will be protected - /// by a lock; otherwise the behavior is undefined, but may exhibit less - /// contention. - @inlinable @inline(__always) - public static func resourceApplyFtrl( - var_: ResourceHandle, - accum: ResourceHandle, - linear: ResourceHandle, - grad: Tensor, - lr: Tensor, - l1: Tensor, - l2: Tensor, - lrPower: Tensor, - useLocking: Bool = false - ) { - _RawTFEager.resourceApplyFtrl( - var_: var_, accum: accum, linear: linear, grad: grad, lr: lr, l1: l1, l2: l2, - lrPower: lrPower, useLocking: useLocking) - } - - /// Update '*var' according to the Ftrl-proximal scheme. - /// - /// grad_with_shrinkage = grad + 2 * l2_shrinkage * var - /// accum_new = accum + grad_with_shrinkage * grad_with_shrinkage - /// linear += grad_with_shrinkage + - /// (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var - /// quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2 - /// var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0 - /// accum = accum_new - /// - /// - Parameters: - /// - var: Should be from a Variable(). - /// - accum: Should be from a Variable(). - /// - linear: Should be from a Variable(). - /// - grad: The gradient. - /// - lr: Scaling factor. Must be a scalar. - /// - l1: L1 regularization. Must be a scalar. - /// - l2: L2 shrinkage regularization. Must be a scalar. - /// - lr_power: Scaling factor. Must be a scalar. - /// - /// - Attr use_locking: If `True`, updating of the var and accum tensors will be protected - /// by a lock; otherwise the behavior is undefined, but may exhibit less - /// contention. - @inlinable @inline(__always) - public static func resourceApplyFtrlV2( - var_: ResourceHandle, - accum: ResourceHandle, - linear: ResourceHandle, - grad: Tensor, - lr: Tensor, - l1: Tensor, - l2: Tensor, - l2Shrinkage: Tensor, - lrPower: Tensor, - useLocking: Bool = false - ) { - _RawTFEager.resourceApplyFtrlV2( - var_: var_, accum: accum, linear: linear, grad: grad, lr: lr, l1: l1, l2: l2, - l2Shrinkage: l2Shrinkage, lrPower: lrPower, useLocking: useLocking) - } - - /// Update '*var' by subtracting 'alpha' * 'delta' from it. - /// - /// - Parameters: - /// - var: Should be from a Variable(). - /// - alpha: Scaling factor. Must be a scalar. - /// - delta: The change. 
- /// - /// - Attr use_locking: If `True`, the subtraction will be protected by a lock; - /// otherwise the behavior is undefined, but may exhibit less contention. - @inlinable @inline(__always) - public static func resourceApplyGradientDescent( - var_: ResourceHandle, - alpha: Tensor, - delta: Tensor, - useLocking: Bool = false - ) { - _RawTFEager.resourceApplyGradientDescent( - var_: var_, alpha: alpha, delta: delta, useLocking: useLocking) - } - - /// Update '*var' according to the momentum scheme. - /// - /// Set use_nesterov = True if you want to use Nesterov momentum. - /// - /// accum = accum * momentum - lr * grad - /// var += accum - /// - /// - Parameters: - /// - var: Should be from a Variable(). - /// - accum: Should be from a Variable(). - /// - lr: Scaling factor. Must be a scalar. - /// - grad: The gradient. - /// - momentum: Momentum. Must be a scalar. - /// - /// - Attrs: - /// - use_locking: If `True`, updating of the var and accum tensors will be protected - /// by a lock; otherwise the behavior is undefined, but may exhibit less - /// contention. - /// - use_nesterov: If `True`, the tensor passed to compute grad will be - /// var + momentum * accum, so in the end, the var you get is actually - /// var + momentum * accum. - @inlinable @inline(__always) - public static func resourceApplyKerasMomentum( - var_: ResourceHandle, - accum: ResourceHandle, - lr: Tensor, - grad: Tensor, - momentum: Tensor, - useLocking: Bool = false, - useNesterov: Bool = false - ) { - _RawTFEager.resourceApplyKerasMomentum( - var_: var_, accum: accum, lr: lr, grad: grad, momentum: momentum, useLocking: useLocking, - useNesterov: useNesterov) - } - - /// Update '*var' according to the momentum scheme. Set use_nesterov = True if you - /// - /// want to use Nesterov momentum. - /// - /// accum = accum * momentum + grad - /// var -= lr * accum - /// - /// - Parameters: - /// - var: Should be from a Variable(). - /// - accum: Should be from a Variable(). - /// - lr: Scaling factor. Must be a scalar. - /// - grad: The gradient. - /// - momentum: Momentum. Must be a scalar. - /// - /// - Attrs: - /// - use_locking: If `True`, updating of the var and accum tensors will be protected - /// by a lock; otherwise the behavior is undefined, but may exhibit less - /// contention. - /// - use_nesterov: If `True`, the tensor passed to compute grad will be - /// var - lr * momentum * accum, so in the end, the var you get is actually - /// var - lr * momentum * accum. - @inlinable @inline(__always) - public static func resourceApplyMomentum( - var_: ResourceHandle, - accum: ResourceHandle, - lr: Tensor, - grad: Tensor, - momentum: Tensor, - useLocking: Bool = false, - useNesterov: Bool = false - ) { - _RawTFEager.resourceApplyMomentum( - var_: var_, accum: accum, lr: lr, grad: grad, momentum: momentum, useLocking: useLocking, - useNesterov: useNesterov) - } - - /// Update '*var' according to the AddSign update. - /// - /// m_t <- beta1 * m_{t-1} + (1 - beta1) * g - /// update <- exp(logbase * sign_decay * sign(g) * sign(m_t)) * g - /// variable <- variable - lr_t * update - /// - /// - Parameters: - /// - var: Should be from a Variable(). - /// - m: Should be from a Variable(). - /// - lr: Scaling factor. Must be a scalar. - /// - logbase: Must be a scalar. - /// - sign_decay: Must be a scalar. - /// - beta: Must be a scalar. - /// - grad: The gradient. 
- /// - /// - Attr use_locking: If `True`, updating of the var and m tensors is - /// protected by a lock; otherwise the behavior is undefined, but may exhibit less - /// contention. - @inlinable @inline(__always) - public static func resourceApplyPowerSign( - var_: ResourceHandle, - m: ResourceHandle, - lr: Tensor, - logbase: Tensor, - signDecay: Tensor, - beta: Tensor, - grad: Tensor, - useLocking: Bool = false - ) { - _RawTFEager.resourceApplyPowerSign( - var_: var_, m: m, lr: lr, logbase: logbase, signDecay: signDecay, beta: beta, grad: grad, - useLocking: useLocking) - } - - /// Update '*var' and '*accum' according to FOBOS with Adagrad learning rate. - /// - /// accum += grad * grad - /// prox_v = var - lr * grad * (1 / sqrt(accum)) - /// var = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1,0} - /// - /// - Parameters: - /// - var: Should be from a Variable(). - /// - accum: Should be from a Variable(). - /// - lr: Scaling factor. Must be a scalar. - /// - l1: L1 regularization. Must be a scalar. - /// - l2: L2 regularization. Must be a scalar. - /// - grad: The gradient. - /// - /// - Attr use_locking: If True, updating of the var and accum tensors will be protected by - /// a lock; otherwise the behavior is undefined, but may exhibit less contention. - @inlinable @inline(__always) - public static func resourceApplyProximalAdagrad( - var_: ResourceHandle, - accum: ResourceHandle, - lr: Tensor, - l1: Tensor, - l2: Tensor, - grad: Tensor, - useLocking: Bool = false - ) { - _RawTFEager.resourceApplyProximalAdagrad( - var_: var_, accum: accum, lr: lr, l1: l1, l2: l2, grad: grad, useLocking: useLocking) - } - - /// Update '*var' as FOBOS algorithm with fixed learning rate. - /// - /// prox_v = var - alpha * delta - /// var = sign(prox_v)/(1+alpha*l2) * max{|prox_v|-alpha*l1,0} - /// - /// - Parameters: - /// - var: Should be from a Variable(). - /// - alpha: Scaling factor. Must be a scalar. - /// - l1: L1 regularization. Must be a scalar. - /// - l2: L2 regularization. Must be a scalar. - /// - delta: The change. - /// - /// - Attr use_locking: If True, the subtraction will be protected by a lock; - /// otherwise the behavior is undefined, but may exhibit less contention. - @inlinable @inline(__always) - public static func resourceApplyProximalGradientDescent( - var_: ResourceHandle, - alpha: Tensor, - l1: Tensor, - l2: Tensor, - delta: Tensor, - useLocking: Bool = false - ) { - _RawTFEager.resourceApplyProximalGradientDescent( - var_: var_, alpha: alpha, l1: l1, l2: l2, delta: delta, useLocking: useLocking) - } - - /// Update '*var' according to the RMSProp algorithm. - /// - /// Note that in dense implementation of this algorithm, ms and mom will - /// update even if the grad is zero, but in this sparse implementation, ms - /// and mom will not update in iterations during which the grad is zero. - /// - /// mean_square = decay * mean_square + (1-decay) * gradient ** 2 - /// Delta = learning_rate * gradient / sqrt(mean_square + epsilon) - /// - /// ms <- rho * ms_{t-1} + (1-rho) * grad * grad - /// mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon) - /// var <- var - mom - /// - /// - Parameters: - /// - var: Should be from a Variable(). - /// - ms: Should be from a Variable(). - /// - mom: Should be from a Variable(). - /// - lr: Scaling factor. Must be a scalar. - /// - rho: Decay rate. Must be a scalar. - /// - epsilon: Ridge term. Must be a scalar. - /// - grad: The gradient. 
- /// - /// - Attr use_locking: If `True`, updating of the var, ms, and mom tensors is protected - /// by a lock; otherwise the behavior is undefined, but may exhibit less - /// contention. - @inlinable @inline(__always) - public static func resourceApplyRMSProp( - var_: ResourceHandle, - ms: ResourceHandle, - mom: ResourceHandle, - lr: Tensor, - rho: Tensor, - momentum: Tensor, - epsilon: Tensor, - grad: Tensor, - useLocking: Bool = false - ) { - _RawTFEager.resourceApplyRMSProp( - var_: var_, ms: ms, mom: mom, lr: lr, rho: rho, momentum: momentum, epsilon: epsilon, - grad: grad, useLocking: useLocking) - } - - /// A conditional accumulator for aggregating gradients. - /// - /// The accumulator accepts gradients marked with local_step greater or - /// equal to the most recent global_step known to the accumulator. The - /// average can be extracted from the accumulator, provided sufficient - /// gradients have been accumulated. Extracting the average automatically - /// resets the aggregate to 0, and increments the global_step recorded by - /// the accumulator. - /// This is a resource version of ConditionalAccumulator that will work in TF2.0 - /// with tf.cond version 2. - /// - /// - Attrs: - /// - dtype: The type of the value being accumulated. - /// - shape: The shape of the values, can be [], in which case shape is unknown. - /// - container: If non-empty, this accumulator is placed in the given container. - /// Otherwise, a default container is used. - /// - shared_name: If non-empty, this accumulator will be shared under the - /// given name across multiple sessions. - /// - /// - Output handle: The handle to the accumulator. - @inlinable @inline(__always) - public static func resourceConditionalAccumulator( - dtype: TensorDataType, - shape: TensorShape?, - container: String, - sharedName: String, - reductionType: ReductionType = .mean - ) -> ResourceHandle { - _RawTFEager.resourceConditionalAccumulator( - dtype: dtype, shape: shape, container: container, sharedName: sharedName, - reductionType: reductionType) - } - - /// Increments variable pointed to by 'resource' until it reaches 'limit'. - /// - /// - Parameter resource: Should be from a scalar `Variable` node. - /// - /// - Attr limit: If incrementing ref would bring it above limit, instead generates an - /// 'OutOfRange' error. - /// - /// - Output output: A copy of the input before increment. If nothing else modifies the - /// input, the values produced will all be distinct. - @inlinable @inline(__always) - public static func resourceCountUpTo( - resource: ResourceHandle, - limit: Int64 - ) -> Tensor { - _RawTFEager.resourceCountUpTo(resource: resource, limit: limit) - } - - @inlinable @inline(__always) - public static func resourceCreateOp( - resource: ResourceHandle - ) { - _RawTFEager.resourceCreateOp(resource: resource) - } - - /// Gather slices from the variable pointed to by `resource` according to `indices`. - /// - /// `indices` must be an integer tensor of any dimension (usually 0-D or 1-D). - /// Produces an output tensor with shape `indices.shape + params.shape[1:]` where: - /// - /// ```python - /// # Scalar indices - /// output[:, ..., :] = params[indices, :, ... :] - /// - /// # Vector indices - /// output[i, :, ..., :] = params[indices[i], :, ... :] - /// - /// # Higher rank indices - /// output[i, ..., j, :, ... 
:] = params[indices[i, ..., j], :, ..., :] - /// ``` - @inlinable @inline(__always) - public static func resourceGather< - Dtype: TensorFlowScalar, - Tindices: TensorFlowIndex - >( - resource: ResourceHandle, - indices: Tensor, - batchDims: Int64 = 0, - validateIndices: Bool = true - ) -> Tensor { - switch indices.handle.backend { - case .XLA: - let output_device = indices.device - let indices = Tensor(copying: indices, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.resourceGather( - resource: resource, indices: indices, batchDims: batchDims, - validateIndices: validateIndices), to: output_device) - case .TF_EAGER: - return _RawTFEager.resourceGather( - resource: resource, indices: indices, batchDims: batchDims, - validateIndices: validateIndices) - } - - } - - @inlinable @inline(__always) - public static func resourceGatherNd< - Dtype: TensorFlowScalar, - Tindices: TensorFlowIndex - >( - resource: ResourceHandle, - indices: Tensor - ) -> Tensor { - switch indices.handle.backend { - case .XLA: - let output_device = indices.device - let indices = Tensor(copying: indices, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.resourceGatherNd(resource: resource, indices: indices), - to: output_device) - case .TF_EAGER: - return _RawTFEager.resourceGatherNd(resource: resource, indices: indices) - } - - } - - @inlinable @inline(__always) - public static func resourceInitializedOp( - resource: ResourceHandle - ) -> Tensor { - _RawTFEager.resourceInitializedOp(resource: resource) - } - - /// Adds sparse updates to the variable referenced by `resource`. - /// - /// This operation computes - /// - /// # Scalar indices - /// ref[indices, ...] += updates[...] - /// - /// # Vector indices (for each i) - /// ref[indices[i], ...] += updates[i, ...] - /// - /// # High rank indices (for each i, ..., j) - /// ref[indices[i, ..., j], ...] += updates[i, ..., j, ...] - /// - /// Duplicate entries are handled correctly: if multiple `indices` reference - /// the same location, their contributions add. - /// - /// Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. - /// - ///
- /// - /// - Parameters: - /// - resource: Should be from a `Variable` node. - /// - indices: A tensor of indices into the first dimension of `ref`. - /// - updates: A tensor of updated values to add to `ref`. - @inlinable @inline(__always) - public static func resourceScatterAdd< - Dtype: TensorFlowNumeric, - Tindices: TensorFlowIndex - >( - resource: ResourceHandle, - indices: Tensor, - updates: Tensor - ) { - _RawTFEager.resourceScatterAdd(resource: resource, indices: indices, updates: updates) - } - - /// Divides sparse updates into the variable referenced by `resource`. - /// - /// This operation computes - /// - /// # Scalar indices - /// ref[indices, ...] /= updates[...] - /// - /// # Vector indices (for each i) - /// ref[indices[i], ...] /= updates[i, ...] - /// - /// # High rank indices (for each i, ..., j) - /// ref[indices[i, ..., j], ...] /= updates[i, ..., j, ...] - /// - /// Duplicate entries are handled correctly: if multiple `indices` reference - /// the same location, their contributions multiply. - /// - /// Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. - /// - ///
- /// - /// - Parameters: - /// - resource: Should be from a `Variable` node. - /// - indices: A tensor of indices into the first dimension of `ref`. - /// - updates: A tensor of updated values to add to `ref`. - @inlinable @inline(__always) - public static func resourceScatterDiv< - Dtype: TensorFlowNumeric, - Tindices: TensorFlowIndex - >( - resource: ResourceHandle, - indices: Tensor, - updates: Tensor - ) { - _RawTFEager.resourceScatterDiv(resource: resource, indices: indices, updates: updates) - } - - /// Reduces sparse updates into the variable referenced by `resource` using the `max` operation. - /// - /// This operation computes - /// - /// # Scalar indices - /// ref[indices, ...] = max(ref[indices, ...], updates[...]) - /// - /// # Vector indices (for each i) - /// ref[indices[i], ...] = max(ref[indices[i], ...], updates[i, ...]) - /// - /// # High rank indices (for each i, ..., j) - /// ref[indices[i, ..., j], ...] = max(ref[indices[i, ..., j], ...], updates[i, ..., j, ...]) - /// - /// Duplicate entries are handled correctly: if multiple `indices` reference - /// the same location, their contributions are combined. - /// - /// Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. - /// - ///
- /// - /// - Parameters: - /// - resource: Should be from a `Variable` node. - /// - indices: A tensor of indices into the first dimension of `ref`. - /// - updates: A tensor of updated values to add to `ref`. - @inlinable @inline(__always) - public static func resourceScatterMax< - Dtype: TensorFlowNumeric, - Tindices: TensorFlowIndex - >( - resource: ResourceHandle, - indices: Tensor, - updates: Tensor - ) { - _RawTFEager.resourceScatterMax(resource: resource, indices: indices, updates: updates) - } - - /// Reduces sparse updates into the variable referenced by `resource` using the `min` operation. - /// - /// This operation computes - /// - /// # Scalar indices - /// ref[indices, ...] = min(ref[indices, ...], updates[...]) - /// - /// # Vector indices (for each i) - /// ref[indices[i], ...] = min(ref[indices[i], ...], updates[i, ...]) - /// - /// # High rank indices (for each i, ..., j) - /// ref[indices[i, ..., j], ...] = min(ref[indices[i, ..., j], ...], updates[i, ..., j, ...]) - /// - /// Duplicate entries are handled correctly: if multiple `indices` reference - /// the same location, their contributions are combined. - /// - /// Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. - /// - ///
- /// - /// - Parameters: - /// - resource: Should be from a `Variable` node. - /// - indices: A tensor of indices into the first dimension of `ref`. - /// - updates: A tensor of updated values to add to `ref`. - @inlinable @inline(__always) - public static func resourceScatterMin< - Dtype: TensorFlowNumeric, - Tindices: TensorFlowIndex - >( - resource: ResourceHandle, - indices: Tensor, - updates: Tensor - ) { - _RawTFEager.resourceScatterMin(resource: resource, indices: indices, updates: updates) - } - - /// Multiplies sparse updates into the variable referenced by `resource`. - /// - /// This operation computes - /// - /// # Scalar indices - /// ref[indices, ...] *= updates[...] - /// - /// # Vector indices (for each i) - /// ref[indices[i], ...] *= updates[i, ...] - /// - /// # High rank indices (for each i, ..., j) - /// ref[indices[i, ..., j], ...] *= updates[i, ..., j, ...] - /// - /// Duplicate entries are handled correctly: if multiple `indices` reference - /// the same location, their contributions multiply. - /// - /// Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. - /// - ///
- /// - /// - Parameters: - /// - resource: Should be from a `Variable` node. - /// - indices: A tensor of indices into the first dimension of `ref`. - /// - updates: A tensor of updated values to add to `ref`. - @inlinable @inline(__always) - public static func resourceScatterMul< - Dtype: TensorFlowNumeric, - Tindices: TensorFlowIndex - >( - resource: ResourceHandle, - indices: Tensor, - updates: Tensor - ) { - _RawTFEager.resourceScatterMul(resource: resource, indices: indices, updates: updates) - } - - /// Applies sparse addition to individual values or slices in a Variable. - /// - /// `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`. - /// - /// `indices` must be integer tensor, containing indices into `ref`. - /// It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`. - /// - /// The innermost dimension of `indices` (with length `K`) corresponds to - /// indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th - /// dimension of `ref`. - /// - /// `updates` is `Tensor` of rank `Q-1+P-K` with shape: - /// - /// ``` - /// [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]] - /// ``` - /// - /// For example, say we want to add 4 scattered elements to a rank-1 tensor to - /// 8 elements. In Python, that addition would look like this: - /// - /// ```python - /// ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8], use_resource=True) - /// indices = tf.constant([[4], [3], [1], [7]]) - /// updates = tf.constant([9, 10, 11, 12]) - /// add = tf.scatter_nd_add(ref, indices, updates) - /// with tf.Session() as sess: - /// print sess.run(add) - /// ``` - /// - /// The resulting update to ref would look like this: - /// - /// [1, 13, 3, 14, 14, 6, 7, 20] - /// - /// See `tf.scatter_nd` for more details about how to make updates to - /// slices. - /// - /// - Parameters: - /// - ref: A resource handle. Must be from a VarHandleOp. - /// - indices: A Tensor. Must be one of the following types: int32, int64. - /// A tensor of indices into ref. - /// - updates: A Tensor. Must have the same type as ref. A tensor of - /// values to add to ref. - /// - /// - Attr use_locking: An optional bool. Defaults to True. If True, the assignment will - /// be protected by a lock; otherwise the behavior is undefined, - /// but may exhibit less contention. - @inlinable @inline(__always) - public static func resourceScatterNdAdd< - T: TensorFlowScalar, - Tindices: TensorFlowIndex - >( - ref: ResourceHandle, - indices: Tensor, - updates: Tensor, - useLocking: Bool = true - ) { - _RawTFEager.resourceScatterNdAdd( - ref: ref, indices: indices, updates: updates, useLocking: useLocking) - } - - /// Applies sparse subtraction to individual values or slices in a Variable. - /// - /// `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`. - /// - /// `indices` must be integer tensor, containing indices into `ref`. - /// It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`. - /// - /// The innermost dimension of `indices` (with length `K`) corresponds to - /// indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th - /// dimension of `ref`. - /// - /// `updates` is `Tensor` of rank `Q-1+P-K` with shape: - /// - /// ``` - /// [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]] - /// ``` - /// - /// For example, say we want to subtract 4 scattered elements from a rank-1 tensor - /// with 8 elements. 
In Python, that subtraction would look like this: - /// - /// ```python - /// ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8], use_resource=True) - /// indices = tf.constant([[4], [3], [1], [7]]) - /// updates = tf.constant([9, 10, 11, 12]) - /// sub = tf.scatter_nd_sub(ref, indices, updates) - /// with tf.Session() as sess: - /// print sess.run(sub) - /// ``` - /// - /// The resulting update to ref would look like this: - /// - /// [1, -9, 3, -6, -4, 6, 7, -4] - /// - /// See `tf.scatter_nd` for more details about how to make updates to - /// slices. - /// - /// - Parameters: - /// - ref: A resource handle. Must be from a VarHandleOp. - /// - indices: A Tensor. Must be one of the following types: int32, int64. - /// A tensor of indices into ref. - /// - updates: A Tensor. Must have the same type as ref. A tensor of - /// values to add to ref. - /// - /// - Attr use_locking: An optional bool. Defaults to True. If True, the assignment will - /// be protected by a lock; otherwise the behavior is undefined, - /// but may exhibit less contention. - @inlinable @inline(__always) - public static func resourceScatterNdSub< - T: TensorFlowScalar, - Tindices: TensorFlowIndex - >( - ref: ResourceHandle, - indices: Tensor, - updates: Tensor, - useLocking: Bool = true - ) { - _RawTFEager.resourceScatterNdSub( - ref: ref, indices: indices, updates: updates, useLocking: useLocking) - } - - /// Applies sparse `updates` to individual values or slices within a given - /// - /// variable according to `indices`. - /// - /// `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`. - /// - /// `indices` must be integer tensor, containing indices into `ref`. - /// It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`. - /// - /// The innermost dimension of `indices` (with length `K`) corresponds to - /// indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th - /// dimension of `ref`. - /// - /// `updates` is `Tensor` of rank `Q-1+P-K` with shape: - /// - /// ``` - /// [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]]. - /// ``` - /// - /// For example, say we want to update 4 scattered elements to a rank-1 tensor to - /// 8 elements. In Python, that update would look like this: - /// - /// ```python - /// ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8]) - /// indices = tf.constant([[4], [3], [1] ,[7]]) - /// updates = tf.constant([9, 10, 11, 12]) - /// update = tf.scatter_nd_update(ref, indices, updates) - /// with tf.Session() as sess: - /// print sess.run(update) - /// ``` - /// - /// The resulting update to ref would look like this: - /// - /// [1, 11, 3, 10, 9, 6, 7, 12] - /// - /// See `tf.scatter_nd` for more details about how to make updates to - /// slices. - /// - /// - Parameters: - /// - ref: A resource handle. Must be from a VarHandleOp. - /// - indices: A Tensor. Must be one of the following types: int32, int64. - /// A tensor of indices into ref. - /// - updates: A Tensor. Must have the same type as ref. A tensor of updated - /// values to add to ref. - /// - /// - Attr use_locking: An optional bool. Defaults to True. If True, the assignment will - /// be protected by a lock; otherwise the behavior is undefined, - /// but may exhibit less contention. 
- @inlinable @inline(__always) - public static func resourceScatterNdUpdate< - T: TensorFlowScalar, - Tindices: TensorFlowIndex - >( - ref: ResourceHandle, - indices: Tensor, - updates: Tensor, - useLocking: Bool = true - ) { - _RawTFEager.resourceScatterNdUpdate( - ref: ref, indices: indices, updates: updates, useLocking: useLocking) - } - - /// Subtracts sparse updates from the variable referenced by `resource`. - /// - /// This operation computes - /// - /// # Scalar indices - /// ref[indices, ...] -= updates[...] - /// - /// # Vector indices (for each i) - /// ref[indices[i], ...] -= updates[i, ...] - /// - /// # High rank indices (for each i, ..., j) - /// ref[indices[i, ..., j], ...] -= updates[i, ..., j, ...] - /// - /// Duplicate entries are handled correctly: if multiple `indices` reference - /// the same location, their contributions add. - /// - /// Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. - /// - ///
- /// - /// - Parameters: - /// - resource: Should be from a `Variable` node. - /// - indices: A tensor of indices into the first dimension of `ref`. - /// - updates: A tensor of updated values to add to `ref`. - @inlinable @inline(__always) - public static func resourceScatterSub< - Dtype: TensorFlowNumeric, - Tindices: TensorFlowIndex - >( - resource: ResourceHandle, - indices: Tensor, - updates: Tensor - ) { - _RawTFEager.resourceScatterSub(resource: resource, indices: indices, updates: updates) - } - - /// Assigns sparse updates to the variable referenced by `resource`. - /// - /// This operation computes - /// - /// # Scalar indices - /// ref[indices, ...] = updates[...] - /// - /// # Vector indices (for each i) - /// ref[indices[i], ...] = updates[i, ...] - /// - /// # High rank indices (for each i, ..., j) - /// ref[indices[i, ..., j], ...] = updates[i, ..., j, ...] - /// - /// - Parameters: - /// - resource: Should be from a `Variable` node. - /// - indices: A tensor of indices into the first dimension of `ref`. - /// - updates: A tensor of updated values to add to `ref`. - @inlinable @inline(__always) - public static func resourceScatterUpdate< - Dtype: TensorFlowScalar, - Tindices: TensorFlowIndex - >( - resource: ResourceHandle, - indices: Tensor, - updates: Tensor - ) { - _RawTFEager.resourceScatterUpdate(resource: resource, indices: indices, updates: updates) - } - - /// var: Should be from a Variable(). - /// - /// - Parameters: - /// - accum: Should be from a Variable(). - /// - accum_update: : Should be from a Variable(). - /// - lr: Learning rate. Must be a scalar. - /// - rho: Decay factor. Must be a scalar. - /// - epsilon: Constant factor. Must be a scalar. - /// - grad: The gradient. - /// - indices: A vector of indices into the first dimension of var and accum. - /// - /// - Attr use_locking: If True, updating of the var and accum tensors will be protected by - /// a lock; otherwise the behavior is undefined, but may exhibit less contention. - @inlinable @inline(__always) - public static func resourceSparseApplyAdadelta< - T: TensorFlowNumeric, - Tindices: TensorFlowIndex - >( - var_: ResourceHandle, - accum: ResourceHandle, - accumUpdate: ResourceHandle, - lr: Tensor, - rho: Tensor, - epsilon: Tensor, - grad: Tensor, - indices: Tensor, - useLocking: Bool = false - ) { - _RawTFEager.resourceSparseApplyAdadelta( - var_: var_, accum: accum, accumUpdate: accumUpdate, lr: lr, rho: rho, epsilon: epsilon, - grad: grad, indices: indices, useLocking: useLocking) - } - - /// Update relevant entries in '*var' and '*accum' according to the adagrad scheme. - /// - /// That is for rows we have grad for, we update var and accum as follows: - /// accum += grad * grad - /// var -= lr * grad * (1 / sqrt(accum)) - /// - /// - Parameters: - /// - var: Should be from a Variable(). - /// - accum: Should be from a Variable(). - /// - lr: Learning rate. Must be a scalar. - /// - grad: The gradient. - /// - indices: A vector of indices into the first dimension of var and accum. - /// - /// - Attr use_locking: If `True`, updating of the var and accum tensors will be protected - /// by a lock; otherwise the behavior is undefined, but may exhibit less - /// contention. 
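For the Adagrad update just documented, a minimal sketch of the per-row math on plain Swift arrays (names and values are hypothetical; the real op mutates the resource variables in place):

```swift
// Sparse Adagrad step from the doc comment above:
//   accum += grad * grad
//   var   -= lr * grad * (1 / sqrt(accum))
var variable: [Float] = [1.0, 2.0, 3.0]
var accum: [Float] = [0.1, 0.1, 0.1]
let lr: Float = 0.01
let grad: [Float] = [0.5, -0.25]
let indices = [0, 2]                   // rows for which we have a gradient

for (i, row) in indices.enumerated() {
  accum[row] += grad[i] * grad[i]
  variable[row] -= lr * grad[i] * (1.0 / accum[row].squareRoot())
}
```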
- @inlinable @inline(__always) - public static func resourceSparseApplyAdagrad< - T: TensorFlowNumeric, - Tindices: TensorFlowIndex - >( - var_: ResourceHandle, - accum: ResourceHandle, - lr: Tensor, - grad: Tensor, - indices: Tensor, - useLocking: Bool = false, - updateSlots: Bool = true - ) { - _RawTFEager.resourceSparseApplyAdagrad( - var_: var_, accum: accum, lr: lr, grad: grad, indices: indices, useLocking: useLocking, - updateSlots: updateSlots) - } - - /// Update entries in '*var' and '*accum' according to the proximal adagrad scheme. - /// - /// - Parameters: - /// - var: Should be from a Variable(). - /// - gradient_accumulator: Should be from a Variable(). - /// - gradient_squared_accumulator: Should be from a Variable(). - /// - grad: The gradient. - /// - indices: A vector of indices into the first dimension of var and accum. - /// - lr: Learning rate. Must be a scalar. - /// - l1: L1 regularization. Must be a scalar. - /// - l2: L2 regularization. Must be a scalar. - /// - global_step: Training step number. Must be a scalar. - /// - /// - Attr use_locking: If True, updating of the var and accum tensors will be protected by - /// a lock; otherwise the behavior is undefined, but may exhibit less contention. - @inlinable @inline(__always) - public static func resourceSparseApplyAdagradDA< - T: TensorFlowNumeric, - Tindices: TensorFlowIndex - >( - var_: ResourceHandle, - gradientAccumulator: ResourceHandle, - gradientSquaredAccumulator: ResourceHandle, - grad: Tensor, - indices: Tensor, - lr: Tensor, - l1: Tensor, - l2: Tensor, - globalStep: Tensor, - useLocking: Bool = false - ) { - _RawTFEager.resourceSparseApplyAdagradDA( - var_: var_, gradientAccumulator: gradientAccumulator, - gradientSquaredAccumulator: gradientSquaredAccumulator, grad: grad, indices: indices, - lr: lr, l1: l1, l2: l2, globalStep: globalStep, useLocking: useLocking) - } - - /// Update relevant entries in '*var' and '*accum' according to the adagrad scheme. - /// - /// That is for rows we have grad for, we update var and accum as follows: - /// accum += grad * grad - /// var -= lr * grad * (1 / sqrt(accum)) - /// - /// - Parameters: - /// - var: Should be from a Variable(). - /// - accum: Should be from a Variable(). - /// - lr: Learning rate. Must be a scalar. - /// - epsilon: Constant factor. Must be a scalar. - /// - grad: The gradient. - /// - indices: A vector of indices into the first dimension of var and accum. - /// - /// - Attr use_locking: If `True`, updating of the var and accum tensors will be protected - /// by a lock; otherwise the behavior is undefined, but may exhibit less - /// contention. - @inlinable @inline(__always) - public static func resourceSparseApplyAdagradV2< - T: TensorFlowNumeric, - Tindices: TensorFlowIndex - >( - var_: ResourceHandle, - accum: ResourceHandle, - lr: Tensor, - epsilon: Tensor, - grad: Tensor, - indices: Tensor, - useLocking: Bool = false, - updateSlots: Bool = true - ) { - _RawTFEager.resourceSparseApplyAdagradV2( - var_: var_, accum: accum, lr: lr, epsilon: epsilon, grad: grad, indices: indices, - useLocking: useLocking, updateSlots: updateSlots) - } - - /// Update '*var' according to the centered RMSProp algorithm. - /// - /// The centered RMSProp algorithm uses an estimate of the centered second moment - /// (i.e., the variance) for normalization, as opposed to regular RMSProp, which - /// uses the (uncentered) second moment. This often helps with training, but is - /// slightly more expensive in terms of computation and memory. 
- /// - /// Note that in dense implementation of this algorithm, mg, ms, and mom will - /// update even if the grad is zero, but in this sparse implementation, mg, ms, - /// and mom will not update in iterations during which the grad is zero. - /// - /// mean_square = decay * mean_square + (1-decay) * gradient ** 2 - /// mean_grad = decay * mean_grad + (1-decay) * gradient - /// Delta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2) - /// - /// ms <- rho * ms_{t-1} + (1-rho) * grad * grad - /// mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon) - /// var <- var - mom - /// - /// - Parameters: - /// - var: Should be from a Variable(). - /// - mg: Should be from a Variable(). - /// - ms: Should be from a Variable(). - /// - mom: Should be from a Variable(). - /// - lr: Scaling factor. Must be a scalar. - /// - rho: Decay rate. Must be a scalar. - /// - epsilon: Ridge term. Must be a scalar. - /// - grad: The gradient. - /// - indices: A vector of indices into the first dimension of var, ms and mom. - /// - /// - Attr use_locking: If `True`, updating of the var, mg, ms, and mom tensors is - /// protected by a lock; otherwise the behavior is undefined, but may exhibit less - /// contention. - @inlinable @inline(__always) - public static func resourceSparseApplyCenteredRMSProp< - T: TensorFlowNumeric, - Tindices: TensorFlowIndex - >( - var_: ResourceHandle, - mg: ResourceHandle, - ms: ResourceHandle, - mom: ResourceHandle, - lr: Tensor, - rho: Tensor, - momentum: Tensor, - epsilon: Tensor, - grad: Tensor, - indices: Tensor, - useLocking: Bool = false - ) { - _RawTFEager.resourceSparseApplyCenteredRMSProp( - var_: var_, mg: mg, ms: ms, mom: mom, lr: lr, rho: rho, momentum: momentum, - epsilon: epsilon, grad: grad, indices: indices, useLocking: useLocking) - } - - /// Update relevant entries in '*var' according to the Ftrl-proximal scheme. - /// - /// That is for rows we have grad for, we update var, accum and linear as follows: - /// accum_new = accum + grad * grad - /// linear += grad - (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var - /// quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2 - /// var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0 - /// accum = accum_new - /// - /// - Parameters: - /// - var: Should be from a Variable(). - /// - accum: Should be from a Variable(). - /// - linear: Should be from a Variable(). - /// - grad: The gradient. - /// - indices: A vector of indices into the first dimension of var and accum. - /// - lr: Scaling factor. Must be a scalar. - /// - l1: L1 regularization. Must be a scalar. - /// - l2: L2 regularization. Must be a scalar. - /// - lr_power: Scaling factor. Must be a scalar. - /// - /// - Attr use_locking: If `True`, updating of the var and accum tensors will be protected - /// by a lock; otherwise the behavior is undefined, but may exhibit less - /// contention. - @inlinable @inline(__always) - public static func resourceSparseApplyFtrl< - T: TensorFlowNumeric, - Tindices: TensorFlowIndex - >( - var_: ResourceHandle, - accum: ResourceHandle, - linear: ResourceHandle, - grad: Tensor, - indices: Tensor, - lr: Tensor, - l1: Tensor, - l2: Tensor, - lrPower: Tensor, - useLocking: Bool = false - ) { - _RawTFEager.resourceSparseApplyFtrl( - var_: var_, accum: accum, linear: linear, grad: grad, indices: indices, lr: lr, l1: l1, - l2: l2, lrPower: lrPower, useLocking: useLocking) - } - - /// Update relevant entries in '*var' according to the Ftrl-proximal scheme. 
- /// - /// That is for rows we have grad for, we update var, accum and linear as follows: - /// grad_with_shrinkage = grad + 2 * l2_shrinkage * var - /// accum_new = accum + grad_with_shrinkage * grad_with_shrinkage - /// linear += grad_with_shrinkage + - /// (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var - /// quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2 - /// var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0 - /// accum = accum_new - /// - /// - Parameters: - /// - var: Should be from a Variable(). - /// - accum: Should be from a Variable(). - /// - linear: Should be from a Variable(). - /// - grad: The gradient. - /// - indices: A vector of indices into the first dimension of var and accum. - /// - lr: Scaling factor. Must be a scalar. - /// - l1: L1 regularization. Must be a scalar. - /// - l2: L2 shrinkage regularization. Must be a scalar. - /// - lr_power: Scaling factor. Must be a scalar. - /// - /// - Attr use_locking: If `True`, updating of the var and accum tensors will be protected - /// by a lock; otherwise the behavior is undefined, but may exhibit less - /// contention. - @inlinable @inline(__always) - public static func resourceSparseApplyFtrlV2< - T: TensorFlowNumeric, - Tindices: TensorFlowIndex - >( - var_: ResourceHandle, - accum: ResourceHandle, - linear: ResourceHandle, - grad: Tensor, - indices: Tensor, - lr: Tensor, - l1: Tensor, - l2: Tensor, - l2Shrinkage: Tensor, - lrPower: Tensor, - useLocking: Bool = false - ) { - _RawTFEager.resourceSparseApplyFtrlV2( - var_: var_, accum: accum, linear: linear, grad: grad, indices: indices, lr: lr, l1: l1, - l2: l2, l2Shrinkage: l2Shrinkage, lrPower: lrPower, useLocking: useLocking) - } - - /// Update relevant entries in '*var' and '*accum' according to the momentum scheme. - /// - /// Set use_nesterov = True if you want to use Nesterov momentum. - /// - /// That is for rows we have grad for, we update var and accum as follows: - /// - /// accum = accum * momentum - lr * grad - /// var += accum - /// - /// - Parameters: - /// - var: Should be from a Variable(). - /// - accum: Should be from a Variable(). - /// - lr: Learning rate. Must be a scalar. - /// - grad: The gradient. - /// - indices: A vector of indices into the first dimension of var and accum. - /// - momentum: Momentum. Must be a scalar. - /// - /// - Attrs: - /// - use_locking: If `True`, updating of the var and accum tensors will be protected - /// by a lock; otherwise the behavior is undefined, but may exhibit less - /// contention. - /// - use_nesterov: If `True`, the tensor passed to compute grad will be - /// var + momentum * accum, so in the end, the var you get is actually - /// var + momentum * accum. - @inlinable @inline(__always) - public static func resourceSparseApplyKerasMomentum< - T: TensorFlowNumeric, - Tindices: TensorFlowIndex - >( - var_: ResourceHandle, - accum: ResourceHandle, - lr: Tensor, - grad: Tensor, - indices: Tensor, - momentum: Tensor, - useLocking: Bool = false, - useNesterov: Bool = false - ) { - _RawTFEager.resourceSparseApplyKerasMomentum( - var_: var_, accum: accum, lr: lr, grad: grad, indices: indices, momentum: momentum, - useLocking: useLocking, useNesterov: useNesterov) - } - - /// Update relevant entries in '*var' and '*accum' according to the momentum scheme. - /// - /// Set use_nesterov = True if you want to use Nesterov momentum. 
- /// - /// That is for rows we have grad for, we update var and accum as follows: - /// - /// accum = accum * momentum + grad - /// var -= lr * accum - /// - /// - Parameters: - /// - var: Should be from a Variable(). - /// - accum: Should be from a Variable(). - /// - lr: Learning rate. Must be a scalar. - /// - grad: The gradient. - /// - indices: A vector of indices into the first dimension of var and accum. - /// - momentum: Momentum. Must be a scalar. - /// - /// - Attrs: - /// - use_locking: If `True`, updating of the var and accum tensors will be protected - /// by a lock; otherwise the behavior is undefined, but may exhibit less - /// contention. - /// - use_nesterov: If `True`, the tensor passed to compute grad will be - /// var - lr * momentum * accum, so in the end, the var you get is actually - /// var - lr * momentum * accum. - @inlinable @inline(__always) - public static func resourceSparseApplyMomentum< - T: TensorFlowNumeric, - Tindices: TensorFlowIndex - >( - var_: ResourceHandle, - accum: ResourceHandle, - lr: Tensor, - grad: Tensor, - indices: Tensor, - momentum: Tensor, - useLocking: Bool = false, - useNesterov: Bool = false - ) { - _RawTFEager.resourceSparseApplyMomentum( - var_: var_, accum: accum, lr: lr, grad: grad, indices: indices, momentum: momentum, - useLocking: useLocking, useNesterov: useNesterov) - } - - /// Sparse update entries in '*var' and '*accum' according to FOBOS algorithm. - /// - /// That is for rows we have grad for, we update var and accum as follows: - /// accum += grad * grad - /// prox_v = var - /// prox_v -= lr * grad * (1 / sqrt(accum)) - /// var = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1,0} - /// - /// - Parameters: - /// - var: Should be from a Variable(). - /// - accum: Should be from a Variable(). - /// - lr: Learning rate. Must be a scalar. - /// - l1: L1 regularization. Must be a scalar. - /// - l2: L2 regularization. Must be a scalar. - /// - grad: The gradient. - /// - indices: A vector of indices into the first dimension of var and accum. - /// - /// - Attr use_locking: If True, updating of the var and accum tensors will be protected by - /// a lock; otherwise the behavior is undefined, but may exhibit less contention. - @inlinable @inline(__always) - public static func resourceSparseApplyProximalAdagrad< - T: TensorFlowNumeric, - Tindices: TensorFlowIndex - >( - var_: ResourceHandle, - accum: ResourceHandle, - lr: Tensor, - l1: Tensor, - l2: Tensor, - grad: Tensor, - indices: Tensor, - useLocking: Bool = false - ) { - _RawTFEager.resourceSparseApplyProximalAdagrad( - var_: var_, accum: accum, lr: lr, l1: l1, l2: l2, grad: grad, indices: indices, - useLocking: useLocking) - } - - /// Sparse update '*var' as FOBOS algorithm with fixed learning rate. - /// - /// That is for rows we have grad for, we update var as follows: - /// prox_v = var - alpha * grad - /// var = sign(prox_v)/(1+alpha*l2) * max{|prox_v|-alpha*l1,0} - /// - /// - Parameters: - /// - var: Should be from a Variable(). - /// - alpha: Scaling factor. Must be a scalar. - /// - l1: L1 regularization. Must be a scalar. - /// - l2: L2 regularization. Must be a scalar. - /// - grad: The gradient. - /// - indices: A vector of indices into the first dimension of var and accum. - /// - /// - Attr use_locking: If True, the subtraction will be protected by a lock; - /// otherwise the behavior is undefined, but may exhibit less contention. 
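The FOBOS step described above, sketched on plain Swift arrays (hypothetical values; the real op updates the resource variable in place):

```swift
// Sparse proximal gradient descent (FOBOS) from the doc comment above:
//   prox_v = var - alpha * grad
//   var    = sign(prox_v) / (1 + alpha*l2) * max(|prox_v| - alpha*l1, 0)
var variable: [Float] = [0.8, -0.5, 0.3]
let alpha: Float = 0.1, l1: Float = 0.01, l2: Float = 0.001
let grad: [Float] = [0.2, -0.4]
let indices = [0, 2]

for (i, row) in indices.enumerated() {
  let proxV = variable[row] - alpha * grad[i]
  let sign: Float = proxV < 0 ? -1 : 1
  variable[row] = sign / (1 + alpha * l2) * max(abs(proxV) - alpha * l1, 0)
}
```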
- @inlinable @inline(__always) - public static func resourceSparseApplyProximalGradientDescent< - T: TensorFlowNumeric, - Tindices: TensorFlowIndex - >( - var_: ResourceHandle, - alpha: Tensor, - l1: Tensor, - l2: Tensor, - grad: Tensor, - indices: Tensor, - useLocking: Bool = false - ) { - _RawTFEager.resourceSparseApplyProximalGradientDescent( - var_: var_, alpha: alpha, l1: l1, l2: l2, grad: grad, indices: indices, - useLocking: useLocking) - } - - /// Update '*var' according to the RMSProp algorithm. - /// - /// Note that in dense implementation of this algorithm, ms and mom will - /// update even if the grad is zero, but in this sparse implementation, ms - /// and mom will not update in iterations during which the grad is zero. - /// - /// mean_square = decay * mean_square + (1-decay) * gradient ** 2 - /// Delta = learning_rate * gradient / sqrt(mean_square + epsilon) - /// - /// ms <- rho * ms_{t-1} + (1-rho) * grad * grad - /// mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon) - /// var <- var - mom - /// - /// - Parameters: - /// - var: Should be from a Variable(). - /// - ms: Should be from a Variable(). - /// - mom: Should be from a Variable(). - /// - lr: Scaling factor. Must be a scalar. - /// - rho: Decay rate. Must be a scalar. - /// - epsilon: Ridge term. Must be a scalar. - /// - grad: The gradient. - /// - indices: A vector of indices into the first dimension of var, ms and mom. - /// - /// - Attr use_locking: If `True`, updating of the var, ms, and mom tensors is protected - /// by a lock; otherwise the behavior is undefined, but may exhibit less - /// contention. - @inlinable @inline(__always) - public static func resourceSparseApplyRMSProp< - T: TensorFlowNumeric, - Tindices: TensorFlowIndex - >( - var_: ResourceHandle, - ms: ResourceHandle, - mom: ResourceHandle, - lr: Tensor, - rho: Tensor, - momentum: Tensor, - epsilon: Tensor, - grad: Tensor, - indices: Tensor, - useLocking: Bool = false - ) { - _RawTFEager.resourceSparseApplyRMSProp( - var_: var_, ms: ms, mom: mom, lr: lr, rho: rho, momentum: momentum, epsilon: epsilon, - grad: grad, indices: indices, useLocking: useLocking) - } - - /// Assign `value` to the sliced l-value reference of `ref`. - /// - /// The values of `value` are assigned to the positions in the variable - /// `ref` that are selected by the slice parameters. The slice parameters - /// `begin, `end`, `strides`, etc. work exactly as in `StridedSlice`. - /// - /// NOTE this op currently does not support broadcasting and so `value`'s - /// shape must be exactly the shape produced by the slice of `ref`. - @inlinable @inline(__always) - public static func resourceStridedSliceAssign< - T: TensorFlowScalar, - Index: TensorFlowIndex - >( - ref: ResourceHandle, - begin: Tensor, - end: Tensor, - strides: Tensor, - value: Tensor, - beginMask: Int64 = 0, - endMask: Int64 = 0, - ellipsisMask: Int64 = 0, - newAxisMask: Int64 = 0, - shrinkAxisMask: Int64 = 0 - ) { - _RawTFEager.resourceStridedSliceAssign( - ref: ref, begin: begin, end: end, strides: strides, value: value, beginMask: beginMask, - endMask: endMask, ellipsisMask: ellipsisMask, newAxisMask: newAxisMask, - shrinkAxisMask: shrinkAxisMask) - } + /// output = cond ? then_branch(inputs) : else_branch(inputs). + /// + /// - Parameters: + /// - cond: A boolean scalar. + /// - inputs: A list of input tensors. + /// + /// - Attrs: + /// - then_branch: A function takes 'inputs' and returns a list of tensors, + /// whose types are the same as what else_branch returns. 
+ /// - else_branch: A function takes 'inputs' and returns a list of tensors. + /// whose types are the same as what then_branch returns. + /// + /// - Output output: A list of tensors returned by either then_branch(inputs) or + /// else_branch(inputs). The input shapes of the then_branch and + /// else_branch must match. + @inlinable @inline(__always) + public static func xlaIf< + Tcond: TensorFlowScalar, + ThenbranchIn: TensorGroup, + ThenbranchOut: TensorGroup, + ElsebranchIn: TensorGroup, + ElsebranchOut: TensorGroup, + Tin: TensorArrayProtocol, + Tout: TensorGroup + >( + cond: Tensor, + inputs: Tin, + thenBranch: (ThenbranchIn) -> ThenbranchOut, + elseBranch: (ElsebranchIn) -> ElsebranchOut + ) -> Tout { + _RawTFEager.xlaIf(cond: cond, inputs: inputs, thenBranch: thenBranch, elseBranch: elseBranch) + } - @inlinable @inline(__always) - public static func resourceUsingOp( - resource: ResourceHandle - ) { - _RawTFEager.resourceUsingOp(resource: resource) - } - - /// Restores a tensor from checkpoint files. - /// - /// Reads a tensor stored in one or several files. If there are several files (for - /// instance because a tensor was saved as slices), `file_pattern` may contain - /// wildcard symbols (`*` and `?`) in the filename portion only, not in the - /// directory portion. - /// - /// If a `file_pattern` matches several files, `preferred_shard` can be used to hint - /// in which file the requested tensor is likely to be found. This op will first - /// open the file at index `preferred_shard` in the list of matching files and try - /// to restore tensors from that file. Only if some tensors or tensor slices are - /// not found in that first file, then the Op opens all the files. Setting - /// `preferred_shard` to match the value passed as the `shard` input - /// of a matching `Save` Op may speed up Restore. This attribute only affects - /// performance, not correctness. The default value -1 means files are processed in - /// order. - /// - /// See also `RestoreSlice`. - /// - /// - Parameters: - /// - file_pattern: Must have a single element. The pattern of the files from - /// which we read the tensor. - /// - tensor_name: Must have a single element. The name of the tensor to be - /// restored. - /// - /// - Attrs: - /// - dt: The type of the tensor to be restored. - /// - preferred_shard: Index of file to open first if multiple files match - /// `file_pattern`. - /// - /// - Output tensor: The restored tensor. - @inlinable @inline(__always) - public static func restore( - filePattern: StringTensor, - tensorName: StringTensor, - preferredShard: Int64 = -1 - ) -> Tensor
{ - _RawTFEager.restore( - filePattern: filePattern, tensorName: tensorName, preferredShard: preferredShard) - } - - /// Restores a tensor from checkpoint files. - /// - /// This is like `Restore` except that restored tensor can be listed as filling - /// only a slice of a larger tensor. `shape_and_slice` specifies the shape of the - /// larger tensor and the slice that the restored tensor covers. - /// - /// The `shape_and_slice` input has the same format as the - /// elements of the `shapes_and_slices` input of the `SaveSlices` op. - /// - /// - Parameters: - /// - file_pattern: Must have a single element. The pattern of the files from - /// which we read the tensor. - /// - tensor_name: Must have a single element. The name of the tensor to be - /// restored. - /// - shape_and_slice: Scalar. The shapes and slice specifications to use when - /// restoring a tensors. - /// - /// - Attrs: - /// - dt: The type of the tensor to be restored. - /// - preferred_shard: Index of file to open first if multiple files match - /// `file_pattern`. See the documentation for `Restore`. - /// - /// - Output tensor: The restored tensor. - @inlinable @inline(__always) - public static func restoreSlice( - filePattern: StringTensor, - tensorName: StringTensor, - shapeAndSlice: StringTensor, - preferredShard: Int64 = -1 - ) -> Tensor
{ - _RawTFEager.restoreSlice( - filePattern: filePattern, tensorName: tensorName, shapeAndSlice: shapeAndSlice, - preferredShard: preferredShard) - } - - /// Restores tensors from a V2 checkpoint. - /// - /// For backward compatibility with the V1 format, this Op currently allows - /// restoring from a V1 checkpoint as well: - /// - This Op first attempts to find the V2 index file pointed to by "prefix", and - /// if found proceed to read it as a V2 checkpoint; - /// - Otherwise the V1 read path is invoked. - /// Relying on this behavior is not recommended, as the ability to fall back to read - /// V1 might be deprecated and eventually removed. - /// - /// By default, restores the named tensors in full. If the caller wishes to restore - /// specific slices of stored tensors, "shape_and_slices" should be non-empty - /// strings and correspondingly well-formed. - /// - /// Callers must ensure all the named tensors are indeed stored in the checkpoint. - /// - /// - Parameters: - /// - prefix: Must have a single element. The prefix of a V2 checkpoint. - /// - tensor_names: shape {N}. The names of the tensors to be restored. - /// - shape_and_slices: shape {N}. The slice specs of the tensors to be restored. - /// Empty strings indicate that they are non-partitioned tensors. - /// - /// - Attr dtypes: shape {N}. The list of expected dtype for the tensors. Must match - /// those stored in the checkpoint. - /// - /// - Output tensors: shape {N}. The restored tensors, whose shapes are read from the - /// checkpoint directly. - @inlinable @inline(__always) - public static func restoreV2( - prefix: StringTensor, - tensorNames: StringTensor, - shapeAndSlices: StringTensor - ) -> Dtypes { - _RawTFEager.restoreV2( - prefix: prefix, tensorNames: tensorNames, shapeAndSlices: shapeAndSlices) - } - - @inlinable @inline(__always) - public static func restrict( - _ a: Tensor - ) -> Tensor { - switch a.handle.backend { - case .XLA: - let output_device = a.device - let a = Tensor(copying: a, to: .defaultTFEager) - return Tensor(copying: _RawTFEager.restrict(a), to: output_device) - case .TF_EAGER: - return _RawTFEager.restrict(a) - } - - } - - @inlinable @inline(__always) - public static func restrict( - _ a: StringTensor - ) -> StringTensor { - _RawTFEager.restrict(a) - } - - /// Retrieve ADAM embedding parameters. - /// - /// An op that retrieves optimization parameters from embedding to host - /// memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up - /// the correct embedding table configuration. For example, this op is - /// used to retrieve updated parameters before saving a checkpoint. - /// - /// - Outputs: - /// - parameters: Parameter parameters updated by the ADAM optimization algorithm. - /// - momenta: Parameter momenta updated by the ADAM optimization algorithm. - /// - velocities: Parameter velocities updated by the ADAM optimization algorithm. - @inlinable @inline(__always) - public static func retrieveTPUEmbeddingADAMParameters( - tableId: Int64 = -1, - tableName: String, - numShards: Int64, - shardId: Int64, - config: String - ) -> (parameters: Tensor, momenta: Tensor, velocities: Tensor) { - _RawTFEager.retrieveTPUEmbeddingADAMParameters( - tableId: tableId, tableName: tableName, numShards: numShards, shardId: shardId, - config: config) - } - - /// Retrieve ADAM embedding parameters with debug support. - /// - /// An op that retrieves optimization parameters from embedding to host - /// memory. 
Must be preceded by a ConfigureTPUEmbeddingHost op that sets up - /// the correct embedding table configuration. For example, this op is - /// used to retrieve updated parameters before saving a checkpoint. - /// - /// - Outputs: - /// - parameters: Parameter parameters updated by the ADAM optimization algorithm. - /// - momenta: Parameter momenta updated by the ADAM optimization algorithm. - /// - velocities: Parameter velocities updated by the ADAM optimization algorithm. - /// - gradient_accumulators: Parameter gradient_accumulators updated by the ADAM optimization algorithm. - @inlinable @inline(__always) - public static func retrieveTPUEmbeddingADAMParametersGradAccumDebug( - tableId: Int64 = -1, - tableName: String, - numShards: Int64, - shardId: Int64, - config: String - ) -> ( - parameters: Tensor, momenta: Tensor, velocities: Tensor, - gradientAccumulators: Tensor - ) { - _RawTFEager.retrieveTPUEmbeddingADAMParametersGradAccumDebug( - tableId: tableId, tableName: tableName, numShards: numShards, shardId: shardId, - config: config) - } - - /// Retrieve Adadelta embedding parameters. - /// - /// An op that retrieves optimization parameters from embedding to host - /// memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up - /// the correct embedding table configuration. For example, this op is - /// used to retrieve updated parameters before saving a checkpoint. - /// - /// - Outputs: - /// - parameters: Parameter parameters updated by the Adadelta optimization algorithm. - /// - accumulators: Parameter accumulators updated by the Adadelta optimization algorithm. - /// - updates: Parameter updates updated by the Adadelta optimization algorithm. - @inlinable @inline(__always) - public static func retrieveTPUEmbeddingAdadeltaParameters( - tableId: Int64 = -1, - tableName: String, - numShards: Int64, - shardId: Int64, - config: String - ) -> (parameters: Tensor, accumulators: Tensor, updates: Tensor) { - _RawTFEager.retrieveTPUEmbeddingAdadeltaParameters( - tableId: tableId, tableName: tableName, numShards: numShards, shardId: shardId, - config: config) - } - - /// Retrieve Adadelta embedding parameters with debug support. - /// - /// An op that retrieves optimization parameters from embedding to host - /// memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up - /// the correct embedding table configuration. For example, this op is - /// used to retrieve updated parameters before saving a checkpoint. - /// - /// - Outputs: - /// - parameters: Parameter parameters updated by the Adadelta optimization algorithm. - /// - accumulators: Parameter accumulators updated by the Adadelta optimization algorithm. - /// - updates: Parameter updates updated by the Adadelta optimization algorithm. - /// - gradient_accumulators: Parameter gradient_accumulators updated by the Adadelta optimization algorithm. - @inlinable @inline(__always) - public static func retrieveTPUEmbeddingAdadeltaParametersGradAccumDebug( - tableId: Int64 = -1, - tableName: String, - numShards: Int64, - shardId: Int64, - config: String - ) -> ( - parameters: Tensor, accumulators: Tensor, updates: Tensor, - gradientAccumulators: Tensor - ) { - _RawTFEager.retrieveTPUEmbeddingAdadeltaParametersGradAccumDebug( - tableId: tableId, tableName: tableName, numShards: numShards, shardId: shardId, - config: config) - } - - /// Retrieve Adagrad embedding parameters. - /// - /// An op that retrieves optimization parameters from embedding to host - /// memory. 
Must be preceded by a ConfigureTPUEmbeddingHost op that sets up - /// the correct embedding table configuration. For example, this op is - /// used to retrieve updated parameters before saving a checkpoint. - /// - /// - Outputs: - /// - parameters: Parameter parameters updated by the Adagrad optimization algorithm. - /// - accumulators: Parameter accumulators updated by the Adagrad optimization algorithm. - @inlinable @inline(__always) - public static func retrieveTPUEmbeddingAdagradParameters( - tableId: Int64 = -1, - tableName: String, - numShards: Int64, - shardId: Int64, - config: String - ) -> (parameters: Tensor, accumulators: Tensor) { - _RawTFEager.retrieveTPUEmbeddingAdagradParameters( - tableId: tableId, tableName: tableName, numShards: numShards, shardId: shardId, - config: config) - } - - /// Retrieve Adagrad embedding parameters with debug support. - /// - /// An op that retrieves optimization parameters from embedding to host - /// memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up - /// the correct embedding table configuration. For example, this op is - /// used to retrieve updated parameters before saving a checkpoint. - /// - /// - Outputs: - /// - parameters: Parameter parameters updated by the Adagrad optimization algorithm. - /// - accumulators: Parameter accumulators updated by the Adagrad optimization algorithm. - /// - gradient_accumulators: Parameter gradient_accumulators updated by the Adagrad optimization algorithm. - @inlinable @inline(__always) - public static func retrieveTPUEmbeddingAdagradParametersGradAccumDebug( - tableId: Int64 = -1, - tableName: String, - numShards: Int64, - shardId: Int64, - config: String - ) -> ( - parameters: Tensor, accumulators: Tensor, gradientAccumulators: Tensor - ) { - _RawTFEager.retrieveTPUEmbeddingAdagradParametersGradAccumDebug( - tableId: tableId, tableName: tableName, numShards: numShards, shardId: shardId, - config: config) - } - - /// Retrieve centered RMSProp embedding parameters. - /// - /// An op that retrieves optimization parameters from embedding to host - /// memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up - /// the correct embedding table configuration. For example, this op is - /// used to retrieve updated parameters before saving a checkpoint. - /// - /// - Outputs: - /// - parameters: Parameter parameters updated by the centered RMSProp optimization algorithm. - /// - ms: Parameter ms updated by the centered RMSProp optimization algorithm. - /// - mom: Parameter mom updated by the centered RMSProp optimization algorithm. - /// - mg: Parameter mg updated by the centered RMSProp optimization algorithm. - @inlinable @inline(__always) - public static func retrieveTPUEmbeddingCenteredRMSPropParameters( - tableId: Int64 = -1, - tableName: String, - numShards: Int64, - shardId: Int64, - config: String - ) -> (parameters: Tensor, ms: Tensor, mom: Tensor, mg: Tensor) { - _RawTFEager.retrieveTPUEmbeddingCenteredRMSPropParameters( - tableId: tableId, tableName: tableName, numShards: numShards, shardId: shardId, - config: config) - } - - /// Retrieve FTRL embedding parameters. - /// - /// An op that retrieves optimization parameters from embedding to host - /// memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up - /// the correct embedding table configuration. For example, this op is - /// used to retrieve updated parameters before saving a checkpoint. 
- /// - /// - Outputs: - /// - parameters: Parameter parameters updated by the FTRL optimization algorithm. - /// - accumulators: Parameter accumulators updated by the FTRL optimization algorithm. - /// - linears: Parameter linears updated by the FTRL optimization algorithm. - @inlinable @inline(__always) - public static func retrieveTPUEmbeddingFTRLParameters( - tableId: Int64 = -1, - tableName: String, - numShards: Int64, - shardId: Int64, - config: String - ) -> (parameters: Tensor, accumulators: Tensor, linears: Tensor) { - _RawTFEager.retrieveTPUEmbeddingFTRLParameters( - tableId: tableId, tableName: tableName, numShards: numShards, shardId: shardId, - config: config) - } - - /// Retrieve FTRL embedding parameters with debug support. - /// - /// An op that retrieves optimization parameters from embedding to host - /// memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up - /// the correct embedding table configuration. For example, this op is - /// used to retrieve updated parameters before saving a checkpoint. - /// - /// - Outputs: - /// - parameters: Parameter parameters updated by the FTRL optimization algorithm. - /// - accumulators: Parameter accumulators updated by the FTRL optimization algorithm. - /// - linears: Parameter linears updated by the FTRL optimization algorithm. - /// - gradient_accumulators: Parameter gradient_accumulators updated by the FTRL optimization algorithm. - @inlinable @inline(__always) - public static func retrieveTPUEmbeddingFTRLParametersGradAccumDebug( - tableId: Int64 = -1, - tableName: String, - numShards: Int64, - shardId: Int64, - config: String - ) -> ( - parameters: Tensor, accumulators: Tensor, linears: Tensor, - gradientAccumulators: Tensor - ) { - _RawTFEager.retrieveTPUEmbeddingFTRLParametersGradAccumDebug( - tableId: tableId, tableName: tableName, numShards: numShards, shardId: shardId, - config: config) - } - - /// Retrieve MDL Adagrad Light embedding parameters. - /// - /// An op that retrieves optimization parameters from embedding to host - /// memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up - /// the correct embedding table configuration. For example, this op is - /// used to retrieve updated parameters before saving a checkpoint. - /// - /// - Outputs: - /// - parameters: Parameter parameters updated by the MDL Adagrad Light optimization algorithm. - /// - accumulators: Parameter accumulators updated by the MDL Adagrad Light optimization algorithm. - /// - weights: Parameter weights updated by the MDL Adagrad Light optimization algorithm. - /// - benefits: Parameter benefits updated by the MDL Adagrad Light optimization algorithm. - @inlinable @inline(__always) - public static func retrieveTPUEmbeddingMDLAdagradLightParameters( - tableId: Int64 = -1, - tableName: String, - numShards: Int64, - shardId: Int64, - config: String - ) -> ( - parameters: Tensor, accumulators: Tensor, weights: Tensor, - benefits: Tensor - ) { - _RawTFEager.retrieveTPUEmbeddingMDLAdagradLightParameters( - tableId: tableId, tableName: tableName, numShards: numShards, shardId: shardId, - config: config) - } - - /// Retrieve Momentum embedding parameters. - /// - /// An op that retrieves optimization parameters from embedding to host - /// memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up - /// the correct embedding table configuration. For example, this op is - /// used to retrieve updated parameters before saving a checkpoint. 
- /// - /// - Outputs: - /// - parameters: Parameter parameters updated by the Momentum optimization algorithm. - /// - momenta: Parameter momenta updated by the Momentum optimization algorithm. - @inlinable @inline(__always) - public static func retrieveTPUEmbeddingMomentumParameters( - tableId: Int64 = -1, - tableName: String, - numShards: Int64, - shardId: Int64, - config: String - ) -> (parameters: Tensor, momenta: Tensor) { - _RawTFEager.retrieveTPUEmbeddingMomentumParameters( - tableId: tableId, tableName: tableName, numShards: numShards, shardId: shardId, - config: config) - } - - /// Retrieve Momentum embedding parameters with debug support. - /// - /// An op that retrieves optimization parameters from embedding to host - /// memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up - /// the correct embedding table configuration. For example, this op is - /// used to retrieve updated parameters before saving a checkpoint. - /// - /// - Outputs: - /// - parameters: Parameter parameters updated by the Momentum optimization algorithm. - /// - momenta: Parameter momenta updated by the Momentum optimization algorithm. - /// - gradient_accumulators: Parameter gradient_accumulators updated by the Momentum optimization algorithm. - @inlinable @inline(__always) - public static func retrieveTPUEmbeddingMomentumParametersGradAccumDebug( - tableId: Int64 = -1, - tableName: String, - numShards: Int64, - shardId: Int64, - config: String - ) -> (parameters: Tensor, momenta: Tensor, gradientAccumulators: Tensor) { - _RawTFEager.retrieveTPUEmbeddingMomentumParametersGradAccumDebug( - tableId: tableId, tableName: tableName, numShards: numShards, shardId: shardId, - config: config) - } - - /// Retrieve proximal Adagrad embedding parameters. - /// - /// An op that retrieves optimization parameters from embedding to host - /// memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up - /// the correct embedding table configuration. For example, this op is - /// used to retrieve updated parameters before saving a checkpoint. - /// - /// - Outputs: - /// - parameters: Parameter parameters updated by the proximal Adagrad optimization algorithm. - /// - accumulators: Parameter accumulators updated by the proximal Adagrad optimization algorithm. - @inlinable @inline(__always) - public static func retrieveTPUEmbeddingProximalAdagradParameters( - tableId: Int64 = -1, - tableName: String, - numShards: Int64, - shardId: Int64, - config: String - ) -> (parameters: Tensor, accumulators: Tensor) { - _RawTFEager.retrieveTPUEmbeddingProximalAdagradParameters( - tableId: tableId, tableName: tableName, numShards: numShards, shardId: shardId, - config: config) - } - - /// Retrieve proximal Adagrad embedding parameters with debug support. - /// - /// An op that retrieves optimization parameters from embedding to host - /// memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up - /// the correct embedding table configuration. For example, this op is - /// used to retrieve updated parameters before saving a checkpoint. - /// - /// - Outputs: - /// - parameters: Parameter parameters updated by the proximal Adagrad optimization algorithm. - /// - accumulators: Parameter accumulators updated by the proximal Adagrad optimization algorithm. - /// - gradient_accumulators: Parameter gradient_accumulators updated by the proximal Adagrad optimization algorithm. 
- @inlinable @inline(__always) - public static func retrieveTPUEmbeddingProximalAdagradParametersGradAccumDebug( - tableId: Int64 = -1, - tableName: String, - numShards: Int64, - shardId: Int64, - config: String - ) -> ( - parameters: Tensor, accumulators: Tensor, gradientAccumulators: Tensor - ) { - _RawTFEager.retrieveTPUEmbeddingProximalAdagradParametersGradAccumDebug( - tableId: tableId, tableName: tableName, numShards: numShards, shardId: shardId, - config: config) - } - - /// Retrieve RMSProp embedding parameters. - /// - /// An op that retrieves optimization parameters from embedding to host - /// memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up - /// the correct embedding table configuration. For example, this op is - /// used to retrieve updated parameters before saving a checkpoint. - /// - /// - Outputs: - /// - parameters: Parameter parameters updated by the RMSProp optimization algorithm. - /// - ms: Parameter ms updated by the RMSProp optimization algorithm. - /// - mom: Parameter mom updated by the RMSProp optimization algorithm. - @inlinable @inline(__always) - public static func retrieveTPUEmbeddingRMSPropParameters( - tableId: Int64 = -1, - tableName: String, - numShards: Int64, - shardId: Int64, - config: String - ) -> (parameters: Tensor, ms: Tensor, mom: Tensor) { - _RawTFEager.retrieveTPUEmbeddingRMSPropParameters( - tableId: tableId, tableName: tableName, numShards: numShards, shardId: shardId, - config: config) - } - - /// Retrieve RMSProp embedding parameters with debug support. - /// - /// An op that retrieves optimization parameters from embedding to host - /// memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up - /// the correct embedding table configuration. For example, this op is - /// used to retrieve updated parameters before saving a checkpoint. - /// - /// - Outputs: - /// - parameters: Parameter parameters updated by the RMSProp optimization algorithm. - /// - ms: Parameter ms updated by the RMSProp optimization algorithm. - /// - mom: Parameter mom updated by the RMSProp optimization algorithm. - /// - gradient_accumulators: Parameter gradient_accumulators updated by the RMSProp optimization algorithm. - @inlinable @inline(__always) - public static func retrieveTPUEmbeddingRMSPropParametersGradAccumDebug( - tableId: Int64 = -1, - tableName: String, - numShards: Int64, - shardId: Int64, - config: String - ) -> ( - parameters: Tensor, ms: Tensor, mom: Tensor, - gradientAccumulators: Tensor - ) { - _RawTFEager.retrieveTPUEmbeddingRMSPropParametersGradAccumDebug( - tableId: tableId, tableName: tableName, numShards: numShards, shardId: shardId, - config: config) - } - - /// Retrieve SGD embedding parameters. - /// - /// An op that retrieves optimization parameters from embedding to host - /// memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up - /// the correct embedding table configuration. For example, this op is - /// used to retrieve updated parameters before saving a checkpoint. - /// - /// - Output parameters: Parameter parameters updated by the stochastic gradient descent optimization algorithm. 
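As a usage sketch for the retrieval pattern these ops share, a hedged call to the SGD variant declared just below; the table name and shard layout are hypothetical, and a matching ConfigureTPUEmbeddingHost setup is assumed to have run beforehand:

```swift
// Pull the current SGD embedding parameters back to host memory for one
// shard, e.g. before writing a checkpoint (hypothetical values).
let parameters: Tensor<Float> =   // Float specialization assumed
  _Raw.retrieveTPUEmbeddingStochasticGradientDescentParameters(
    tableName: "embedding_table_0",  // hypothetical table name
    numShards: 1,
    shardId: 0,
    config: "")
```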
- @inlinable @inline(__always) - public static func retrieveTPUEmbeddingStochasticGradientDescentParameters( - tableId: Int64 = -1, - tableName: String, - numShards: Int64, - shardId: Int64, - config: String - ) -> Tensor { - _RawTFEager.retrieveTPUEmbeddingStochasticGradientDescentParameters( - tableId: tableId, tableName: tableName, numShards: numShards, shardId: shardId, - config: config) - } - - /// Reverses specific dimensions of a tensor. - /// - /// Given a `tensor`, and a `bool` tensor `dims` representing the dimensions - /// of `tensor`, this operation reverses each dimension i of `tensor` where - /// `dims[i]` is `True`. - /// - /// `tensor` can have up to 8 dimensions. The number of dimensions - /// of `tensor` must equal the number of elements in `dims`. In other words: - /// - /// `rank(tensor) = size(dims)` - /// - /// For example: - /// - /// ``` - /// # tensor 't' is [[[[ 0, 1, 2, 3], - /// # [ 4, 5, 6, 7], - /// # [ 8, 9, 10, 11]], - /// # [[12, 13, 14, 15], - /// # [16, 17, 18, 19], - /// # [20, 21, 22, 23]]]] - /// # tensor 't' shape is [1, 2, 3, 4] - /// - /// # 'dims' is [False, False, False, True] - /// reverse(t, dims) ==> [[[[ 3, 2, 1, 0], - /// [ 7, 6, 5, 4], - /// [ 11, 10, 9, 8]], - /// [[15, 14, 13, 12], - /// [19, 18, 17, 16], - /// [23, 22, 21, 20]]]] - /// - /// # 'dims' is [False, True, False, False] - /// reverse(t, dims) ==> [[[[12, 13, 14, 15], - /// [16, 17, 18, 19], - /// [20, 21, 22, 23] - /// [[ 0, 1, 2, 3], - /// [ 4, 5, 6, 7], - /// [ 8, 9, 10, 11]]]] - /// - /// # 'dims' is [False, False, True, False] - /// reverse(t, dims) ==> [[[[8, 9, 10, 11], - /// [4, 5, 6, 7], - /// [0, 1, 2, 3]] - /// [[20, 21, 22, 23], - /// [16, 17, 18, 19], - /// [12, 13, 14, 15]]]] - /// ``` - /// - /// - Parameters: - /// - tensor: Up to 8-D. - /// - dims: 1-D. The dimensions to reverse. - /// - /// - Output output: The same shape as `tensor`. - @inlinable @inline(__always) - public static func reverse( - _ tensor: Tensor, - dims: Tensor - ) -> Tensor { - switch commonBackend(tensor.handle.backend, dims.handle.backend) { - case .XLA: - let output_device = dims.device - let tensor = Tensor(copying: tensor, to: .defaultTFEager) - let dims = Tensor(copying: dims, to: .defaultTFEager) - return Tensor(copying: _RawTFEager.reverse(tensor, dims: dims), to: output_device) - case .TF_EAGER: - return _RawTFEager.reverse(tensor, dims: dims) - } - - } - - /// Reverses specific dimensions of a tensor. - /// - /// Given a `tensor`, and a `bool` tensor `dims` representing the dimensions - /// of `tensor`, this operation reverses each dimension i of `tensor` where - /// `dims[i]` is `True`. - /// - /// `tensor` can have up to 8 dimensions. The number of dimensions - /// of `tensor` must equal the number of elements in `dims`. 
In other words: - /// - /// `rank(tensor) = size(dims)` - /// - /// For example: - /// - /// ``` - /// # tensor 't' is [[[[ 0, 1, 2, 3], - /// # [ 4, 5, 6, 7], - /// # [ 8, 9, 10, 11]], - /// # [[12, 13, 14, 15], - /// # [16, 17, 18, 19], - /// # [20, 21, 22, 23]]]] - /// # tensor 't' shape is [1, 2, 3, 4] - /// - /// # 'dims' is [False, False, False, True] - /// reverse(t, dims) ==> [[[[ 3, 2, 1, 0], - /// [ 7, 6, 5, 4], - /// [ 11, 10, 9, 8]], - /// [[15, 14, 13, 12], - /// [19, 18, 17, 16], - /// [23, 22, 21, 20]]]] - /// - /// # 'dims' is [False, True, False, False] - /// reverse(t, dims) ==> [[[[12, 13, 14, 15], - /// [16, 17, 18, 19], - /// [20, 21, 22, 23] - /// [[ 0, 1, 2, 3], - /// [ 4, 5, 6, 7], - /// [ 8, 9, 10, 11]]]] - /// - /// # 'dims' is [False, False, True, False] - /// reverse(t, dims) ==> [[[[8, 9, 10, 11], - /// [4, 5, 6, 7], - /// [0, 1, 2, 3]] - /// [[20, 21, 22, 23], - /// [16, 17, 18, 19], - /// [12, 13, 14, 15]]]] - /// ``` - /// - /// - Parameters: - /// - tensor: Up to 8-D. - /// - dims: 1-D. The dimensions to reverse. - /// - /// - Output output: The same shape as `tensor`. - @inlinable @inline(__always) - public static func reverse( - _ tensor: StringTensor, - dims: Tensor - ) -> StringTensor { - _RawTFEager.reverse(tensor, dims: dims) - } - - /// Reverses variable length slices. - /// - /// This op first slices `input` along the dimension `batch_dim`, and for each - /// slice `i`, reverses the first `seq_lengths[i]` elements along - /// the dimension `seq_dim`. - /// - /// The elements of `seq_lengths` must obey `seq_lengths[i] <= input.dims[seq_dim]`, - /// and `seq_lengths` must be a vector of length `input.dims[batch_dim]`. - /// - /// The output slice `i` along dimension `batch_dim` is then given by input - /// slice `i`, with the first `seq_lengths[i]` slices along dimension - /// `seq_dim` reversed. - /// - /// For example: - /// - /// ``` - /// # Given this: - /// batch_dim = 0 - /// seq_dim = 1 - /// input.dims = (4, 8, ...) - /// seq_lengths = [7, 2, 3, 5] - /// - /// # then slices of input are reversed on seq_dim, but only up to seq_lengths: - /// output[0, 0:7, :, ...] = input[0, 7:0:-1, :, ...] - /// output[1, 0:2, :, ...] = input[1, 2:0:-1, :, ...] - /// output[2, 0:3, :, ...] = input[2, 3:0:-1, :, ...] - /// output[3, 0:5, :, ...] = input[3, 5:0:-1, :, ...] - /// - /// # while entries past seq_lens are copied through: - /// output[0, 7:, :, ...] = input[0, 7:, :, ...] - /// output[1, 2:, :, ...] = input[1, 2:, :, ...] - /// output[2, 3:, :, ...] = input[2, 3:, :, ...] - /// output[3, 2:, :, ...] = input[3, 2:, :, ...] - /// ``` - /// - /// In contrast, if: - /// - /// ``` - /// # Given this: - /// batch_dim = 2 - /// seq_dim = 0 - /// input.dims = (8, ?, 4, ...) - /// seq_lengths = [7, 2, 3, 5] - /// - /// # then slices of input are reversed on seq_dim, but only up to seq_lengths: - /// output[0:7, :, 0, :, ...] = input[7:0:-1, :, 0, :, ...] - /// output[0:2, :, 1, :, ...] = input[2:0:-1, :, 1, :, ...] - /// output[0:3, :, 2, :, ...] = input[3:0:-1, :, 2, :, ...] - /// output[0:5, :, 3, :, ...] = input[5:0:-1, :, 3, :, ...] - /// - /// # while entries past seq_lens are copied through: - /// output[7:, :, 0, :, ...] = input[7:, :, 0, :, ...] - /// output[2:, :, 1, :, ...] = input[2:, :, 1, :, ...] - /// output[3:, :, 2, :, ...] = input[3:, :, 2, :, ...] - /// output[2:, :, 3, :, ...] = input[2:, :, 3, :, ...] - /// ``` - /// - /// - Parameters: - /// - input: The input to reverse. 
- /// - seq_lengths: 1-D with length `input.dims(batch_dim)` and - /// `max(seq_lengths) <= input.dims(seq_dim)` - /// - /// - Attrs: - /// - seq_dim: The dimension which is partially reversed. - /// - batch_dim: The dimension along which reversal is performed. - /// - /// - Output output: The partially reversed input. It has the same shape as `input`. - @inlinable @inline(__always) - public static func reverseSequence< - T: TensorFlowScalar, - Tlen: TensorFlowIndex - >( - _ input: Tensor, - seqLengths: Tensor, - seqDim: Int64, - batchDim: Int64 = 0 - ) -> Tensor { - switch commonBackend(input.handle.backend, seqLengths.handle.backend) { - case .XLA: - let output_device = seqLengths.device - let input = Tensor(copying: input, to: .defaultTFEager) - let seqLengths = Tensor(copying: seqLengths, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.reverseSequence( - input, seqLengths: seqLengths, seqDim: seqDim, batchDim: batchDim), to: output_device) - case .TF_EAGER: - return _RawTFEager.reverseSequence( - input, seqLengths: seqLengths, seqDim: seqDim, batchDim: batchDim) - } - - } - - /// Reverses specific dimensions of a tensor. - /// - /// NOTE `tf.reverse` has now changed behavior in preparation for 1.0. - /// `tf.reverse_v2` is currently an alias that will be deprecated before TF 1.0. - /// - /// Given a `tensor`, and a `int32` tensor `axis` representing the set of - /// dimensions of `tensor` to reverse. This operation reverses each dimension - /// `i` for which there exists `j` s.t. `axis[j] == i`. - /// - /// `tensor` can have up to 8 dimensions. The number of dimensions specified - /// in `axis` may be 0 or more entries. If an index is specified more than - /// once, a InvalidArgument error is raised. - /// - /// For example: - /// - /// ``` - /// # tensor 't' is [[[[ 0, 1, 2, 3], - /// # [ 4, 5, 6, 7], - /// # [ 8, 9, 10, 11]], - /// # [[12, 13, 14, 15], - /// # [16, 17, 18, 19], - /// # [20, 21, 22, 23]]]] - /// # tensor 't' shape is [1, 2, 3, 4] - /// - /// # 'dims' is [3] or 'dims' is [-1] - /// reverse(t, dims) ==> [[[[ 3, 2, 1, 0], - /// [ 7, 6, 5, 4], - /// [ 11, 10, 9, 8]], - /// [[15, 14, 13, 12], - /// [19, 18, 17, 16], - /// [23, 22, 21, 20]]]] - /// - /// # 'dims' is '[1]' (or 'dims' is '[-3]') - /// reverse(t, dims) ==> [[[[12, 13, 14, 15], - /// [16, 17, 18, 19], - /// [20, 21, 22, 23] - /// [[ 0, 1, 2, 3], - /// [ 4, 5, 6, 7], - /// [ 8, 9, 10, 11]]]] - /// - /// # 'dims' is '[2]' (or 'dims' is '[-2]') - /// reverse(t, dims) ==> [[[[8, 9, 10, 11], - /// [4, 5, 6, 7], - /// [0, 1, 2, 3]] - /// [[20, 21, 22, 23], - /// [16, 17, 18, 19], - /// [12, 13, 14, 15]]]] - /// ``` - /// - /// - Parameters: - /// - tensor: Up to 8-D. - /// - axis: 1-D. The indices of the dimensions to reverse. Must be in the range - /// `[-rank(tensor), rank(tensor))`. - /// - /// - Output output: The same shape as `tensor`. - @inlinable @inline(__always) - public static func reverseV2< - Tidx: TensorFlowIndex, - T: TensorFlowScalar - >( - _ tensor: Tensor, - axis: Tensor - ) -> Tensor { - switch commonBackend(tensor.handle.backend, axis.handle.backend) { - case .XLA: - return _RawXLA.reverseV2(tensor, axis: axis) - case .TF_EAGER: - return _RawTFEager.reverseV2(tensor, axis: axis) - } - - } - - /// Reverses specific dimensions of a tensor. - /// - /// NOTE `tf.reverse` has now changed behavior in preparation for 1.0. - /// `tf.reverse_v2` is currently an alias that will be deprecated before TF 1.0. 
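A quick sketch of the axis-based reversal detailed just below, via the dispatching wrapper this file defines (values are hypothetical):

```swift
// Reverse along axis 1 of a [2, 3] tensor (illustrative sketch).
let t = Tensor<Int32>(shape: [2, 3], scalars: [0, 1, 2, 3, 4, 5])
let reversed: Tensor<Int32> = _Raw.reverseV2(t, axis: Tensor<Int32>([1]))
// reversed == [[2, 1, 0], [5, 4, 3]]
```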
- /// - /// Given a `tensor`, and a `int32` tensor `axis` representing the set of - /// dimensions of `tensor` to reverse. This operation reverses each dimension - /// `i` for which there exists `j` s.t. `axis[j] == i`. - /// - /// `tensor` can have up to 8 dimensions. The number of dimensions specified - /// in `axis` may be 0 or more entries. If an index is specified more than - /// once, a InvalidArgument error is raised. - /// - /// For example: - /// - /// ``` - /// # tensor 't' is [[[[ 0, 1, 2, 3], - /// # [ 4, 5, 6, 7], - /// # [ 8, 9, 10, 11]], - /// # [[12, 13, 14, 15], - /// # [16, 17, 18, 19], - /// # [20, 21, 22, 23]]]] - /// # tensor 't' shape is [1, 2, 3, 4] - /// - /// # 'dims' is [3] or 'dims' is [-1] - /// reverse(t, dims) ==> [[[[ 3, 2, 1, 0], - /// [ 7, 6, 5, 4], - /// [ 11, 10, 9, 8]], - /// [[15, 14, 13, 12], - /// [19, 18, 17, 16], - /// [23, 22, 21, 20]]]] - /// - /// # 'dims' is '[1]' (or 'dims' is '[-3]') - /// reverse(t, dims) ==> [[[[12, 13, 14, 15], - /// [16, 17, 18, 19], - /// [20, 21, 22, 23] - /// [[ 0, 1, 2, 3], - /// [ 4, 5, 6, 7], - /// [ 8, 9, 10, 11]]]] - /// - /// # 'dims' is '[2]' (or 'dims' is '[-2]') - /// reverse(t, dims) ==> [[[[8, 9, 10, 11], - /// [4, 5, 6, 7], - /// [0, 1, 2, 3]] - /// [[20, 21, 22, 23], - /// [16, 17, 18, 19], - /// [12, 13, 14, 15]]]] - /// ``` - /// - /// - Parameters: - /// - tensor: Up to 8-D. - /// - axis: 1-D. The indices of the dimensions to reverse. Must be in the range - /// `[-rank(tensor), rank(tensor))`. - /// - /// - Output output: The same shape as `tensor`. - @inlinable @inline(__always) - public static func reverseV2( - _ tensor: StringTensor, - axis: Tensor - ) -> StringTensor { - _RawTFEager.reverseV2(tensor, axis: axis) - } - - /// Elementwise computes the bitwise right-shift of `x` and `y`. - /// - /// Performs a logical shift for unsigned integer types, and an arithmetic shift - /// for signed integer types. - /// - /// If `y` is negative, or greater than or equal to than the width of `x` in bits - /// the result is implementation defined. - /// - /// Example: - /// - /// ```python - /// import tensorflow as tf - /// from tensorflow.python.ops import bitwise_ops - /// import numpy as np - /// dtype_list = [tf.int8, tf.int16, tf.int32, tf.int64] - /// - /// for dtype in dtype_list: - /// lhs = tf.constant([-1, -5, -3, -14], dtype=dtype) - /// rhs = tf.constant([5, 0, 7, 11], dtype=dtype) - /// - /// right_shift_result = bitwise_ops.right_shift(lhs, rhs) - /// - /// print(right_shift_result) - /// - /// # This will print: - /// # tf.Tensor([-1 -5 -1 -1], shape=(4,), dtype=int8) - /// # tf.Tensor([-1 -5 -1 -1], shape=(4,), dtype=int16) - /// # tf.Tensor([-1 -5 -1 -1], shape=(4,), dtype=int32) - /// # tf.Tensor([-1 -5 -1 -1], shape=(4,), dtype=int64) - /// - /// lhs = np.array([-2, 64, 101, 32], dtype=np.int8) - /// rhs = np.array([-1, -5, -3, -14], dtype=np.int8) - /// bitwise_ops.right_shift(lhs, rhs) - /// # - /// ``` - /// - @inlinable @inline(__always) - public static func rightShift( - _ x: Tensor, - _ y: Tensor - ) -> Tensor { - switch commonBackend(x.handle.backend, y.handle.backend) { - case .XLA: - let output_device = y.device - let x = Tensor(copying: x, to: .defaultTFEager) - let y = Tensor(copying: y, to: .defaultTFEager) - return Tensor(copying: _RawTFEager.rightShift(x, y), to: output_device) - case .TF_EAGER: - return _RawTFEager.rightShift(x, y) - } - - } - - /// Returns element-wise integer closest to x. 
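The rule spelled out just below is round-half-to-even (banker's rounding); Swift's standard library exposes the same mode, which gives a quick way to sanity-check the documented values (illustrative sketch):

```swift
// Round-half-to-even, matching the documented rint behavior:
let xs: [Float] = [-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0]
let rounded = xs.map { $0.rounded(.toNearestOrEven) }
// rounded == [-2.0, -2.0, -0.0, 0.0, 2.0, 2.0, 2.0]
```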
- ///
- /// If the result is midway between two representable values,
- /// the even representable value is chosen.
- /// For example:
- ///
- /// ```
- /// rint(-1.5) ==> -2.0
- /// rint(0.5000001) ==> 1.0
- /// rint([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0]) ==> [-2., -2., -0., 0., 2., 2., 2.]
- /// ```
- @inlinable @inline(__always)
- public static func rint(
- _ x: Tensor
- ) -> Tensor {
- switch x.handle.backend {
- case .XLA:
- let output_device = x.device
- let x = Tensor(copying: x, to: .defaultTFEager)
- return Tensor(copying: _RawTFEager.rint(x), to: output_device)
- case .TF_EAGER:
- return _RawTFEager.rint(x)
- }
-
- }
-
- /// Advance the counter of a counter-based RNG.
- ///
- /// The state of the RNG after
- /// `rng_skip(n)` will be the same as that after `stateful_uniform([n])`
- /// (or any other distribution). The actual increment added to the
- /// counter is an unspecified implementation detail.
- ///
- /// - Parameters:
- ///   - resource: The handle of the resource variable that stores the state of the RNG.
- ///   - algorithm: The RNG algorithm.
- ///   - delta: The amount of advancement.
- @inlinable @inline(__always)
- public static func rngSkip(
- resource: ResourceHandle,
- algorithm: Tensor,
- delta: Tensor
- ) {
- _RawTFEager.rngSkip(resource: resource, algorithm: algorithm, delta: delta)
- }
-
- /// Rolls the elements of a tensor along an axis.
- ///
- /// The elements are shifted positively (towards larger indices) by the offset of
- /// `shift` along the dimension of `axis`. Negative `shift` values will shift
- /// elements in the opposite direction. Elements that roll past the last position
- /// will wrap around to the first and vice versa. Multiple shifts along multiple
- /// axes may be specified.
- ///
- /// For example:
- ///
- /// ```
- /// # 't' is [0, 1, 2, 3, 4]
- /// roll(t, shift=2, axis=0) ==> [3, 4, 0, 1, 2]
- ///
- /// # shifting along multiple dimensions
- /// # 't' is [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]
- /// roll(t, shift=[1, -2], axis=[0, 1]) ==> [[7, 8, 9, 5, 6], [2, 3, 4, 0, 1]]
- ///
- /// # shifting along the same axis multiple times
- /// # 't' is [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]
- /// roll(t, shift=[2, -3], axis=[1, 1]) ==> [[1, 2, 3, 4, 0], [6, 7, 8, 9, 5]]
- /// ```
- ///
- /// - Parameters:
- ///   - shift: Dimension must be 0-D or 1-D. `shift[i]` specifies the number of places by which
- ///     elements are shifted positively (towards larger indices) along the dimension
- ///     specified by `axis[i]`. Negative shifts will roll the elements in the opposite
- ///     direction.
- ///   - axis: Dimension must be 0-D or 1-D. `axis[i]` specifies the dimension in which the shift
- ///     `shift[i]` should occur. If the same axis is referenced more than once, the
- ///     total shift for that axis will be the sum of all the shifts that belong to that
- ///     axis.
- ///
- /// - Output output: Has the same shape and size as the input. The elements are shifted
- ///   positively (towards larger indices) by the offsets of `shift` along the
- ///   dimensions of `axis`.
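The first example above translates directly to the Swift binding being deleted here. A minimal, hypothetical usage sketch (it assumes a program that imports the swift-apis `TensorFlow` module; the tensor values are illustrative only):

```swift
import TensorFlow

// Roll a rank-1 tensor two places toward larger indices, wrapping around.
let t = Tensor<Int32>([0, 1, 2, 3, 4])
let rolled = _Raw.roll(
  t,
  shift: Tensor<Int32>([2]),  // one shift amount,
  axis: Tensor<Int32>([0]))   // applied along axis 0
print(rolled)  // expected: [3, 4, 0, 1, 2]
```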
- @inlinable @inline(__always)
- public static func roll<
- T: TensorFlowScalar,
- Tshift: TensorFlowIndex,
- Taxis: TensorFlowIndex
- >(
- _ input: Tensor,
- shift: Tensor,
- axis: Tensor
- ) -> Tensor {
- switch commonBackend(
- commonBackend(input.handle.backend, shift.handle.backend), axis.handle.backend)
- {
- case .XLA:
- let output_device = axis.device
- let input = Tensor(copying: input, to: .defaultTFEager)
- let shift = Tensor(copying: shift, to: .defaultTFEager)
- let axis = Tensor(copying: axis, to: .defaultTFEager)
- return Tensor(
- copying: _RawTFEager.roll(input, shift: shift, axis: axis), to: output_device)
- case .TF_EAGER:
- return _RawTFEager.roll(input, shift: shift, axis: axis)
- }
-
- }
-
- /// Rounds the values of a tensor to the nearest integer, element-wise.
- ///
- /// Rounds half to even. Also known as banker's rounding. If you want to round
- /// according to the current system rounding mode, use std::rint.
- @inlinable @inline(__always)
- public static func round(
- _ x: Tensor
- ) -> Tensor {
- switch x.handle.backend {
- case .XLA:
- return _RawXLA.round(x)
- case .TF_EAGER:
- return _RawTFEager.round(x)
- }
-
- }
-
- /// Perform batches of RPC requests.
- ///
- /// This op asynchronously performs either a single RPC request or a batch
- /// of requests. RPC requests are defined by three main parameters:
- ///
- ///   - `address` (the host+port or BNS address of the request)
- ///   - `method` (the RPC method name for the request)
- ///   - `request` (the serialized proto string, or vector of strings,
- ///     of the RPC request argument).
- ///
- /// For example, if you have an RPC service running on port localhost:2345,
- /// and its interface is configured with the following proto declaration:
- ///
- /// ```
- /// service MyService {
- ///   rpc MyMethod(MyRequestProto) returns (MyResponseProto) {
- ///   }
- /// };
- /// ```
- ///
- /// then call this op with arguments:
- ///
- /// ```
- /// address = "localhost:2345"
- /// method = "MyService/MyMethod"
- /// ```
- ///
- /// The `request` tensor is a string tensor representing serialized `MyRequestProto`
- /// strings; and the output string tensor `response` will have the same shape
- /// and contain (upon successful completion) corresponding serialized
- /// `MyResponseProto` strings.
- ///
- /// For example, to send a single, empty `MyRequestProto`, call
- /// this op with `request = ""`. To send 5 **parallel** empty requests,
- /// call this op with `request = ["", "", "", "", ""]`.
- ///
- /// More generally, one can create a batch of `MyRequestProto` serialized protos
- /// from regular batched tensors using the `encode_proto` op, and convert
- /// the response `MyResponseProto` serialized protos to batched tensors
- /// using the `decode_proto` op.
- ///
- /// **NOTE** Working with serialized proto strings is faster than instantiating
- /// actual proto objects in memory, so no performance degradation is expected
- /// compared to writing custom kernels for this workflow.
- ///
- /// If the connection fails or the remote worker returns an error
- /// status, the op reraises this exception locally.
- ///
- /// See the `TryRpc` op if you prefer to handle RPC failures manually in the graph.
- ///
- /// - Parameters:
- ///   - address: `0-D` or `1-D`. The address (i.e. host_name:port) of the RPC server.
- ///     If this tensor has more than 1 element, then multiple parallel rpc requests
- ///     are sent. This argument broadcasts with `method` and `request`.
- ///   - method: `0-D` or `1-D`.
The method address on the RPC server. - /// If this tensor has more than 1 element, then multiple parallel rpc requests - /// are sent. This argument broadcasts with `address` and `request`. - /// - request: `0-D` or `1-D`. Serialized proto strings: the rpc request argument. - /// If this tensor has more than 1 element, then multiple parallel rpc requests - /// are sent. This argument broadcasts with `address` and `method`. - /// - /// - Attrs: - /// - protocol: RPC protocol to use. Empty string means use the default protocol. - /// Options include 'grpc'. - /// - fail_fast: `boolean`. If `true` (default), then failures to connect - /// (i.e., the server does not immediately respond) cause an RPC failure. - /// - timeout_in_ms: `int`. If `0` (default), then the kernel will run the RPC - /// request and only time out if the RPC deadline passes or the session times out. - /// If this value is greater than `0`, then the op will raise an exception if - /// the RPC takes longer than `timeout_in_ms`. - /// - /// - Output response: Same shape as `request`. Serialized proto strings: the rpc responses. - @inlinable @inline(__always) - public static func rpc( - address: StringTensor, - method: StringTensor, - request: StringTensor, - protocol_: String, - failFast: Bool = true, - timeoutInMs: Int64 = 0 - ) -> StringTensor { - _RawTFEager.rpc( - address: address, method: method, request: request, protocol_: protocol_, - failFast: failFast, timeoutInMs: timeoutInMs) - } - - /// Computes reciprocal of square root of x element-wise. - /// - /// I.e., \\(y = 1 / \sqrt{x}\\). - @inlinable @inline(__always) - public static func rsqrt( - _ x: Tensor - ) -> Tensor { - switch x.handle.backend { - case .XLA: - return _RawXLA.rsqrt(x) - case .TF_EAGER: - return _RawTFEager.rsqrt(x) - } - - } - - /// Computes the gradient for the rsqrt of `x` wrt its input. - /// - /// Specifically, `grad = dy * -0.5 * y^3`, where `y = rsqrt(x)`, and `dy` - /// is the corresponding input gradient. - @inlinable @inline(__always) - public static func rsqrtGrad( - _ y: Tensor, - dy: Tensor - ) -> Tensor { - switch commonBackend(y.handle.backend, dy.handle.backend) { - case .XLA: - return _RawXLA.rsqrtGrad(y, dy: dy) - case .TF_EAGER: - return _RawTFEager.rsqrtGrad(y, dy: dy) - } - - } - - /// Generate a single randomly distorted bounding box for an image. - /// - /// Bounding box annotations are often supplied in addition to ground-truth labels - /// in image recognition or object localization tasks. A common technique for - /// training such a system is to randomly distort an image while preserving - /// its content, i.e. *data augmentation*. This Op outputs a randomly distorted - /// localization of an object, i.e. bounding box, given an `image_size`, - /// `bounding_boxes` and a series of constraints. - /// - /// The output of this Op is a single bounding box that may be used to crop the - /// original image. The output is returned as 3 tensors: `begin`, `size` and - /// `bboxes`. The first 2 tensors can be fed directly into `tf.slice` to crop the - /// image. The latter may be supplied to `tf.image.draw_bounding_boxes` to visualize - /// what the bounding box looks like. - /// - /// Bounding boxes are supplied and returned as `[y_min, x_min, y_max, x_max]`. The - /// bounding box coordinates are floats in `[0.0, 1.0]` relative to the width and - /// height of the underlying image. - /// - /// For example, - /// - /// ```python - /// # Generate a single distorted bounding box. 
- /// begin, size, bbox_for_draw = tf.image.sample_distorted_bounding_box( - /// tf.shape(image), - /// bounding_boxes=bounding_boxes) - /// - /// # Draw the bounding box in an image summary. - /// image_with_box = tf.image.draw_bounding_boxes(tf.expand_dims(image, 0), - /// bbox_for_draw) - /// tf.summary.image('images_with_box', image_with_box) - /// - /// # Employ the bounding box to distort the image. - /// distorted_image = tf.slice(image, begin, size) - /// ``` - /// - /// Note that if no bounding box information is available, setting - /// `use_image_if_no_bounding_boxes = true` will assume there is a single implicit - /// bounding box covering the whole image. If `use_image_if_no_bounding_boxes` is - /// false and no bounding boxes are supplied, an error is raised. - /// - /// - Parameters: - /// - image_size: 1-D, containing `[height, width, channels]`. - /// - bounding_boxes: 3-D with shape `[batch, N, 4]` describing the N bounding boxes - /// associated with the image. - /// - /// - Attrs: - /// - seed: If either `seed` or `seed2` are set to non-zero, the random number - /// generator is seeded by the given `seed`. Otherwise, it is seeded by a random - /// seed. - /// - seed2: A second seed to avoid seed collision. - /// - min_object_covered: The cropped area of the image must contain at least this - /// fraction of any bounding box supplied. The value of this parameter should be - /// non-negative. In the case of 0, the cropped area does not need to overlap - /// any of the bounding boxes supplied. - /// - aspect_ratio_range: The cropped area of the image must have an aspect ratio = - /// width / height within this range. - /// - area_range: The cropped area of the image must contain a fraction of the - /// supplied image within this range. - /// - max_attempts: Number of attempts at generating a cropped region of the image - /// of the specified constraints. After `max_attempts` failures, return the entire - /// image. - /// - use_image_if_no_bounding_boxes: Controls behavior if no bounding boxes supplied. - /// If true, assume an implicit bounding box covering the whole input. If false, - /// raise an error. - /// - /// - Outputs: - /// - begin: 1-D, containing `[offset_height, offset_width, 0]`. Provide as input to - /// `tf.slice`. - /// - size: 1-D, containing `[target_height, target_width, -1]`. Provide as input to - /// `tf.slice`. - /// - bboxes: 3-D with shape `[1, 1, 4]` containing the distorted bounding box. - /// Provide as input to `tf.image.draw_bounding_boxes`. - @inlinable @inline(__always) - public static func sampleDistortedBoundingBox( - imageSize: Tensor, - boundingBoxes: Tensor, - seed: Int64 = 0, - seed2: Int64 = 0, - minObjectCovered: Double = 0.1, - aspectRatioRange: [Double] = [0.75, 1.33], - areaRange: [Double] = [0.05, 1], - maxAttempts: Int64 = 100, - useImageIfNoBoundingBoxes: Bool = false - ) -> (begin: Tensor, size: Tensor, bboxes: Tensor) { - _RawTFEager.sampleDistortedBoundingBox( - imageSize: imageSize, boundingBoxes: boundingBoxes, seed: seed, seed2: seed2, - minObjectCovered: minObjectCovered, aspectRatioRange: aspectRatioRange, - areaRange: areaRange, maxAttempts: maxAttempts, - useImageIfNoBoundingBoxes: useImageIfNoBoundingBoxes) - } - - /// Generate a single randomly distorted bounding box for an image. - /// - /// Bounding box annotations are often supplied in addition to ground-truth labels - /// in image recognition or object localization tasks. 
A common technique for - /// training such a system is to randomly distort an image while preserving - /// its content, i.e. *data augmentation*. This Op outputs a randomly distorted - /// localization of an object, i.e. bounding box, given an `image_size`, - /// `bounding_boxes` and a series of constraints. - /// - /// The output of this Op is a single bounding box that may be used to crop the - /// original image. The output is returned as 3 tensors: `begin`, `size` and - /// `bboxes`. The first 2 tensors can be fed directly into `tf.slice` to crop the - /// image. The latter may be supplied to `tf.image.draw_bounding_boxes` to visualize - /// what the bounding box looks like. - /// - /// Bounding boxes are supplied and returned as `[y_min, x_min, y_max, x_max]`. The - /// bounding box coordinates are floats in `[0.0, 1.0]` relative to the width and - /// height of the underlying image. - /// - /// For example, - /// - /// ```python - /// # Generate a single distorted bounding box. - /// begin, size, bbox_for_draw = tf.image.sample_distorted_bounding_box( - /// tf.shape(image), - /// bounding_boxes=bounding_boxes) - /// - /// # Draw the bounding box in an image summary. - /// image_with_box = tf.image.draw_bounding_boxes(tf.expand_dims(image, 0), - /// bbox_for_draw) - /// tf.summary.image('images_with_box', image_with_box) - /// - /// # Employ the bounding box to distort the image. - /// distorted_image = tf.slice(image, begin, size) - /// ``` - /// - /// Note that if no bounding box information is available, setting - /// `use_image_if_no_bounding_boxes = true` will assume there is a single implicit - /// bounding box covering the whole image. If `use_image_if_no_bounding_boxes` is - /// false and no bounding boxes are supplied, an error is raised. - /// - /// - Parameters: - /// - image_size: 1-D, containing `[height, width, channels]`. - /// - bounding_boxes: 3-D with shape `[batch, N, 4]` describing the N bounding boxes - /// associated with the image. - /// - min_object_covered: The cropped area of the image must contain at least this - /// fraction of any bounding box supplied. The value of this parameter should be - /// non-negative. In the case of 0, the cropped area does not need to overlap - /// any of the bounding boxes supplied. - /// - /// - Attrs: - /// - seed: If either `seed` or `seed2` are set to non-zero, the random number - /// generator is seeded by the given `seed`. Otherwise, it is seeded by a random - /// seed. - /// - seed2: A second seed to avoid seed collision. - /// - aspect_ratio_range: The cropped area of the image must have an aspect ratio = - /// width / height within this range. - /// - area_range: The cropped area of the image must contain a fraction of the - /// supplied image within this range. - /// - max_attempts: Number of attempts at generating a cropped region of the image - /// of the specified constraints. After `max_attempts` failures, return the entire - /// image. - /// - use_image_if_no_bounding_boxes: Controls behavior if no bounding boxes supplied. - /// If true, assume an implicit bounding box covering the whole input. If false, - /// raise an error. - /// - /// - Outputs: - /// - begin: 1-D, containing `[offset_height, offset_width, 0]`. Provide as input to - /// `tf.slice`. - /// - size: 1-D, containing `[target_height, target_width, -1]`. Provide as input to - /// `tf.slice`. - /// - bboxes: 3-D with shape `[1, 1, 4]` containing the distorted bounding box. - /// Provide as input to `tf.image.draw_bounding_boxes`. 
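A minimal sketch of a call to the op documented above, assuming the swift-apis `TensorFlow` module; the image size and bounding box are illustrative values only:

```swift
import TensorFlow

// Ask for a random crop window of a 480x640 RGB image that covers at least
// 10% of the single supplied bounding box.
let imageSize = Tensor<Int32>([480, 640, 3])
let boxes = Tensor<Float>(shape: [1, 1, 4], scalars: [0.1, 0.1, 0.9, 0.9])
let (begin, size, bbox) = _Raw.sampleDistortedBoundingBoxV2(
  imageSize: imageSize,
  boundingBoxes: boxes,
  minObjectCovered: Tensor<Float>(0.1))
// `begin` and `size` are ready to feed to a slice op; `bbox` can be drawn
// onto the image for debugging.
```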
- @inlinable @inline(__always) - public static func sampleDistortedBoundingBoxV2( - imageSize: Tensor, - boundingBoxes: Tensor, - minObjectCovered: Tensor, - seed: Int64 = 0, - seed2: Int64 = 0, - aspectRatioRange: [Double] = [0.75, 1.33], - areaRange: [Double] = [0.05, 1], - maxAttempts: Int64 = 100, - useImageIfNoBoundingBoxes: Bool = false - ) -> (begin: Tensor, size: Tensor, bboxes: Tensor) { - _RawTFEager.sampleDistortedBoundingBoxV2( - imageSize: imageSize, boundingBoxes: boundingBoxes, minObjectCovered: minObjectCovered, - seed: seed, seed2: seed2, aspectRatioRange: aspectRatioRange, areaRange: areaRange, - maxAttempts: maxAttempts, useImageIfNoBoundingBoxes: useImageIfNoBoundingBoxes) - } - - /// Creates a dataset that takes a Bernoulli sample of the contents of another dataset. - /// - /// There is no transformation in the `tf.data` Python API for creating this dataset. - /// Instead, it is created as a result of the `filter_with_random_uniform_fusion` - /// static optimization. Whether this optimization is performed is determined by the - /// `experimental_optimization.filter_with_random_uniform_fusion` option of - /// `tf.data.Options`. - /// - /// - Parameters: - /// - rate: A scalar representing the sample rate. Each element of `input_dataset` is - /// retained with this probability, independent of all other elements. - /// - seed: A scalar representing seed of random number generator. - /// - seed2: A scalar representing seed2 of random number generator. - @inlinable @inline(__always) - public static func samplingDataset( - inputDataset: VariantHandle, - rate: Tensor, - seed: Tensor, - seed2: Tensor, - outputTypes: [TensorDataType], - outputShapes: [TensorShape?] - ) -> VariantHandle { - _RawTFEager.samplingDataset( - inputDataset: inputDataset, rate: rate, seed: seed, seed2: seed2, outputTypes: outputTypes, - outputShapes: outputShapes) - } - - /// Saves the input tensors to disk. - /// - /// The size of `tensor_names` must match the number of tensors in `data`. `data[i]` - /// is written to `filename` with name `tensor_names[i]`. - /// - /// See also `SaveSlices`. - /// - /// - Parameters: - /// - filename: Must have a single element. The name of the file to which we write - /// the tensor. - /// - tensor_names: Shape `[N]`. The names of the tensors to be saved. - /// - data: `N` tensors to save. - @inlinable @inline(__always) - public static func save( - filename: StringTensor, - tensorNames: StringTensor, - data: T - ) { - _RawTFEager.save(filename: filename, tensorNames: tensorNames, data: data) - } - - /// Saves input tensors slices to disk. - /// - /// This is like `Save` except that tensors can be listed in the saved file as being - /// a slice of a larger tensor. `shapes_and_slices` specifies the shape of the - /// larger tensor and the slice that this tensor covers. `shapes_and_slices` must - /// have as many elements as `tensor_names`. - /// - /// Elements of the `shapes_and_slices` input must either be: - /// - /// * The empty string, in which case the corresponding tensor is - /// saved normally. - /// * A string of the form `dim0 dim1 ... dimN-1 slice-spec` where the - /// `dimI` are the dimensions of the larger tensor and `slice-spec` - /// specifies what part is covered by the tensor to save. 
- ///
- /// `slice-spec` itself is a `:`-separated list: `slice0:slice1:...:sliceN-1`
- /// where each `sliceI` is either:
- ///
- ///    * The string `-` meaning that the slice covers all indices of this dimension
- ///    * `start,length` where `start` and `length` are integers. In that
- ///      case the slice covers `length` indices starting at `start`.
- ///
- /// See also `Save`.
- ///
- /// - Parameters:
- ///   - filename: Must have a single element. The name of the file to which we write the
- ///     tensor.
- ///   - tensor_names: Shape `[N]`. The names of the tensors to be saved.
- ///   - shapes_and_slices: Shape `[N]`. The shapes and slice specifications to use when
- ///     saving the tensors.
- ///   - data: `N` tensors to save.
- @inlinable @inline(__always)
- public static func saveSlices(
- filename: StringTensor,
- tensorNames: StringTensor,
- shapesAndSlices: StringTensor,
- data: T
- ) {
- _RawTFEager.saveSlices(
- filename: filename, tensorNames: tensorNames, shapesAndSlices: shapesAndSlices, data: data)
- }
-
- /// Saves tensors in V2 checkpoint format.
- ///
- /// By default, saves the named tensors in full. If the caller wishes to save
- /// specific slices of full tensors, "shape_and_slices" should be non-empty strings
- /// and correspondingly well-formed.
- ///
- /// - Parameters:
- ///   - prefix: Must have a single element. The prefix of the V2 checkpoint to which we
- ///     write the tensors.
- ///   - tensor_names: shape {N}. The names of the tensors to be saved.
- ///   - shape_and_slices: shape {N}. The slice specs of the tensors to be saved.
- ///     Empty strings indicate that they are non-partitioned tensors.
- ///   - tensors: `N` tensors to save.
- @inlinable @inline(__always)
- public static func saveV2(
- prefix: StringTensor,
- tensorNames: StringTensor,
- shapeAndSlices: StringTensor,
- tensors: Dtypes
- ) {
- _RawTFEager.saveV2(
- prefix: prefix, tensorNames: tensorNames, shapeAndSlices: shapeAndSlices, tensors: tensors)
- }
-
- /// Outputs a `Summary` protocol buffer with scalar values.
- ///
- /// The input `tags` and `values` must have the same shape. The generated summary
- /// has a summary value for each tag-value pair in `tags` and `values`.
- ///
- /// - Parameters:
- ///   - tags: Tags for the summary.
- ///   - values: Same shape as `tags`. Values for the summary.
- ///
- /// - Output summary: Scalar. Serialized `Summary` protocol buffer.
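The tag/value pairing described above is easiest to see in a call. A small, hypothetical sketch (assuming the swift-apis `TensorFlow` module; the tag and value are illustrative):

```swift
import TensorFlow

// One tag and one value of the same shape produce one serialized
// `Summary` proto in the returned scalar string tensor.
let summary = _Raw.scalarSummary(
  tags: StringTensor(["train/loss"]),
  Tensor<Float>([0.25]))
```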
- @inlinable @inline(__always) - public static func scalarSummary( - tags: StringTensor, - _ values: Tensor - ) -> StringTensor { - _RawTFEager.scalarSummary(tags: tags, values) - } - - @inlinable @inline(__always) - public static func scaleAndTranslate( - images: Tensor, - size: Tensor, - scale: Tensor, - translation: Tensor, - kernelType: String = "lanczos3", - antialias: Bool = true - ) -> Tensor { - switch commonBackend( - commonBackend( - commonBackend(images.handle.backend, size.handle.backend), scale.handle.backend), - translation.handle.backend) - { - case .XLA: - let output_device = translation.device - let images = Tensor(copying: images, to: .defaultTFEager) - let size = Tensor(copying: size, to: .defaultTFEager) - let scale = Tensor(copying: scale, to: .defaultTFEager) - let translation = Tensor(copying: translation, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.scaleAndTranslate( - images: images, size: size, scale: scale, translation: translation, - kernelType: kernelType, antialias: antialias), to: output_device) - case .TF_EAGER: - return _RawTFEager.scaleAndTranslate( - images: images, size: size, scale: scale, translation: translation, - kernelType: kernelType, antialias: antialias) - } + /// Wraps the XLA Sort operator, documented at + /// + /// https://www.tensorflow.org/performance/xla/operation_semantics#sort + /// . + /// + /// Sorts a tensor. Currently only sorts in ascending order are supported. + /// + /// - Parameters: + /// - keys: A `Tensor` of type K. + /// - values: A `Tensor` of type V. + /// + /// - Outputs: + /// - sorted_keys: A `Tensor` of type K. + /// - sorted_values: A `Tensor` of type V. + @inlinable @inline(__always) + public static func xlaKeyValueSort< + K: TensorFlowNumeric, + V: TensorFlowScalar + >( + keys: Tensor, + _ values: Tensor + ) -> (sortedKeys: Tensor, sortedValues: Tensor) { + _RawTFEager.xlaKeyValueSort(keys: keys, values) + } - } + /// XLA Launch Op. For use by the XLA JIT only. + @inlinable @inline(__always) + public static func xlaLaunch< + Tconstants: TensorArrayProtocol, + Targs: TensorArrayProtocol, + Tresults: TensorGroup, + FunctionIn: TensorGroup, + FunctionOut: TensorGroup + >( + constants: Tconstants, + args: Targs, + resources: [ResourceHandle], + function: (FunctionIn) -> FunctionOut + ) -> Tresults { + _RawTFEager.xlaLaunch( + constants: constants, args: args, resources: resources, function: function) + } - @inlinable @inline(__always) - public static func scaleAndTranslateGrad( - grads: Tensor, - originalImage: Tensor, - scale: Tensor, - translation: Tensor, - kernelType: String = "lanczos3", - antialias: Bool = true - ) -> Tensor { - switch commonBackend( + /// Wraps the XLA Pad operator, documented at + /// + /// https://www.tensorflow.org/performance/xla/operation_semantics#pad + /// . + /// + /// - Parameters: + /// - input: A `Tensor` of type T. + /// - padding_value: A scalar `Tensor` of type T. + /// - padding_low: the padding to apply at the start of each input dimensions + /// - padding_high: the padding to apply at the end of each input dimension. + /// - padding_interior: the padding to apply between each input element. + /// + /// - Output output: A `Tensor` of type T. 
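To make the three padding vectors concrete, here is a minimal sketch with illustrative values, assuming the swift-apis `TensorFlow` module and standard XLA Pad semantics:

```swift
import TensorFlow

// low = 1, high = 1, interior = 1 on a length-3 vector: one zero in front,
// one zero between every pair of neighbours, and one zero at the end.
let x = Tensor<Float>([1, 2, 3])
let padded = _Raw.xlaPad(
  x,
  paddingValue: Tensor<Float>(0),
  paddingLow: Tensor<Int32>([1]),
  paddingHigh: Tensor<Int32>([1]),
  paddingInterior: Tensor<Int32>([1]))
// Output length = 1 + 3 + (3 - 1) * 1 + 1 = 7, i.e. [0, 1, 0, 2, 0, 3, 0].
```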
+ @inlinable @inline(__always) + public static func xlaPad< + T: TensorFlowScalar, + Tindices: TensorFlowIndex + >( + _ input: Tensor, + paddingValue: Tensor, + paddingLow: Tensor, + paddingHigh: Tensor, + paddingInterior: Tensor + ) -> Tensor { + switch commonBackend( + commonBackend( commonBackend( - commonBackend(grads.handle.backend, originalImage.handle.backend), scale.handle.backend), - translation.handle.backend) - { - case .XLA: - let output_device = translation.device - let grads = Tensor(copying: grads, to: .defaultTFEager) - let originalImage = Tensor(copying: originalImage, to: .defaultTFEager) - let scale = Tensor(copying: scale, to: .defaultTFEager) - let translation = Tensor(copying: translation, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.scaleAndTranslateGrad( - grads: grads, originalImage: originalImage, scale: scale, translation: translation, - kernelType: kernelType, antialias: antialias), to: output_device) - case .TF_EAGER: - return _RawTFEager.scaleAndTranslateGrad( - grads: grads, originalImage: originalImage, scale: scale, translation: translation, - kernelType: kernelType, antialias: antialias) - } - - } - - /// Creates a dataset successively reduces `f` over the elements of `input_dataset`. - @inlinable @inline(__always) - public static func scanDataset< - FIn: TensorGroup, - FOut: TensorGroup, - Tstate: TensorArrayProtocol, - Targuments: TensorArrayProtocol - >( - inputDataset: VariantHandle, - initialState: Tstate, - otherArguments: Targuments, - f: (FIn) -> FOut, - outputTypes: [TensorDataType], - outputShapes: [TensorShape?], - preserveCardinality: Bool = false, - useDefaultDevice: Bool = true - ) -> VariantHandle { - _RawTFEager.scanDataset( - inputDataset: inputDataset, initialState: initialState, otherArguments: otherArguments, - f: f, outputTypes: outputTypes, outputShapes: outputShapes, - preserveCardinality: preserveCardinality, useDefaultDevice: useDefaultDevice) - } - - /// Scatter `updates` into a new tensor according to `indices`. - /// - /// Creates a new tensor by applying sparse `updates` to individual values or - /// slices within a tensor (initially zero for numeric, empty for string) of - /// the given `shape` according to indices. This operator is the inverse of the - /// `tf.gather_nd` operator which extracts values or slices from a given tensor. - /// - /// This operation is similar to tensor_scatter_add, except that the tensor is - /// zero-initialized. Calling `tf.scatter_nd(indices, values, shape)` is identical - /// to `tensor_scatter_add(tf.zeros(shape, values.dtype), indices, values)` - /// - /// If `indices` contains duplicates, then their updates are accumulated (summed). - /// - /// **WARNING**: The order in which updates are applied is nondeterministic, so the - /// output will be nondeterministic if `indices` contains duplicates -- because - /// of some numerical approximation issues, numbers summed in different order - /// may yield different results. - /// - /// `indices` is an integer tensor containing indices into a new tensor of shape - /// `shape`. The last dimension of `indices` can be at most the rank of `shape`: - /// - /// indices.shape[-1] <= shape.rank - /// - /// The last dimension of `indices` corresponds to indices into elements - /// (if `indices.shape[-1] = shape.rank`) or slices - /// (if `indices.shape[-1] < shape.rank`) along dimension `indices.shape[-1]` of - /// `shape`. 
`updates` is a tensor with shape - /// - /// indices.shape[:-1] + shape[indices.shape[-1]:] - /// - /// The simplest form of scatter is to insert individual elements in a tensor by - /// index. For example, say we want to insert 4 scattered elements in a rank-1 - /// tensor with 8 elements. - /// - ///
- ///
- /// In Python, this scatter operation would look like this:
- ///
- /// ```python
- ///     indices = tf.constant([[4], [3], [1], [7]])
- ///     updates = tf.constant([9, 10, 11, 12])
- ///     shape = tf.constant([8])
- ///     scatter = tf.scatter_nd(indices, updates, shape)
- ///     print(scatter)
- /// ```
- ///
- /// The resulting tensor would look like this:
- ///
- ///     [0, 11, 0, 10, 9, 0, 0, 12]
- ///
- /// We can also insert entire slices of a higher rank tensor all at once. For
- /// example, suppose we want to insert two slices in the first dimension of a
- /// rank-3 tensor with two matrices of new values.
- ///
- /// - /// In Python, this scatter operation would look like this: - /// - /// ```python - /// indices = tf.constant([[0], [2]]) - /// updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6], - /// [7, 7, 7, 7], [8, 8, 8, 8]], - /// [[5, 5, 5, 5], [6, 6, 6, 6], - /// [7, 7, 7, 7], [8, 8, 8, 8]]]) - /// shape = tf.constant([4, 4, 4]) - /// scatter = tf.scatter_nd(indices, updates, shape) - /// print(scatter) - /// ``` - /// - /// The resulting tensor would look like this: - /// - /// [[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]], - /// [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], - /// [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]], - /// [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]] - /// - /// Note that on CPU, if an out of bound index is found, an error is returned. - /// On GPU, if an out of bound index is found, the index is ignored. - /// - /// - Parameters: - /// - indices: Index tensor. - /// - updates: Updates to scatter into output. - /// - shape: 1-D. The shape of the resulting tensor. - /// - /// - Output output: A new tensor with the given shape and updates applied according - /// to the indices. - @inlinable @inline(__always) - public static func scatterNd< - T: TensorFlowScalar, - Tindices: TensorFlowIndex - >( - indices: Tensor, - updates: Tensor, - shape: Tensor - ) -> Tensor { - switch commonBackend( - commonBackend(indices.handle.backend, updates.handle.backend), shape.handle.backend) - { - case .XLA: - let output_device = shape.device - let indices = Tensor(copying: indices, to: .defaultTFEager) - let updates = Tensor(copying: updates, to: .defaultTFEager) - let shape = Tensor(copying: shape, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.scatterNd(indices: indices, updates: updates, shape: shape), - to: output_device) - case .TF_EAGER: - return _RawTFEager.scatterNd(indices: indices, updates: updates, shape: shape) - } - - } - - /// Applies sparse addition to `input` using individual values or slices - /// - /// from `updates` according to indices `indices`. The updates are non-aliasing: - /// `input` is only modified in-place if no other operations will use it. - /// Otherwise, a copy of `input` is made. This operation has a gradient with - /// respect to both `input` and `updates`. - /// - /// `input` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`. - /// - /// `indices` must be integer tensor, containing indices into `input`. - /// It must be shape \\([d_0, ..., d_{Q-2}, K]\\) where `0 < K <= P`. - /// - /// The innermost dimension of `indices` (with length `K`) corresponds to - /// indices into elements (if `K = P`) or `(P-K)`-dimensional slices - /// (if `K < P`) along the `K`th dimension of `input`. - /// - /// `updates` is `Tensor` of rank `Q-1+P-K` with shape: - /// - /// $$[d_0, ..., d_{Q-2}, input.shape[K], ..., input.shape[P-1]].$$ - /// - /// For example, say we want to add 4 scattered elements to a rank-1 tensor to 8 - /// elements. In Python, that addition would look like this: - /// - /// input = tf.constant([1, 2, 3, 4, 5, 6, 7, 8]) - /// indices = tf.constant([[4], [3], [1], [7]]) - /// updates = tf.constant([9, 10, 11, 12]) - /// output = tf.scatter_nd_non_aliasing_add(input, indices, updates) - /// with tf.Session() as sess: - /// print(sess.run(output)) - /// - /// The resulting value `output` would look like this: - /// - /// [1, 13, 3, 14, 14, 6, 7, 20] - /// - /// See `tf.scatter_nd` for more details about how to make updates to slices. 
- /// - /// - Parameters: - /// - input: A Tensor. - /// - indices: A Tensor. Must be one of the following types: `int32`, `int64`. - /// A tensor of indices into `input`. - /// - updates: A Tensor. Must have the same type as ref. A tensor of updated values - /// to add to `input`. - /// - /// - Output output: A `Tensor` with the same shape as `input`, containing values of `input` - /// updated with `updates`. - @inlinable @inline(__always) - public static func scatterNdNonAliasingAdd< - T: TensorFlowScalar, - Tindices: TensorFlowIndex - >( - _ input: Tensor, - indices: Tensor, - updates: Tensor - ) -> Tensor { - switch commonBackend( - commonBackend(input.handle.backend, indices.handle.backend), updates.handle.backend) - { - case .XLA: - let output_device = updates.device - let input = Tensor(copying: input, to: .defaultTFEager) - let indices = Tensor(copying: indices, to: .defaultTFEager) - let updates = Tensor(copying: updates, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.scatterNdNonAliasingAdd(input, indices: indices, updates: updates), - to: output_device) - case .TF_EAGER: - return _RawTFEager.scatterNdNonAliasingAdd(input, indices: indices, updates: updates) - } - - } - - /// Computes fingerprints of the input strings. - /// - /// - Parameter input: vector of strings to compute fingerprints on. - /// - /// - Output output: a (N,2) shaped matrix where N is the number of elements in the input - /// vector. Each row contains the low and high parts of the fingerprint. - @inlinable @inline(__always) - public static func sdcaFprint( - _ input: StringTensor - ) -> Tensor { - _RawTFEager.sdcaFprint(input) - } - - /// Distributed version of Stochastic Dual Coordinate Ascent (SDCA) optimizer for - /// - /// linear models with L1 + L2 regularization. As global optimization objective is - /// strongly-convex, the optimizer optimizes the dual objective at each step. The - /// optimizer applies each update one example at a time. Examples are sampled - /// uniformly, and the optimizer is learning rate free and enjoys linear convergence - /// rate. - /// - /// [Proximal Stochastic Dual Coordinate Ascent](http://arxiv.org/pdf/1211.2717v1.pdf).
- /// Shai Shalev-Shwartz, Tong Zhang. 2012 - /// - /// $$Loss Objective = \sum f_{i} (wx_{i}) + (l2 / 2) * |w|^2 + l1 * |w|$$ - /// - /// [Adding vs. Averaging in Distributed Primal-Dual Optimization](http://arxiv.org/abs/1502.03508).
- /// Chenxin Ma, Virginia Smith, Martin Jaggi, Michael I. Jordan, - /// Peter Richtarik, Martin Takac. 2015 - /// - /// [Stochastic Dual Coordinate Ascent with Adaptive Probabilities](https://arxiv.org/abs/1502.08053).
- /// Dominik Csiba, Zheng Qu, Peter Richtarik. 2015
- ///
- /// - Parameters:
- ///   - sparse_example_indices: a list of vectors which contain example indices.
- ///   - sparse_feature_indices: a list of vectors which contain feature indices.
- ///   - sparse_feature_values: a list of vectors which contain the feature values
- ///     associated with each feature group.
- ///   - dense_features: a list of matrices which contain the dense feature values.
- ///   - example_weights: a vector which contains the weight associated with each
- ///     example.
- ///   - example_labels: a vector which contains the label/target associated with each
- ///     example.
- ///   - sparse_indices: a list of vectors where each value is the indices which have
- ///     corresponding weights in sparse_weights. This field may be omitted for the
- ///     dense approach.
- ///   - sparse_weights: a list of vectors where each value is the weight associated with
- ///     a sparse feature group.
- ///   - dense_weights: a list of vectors where the values are the weights associated
- ///     with a dense feature group.
- ///   - example_state_data: a list of vectors containing the example state data.
- ///
- /// - Attrs:
- ///   - loss_type: Type of the primal loss. Currently SdcaSolver supports logistic,
- ///     squared and hinge losses.
- ///   - adaptative: Whether to use Adaptive SDCA for the inner loop.
- ///   - num_sparse_features: Number of sparse feature groups to train on.
- ///   - num_sparse_features_with_values: Number of sparse feature groups with values
- ///     associated with them; otherwise values are implicitly treated as 1.0.
- ///   - num_dense_features: Number of dense feature groups to train on.
- ///   - l1: Symmetric l1 regularization strength.
- ///   - l2: Symmetric l2 regularization strength.
- ///   - num_loss_partitions: Number of partitions of the global loss function.
- ///   - num_inner_iterations: Number of iterations per mini-batch.
- ///
- /// - Outputs:
- ///   - out_example_state_data: a list of vectors containing the updated example state
- ///     data.
- ///   - out_delta_sparse_weights: a list of vectors where each value is the delta
- ///     weights associated with a sparse feature group.
- ///   - out_delta_dense_weights: a list of vectors where the values are the delta
- ///     weights associated with a dense feature group.
- @inlinable @inline(__always) - public static func sdcaOptimizer( - sparseExampleIndices: [Tensor], - sparseFeatureIndices: [Tensor], - sparseFeatureValues: [Tensor], - denseFeatures: [Tensor], - exampleWeights: Tensor, - exampleLabels: Tensor, - sparseIndices: [Tensor], - sparseWeights: [Tensor], - denseWeights: [Tensor], - exampleStateData: Tensor, - lossType: LossType, - adaptative: Bool = false, - l1: Double, - l2: Double, - numLossPartitions: Int64, - numInnerIterations: Int64 - ) -> ( - outExampleStateData: Tensor, outDeltaSparseWeights: [Tensor], - outDeltaDenseWeights: [Tensor] - ) { - _RawTFEager.sdcaOptimizer( - sparseExampleIndices: sparseExampleIndices, sparseFeatureIndices: sparseFeatureIndices, - sparseFeatureValues: sparseFeatureValues, denseFeatures: denseFeatures, - exampleWeights: exampleWeights, exampleLabels: exampleLabels, sparseIndices: sparseIndices, - sparseWeights: sparseWeights, denseWeights: denseWeights, - exampleStateData: exampleStateData, lossType: lossType, adaptative: adaptative, l1: l1, - l2: l2, numLossPartitions: numLossPartitions, numInnerIterations: numInnerIterations) - } - - /// Distributed version of Stochastic Dual Coordinate Ascent (SDCA) optimizer for - /// - /// linear models with L1 + L2 regularization. As global optimization objective is - /// strongly-convex, the optimizer optimizes the dual objective at each step. The - /// optimizer applies each update one example at a time. Examples are sampled - /// uniformly, and the optimizer is learning rate free and enjoys linear convergence - /// rate. - /// - /// [Proximal Stochastic Dual Coordinate Ascent](http://arxiv.org/pdf/1211.2717v1.pdf).
- /// Shai Shalev-Shwartz, Tong Zhang. 2012 - /// - /// $$Loss Objective = \sum f_{i} (wx_{i}) + (l2 / 2) * |w|^2 + l1 * |w|$$ - /// - /// [Adding vs. Averaging in Distributed Primal-Dual Optimization](http://arxiv.org/abs/1502.03508).
- /// Chenxin Ma, Virginia Smith, Martin Jaggi, Michael I. Jordan, - /// Peter Richtarik, Martin Takac. 2015 - /// - /// [Stochastic Dual Coordinate Ascent with Adaptive Probabilities](https://arxiv.org/abs/1502.08053).
- /// Dominik Csiba, Zheng Qu, Peter Richtarik. 2015 - /// - /// - Parameters: - /// - sparse_example_indices: a list of vectors which contain example indices. - /// - sparse_feature_indices: a list of vectors which contain feature indices. - /// - sparse_feature_values: a list of vectors which contains feature value - /// associated with each feature group. - /// - dense_features: a list of matrices which contains the dense feature values. - /// - example_weights: a vector which contains the weight associated with each - /// example. - /// - example_labels: a vector which contains the label/target associated with each - /// example. - /// - sparse_indices: a list of vectors where each value is the indices which has - /// corresponding weights in sparse_weights. This field maybe omitted for the - /// dense approach. - /// - sparse_weights: a list of vectors where each value is the weight associated with - /// a sparse feature group. - /// - dense_weights: a list of vectors where the values are the weights associated - /// with a dense feature group. - /// - example_state_data: a list of vectors containing the example state data. - /// - /// - Attrs: - /// - loss_type: Type of the primal loss. Currently SdcaSolver supports logistic, - /// squared and hinge losses. - /// - adaptive: Whether to use Adaptive SDCA for the inner loop. - /// - num_sparse_features: Number of sparse feature groups to train on. - /// - num_sparse_features_with_values: Number of sparse feature groups with values - /// associated with it, otherwise implicitly treats values as 1.0. - /// - num_dense_features: Number of dense feature groups to train on. - /// - l1: Symmetric l1 regularization strength. - /// - l2: Symmetric l2 regularization strength. - /// - num_loss_partitions: Number of partitions of the global loss function. - /// - num_inner_iterations: Number of iterations per mini-batch. - /// - /// - Outputs: - /// - out_example_state_data: a list of vectors containing the updated example state - /// data. - /// - out_delta_sparse_weights: a list of vectors where each value is the delta - /// weights associated with a sparse feature group. - /// - out_delta_dense_weights: a list of vectors where the values are the delta - /// weights associated with a dense feature group. - @inlinable @inline(__always) - public static func sdcaOptimizerV2( - sparseExampleIndices: [Tensor], - sparseFeatureIndices: [Tensor], - sparseFeatureValues: [Tensor], - denseFeatures: [Tensor], - exampleWeights: Tensor, - exampleLabels: Tensor, - sparseIndices: [Tensor], - sparseWeights: [Tensor], - denseWeights: [Tensor], - exampleStateData: Tensor, - lossType: LossType, - adaptive: Bool = false, - l1: Double, - l2: Double, - numLossPartitions: Int64, - numInnerIterations: Int64 - ) -> ( - outExampleStateData: Tensor, outDeltaSparseWeights: [Tensor], - outDeltaDenseWeights: [Tensor] - ) { - _RawTFEager.sdcaOptimizerV2( - sparseExampleIndices: sparseExampleIndices, sparseFeatureIndices: sparseFeatureIndices, - sparseFeatureValues: sparseFeatureValues, denseFeatures: denseFeatures, - exampleWeights: exampleWeights, exampleLabels: exampleLabels, sparseIndices: sparseIndices, - sparseWeights: sparseWeights, denseWeights: denseWeights, - exampleStateData: exampleStateData, lossType: lossType, adaptive: adaptive, l1: l1, l2: l2, - numLossPartitions: numLossPartitions, numInnerIterations: numInnerIterations) - } - - /// Computes the maximum along segments of a tensor. 
- /// - /// Read - /// [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) - /// for an explanation of segments. - /// - /// Computes a tensor such that - /// \\(output_i = \max_j(data_j)\\) where `max` is over `j` such - /// that `segment_ids[j] == i`. - /// - /// If the max is empty for a given segment ID `i`, `output[i] = 0`. - /// - ///
- /// - /// For example: - /// - /// ``` - /// c = tf.constant([[1,2,3,4], [4, 3, 2, 1], [5,6,7,8]]) - /// tf.segment_max(c, tf.constant([0, 0, 1])) - /// # ==> [[4, 3, 3, 4], - /// # [5, 6, 7, 8]] - /// ``` - /// - /// - /// - Parameter segment_ids: A 1-D tensor whose size is equal to the size of `data`'s - /// first dimension. Values should be sorted and can be repeated. - /// - /// - Output output: Has same shape as data, except for dimension 0 which - /// has size `k`, the number of segments. - @inlinable @inline(__always) - public static func segmentMax< - T: TensorFlowNumeric, - Tindices: TensorFlowIndex - >( - data: Tensor, - segmentIds: Tensor - ) -> Tensor { - switch commonBackend(data.handle.backend, segmentIds.handle.backend) { - case .XLA: - let output_device = segmentIds.device - let data = Tensor(copying: data, to: .defaultTFEager) - let segmentIds = Tensor(copying: segmentIds, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.segmentMax(data: data, segmentIds: segmentIds), to: output_device) - case .TF_EAGER: - return _RawTFEager.segmentMax(data: data, segmentIds: segmentIds) - } - - } - - /// Computes the mean along segments of a tensor. - /// - /// Read - /// [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) - /// for an explanation of segments. - /// - /// Computes a tensor such that - /// \\(output_i = \frac{\sum_j data_j}{N}\\) where `mean` is - /// over `j` such that `segment_ids[j] == i` and `N` is the total number of - /// values summed. - /// - /// If the mean is empty for a given segment ID `i`, `output[i] = 0`. - /// - ///
- /// - /// For example: - /// - /// ``` - /// c = tf.constant([[1.0,2,3,4], [4, 3, 2, 1], [5,6,7,8]]) - /// tf.segment_mean(c, tf.constant([0, 0, 1])) - /// # ==> [[2.5, 2.5, 2.5, 2.5], - /// # [5, 6, 7, 8]] - /// ``` - /// - /// - /// - Parameter segment_ids: A 1-D tensor whose size is equal to the size of `data`'s - /// first dimension. Values should be sorted and can be repeated. - /// - /// - Output output: Has same shape as data, except for dimension 0 which - /// has size `k`, the number of segments. - @inlinable @inline(__always) - public static func segmentMean< - T: TensorFlowNumeric, - Tindices: TensorFlowIndex - >( - data: Tensor, - segmentIds: Tensor - ) -> Tensor { - switch commonBackend(data.handle.backend, segmentIds.handle.backend) { - case .XLA: - let output_device = segmentIds.device - let data = Tensor(copying: data, to: .defaultTFEager) - let segmentIds = Tensor(copying: segmentIds, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.segmentMean(data: data, segmentIds: segmentIds), to: output_device) - case .TF_EAGER: - return _RawTFEager.segmentMean(data: data, segmentIds: segmentIds) - } - - } - - /// Computes the minimum along segments of a tensor. - /// - /// Read - /// [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) - /// for an explanation of segments. - /// - /// Computes a tensor such that - /// \\(output_i = \min_j(data_j)\\) where `min` is over `j` such - /// that `segment_ids[j] == i`. - /// - /// If the min is empty for a given segment ID `i`, `output[i] = 0`. - /// - ///
- /// - /// For example: - /// - /// ``` - /// c = tf.constant([[1,2,3,4], [4, 3, 2, 1], [5,6,7,8]]) - /// tf.segment_min(c, tf.constant([0, 0, 1])) - /// # ==> [[1, 2, 2, 1], - /// # [5, 6, 7, 8]] - /// ``` - /// - /// - Parameter segment_ids: A 1-D tensor whose size is equal to the size of `data`'s - /// first dimension. Values should be sorted and can be repeated. - /// - /// - Output output: Has same shape as data, except for dimension 0 which - /// has size `k`, the number of segments. - @inlinable @inline(__always) - public static func segmentMin< - T: TensorFlowNumeric, - Tindices: TensorFlowIndex - >( - data: Tensor, - segmentIds: Tensor - ) -> Tensor { - switch commonBackend(data.handle.backend, segmentIds.handle.backend) { - case .XLA: - let output_device = segmentIds.device - let data = Tensor(copying: data, to: .defaultTFEager) - let segmentIds = Tensor(copying: segmentIds, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.segmentMin(data: data, segmentIds: segmentIds), to: output_device) - case .TF_EAGER: - return _RawTFEager.segmentMin(data: data, segmentIds: segmentIds) - } - - } - - /// Computes the product along segments of a tensor. - /// - /// Read - /// [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) - /// for an explanation of segments. - /// - /// Computes a tensor such that - /// \\(output_i = \prod_j data_j\\) where the product is over `j` such - /// that `segment_ids[j] == i`. - /// - /// If the product is empty for a given segment ID `i`, `output[i] = 1`. - /// - ///
- /// - /// For example: - /// - /// ``` - /// c = tf.constant([[1,2,3,4], [4, 3, 2, 1], [5,6,7,8]]) - /// tf.segment_prod(c, tf.constant([0, 0, 1])) - /// # ==> [[4, 6, 6, 4], - /// # [5, 6, 7, 8]] - /// ``` - /// - /// - /// - Parameter segment_ids: A 1-D tensor whose size is equal to the size of `data`'s - /// first dimension. Values should be sorted and can be repeated. - /// - /// - Output output: Has same shape as data, except for dimension 0 which - /// has size `k`, the number of segments. - @inlinable @inline(__always) - public static func segmentProd< - T: TensorFlowNumeric, - Tindices: TensorFlowIndex - >( - data: Tensor, - segmentIds: Tensor - ) -> Tensor { - switch commonBackend(data.handle.backend, segmentIds.handle.backend) { - case .XLA: - let output_device = segmentIds.device - let data = Tensor(copying: data, to: .defaultTFEager) - let segmentIds = Tensor(copying: segmentIds, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.segmentProd(data: data, segmentIds: segmentIds), to: output_device) - case .TF_EAGER: - return _RawTFEager.segmentProd(data: data, segmentIds: segmentIds) - } - - } - - /// Computes the sum along segments of a tensor. - /// - /// Read - /// [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) - /// for an explanation of segments. - /// - /// Computes a tensor such that - /// \\(output_i = \sum_j data_j\\) where sum is over `j` such - /// that `segment_ids[j] == i`. - /// - /// If the sum is empty for a given segment ID `i`, `output[i] = 0`. - /// - ///
- /// - /// For example: - /// - /// ``` - /// c = tf.constant([[1,2,3,4], [4, 3, 2, 1], [5,6,7,8]]) - /// tf.segment_sum(c, tf.constant([0, 0, 1])) - /// # ==> [[5, 5, 5, 5], - /// # [5, 6, 7, 8]] - /// ``` - /// - /// - /// - Parameter segment_ids: A 1-D tensor whose size is equal to the size of `data`'s - /// first dimension. Values should be sorted and can be repeated. - /// - /// - Output output: Has same shape as data, except for dimension 0 which - /// has size `k`, the number of segments. - @inlinable @inline(__always) - public static func segmentSum< - T: TensorFlowNumeric, - Tindices: TensorFlowIndex - >( - data: Tensor, - segmentIds: Tensor - ) -> Tensor { - switch commonBackend(data.handle.backend, segmentIds.handle.backend) { - case .XLA: - let output_device = segmentIds.device - let data = Tensor(copying: data, to: .defaultTFEager) - let segmentIds = Tensor(copying: segmentIds, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.segmentSum(data: data, segmentIds: segmentIds), to: output_device) - case .TF_EAGER: - return _RawTFEager.segmentSum(data: data, segmentIds: segmentIds) - } - - } - - /// Selects elements from `x` or `y`, depending on `condition`. - /// - /// The `x`, and `y` tensors must all have the same shape, and the - /// output will also have that shape. - /// - /// The `condition` tensor must be a scalar if `x` and `y` are scalars. - /// If `x` and `y` are vectors or higher rank, then `condition` must be either a - /// scalar, a vector with size matching the first dimension of `x`, or must have - /// the same shape as `x`. - /// - /// The `condition` tensor acts as a mask that chooses, based on the value at each - /// element, whether the corresponding element / row in the output should be - /// taken from `x` (if true) or `y` (if false). - /// - /// If `condition` is a vector and `x` and `y` are higher rank matrices, then - /// it chooses which row (outer dimension) to copy from `x` and `y`. - /// If `condition` has the same shape as `x` and `y`, then it chooses which - /// element to copy from `x` and `y`. - /// - /// For example: - /// - /// ```python - /// # 'condition' tensor is [[True, False] - /// # [False, True]] - /// # 't' is [[1, 2], - /// # [3, 4]] - /// # 'e' is [[5, 6], - /// # [7, 8]] - /// select(condition, t, e) # => [[1, 6], [7, 4]] - /// - /// - /// # 'condition' tensor is [True, False] - /// # 't' is [[1, 2], - /// # [3, 4]] - /// # 'e' is [[5, 6], - /// # [7, 8]] - /// select(condition, t, e) ==> [[1, 2], - /// [7, 8]] - /// - /// ``` - /// - /// - Parameters: - /// - t: = A `Tensor` which may have the same shape as `condition`. - /// If `condition` is rank 1, `x` may have higher rank, - /// but its first dimension must match the size of `condition`. - /// - e: = A `Tensor` with the same type and shape as `x`. - /// - /// - Output output: = A `Tensor` with the same type and shape as `x` and `y`. 
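The rank-1 `condition` case documented above selects whole rows. A minimal sketch mirroring the second Python example (assuming the swift-apis `TensorFlow` module; values are illustrative):

```swift
import TensorFlow

// condition picks row 0 from `t` and row 1 from `e`.
let condition = Tensor<Bool>([true, false])
let t = Tensor<Float>(shape: [2, 2], scalars: [1, 2, 3, 4])
let e = Tensor<Float>(shape: [2, 2], scalars: [5, 6, 7, 8])
let chosen = _Raw.select(condition: condition, t: t, e: e)
print(chosen)  // expected: [[1.0, 2.0], [7.0, 8.0]]
```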
- @inlinable @inline(__always) - public static func select( - condition: Tensor, - t: Tensor, - e: Tensor - ) -> Tensor { - switch commonBackend( - commonBackend(condition.handle.backend, t.handle.backend), e.handle.backend) - { - case .XLA: - return _RawXLA.select(condition: condition, t: t, e: e) - case .TF_EAGER: - return _RawTFEager.select(condition: condition, t: t, e: e) - } - - } - - @inlinable @inline(__always) - public static func selectV2( - condition: Tensor, - t: Tensor, - e: Tensor - ) -> Tensor { - switch commonBackend( - commonBackend(condition.handle.backend, t.handle.backend), e.handle.backend) - { - case .XLA: - let output_device = e.device - let condition = Tensor(copying: condition, to: .defaultTFEager) - let t = Tensor(copying: t, to: .defaultTFEager) - let e = Tensor(copying: e, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.selectV2(condition: condition, t: t, e: e), to: output_device) - case .TF_EAGER: - return _RawTFEager.selectV2(condition: condition, t: t, e: e) - } - - } - - /// Computes the Eigen Decomposition of a batch of square self-adjoint matrices. - /// - /// The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions - /// form square matrices, with the same constraints as the single matrix - /// SelfAdjointEig. - /// - /// The result is a [..., M+1, M] matrix with [..., 0,:] containing the - /// eigenvalues, and subsequent [...,1:, :] containing the eigenvectors. The eigenvalues - /// are sorted in non-decreasing order. - /// - /// - Parameter input: Shape is `[..., M, M]`. - /// - /// - Output output: Shape is `[..., M+1, M]`. - @inlinable @inline(__always) - public static func selfAdjointEig( - _ input: Tensor - ) -> Tensor { - switch input.handle.backend { - case .XLA: - let output_device = input.device - let input = Tensor(copying: input, to: .defaultTFEager) - return Tensor(copying: _RawTFEager.selfAdjointEig(input), to: output_device) - case .TF_EAGER: - return _RawTFEager.selfAdjointEig(input) - } - - } - - /// Computes the eigen decomposition of one or more square self-adjoint matrices. - /// - /// Computes the eigenvalues and (optionally) eigenvectors of each inner matrix in - /// `input` such that `input[..., :, :] = v[..., :, :] * diag(e[..., :])`. The eigenvalues - /// are sorted in non-decreasing order. - /// - /// ```python - /// # a is a tensor. - /// # e is a tensor of eigenvalues. - /// # v is a tensor of eigenvectors. - /// e, v = self_adjoint_eig(a) - /// e = self_adjoint_eig(a, compute_v=False) - /// ``` - /// - /// - Parameter input: `Tensor` input of shape `[N, N]`. - /// - /// - Attr compute_v: If `True` then eigenvectors will be computed and returned in `v`. - /// Otherwise, only the eigenvalues will be computed. - /// - /// - Outputs: - /// - e: Eigenvalues. Shape is `[N]`. - /// - v: Eigenvectors. Shape is `[N, N]`. - @inlinable @inline(__always) - public static func selfAdjointEigV2( - _ input: Tensor, - computeV: Bool = true - ) -> (e: Tensor, v: Tensor) { - _RawTFEager.selfAdjointEigV2(input, computeV: computeV) - } - - /// Computes scaled exponential linear: `scale * alpha * (exp(features) - 1)` - /// - /// if < 0, `scale * features` otherwise. - /// - /// To be used together with - /// `initializer = tf.variance_scaling_initializer(factor=1.0, mode='FAN_IN')`. - /// For correct dropout, use `tf.contrib.nn.alpha_dropout`. 
-    ///
-    /// See [Self-Normalizing Neural Networks](https://arxiv.org/abs/1706.02515)
-    @inlinable @inline(__always)
-    public static func selu<T: TensorFlowFloatingPoint>(
-      features: Tensor<T>
-    ) -> Tensor<T> {
-      switch features.handle.backend {
-      case .XLA:
-        return _RawXLA.selu(features: features)
-      case .TF_EAGER:
-        return _RawTFEager.selu(features: features)
-      }
-
-    }
-
-    /// Computes gradients for the scaled exponential linear (Selu) operation.
-    ///
-    /// - Parameters:
-    ///   - gradients: The backpropagated gradients to the corresponding Selu operation.
-    ///   - outputs: The outputs of the corresponding Selu operation.
-    ///
-    /// - Output backprops: The gradients: `gradients * (outputs + scale * alpha)`
-    ///   if outputs < 0, `scale * gradients` otherwise.
-    @inlinable @inline(__always)
-    public static func seluGrad<T: TensorFlowFloatingPoint>(
-      gradients: Tensor<T>,
-      outputs: Tensor<T>
-    ) -> Tensor<T> {
-      switch commonBackend(gradients.handle.backend, outputs.handle.backend) {
-      case .XLA:
-        return _RawXLA.seluGrad(gradients: gradients, outputs: outputs)
-      case .TF_EAGER:
-        return _RawTFEager.seluGrad(gradients: gradients, outputs: outputs)
-      }
-
-    }
-
-    /// Sends the named tensor from send_device to recv_device.
-    ///
-    /// - Parameter tensor: The tensor to send.
-    ///
-    /// - Attrs:
-    ///   - tensor_name: The name of the tensor to send.
-    ///   - send_device: The name of the device sending the tensor.
-    ///   - send_device_incarnation: The current incarnation of send_device.
-    ///   - recv_device: The name of the device receiving the tensor.
-    ///   - client_terminated: If set to true, this indicates that the node was added
-    ///     to the graph as a result of a client-side feed or fetch of Tensor data,
-    ///     in which case the corresponding send or recv is expected to be managed
-    ///     locally by the caller.
-    @inlinable @inline(__always)
-    public static func send<T: TensorFlowScalar>(
-      _ tensor: Tensor<T>,
-      tensorName: String,
-      sendDevice: String,
-      sendDeviceIncarnation: Int64,
-      recvDevice: String,
-      clientTerminated: Bool = false
-    ) {
-      _RawTFEager.send(
-        tensor, tensorName: tensorName, sendDevice: sendDevice,
-        sendDeviceIncarnation: sendDeviceIncarnation, recvDevice: recvDevice,
-        clientTerminated: clientTerminated)
-    }
-
-    /// Performs gradient updates of embedding tables.
-    ///
-    /// - Parameters:
-    ///   - inputs: A TensorList of gradients with which to update embedding tables.
-    ///     This argument has the same length and shapes as the return value of
-    ///     RecvTPUEmbeddingActivations, but contains gradients of the model's loss
-    ///     with respect to the embedding activations. The embedding tables are updated
-    ///     from these gradients via the optimizer specified in the TPU embedding
-    ///     configuration given to tpu.initialize_system.
-    ///   - learning_rates: A TensorList of float32 scalars, one for each dynamic learning
-    ///     rate tag: see the comments in
-    ///     //third_party/tensorflow/core/protobuf/tpu/optimization_parameters.proto.
-    ///     Multiple tables can share the same dynamic learning rate tag as specified
-    ///     in the configuration. If the learning rates for all tables are constant,
-    ///     this list should be empty.
-    ///
-    /// - Attr config: Serialized TPUEmbeddingConfiguration proto.
-    @inlinable @inline(__always)
-    public static func sendTPUEmbeddingGradients(
-      inputs: [Tensor<Float>],
-      learningRates: [Tensor<Float>],
-      config: String
-    ) {
-      _RawTFEager.sendTPUEmbeddingGradients(
-        inputs: inputs, learningRates: learningRates, config: config)
-    }
-
-    /// Converts the given `resource_handle` representing an iterator to a variant tensor.
- /// - /// - Parameter resource_handle: A handle to an iterator resource. - /// - /// - Output serialized: A variant tensor storing the state of the iterator contained in the - /// resource. - @inlinable @inline(__always) - public static func serializeIterator( - resourceHandle: ResourceHandle - ) -> VariantHandle { - _RawTFEager.serializeIterator(resourceHandle: resourceHandle) - } - - /// Serialize an `N`-minibatch `SparseTensor` into an `[N, 3]` `Tensor` object. - /// - /// The `SparseTensor` must have rank `R` greater than 1, and the first dimension - /// is treated as the minibatch dimension. Elements of the `SparseTensor` - /// must be sorted in increasing order of this first dimension. The serialized - /// `SparseTensor` objects going into each row of `serialized_sparse` will have - /// rank `R-1`. - /// - /// The minibatch size `N` is extracted from `sparse_shape[0]`. - /// - /// - Parameters: - /// - sparse_indices: 2-D. The `indices` of the minibatch `SparseTensor`. - /// - sparse_values: 1-D. The `values` of the minibatch `SparseTensor`. - /// - sparse_shape: 1-D. The `shape` of the minibatch `SparseTensor`. - /// - /// - Attr out_type: The `dtype` to use for serialization; the supported types are `string` - /// (default) and `variant`. - @inlinable @inline(__always) - public static func serializeManySparse< - T: TensorFlowScalar, - OutType: TensorFlowScalar - >( - sparseIndices: Tensor, - sparseValues: Tensor, - sparseShape: Tensor - ) -> Tensor { - switch commonBackend( - commonBackend(sparseIndices.handle.backend, sparseValues.handle.backend), - sparseShape.handle.backend) - { - case .XLA: - let output_device = sparseShape.device - let sparseIndices = Tensor(copying: sparseIndices, to: .defaultTFEager) - let sparseValues = Tensor(copying: sparseValues, to: .defaultTFEager) - let sparseShape = Tensor(copying: sparseShape, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.serializeManySparse( - sparseIndices: sparseIndices, sparseValues: sparseValues, sparseShape: sparseShape), - to: output_device) - case .TF_EAGER: - return _RawTFEager.serializeManySparse( - sparseIndices: sparseIndices, sparseValues: sparseValues, sparseShape: sparseShape) - } - - } - - /// Serialize an `N`-minibatch `SparseTensor` into an `[N, 3]` `Tensor` object. - /// - /// The `SparseTensor` must have rank `R` greater than 1, and the first dimension - /// is treated as the minibatch dimension. Elements of the `SparseTensor` - /// must be sorted in increasing order of this first dimension. The serialized - /// `SparseTensor` objects going into each row of `serialized_sparse` will have - /// rank `R-1`. - /// - /// The minibatch size `N` is extracted from `sparse_shape[0]`. - /// - /// - Parameters: - /// - sparse_indices: 2-D. The `indices` of the minibatch `SparseTensor`. - /// - sparse_values: 1-D. The `values` of the minibatch `SparseTensor`. - /// - sparse_shape: 1-D. The `shape` of the minibatch `SparseTensor`. - /// - /// - Attr out_type: The `dtype` to use for serialization; the supported types are `string` - /// (default) and `variant`. 
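A minimal sketch of the `serializeManySparse` call documented above, assuming a small hand-built minibatch `SparseTensor` (the entries are hypothetical; the `StringTensor` return annotation selects the string-typed overload below):

```swift
import TensorFlow

// A [2, 3] minibatch SparseTensor with entries (0,0)=1, (0,2)=2, (1,1)=3.
let sparseIndices = Tensor<Int64>(shape: [3, 2], scalars: [0, 0, 0, 2, 1, 1])
let sparseValues = Tensor<Float>([1, 2, 3])
let sparseShape = Tensor<Int64>([2, 3])
let serialized: StringTensor = _Raw.serializeManySparse(
  sparseIndices: sparseIndices, sparseValues: sparseValues, sparseShape: sparseShape)
// `serialized` has shape [2, 3]: one row of serialized (indices, values, shape)
// per minibatch entry, each of rank R-1 = 1.
```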
- @inlinable @inline(__always) - public static func serializeManySparse( - sparseIndices: Tensor, - sparseValues: Tensor, - sparseShape: Tensor - ) -> StringTensor { - _RawTFEager.serializeManySparse( - sparseIndices: sparseIndices, sparseValues: sparseValues, sparseShape: sparseShape) + commonBackend(input.handle.backend, paddingValue.handle.backend), + paddingLow.handle.backend), paddingHigh.handle.backend), paddingInterior.handle.backend) + { + case .XLA: + let output_device = paddingInterior.device + let input = Tensor(copying: input, to: .defaultTFEager) + let paddingValue = Tensor(copying: paddingValue, to: .defaultTFEager) + let paddingLow = Tensor(copying: paddingLow, to: .defaultTFEager) + let paddingHigh = Tensor(copying: paddingHigh, to: .defaultTFEager) + let paddingInterior = Tensor(copying: paddingInterior, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.xlaPad( + input, paddingValue: paddingValue, paddingLow: paddingLow, paddingHigh: paddingHigh, + paddingInterior: paddingInterior), to: output_device) + case .TF_EAGER: + return _RawTFEager.xlaPad( + input, paddingValue: paddingValue, paddingLow: paddingLow, paddingHigh: paddingHigh, + paddingInterior: paddingInterior) } - /// Serialize a `SparseTensor` into a `[3]` `Tensor` object. - /// - /// - Parameters: - /// - sparse_indices: 2-D. The `indices` of the `SparseTensor`. - /// - sparse_values: 1-D. The `values` of the `SparseTensor`. - /// - sparse_shape: 1-D. The `shape` of the `SparseTensor`. - /// - /// - Attr out_type: The `dtype` to use for serialization; the supported types are `string` - /// (default) and `variant`. - @inlinable @inline(__always) - public static func serializeSparse< - T: TensorFlowScalar, - OutType: TensorFlowScalar - >( - sparseIndices: Tensor, - sparseValues: Tensor, - sparseShape: Tensor - ) -> Tensor { - switch commonBackend( - commonBackend(sparseIndices.handle.backend, sparseValues.handle.backend), - sparseShape.handle.backend) - { - case .XLA: - let output_device = sparseShape.device - let sparseIndices = Tensor(copying: sparseIndices, to: .defaultTFEager) - let sparseValues = Tensor(copying: sparseValues, to: .defaultTFEager) - let sparseShape = Tensor(copying: sparseShape, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.serializeSparse( - sparseIndices: sparseIndices, sparseValues: sparseValues, sparseShape: sparseShape), - to: output_device) - case .TF_EAGER: - return _RawTFEager.serializeSparse( - sparseIndices: sparseIndices, sparseValues: sparseValues, sparseShape: sparseShape) - } - - } - - /// Serialize a `SparseTensor` into a `[3]` `Tensor` object. - /// - /// - Parameters: - /// - sparse_indices: 2-D. The `indices` of the `SparseTensor`. - /// - sparse_values: 1-D. The `values` of the `SparseTensor`. - /// - sparse_shape: 1-D. The `shape` of the `SparseTensor`. - /// - /// - Attr out_type: The `dtype` to use for serialization; the supported types are `string` - /// (default) and `variant`. 
- @inlinable @inline(__always) - public static func serializeSparse( - sparseIndices: Tensor, - sparseValues: Tensor, - sparseShape: Tensor - ) -> StringTensor { - _RawTFEager.serializeSparse( - sparseIndices: sparseIndices, sparseValues: sparseValues, sparseShape: sparseShape) - } + } - @inlinable @inline(__always) - public static func serializeTRTResource( - resourceName: StringTensor, - filename: StringTensor, - deleteResource: Bool = false - ) { - _RawTFEager.serializeTRTResource( - resourceName: resourceName, filename: filename, deleteResource: deleteResource) - } - - /// Transforms a Tensor into a serialized TensorProto proto. - /// - /// - Parameter tensor: A Tensor of type `T`. - /// - /// - Attr T: The type of the input tensor. - /// - /// - Output serialized: A serialized TensorProto proto of the input tensor. - @inlinable @inline(__always) - public static func serializeTensor( - _ tensor: Tensor - ) -> StringTensor { - _RawTFEager.serializeTensor(tensor) - } - - /// Number of unique elements along last dimension of input `set`. - /// - /// Input `set` is a `SparseTensor` represented by `set_indices`, `set_values`, - /// and `set_shape`. The last dimension contains values in a set, duplicates are - /// allowed but ignored. - /// - /// If `validate_indices` is `True`, this op validates the order and range of `set` - /// indices. - /// - /// - Parameters: - /// - set_indices: 2D `Tensor`, indices of a `SparseTensor`. - /// - set_values: 1D `Tensor`, values of a `SparseTensor`. - /// - set_shape: 1D `Tensor`, shape of a `SparseTensor`. - /// - /// - Output size: For `set` ranked `n`, this is a `Tensor` with rank `n-1`, and the same 1st - /// `n-1` dimensions as `set`. Each value is the number of unique elements in - /// the corresponding `[0...n-1]` dimension of `set`. - @inlinable @inline(__always) - public static func setSize( - setIndices: Tensor, - setValues: Tensor, - setShape: Tensor, - validateIndices: Bool = true - ) -> Tensor { - switch commonBackend( - commonBackend(setIndices.handle.backend, setValues.handle.backend), setShape.handle.backend) - { - case .XLA: - let output_device = setShape.device - let setIndices = Tensor(copying: setIndices, to: .defaultTFEager) - let setValues = Tensor(copying: setValues, to: .defaultTFEager) - let setShape = Tensor(copying: setShape, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.setSize( - setIndices: setIndices, setValues: setValues, setShape: setShape, - validateIndices: validateIndices), to: output_device) - case .TF_EAGER: - return _RawTFEager.setSize( - setIndices: setIndices, setValues: setValues, setShape: setShape, - validateIndices: validateIndices) - } - - } - - /// Number of unique elements along last dimension of input `set`. - /// - /// Input `set` is a `SparseTensor` represented by `set_indices`, `set_values`, - /// and `set_shape`. The last dimension contains values in a set, duplicates are - /// allowed but ignored. - /// - /// If `validate_indices` is `True`, this op validates the order and range of `set` - /// indices. - /// - /// - Parameters: - /// - set_indices: 2D `Tensor`, indices of a `SparseTensor`. - /// - set_values: 1D `Tensor`, values of a `SparseTensor`. - /// - set_shape: 1D `Tensor`, shape of a `SparseTensor`. - /// - /// - Output size: For `set` ranked `n`, this is a `Tensor` with rank `n-1`, and the same 1st - /// `n-1` dimensions as `set`. Each value is the number of unique elements in - /// the corresponding `[0...n-1]` dimension of `set`. 
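To make the `setSize` semantics above concrete, a short sketch with a hand-built sparse "set" (values are illustrative; `validateIndices` keeps its default of `true`):

```swift
import TensorFlow

// Rows of a sparse [2, 3] set: {1, 2, 2} and {3}; duplicates are ignored.
let setIndices = Tensor<Int64>(shape: [4, 2], scalars: [0, 0, 0, 1, 0, 2, 1, 0])
let setValues = Tensor<Int32>([1, 2, 2, 3])
let setShape = Tensor<Int64>([2, 3])
let sizes = _Raw.setSize(setIndices: setIndices, setValues: setValues, setShape: setShape)
// sizes == [2, 1]: the number of unique elements along the last dimension,
// so the output has rank n-1.
```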
- @inlinable @inline(__always) - public static func setSize( - setIndices: Tensor, - setValues: StringTensor, - setShape: Tensor, - validateIndices: Bool = true - ) -> Tensor { - switch commonBackend(setIndices.handle.backend, setShape.handle.backend) { - case .XLA: - let output_device = setShape.device - let setIndices = Tensor(copying: setIndices, to: .defaultTFEager) - let setShape = Tensor(copying: setShape, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.setSize( - setIndices: setIndices, setValues: setValues, setShape: setShape, - validateIndices: validateIndices), to: output_device) - case .TF_EAGER: - return _RawTFEager.setSize( - setIndices: setIndices, setValues: setValues, setShape: setShape, - validateIndices: validateIndices) - } - - } - - @inlinable @inline(__always) - public static func setStatsAggregatorDataset( - inputDataset: VariantHandle, - statsAggregator: ResourceHandle, - tag: StringTensor, - counterPrefix: StringTensor, - outputTypes: [TensorDataType], - outputShapes: [TensorShape?] - ) -> VariantHandle { - _RawTFEager.setStatsAggregatorDataset( - inputDataset: inputDataset, statsAggregator: statsAggregator, tag: tag, - counterPrefix: counterPrefix, outputTypes: outputTypes, outputShapes: outputShapes) - } - - /// Returns the shape of a tensor. - /// - /// This operation returns a 1-D integer tensor representing the shape of `input`. - /// - /// For example: - /// - /// ``` - /// # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]] - /// shape(t) ==> [2, 2, 3] - /// ``` - @inlinable @inline(__always) - public static func shape< - T: TensorFlowScalar, - OutType: TensorFlowIndex - >( - _ input: Tensor - ) -> Tensor { - switch input.handle.backend { - case .XLA: - return _RawXLA.shape(input) - case .TF_EAGER: - return _RawTFEager.shape(input) - } - - } - - /// Returns shape of tensors. - /// - /// This operation returns N 1-D integer tensors representing shape of `input[i]s`. - @inlinable @inline(__always) - public static func shapeN< - T: TensorFlowScalar, - OutType: TensorFlowIndex - >( - _ input: [Tensor] - ) -> [Tensor] { - _RawTFEager.shapeN(input) - } - - /// Creates a `Dataset` that includes only 1/`num_shards` of this dataset. - /// - /// - Parameters: - /// - num_shards: An integer representing the number of shards operating in parallel. - /// - index: An integer representing the current worker index. - @inlinable @inline(__always) - public static func shardDataset( - inputDataset: VariantHandle, - numShards: Tensor, - index: Tensor, - requireNonEmpty: Bool = false, - outputTypes: [TensorDataType], - outputShapes: [TensorShape?] - ) -> VariantHandle { - _RawTFEager.shardDataset( - inputDataset: inputDataset, numShards: numShards, index: index, - requireNonEmpty: requireNonEmpty, outputTypes: outputTypes, outputShapes: outputShapes) - } - - /// Generate a sharded filename. The filename is printf formatted as - /// - /// %s-%05d-of-%05d, basename, shard, num_shards. - @inlinable @inline(__always) - public static func shardedFilename( - basename: StringTensor, - shard: Tensor, - numShards: Tensor - ) -> StringTensor { - _RawTFEager.shardedFilename(basename: basename, shard: shard, numShards: numShards) - } - - /// Generate a glob pattern matching all sharded file names. 
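For the `shardedFilename` printf format shown above, a one-line sketch (the basename is hypothetical):

```swift
import TensorFlow

let name = _Raw.shardedFilename(
  basename: StringTensor("mydata"),
  shard: Tensor<Int32>(2),
  numShards: Tensor<Int32>(10))
// name == "mydata-00002-of-00010", i.e. %s-%05d-of-%05d applied to
// (basename, shard, num_shards).
```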
- @inlinable @inline(__always) - public static func shardedFilespec( - basename: StringTensor, - numShards: Tensor - ) -> StringTensor { - _RawTFEager.shardedFilespec(basename: basename, numShards: numShards) - } - - /// Creates a dataset that shuffles and repeats elements from `input_dataset` - /// - /// pseudorandomly. - /// - /// - Parameters: - /// - buffer_size: The number of output elements to buffer in an iterator over - /// this dataset. Compare with the `min_after_dequeue` attr when creating a - /// `RandomShuffleQueue`. - /// - seed: A scalar seed for the random number generator. If either `seed` or - /// `seed2` is set to be non-zero, the random number generator is seeded - /// by the given seed. Otherwise, a random seed is used. - /// - seed2: A second scalar seed to avoid seed collision. - /// - count: A scalar representing the number of times the underlying dataset - /// should be repeated. The default is `-1`, which results in infinite repetition. - @inlinable @inline(__always) - public static func shuffleAndRepeatDataset( - inputDataset: VariantHandle, - bufferSize: Tensor, - seed: Tensor, - seed2: Tensor, - count: Tensor, - outputTypes: [TensorDataType], - outputShapes: [TensorShape?] - ) -> VariantHandle { - _RawTFEager.shuffleAndRepeatDataset( - inputDataset: inputDataset, bufferSize: bufferSize, seed: seed, seed2: seed2, count: count, - outputTypes: outputTypes, outputShapes: outputShapes) - } - - /// Creates a dataset that shuffles elements from `input_dataset` pseudorandomly. - /// - /// - Parameters: - /// - buffer_size: The number of output elements to buffer in an iterator over - /// this dataset. Compare with the `min_after_dequeue` attr when creating a - /// `RandomShuffleQueue`. - /// - seed: A scalar seed for the random number generator. If either `seed` or - /// `seed2` is set to be non-zero, the random number generator is seeded - /// by the given seed. Otherwise, a random seed is used. - /// - seed2: A second scalar seed to avoid seed collision. - /// - /// - Attr reshuffle_each_iteration: If true, each iterator over this dataset will be given - /// a different pseudorandomly generated seed, based on a sequence seeded by the - /// `seed` and `seed2` inputs. If false, each iterator will be given the same - /// seed, and repeated iteration over this dataset will yield the exact same - /// sequence of results. - @inlinable @inline(__always) - public static func shuffleDataset( - inputDataset: VariantHandle, - bufferSize: Tensor, - seed: Tensor, - seed2: Tensor, - reshuffleEachIteration: Bool = true, - outputTypes: [TensorDataType], - outputShapes: [TensorShape?] - ) -> VariantHandle { - _RawTFEager.shuffleDataset( - inputDataset: inputDataset, bufferSize: bufferSize, seed: seed, seed2: seed2, - reshuffleEachIteration: reshuffleEachIteration, outputTypes: outputTypes, - outputShapes: outputShapes) - } - - @inlinable @inline(__always) - public static func shuffleDatasetV2( - inputDataset: VariantHandle, - bufferSize: Tensor, - seedGenerator: ResourceHandle, - outputTypes: [TensorDataType], - outputShapes: [TensorShape?] - ) -> VariantHandle { - _RawTFEager.shuffleDatasetV2( - inputDataset: inputDataset, bufferSize: bufferSize, seedGenerator: seedGenerator, - outputTypes: outputTypes, outputShapes: outputShapes) - } - - /// Shuts down a running distributed TPU system. - /// - /// The op returns an error if no system is running. 
- @inlinable @inline(__always) - public static func shutdownDistributedTPU() { - _RawTFEager.shutdownDistributedTPU() - } - - /// Computes sigmoid of `x` element-wise. - /// - /// Specifically, `y = 1 / (1 + exp(-x))`. - @inlinable @inline(__always) - public static func sigmoid( - _ x: Tensor - ) -> Tensor { - switch x.handle.backend { - case .XLA: - return _RawXLA.sigmoid(x) - case .TF_EAGER: - return _RawTFEager.sigmoid(x) - } - - } - - /// Computes the gradient of the sigmoid of `x` wrt its input. - /// - /// Specifically, `grad = dy * y * (1 - y)`, where `y = sigmoid(x)`, and - /// `dy` is the corresponding input gradient. - @inlinable @inline(__always) - public static func sigmoidGrad( - _ y: Tensor, - dy: Tensor - ) -> Tensor { - switch commonBackend(y.handle.backend, dy.handle.backend) { - case .XLA: - return _RawXLA.sigmoidGrad(y, dy: dy) - case .TF_EAGER: - return _RawTFEager.sigmoidGrad(y, dy: dy) - } - - } - - /// Returns an element-wise indication of the sign of a number. - /// - /// `y = sign(x) = -1` if `x < 0`; 0 if `x == 0`; 1 if `x > 0`. - /// - /// For complex numbers, `y = sign(x) = x / |x|` if `x != 0`, otherwise `y = 0`. - /// - /// Example usage: - /// >>> tf.math.sign([0., 2., -3.]) - /// - @inlinable @inline(__always) - public static func sign( - _ x: Tensor - ) -> Tensor { - switch x.handle.backend { - case .XLA: - return _RawXLA.sign(x) - case .TF_EAGER: - return _RawTFEager.sign(x) - } - - } - - @inlinable @inline(__always) - public static func simple( - _ a: Tensor - ) -> Tensor { - switch a.handle.backend { - case .XLA: - let output_device = a.device - let a = Tensor(copying: a, to: .defaultTFEager) - return Tensor(copying: _RawTFEager.simple(a), to: output_device) - case .TF_EAGER: - return _RawTFEager.simple(a) - } - - } - - @inlinable @inline(__always) - public static func simpleStruct( - nA: Int64 - ) -> [Tensor] { - _RawTFEager.simpleStruct(nA: nA) - } - - /// Computes sine of x element-wise. - /// - /// Given an input tensor, this function computes sine of every - /// element in the tensor. Input range is `(-inf, inf)` and - /// output range is `[-1,1]`. - /// - /// ```python - /// x = tf.constant([-float("inf"), -9, -0.5, 1, 1.2, 200, 10, float("inf")]) - /// tf.math.sin(x) ==> [nan -0.4121185 -0.47942555 0.84147096 0.9320391 -0.87329733 -0.54402107 nan] - /// ``` - @inlinable @inline(__always) - public static func sin( - _ x: Tensor - ) -> Tensor { - switch x.handle.backend { - case .XLA: - return _RawXLA.sin(x) - case .TF_EAGER: - return _RawTFEager.sin(x) - } - - } - - /// Computes hyperbolic sine of x element-wise. - /// - /// Given an input tensor, this function computes hyperbolic sine of every - /// element in the tensor. Input range is `[-inf,inf]` and output range - /// is `[-inf,inf]`. - /// - /// ```python - /// x = tf.constant([-float("inf"), -9, -0.5, 1, 1.2, 2, 10, float("inf")]) - /// tf.math.sinh(x) ==> [-inf -4.0515420e+03 -5.2109528e-01 1.1752012e+00 1.5094614e+00 3.6268604e+00 1.1013232e+04 inf] - /// ``` - @inlinable @inline(__always) - public static func sinh( - _ x: Tensor - ) -> Tensor { - switch x.handle.backend { - case .XLA: - return _RawXLA.sinh(x) - case .TF_EAGER: - return _RawTFEager.sinh(x) - } - - } - - /// Returns the size of a tensor. - /// - /// This operation returns an integer representing the number of elements in - /// `input`. 
-    ///
-    /// For example:
-    ///
-    /// ```
-    /// # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
-    /// size(t) ==> 12
-    /// ```
-    @inlinable @inline(__always)
-    public static func size<
-      T: TensorFlowScalar,
-      OutType: TensorFlowIndex
-    >(
-      _ input: Tensor<T>
-    ) -> Tensor<OutType> {
-      switch input.handle.backend {
-      case .XLA:
-        return _RawXLA.size(input)
-      case .TF_EAGER:
-        return _RawTFEager.size(input)
-      }
-
-    }
-
-    /// Creates a dataset that skips `count` elements from the `input_dataset`.
-    ///
-    /// - Parameter count: A scalar representing the number of elements from the `input_dataset`
-    ///   that should be skipped. If count is -1, skips everything.
-    @inlinable @inline(__always)
-    public static func skipDataset(
-      inputDataset: VariantHandle,
-      count: Tensor<Int64>,
-      outputTypes: [TensorDataType],
-      outputShapes: [TensorShape?]
-    ) -> VariantHandle {
-      _RawTFEager.skipDataset(
-        inputDataset: inputDataset, count: count, outputTypes: outputTypes,
-        outputShapes: outputShapes)
-    }
-
-    /// Parses a text file and creates a batch of examples.
-    ///
-    /// - Attrs:
-    ///   - filename: The corpus's text file name.
-    ///   - batch_size: The size of produced batch.
-    ///   - window_size: The number of words to predict to the left and right of the target.
-    ///   - min_count: The minimum number of word occurrences for it to be included in the
-    ///     vocabulary.
-    ///   - subsample: Threshold for word occurrence. Words that appear with higher
-    ///     frequency will be randomly down-sampled. Set to 0 to disable.
-    ///
-    /// - Outputs:
-    ///   - vocab_word: A vector of words in the corpus.
-    ///   - vocab_freq: Frequencies of words. Sorted in the non-ascending order.
-    ///   - words_per_epoch: Number of words per epoch in the data file.
-    ///   - current_epoch: The current epoch number.
-    ///   - total_words_processed: The total number of words processed so far.
-    ///   - examples: A vector of word ids.
-    ///   - labels: A vector of word ids.
-    @inlinable @inline(__always)
-    public static func skipgram(
-      filename: String,
-      batchSize: Int64,
-      windowSize: Int64 = 5,
-      minCount: Int64 = 5,
-      subsample: Double = 0.001
-    ) -> (
-      vocabWord: StringTensor, vocabFreq: Tensor<Int32>, wordsPerEpoch: Tensor<Int64>,
-      currentEpoch: Tensor<Int32>, totalWordsProcessed: Tensor<Int64>, examples: Tensor<Int32>,
-      labels: Tensor<Int32>
-    ) {
-      _RawTFEager.skipgram(
-        filename: filename, batchSize: batchSize, windowSize: windowSize, minCount: minCount,
-        subsample: subsample)
-    }
-
-    @inlinable @inline(__always)
-    public static func sleepDataset(
-      inputDataset: VariantHandle,
-      sleepMicroseconds: Tensor<Int64>,
-      outputTypes: [TensorDataType],
-      outputShapes: [TensorShape?]
-    ) -> VariantHandle {
-      _RawTFEager.sleepDataset(
-        inputDataset: inputDataset, sleepMicroseconds: sleepMicroseconds, outputTypes: outputTypes,
-        outputShapes: outputShapes)
-    }
-
-    /// Return a slice from 'input'.
-    ///
-    /// The output tensor is a tensor with dimensions described by 'size'
-    /// whose values are extracted from 'input' starting at the offsets in
-    /// 'begin'.
-    ///
-    /// *Requirements*:
-    ///   0 <= begin[i] <= begin[i] + size[i] <= Di for i in [0, n)
-    ///
-    /// - Parameters:
-    ///   - begin: begin[i] specifies the offset into the 'i'th dimension of
-    ///     'input' to slice from.
-    ///   - size: size[i] specifies the number of elements of the 'i'th dimension
-    ///     of 'input' to slice. If size[i] is -1, all remaining elements in dimension
-    ///     i are included in the slice (i.e. this is equivalent to setting
-    ///     size[i] = input.dim_size(i) - begin[i]).
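A quick sketch of the `slice` semantics above, including the `size[i] == -1` shorthand (the input values are illustrative):

```swift
import TensorFlow

// input is [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11]].
let input = Tensor<Int32>(shape: [3, 4], scalars: (0..<12).map(Int32.init))
let sliced = _Raw.slice(
  input,
  begin: Tensor<Int32>([1, 0]),
  size: Tensor<Int32>([-1, 2]))
// sliced == [[4, 5], [8, 9]]: size -1 takes all remaining rows from offset 1,
// and the first two columns of each.
```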
- @inlinable @inline(__always) - public static func slice< - T: TensorFlowScalar, - Index: TensorFlowIndex - >( - _ input: Tensor, - begin: Tensor, - size: Tensor - ) -> Tensor { - switch commonBackend( - commonBackend(input.handle.backend, begin.handle.backend), size.handle.backend) - { - case .XLA: - return _RawXLA.slice(input, begin: begin, size: size) - case .TF_EAGER: - return _RawTFEager.slice(input, begin: begin, size: size) - } - - } - - /// Creates a dataset that passes a sliding window over `input_dataset`. - /// - /// - Parameters: - /// - window_size: A scalar representing the number of elements in the - /// sliding window. - /// - window_shift: A scalar representing the steps moving the sliding window - /// forward in one iteration. It must be positive. - /// - window_stride: A scalar representing the stride of the input elements of the sliding window. - /// It must be positive. - @inlinable @inline(__always) - public static func slidingWindowDataset( - inputDataset: VariantHandle, - windowSize: Tensor, - windowShift: Tensor, - windowStride: Tensor, - outputTypes: [TensorDataType], - outputShapes: [TensorShape?] - ) -> VariantHandle { - _RawTFEager.slidingWindowDataset( - inputDataset: inputDataset, windowSize: windowSize, windowShift: windowShift, - windowStride: windowStride, outputTypes: outputTypes, outputShapes: outputShapes) - } - - /// Returns a copy of the input tensor. - @inlinable @inline(__always) - public static func snapshot( - _ input: Tensor - ) -> Tensor { - switch input.handle.backend { - case .XLA: - let output_device = input.device - let input = Tensor(copying: input, to: .defaultTFEager) - return Tensor(copying: _RawTFEager.snapshot(input), to: output_device) - case .TF_EAGER: - return _RawTFEager.snapshot(input) - } - - } - - /// Creates a dataset that will write to / read from a snapshot. - /// - /// This dataset attempts to determine whether a valid snapshot exists at the - /// `snapshot_path`, and reads from the snapshot in lieu of using `input_dataset`. - /// If not, it will run the preprocessing pipeline as usual, and write out a - /// snapshot of the data processed for future use. - /// - /// - Parameters: - /// - input_dataset: A variant tensor representing the input dataset. - /// - path: The path we should write snapshots to / read snapshots from. - @inlinable @inline(__always) - public static func snapshotDataset( - inputDataset: VariantHandle, - path: StringTensor, - outputTypes: [TensorDataType], - outputShapes: [TensorShape?], - compression: String, - readerPathPrefix: String, - writerPathPrefix: String, - shardSizeBytes: Int64 = 10_737_418_240, - pendingSnapshotExpirySeconds: Int64 = 86400, - numReaderThreads: Int64 = 1, - readerBufferSize: Int64 = 1, - numWriterThreads: Int64 = 1, - writerBufferSize: Int64 = 1, - shuffleOnRead: Bool = false, - seed: Int64 = 0, - seed2: Int64 = 0 - ) -> VariantHandle { - _RawTFEager.snapshotDataset( - inputDataset: inputDataset, path: path, outputTypes: outputTypes, - outputShapes: outputShapes, compression: compression, readerPathPrefix: readerPathPrefix, - writerPathPrefix: writerPathPrefix, shardSizeBytes: shardSizeBytes, - pendingSnapshotExpirySeconds: pendingSnapshotExpirySeconds, - numReaderThreads: numReaderThreads, readerBufferSize: readerBufferSize, - numWriterThreads: numWriterThreads, writerBufferSize: writerBufferSize, - shuffleOnRead: shuffleOnRead, seed: seed, seed2: seed2) - } - - /// Computes softmax activations. 
- /// - /// For each batch `i` and class `j` we have - /// - /// $$softmax[i, j] = exp(logits[i, j]) / sum_j(exp(logits[i, j]))$$ - /// - /// - Parameter logits: 2-D with shape `[batch_size, num_classes]`. - /// - /// - Output softmax: Same shape as `logits`. - @inlinable @inline(__always) - public static func softmax( - logits: Tensor - ) -> Tensor { - switch logits.handle.backend { - case .XLA: - return _RawXLA.softmax(logits: logits) - case .TF_EAGER: - return _RawTFEager.softmax(logits: logits) - } - - } - - /// Computes softmax cross entropy cost and gradients to backpropagate. - /// - /// Inputs are the logits, not probabilities. - /// - /// - Parameters: - /// - features: batch_size x num_classes matrix - /// - labels: batch_size x num_classes matrix - /// The caller must ensure that each batch of labels represents a valid - /// probability distribution. - /// - /// - Outputs: - /// - loss: Per example loss (batch_size vector). - /// - backprop: backpropagated gradients (batch_size x num_classes matrix). - @inlinable @inline(__always) - public static func softmaxCrossEntropyWithLogits( - features: Tensor, - labels: Tensor - ) -> (loss: Tensor, backprop: Tensor) { - switch commonBackend(features.handle.backend, labels.handle.backend) { - case .XLA: - return _RawXLA.softmaxCrossEntropyWithLogits(features: features, labels: labels) - case .TF_EAGER: - return _RawTFEager.softmaxCrossEntropyWithLogits(features: features, labels: labels) - } - - } - - /// Computes softplus: `log(exp(features) + 1)`. - @inlinable @inline(__always) - public static func softplus( - features: Tensor - ) -> Tensor { - switch features.handle.backend { - case .XLA: - return _RawXLA.softplus(features: features) - case .TF_EAGER: - return _RawTFEager.softplus(features: features) - } - - } - - /// Computes softplus gradients for a softplus operation. - /// - /// - Parameters: - /// - gradients: The backpropagated gradients to the corresponding softplus operation. - /// - features: The features passed as input to the corresponding softplus operation. - /// - /// - Output backprops: The gradients: `gradients / (1 + exp(-features))`. - @inlinable @inline(__always) - public static func softplusGrad( - gradients: Tensor, - features: Tensor - ) -> Tensor { - switch commonBackend(gradients.handle.backend, features.handle.backend) { - case .XLA: - return _RawXLA.softplusGrad(gradients: gradients, features: features) - case .TF_EAGER: - return _RawTFEager.softplusGrad(gradients: gradients, features: features) - } - - } - - /// Computes softsign: `features / (abs(features) + 1)`. - @inlinable @inline(__always) - public static func softsign( - features: Tensor - ) -> Tensor { - switch features.handle.backend { - case .XLA: - return _RawXLA.softsign(features: features) - case .TF_EAGER: - return _RawTFEager.softsign(features: features) - } - - } - - /// Computes softsign gradients for a softsign operation. - /// - /// - Parameters: - /// - gradients: The backpropagated gradients to the corresponding softsign operation. - /// - features: The features passed as input to the corresponding softsign operation. - /// - /// - Output backprops: The gradients: `gradients / (1 + abs(features)) ** 2`. 
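As a numeric sanity check of the backprop formula above (upstream gradients of 1 make the output exactly `1 / (1 + abs(features))^2`; the inputs are illustrative):

```swift
import TensorFlow

let features = Tensor<Float>([-2, 0, 2])
let upstream = Tensor<Float>([1, 1, 1])
let backprops = _Raw.softsignGrad(gradients: upstream, features: features)
// Elementwise 1 / (1 + |x|)^2: approximately [0.111, 1.0, 0.111].
```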
-    @inlinable @inline(__always)
-    public static func softsignGrad<T: TensorFlowFloatingPoint>(
-      gradients: Tensor<T>,
-      features: Tensor<T>
-    ) -> Tensor<T> {
-      switch commonBackend(gradients.handle.backend, features.handle.backend) {
-      case .XLA:
-        return _RawXLA.softsignGrad(gradients: gradients, features: features)
-      case .TF_EAGER:
-        return _RawTFEager.softsignGrad(gradients: gradients, features: features)
-      }
-
-    }
-
-    /// SpaceToBatch for 4-D tensors of type T.
-    ///
-    /// This is a legacy version of the more general SpaceToBatchND.
-    ///
-    /// Zero-pads and then rearranges (permutes) blocks of spatial data into batch.
-    /// More specifically, this op outputs a copy of the input tensor where values from
-    /// the `height` and `width` dimensions are moved to the `batch` dimension. After
-    /// the zero-padding, both `height` and `width` of the input must be divisible by the
-    /// block size.
-    ///
-    /// - Parameters:
-    ///   - input: 4-D with shape `[batch, height, width, depth]`.
-    ///   - paddings: 2-D tensor of non-negative integers with shape `[2, 2]`. It specifies
-    ///     the padding of the input with zeros across the spatial dimensions as follows:
-    ///
-    ///         paddings = [[pad_top, pad_bottom], [pad_left, pad_right]]
-    ///
-    ///     The effective spatial dimensions of the zero-padded input tensor will be:
-    ///
-    ///         height_pad = pad_top + height + pad_bottom
-    ///         width_pad = pad_left + width + pad_right
-    ///
-    ///     The attr `block_size` must be greater than one. It indicates the block size.
-    ///
-    ///       * Non-overlapping blocks of size `block_size x block_size` in the height and
-    ///         width dimensions are rearranged into the batch dimension at each location.
-    ///       * The batch of the output tensor is `batch * block_size * block_size`.
-    ///       * Both height_pad and width_pad must be divisible by block_size.
- /// - /// The shape of the output will be: - /// - /// [batch*block_size*block_size, height_pad/block_size, width_pad/block_size, - /// depth] - /// - /// Some examples: - /// - /// (1) For the following input of shape `[1, 2, 2, 1]` and block_size of 2: - /// - /// ``` - /// x = [[[[1], [2]], [[3], [4]]]] - /// ``` - /// - /// The output tensor has shape `[4, 1, 1, 1]` and value: - /// - /// ``` - /// [[[[1]]], [[[2]]], [[[3]]], [[[4]]]] - /// ``` - /// - /// (2) For the following input of shape `[1, 2, 2, 3]` and block_size of 2: - /// - /// ``` - /// x = [[[[1, 2, 3], [4, 5, 6]], - /// [[7, 8, 9], [10, 11, 12]]]] - /// ``` - /// - /// The output tensor has shape `[4, 1, 1, 3]` and value: - /// - /// ``` - /// [[[[1, 2, 3]]], [[[4, 5, 6]]], [[[7, 8, 9]]], [[[10, 11, 12]]]] - /// ``` - /// - /// (3) For the following input of shape `[1, 4, 4, 1]` and block_size of 2: - /// - /// ``` - /// x = [[[[1], [2], [3], [4]], - /// [[5], [6], [7], [8]], - /// [[9], [10], [11], [12]], - /// [[13], [14], [15], [16]]]] - /// ``` - /// - /// The output tensor has shape `[4, 2, 2, 1]` and value: - /// - /// ``` - /// x = [[[[1], [3]], [[9], [11]]], - /// [[[2], [4]], [[10], [12]]], - /// [[[5], [7]], [[13], [15]]], - /// [[[6], [8]], [[14], [16]]]] - /// ``` - /// - /// (4) For the following input of shape `[2, 2, 4, 1]` and block_size of 2: - /// - /// ``` - /// x = [[[[1], [2], [3], [4]], - /// [[5], [6], [7], [8]]], - /// [[[9], [10], [11], [12]], - /// [[13], [14], [15], [16]]]] - /// ``` - /// - /// The output tensor has shape `[8, 1, 2, 1]` and value: - /// - /// ``` - /// x = [[[[1], [3]]], [[[9], [11]]], [[[2], [4]]], [[[10], [12]]], - /// [[[5], [7]]], [[[13], [15]]], [[[6], [8]]], [[[14], [16]]]] - /// ``` - /// - /// Among others, this operation is useful for reducing atrous convolution into - /// regular convolution. - @inlinable @inline(__always) - public static func spaceToBatch< - T: TensorFlowScalar, - Tpaddings: TensorFlowIndex - >( - _ input: Tensor, - paddings: Tensor, - blockSize: Int64 - ) -> Tensor { - switch commonBackend(input.handle.backend, paddings.handle.backend) { - case .XLA: - let output_device = paddings.device - let input = Tensor(copying: input, to: .defaultTFEager) - let paddings = Tensor(copying: paddings, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.spaceToBatch(input, paddings: paddings, blockSize: blockSize), - to: output_device) - case .TF_EAGER: - return _RawTFEager.spaceToBatch(input, paddings: paddings, blockSize: blockSize) - } - - } - - /// SpaceToBatch for N-D tensors of type T. - /// - /// This operation divides "spatial" dimensions `[1, ..., M]` of the input into a - /// grid of blocks of shape `block_shape`, and interleaves these blocks with the - /// "batch" dimension (0) such that in the output, the spatial dimensions - /// `[1, ..., M]` correspond to the position within the grid, and the batch - /// dimension combines both the position within a spatial block and the original - /// batch position. Prior to division into blocks, the spatial dimensions of the - /// input are optionally zero padded according to `paddings`. See below for a - /// precise description. - /// - /// - Parameters: - /// - input: N-D with shape `input_shape = [batch] + spatial_shape + remaining_shape`, - /// where spatial_shape has `M` dimensions. - /// - block_shape: 1-D with shape `[M]`, all values must be >= 1. - /// - paddings: 2-D with shape `[M, 2]`, all values must be >= 0. 
- /// `paddings[i] = [pad_start, pad_end]` specifies the padding for input dimension - /// `i + 1`, which corresponds to spatial dimension `i`. It is required that - /// `block_shape[i]` divides `input_shape[i + 1] + pad_start + pad_end`. - /// - /// This operation is equivalent to the following steps: - /// - /// 1. Zero-pad the start and end of dimensions `[1, ..., M]` of the - /// input according to `paddings` to produce `padded` of shape `padded_shape`. - /// - /// 2. Reshape `padded` to `reshaped_padded` of shape: - /// - /// [batch] + - /// [padded_shape[1] / block_shape[0], - /// block_shape[0], - /// ..., - /// padded_shape[M] / block_shape[M-1], - /// block_shape[M-1]] + - /// remaining_shape - /// - /// 3. Permute dimensions of `reshaped_padded` to produce - /// `permuted_reshaped_padded` of shape: - /// - /// block_shape + - /// [batch] + - /// [padded_shape[1] / block_shape[0], - /// ..., - /// padded_shape[M] / block_shape[M-1]] + - /// remaining_shape - /// - /// 4. Reshape `permuted_reshaped_padded` to flatten `block_shape` into the batch - /// dimension, producing an output tensor of shape: - /// - /// [batch * prod(block_shape)] + - /// [padded_shape[1] / block_shape[0], - /// ..., - /// padded_shape[M] / block_shape[M-1]] + - /// remaining_shape - /// - /// Some examples: - /// - /// (1) For the following input of shape `[1, 2, 2, 1]`, `block_shape = [2, 2]`, and - /// `paddings = [[0, 0], [0, 0]]`: - /// - /// ``` - /// x = [[[[1], [2]], [[3], [4]]]] - /// ``` - /// - /// The output tensor has shape `[4, 1, 1, 1]` and value: - /// - /// ``` - /// [[[[1]]], [[[2]]], [[[3]]], [[[4]]]] - /// ``` - /// - /// (2) For the following input of shape `[1, 2, 2, 3]`, `block_shape = [2, 2]`, and - /// `paddings = [[0, 0], [0, 0]]`: - /// - /// ``` - /// x = [[[[1, 2, 3], [4, 5, 6]], - /// [[7, 8, 9], [10, 11, 12]]]] - /// ``` - /// - /// The output tensor has shape `[4, 1, 1, 3]` and value: - /// - /// ``` - /// [[[[1, 2, 3]]], [[[4, 5, 6]]], [[[7, 8, 9]]], [[[10, 11, 12]]]] - /// ``` - /// - /// (3) For the following input of shape `[1, 4, 4, 1]`, `block_shape = [2, 2]`, and - /// `paddings = [[0, 0], [0, 0]]`: - /// - /// ``` - /// x = [[[[1], [2], [3], [4]], - /// [[5], [6], [7], [8]], - /// [[9], [10], [11], [12]], - /// [[13], [14], [15], [16]]]] - /// ``` - /// - /// The output tensor has shape `[4, 2, 2, 1]` and value: - /// - /// ``` - /// x = [[[[1], [3]], [[9], [11]]], - /// [[[2], [4]], [[10], [12]]], - /// [[[5], [7]], [[13], [15]]], - /// [[[6], [8]], [[14], [16]]]] - /// ``` - /// - /// (4) For the following input of shape `[2, 2, 4, 1]`, block_shape = `[2, 2]`, and - /// paddings = `[[0, 0], [2, 0]]`: - /// - /// ``` - /// x = [[[[1], [2], [3], [4]], - /// [[5], [6], [7], [8]]], - /// [[[9], [10], [11], [12]], - /// [[13], [14], [15], [16]]]] - /// ``` - /// - /// The output tensor has shape `[8, 1, 3, 1]` and value: - /// - /// ``` - /// x = [[[[0], [1], [3]]], [[[0], [9], [11]]], - /// [[[0], [2], [4]]], [[[0], [10], [12]]], - /// [[[0], [5], [7]]], [[[0], [13], [15]]], - /// [[[0], [6], [8]]], [[[0], [14], [16]]]] - /// ``` - /// - /// Among others, this operation is useful for reducing atrous convolution into - /// regular convolution. 
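A minimal sketch reproducing example (1) from the doc comment above with the `spaceToBatchND` wrapper defined below (initializers follow swift-apis conventions):

```swift
import TensorFlow

let x = Tensor<Float>(shape: [1, 2, 2, 1], scalars: [1, 2, 3, 4])
let y = _Raw.spaceToBatchND(
  x,
  blockShape: Tensor<Int32>([2, 2]),
  paddings: Tensor<Int32>(shape: [2, 2], scalars: [0, 0, 0, 0]))
// y.shape == [4, 1, 1, 1] with values [1, 2, 3, 4], as in example (1) above:
// each 2x2 spatial block is moved into the batch dimension.
```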
- @inlinable @inline(__always) - public static func spaceToBatchND< - T: TensorFlowScalar, - TblockShape: TensorFlowIndex, - Tpaddings: TensorFlowIndex - >( - _ input: Tensor, - blockShape: Tensor, - paddings: Tensor - ) -> Tensor { - switch commonBackend( - commonBackend(input.handle.backend, blockShape.handle.backend), paddings.handle.backend) - { - case .XLA: - let output_device = paddings.device - let input = Tensor(copying: input, to: .defaultTFEager) - let blockShape = Tensor(copying: blockShape, to: .defaultTFEager) - let paddings = Tensor(copying: paddings, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.spaceToBatchND(input, blockShape: blockShape, paddings: paddings), - to: output_device) - case .TF_EAGER: - return _RawTFEager.spaceToBatchND(input, blockShape: blockShape, paddings: paddings) - } - - } - - /// SpaceToDepth for tensors of type T. - /// - /// Rearranges blocks of spatial data, into depth. More specifically, - /// this op outputs a copy of the input tensor where values from the `height` - /// and `width` dimensions are moved to the `depth` dimension. - /// The attr `block_size` indicates the input block size. - /// - /// * Non-overlapping blocks of size `block_size x block size` are rearranged - /// into depth at each location. - /// * The depth of the output tensor is `block_size * block_size * input_depth`. - /// * The Y, X coordinates within each block of the input become the high order - /// component of the output channel index. - /// * The input tensor's height and width must be divisible by block_size. - /// - /// The `data_format` attr specifies the layout of the input and output tensors - /// with the following options: - /// "NHWC": `[ batch, height, width, channels ]` - /// "NCHW": `[ batch, channels, height, width ]` - /// "NCHW_VECT_C": - /// `qint8 [ batch, channels / 4, height, width, 4 ]` - /// - /// It is useful to consider the operation as transforming a 6-D Tensor. - /// e.g. for data_format = NHWC, - /// Each element in the input tensor can be specified via 6 coordinates, - /// ordered by decreasing memory layout significance as: - /// n,oY,bY,oX,bX,iC (where n=batch index, oX, oY means X or Y coordinates - /// within the output image, bX, bY means coordinates - /// within the input block, iC means input channels). - /// The output would be a transpose to the following layout: - /// n,oY,oX,bY,bX,iC - /// - /// This operation is useful for resizing the activations between convolutions - /// (but keeping all data), e.g. instead of pooling. It is also useful for training - /// purely convolutional models. - /// - /// For example, given an input of shape `[1, 2, 2, 1]`, data_format = "NHWC" and - /// block_size = 2: - /// - /// ``` - /// x = [[[[1], [2]], - /// [[3], [4]]]] - /// ``` - /// - /// This operation will output a tensor of shape `[1, 1, 1, 4]`: - /// - /// ``` - /// [[[[1, 2, 3, 4]]]] - /// ``` - /// - /// Here, the input has a batch of 1 and each batch element has shape `[2, 2, 1]`, - /// the corresponding output will have a single element (i.e. width and height are - /// both 1) and will have a depth of 4 channels (1 * block_size * block_size). - /// The output element shape is `[1, 1, 4]`. - /// - /// For an input tensor with larger depth, here of shape `[1, 2, 2, 3]`, e.g. 
- /// - /// ``` - /// x = [[[[1, 2, 3], [4, 5, 6]], - /// [[7, 8, 9], [10, 11, 12]]]] - /// ``` - /// - /// This operation, for block_size of 2, will return the following tensor of shape - /// `[1, 1, 1, 12]` - /// - /// ``` - /// [[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]] - /// ``` - /// - /// Similarly, for the following input of shape `[1 4 4 1]`, and a block size of 2: - /// - /// ``` - /// x = [[[[1], [2], [5], [6]], - /// [[3], [4], [7], [8]], - /// [[9], [10], [13], [14]], - /// [[11], [12], [15], [16]]]] - /// ``` - /// - /// the operator will return the following tensor of shape `[1 2 2 4]`: - /// - /// ``` - /// x = [[[[1, 2, 3, 4], - /// [5, 6, 7, 8]], - /// [[9, 10, 11, 12], - /// [13, 14, 15, 16]]]] - /// ``` - /// - /// - Attr block_size: The size of the spatial block. - @inlinable @inline(__always) - public static func spaceToDepth( - _ input: Tensor, - blockSize: Int64, - dataFormat: DataFormat2 = .nhwc - ) -> Tensor { - switch input.handle.backend { - case .XLA: - let output_device = input.device - let input = Tensor(copying: input, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.spaceToDepth(input, blockSize: blockSize, dataFormat: dataFormat), - to: output_device) - case .TF_EAGER: - return _RawTFEager.spaceToDepth(input, blockSize: blockSize, dataFormat: dataFormat) - } - - } - - /// Adds two `SparseTensor` objects to produce another `SparseTensor`. - /// - /// The input `SparseTensor` objects' indices are assumed ordered in standard - /// lexicographic order. If this is not the case, before this step run - /// `SparseReorder` to restore index ordering. - /// - /// By default, if two values sum to zero at some index, the output `SparseTensor` - /// would still include that particular location in its index, storing a zero in the - /// corresponding value slot. To override this, callers can specify `thresh`, - /// indicating that if the sum has a magnitude strictly smaller than `thresh`, its - /// corresponding value and index would then not be included. In particular, - /// `thresh == 0` (default) means everything is kept and actual thresholding happens - /// only for a positive value. - /// - /// In the following shapes, `nnz` is the count after taking `thresh` into account. - /// - /// - Parameters: - /// - a_indices: 2-D. The `indices` of the first `SparseTensor`, size `[nnz, ndims]` Matrix. - /// - a_values: 1-D. The `values` of the first `SparseTensor`, size `[nnz]` Vector. - /// - a_shape: 1-D. The `shape` of the first `SparseTensor`, size `[ndims]` Vector. - /// - b_indices: 2-D. The `indices` of the second `SparseTensor`, size `[nnz, ndims]` Matrix. - /// - b_values: 1-D. The `values` of the second `SparseTensor`, size `[nnz]` Vector. - /// - b_shape: 1-D. The `shape` of the second `SparseTensor`, size `[ndims]` Vector. - /// - thresh: 0-D. The magnitude threshold that determines if an output value/index - /// pair takes space. - @inlinable @inline(__always) - public static func sparseAdd< - T: TensorFlowNumeric, - Treal: TensorFlowNumeric - >( - aIndices: Tensor, - aValues: Tensor, - aShape: Tensor, - bIndices: Tensor, - bValues: Tensor, - bShape: Tensor, - thresh: Tensor - ) -> (sumIndices: Tensor, sumValues: Tensor, sumShape: Tensor) { - _RawTFEager.sparseAdd( - aIndices: aIndices, aValues: aValues, aShape: aShape, bIndices: bIndices, bValues: bValues, - bShape: bShape, thresh: thresh) - } - - /// The gradient operator for the SparseAdd op. 
- /// - /// The SparseAdd op calculates A + B, where A, B, and the sum are all represented - /// as `SparseTensor` objects. This op takes in the upstream gradient w.r.t. - /// non-empty values of the sum, and outputs the gradients w.r.t. the non-empty - /// values of A and B. - /// - /// - Parameters: - /// - backprop_val_grad: 1-D with shape `[nnz(sum)]`. The gradient with respect to - /// the non-empty values of the sum. - /// - a_indices: 2-D. The `indices` of the `SparseTensor` A, size `[nnz(A), ndims]`. - /// - b_indices: 2-D. The `indices` of the `SparseTensor` B, size `[nnz(B), ndims]`. - /// - sum_indices: 2-D. The `indices` of the sum `SparseTensor`, size - /// `[nnz(sum), ndims]`. - /// - /// - Outputs: - /// - a_val_grad: 1-D with shape `[nnz(A)]`. The gradient with respect to the - /// non-empty values of A. - /// - b_val_grad: 1-D with shape `[nnz(B)]`. The gradient with respect to the - /// non-empty values of B. - @inlinable @inline(__always) - public static func sparseAddGrad( - backpropValGrad: Tensor, - aIndices: Tensor, - bIndices: Tensor, - sumIndices: Tensor - ) -> (aValGrad: Tensor, bValGrad: Tensor) { - _RawTFEager.sparseAddGrad( - backpropValGrad: backpropValGrad, aIndices: aIndices, bIndices: bIndices, - sumIndices: sumIndices) - } - - /// Concatenates a list of `SparseTensor` along the specified dimension. - /// - /// Concatenation is with respect to the dense versions of these sparse tensors. - /// It is assumed that each input is a `SparseTensor` whose elements are ordered - /// along increasing dimension number. - /// - /// All inputs' shapes must match, except for the concat dimension. The - /// `indices`, `values`, and `shapes` lists must have the same length. - /// - /// The output shape is identical to the inputs', except along the concat - /// dimension, where it is the sum of the inputs' sizes along that dimension. - /// - /// The output elements will be resorted to preserve the sort order along - /// increasing dimension number. - /// - /// This op runs in `O(M log M)` time, where `M` is the total number of non-empty - /// values across all inputs. This is due to the need for an internal sort in - /// order to concatenate efficiently across an arbitrary dimension. - /// - /// For example, if `concat_dim = 1` and the inputs are - /// - /// sp_inputs[0]: shape = [2, 3] - /// [0, 2]: "a" - /// [1, 0]: "b" - /// [1, 1]: "c" - /// - /// sp_inputs[1]: shape = [2, 4] - /// [0, 1]: "d" - /// [0, 2]: "e" - /// - /// then the output will be - /// - /// shape = [2, 7] - /// [0, 2]: "a" - /// [0, 4]: "d" - /// [0, 5]: "e" - /// [1, 0]: "b" - /// [1, 1]: "c" - /// - /// Graphically this is equivalent to doing - /// - /// [ a] concat [ d e ] = [ a d e ] - /// [b c ] [ ] [b c ] - /// - /// - Parameters: - /// - indices: 2-D. Indices of each input `SparseTensor`. - /// - values: 1-D. Non-empty values of each `SparseTensor`. - /// - shapes: 1-D. Shapes of each `SparseTensor`. - /// - /// - Attr concat_dim: Dimension to concatenate along. Must be in range [-rank, rank), - /// where rank is the number of dimensions in each input `SparseTensor`. - /// - /// - Outputs: - /// - output_indices: 2-D. Indices of the concatenated `SparseTensor`. - /// - output_values: 1-D. Non-empty values of the concatenated `SparseTensor`. - /// - output_shape: 1-D. Shape of the concatenated `SparseTensor`. 
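A sketch of the `sparseConcat` call described above, using numeric values in place of the string values from the doc example (the inputs are hypothetical; shapes match the `[2, 3]` and `[2, 4]` example):

```swift
import TensorFlow

let aIndices = Tensor<Int64>(shape: [3, 2], scalars: [0, 2, 1, 0, 1, 1])
let aValues = Tensor<Float>([1, 2, 3])
let aShape = Tensor<Int64>([2, 3])
let bIndices = Tensor<Int64>(shape: [2, 2], scalars: [0, 1, 0, 2])
let bValues = Tensor<Float>([4, 5])
let bShape = Tensor<Int64>([2, 4])
let (outIndices, outValues, outShape) = _Raw.sparseConcat(
  indices: [aIndices, bIndices], [aValues, bValues], shapes: [aShape, bShape], concatDim: 1)
// outShape == [2, 7]; the second input's column indices are offset by 3, and the
// result is re-sorted in row-major order as described above.
```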
- @inlinable @inline(__always) - public static func sparseConcat( - indices: [Tensor], - _ values: [Tensor], - shapes: [Tensor], - concatDim: Int64 - ) -> (outputIndices: Tensor, outputValues: Tensor, outputShape: Tensor) { - _RawTFEager.sparseConcat(indices: indices, values, shapes: shapes, concatDim: concatDim) - } - - /// Generates sparse cross from a list of sparse and dense tensors. - /// - /// The op takes two lists, one of 2D `SparseTensor` and one of 2D `Tensor`, each - /// representing features of one feature column. It outputs a 2D `SparseTensor` with - /// the batchwise crosses of these features. - /// - /// For example, if the inputs are - /// - /// inputs[0]: SparseTensor with shape = [2, 2] - /// [0, 0]: "a" - /// [1, 0]: "b" - /// [1, 1]: "c" - /// - /// inputs[1]: SparseTensor with shape = [2, 1] - /// [0, 0]: "d" - /// [1, 0]: "e" - /// - /// inputs[2]: Tensor [["f"], ["g"]] - /// - /// then the output will be - /// - /// shape = [2, 2] - /// [0, 0]: "a_X_d_X_f" - /// [1, 0]: "b_X_e_X_g" - /// [1, 1]: "c_X_e_X_g" - /// - /// if hashed_output=true then the output will be - /// - /// shape = [2, 2] - /// [0, 0]: FingerprintCat64( - /// Fingerprint64("f"), FingerprintCat64( - /// Fingerprint64("d"), Fingerprint64("a"))) - /// [1, 0]: FingerprintCat64( - /// Fingerprint64("g"), FingerprintCat64( - /// Fingerprint64("e"), Fingerprint64("b"))) - /// [1, 1]: FingerprintCat64( - /// Fingerprint64("g"), FingerprintCat64( - /// Fingerprint64("e"), Fingerprint64("c"))) - /// - /// - Parameters: - /// - indices: 2-D. Indices of each input `SparseTensor`. - /// - values: 1-D. values of each `SparseTensor`. - /// - shapes: 1-D. Shapes of each `SparseTensor`. - /// - dense_inputs: 2-D. Columns represented by dense `Tensor`. - /// - /// - Attrs: - /// - hashed_output: If true, returns the hash of the cross instead of the string. - /// This will allow us avoiding string manipulations. - /// - num_buckets: It is used if hashed_output is true. - /// output = hashed_value%num_buckets if num_buckets > 0 else hashed_value. - /// - hash_key: Specify the hash_key that will be used by the `FingerprintCat64` - /// function to combine the crosses fingerprints. - /// - /// - Outputs: - /// - output_indices: 2-D. Indices of the concatenated `SparseTensor`. - /// - output_values: 1-D. Non-empty values of the concatenated or hashed - /// `SparseTensor`. - /// - output_shape: 1-D. Shape of the concatenated `SparseTensor`. - @inlinable @inline(__always) - public static func sparseCross< - SparseTypes: TensorArrayProtocol, - DenseTypes: TensorArrayProtocol, - OutType: TensorFlowIndex - >( - indices: [Tensor], - _ values: SparseTypes, - shapes: [Tensor], - denseInputs: DenseTypes, - hashedOutput: Bool, - numBuckets: Int64, - hashKey: Int64, - internalType: TensorDataType - ) -> (outputIndices: Tensor, outputValues: Tensor, outputShape: Tensor) { - _RawTFEager.sparseCross( - indices: indices, values, shapes: shapes, denseInputs: denseInputs, - hashedOutput: hashedOutput, numBuckets: numBuckets, hashKey: hashKey, - internalType: internalType) - } - - /// Generates sparse cross from a list of sparse and dense tensors. - /// - /// The op takes two lists, one of 2D `SparseTensor` and one of 2D `Tensor`, each - /// representing features of one feature column. It outputs a 2D `SparseTensor` with - /// the batchwise crosses of these features. 
- /// - /// For example, if the inputs are - /// - /// inputs[0]: SparseTensor with shape = [2, 2] - /// [0, 0]: "a" - /// [1, 0]: "b" - /// [1, 1]: "c" - /// - /// inputs[1]: SparseTensor with shape = [2, 1] - /// [0, 0]: "d" - /// [1, 0]: "e" - /// - /// inputs[2]: Tensor [["f"], ["g"]] - /// - /// then the output will be - /// - /// shape = [2, 2] - /// [0, 0]: "a_X_d_X_f" - /// [1, 0]: "b_X_e_X_g" - /// [1, 1]: "c_X_e_X_g" - /// - /// if hashed_output=true then the output will be - /// - /// shape = [2, 2] - /// [0, 0]: FingerprintCat64( - /// Fingerprint64("f"), FingerprintCat64( - /// Fingerprint64("d"), Fingerprint64("a"))) - /// [1, 0]: FingerprintCat64( - /// Fingerprint64("g"), FingerprintCat64( - /// Fingerprint64("e"), Fingerprint64("b"))) - /// [1, 1]: FingerprintCat64( - /// Fingerprint64("g"), FingerprintCat64( - /// Fingerprint64("e"), Fingerprint64("c"))) - /// - /// - Parameters: - /// - indices: 2-D. Indices of each input `SparseTensor`. - /// - values: 1-D. values of each `SparseTensor`. - /// - shapes: 1-D. Shapes of each `SparseTensor`. - /// - dense_inputs: 2-D. Columns represented by dense `Tensor`. - /// - /// - Attrs: - /// - hashed_output: If true, returns the hash of the cross instead of the string. - /// This will allow us avoiding string manipulations. - /// - num_buckets: It is used if hashed_output is true. - /// output = hashed_value%num_buckets if num_buckets > 0 else hashed_value. - /// - hash_key: Specify the hash_key that will be used by the `FingerprintCat64` - /// function to combine the crosses fingerprints. - /// - /// - Outputs: - /// - output_indices: 2-D. Indices of the concatenated `SparseTensor`. - /// - output_values: 1-D. Non-empty values of the concatenated or hashed - /// `SparseTensor`. - /// - output_shape: 1-D. Shape of the concatenated `SparseTensor`. - @inlinable @inline(__always) - public static func sparseCross< - SparseTypes: TensorArrayProtocol, - DenseTypes: TensorArrayProtocol - >( - indices: [Tensor], - _ values: SparseTypes, - shapes: [Tensor], - denseInputs: DenseTypes, - hashedOutput: Bool, - numBuckets: Int64, - hashKey: Int64, - internalType: TensorDataType - ) -> (outputIndices: Tensor, outputValues: StringTensor, outputShape: Tensor) { - _RawTFEager.sparseCross( - indices: indices, values, shapes: shapes, denseInputs: denseInputs, - hashedOutput: hashedOutput, numBuckets: numBuckets, hashKey: hashKey, - internalType: internalType) - } - - /// Adds up a SparseTensor and a dense Tensor, using these special rules: - /// - /// (1) Broadcasts the dense side to have the same shape as the sparse side, if - /// eligible; - /// (2) Then, only the dense values pointed to by the indices of the SparseTensor - /// participate in the cwise addition. - /// - /// By these rules, the result is a logical SparseTensor with exactly the same - /// indices and shape, but possibly with different non-zero values. The output of - /// this Op is the resultant non-zero values. - /// - /// - Parameters: - /// - sp_indices: 2-D. `N x R` matrix with the indices of non-empty values in a - /// SparseTensor, possibly not in canonical ordering. - /// - sp_values: 1-D. `N` non-empty values corresponding to `sp_indices`. - /// - sp_shape: 1-D. Shape of the input SparseTensor. - /// - dense: `R`-D. The dense Tensor operand. - /// - /// - Output output: 1-D. The `N` values that are operated on. 
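To make the broadcasting rules above concrete, a small sketch for `sparseDenseCwiseAdd` (values are illustrative):

```swift
import TensorFlow

// Sparse values at [0, 1] and [1, 0] of a [2, 2] tensor, plus a broadcast dense row.
let spIndices = Tensor<Int64>(shape: [2, 2], scalars: [0, 1, 1, 0])
let spValues = Tensor<Float>([1, 2])
let spShape = Tensor<Int64>([2, 2])
let dense = Tensor<Float>([10, 20])  // broadcasts to [[10, 20], [10, 20]]
let sum = _Raw.sparseDenseCwiseAdd(
  spIndices: spIndices, spValues: spValues, spShape: spShape, dense: dense)
// sum == [21, 12]: only the two stored positions participate in the addition.
```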
- @inlinable @inline(__always) - public static func sparseDenseCwiseAdd( - spIndices: Tensor, - spValues: Tensor, - spShape: Tensor, - dense: Tensor - ) -> Tensor { - switch commonBackend( - commonBackend( - commonBackend(spIndices.handle.backend, spValues.handle.backend), spShape.handle.backend), - dense.handle.backend) - { - case .XLA: - let output_device = dense.device - let spIndices = Tensor(copying: spIndices, to: .defaultTFEager) - let spValues = Tensor(copying: spValues, to: .defaultTFEager) - let spShape = Tensor(copying: spShape, to: .defaultTFEager) - let dense = Tensor(copying: dense, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.sparseDenseCwiseAdd( - spIndices: spIndices, spValues: spValues, spShape: spShape, dense: dense), - to: output_device) - case .TF_EAGER: - return _RawTFEager.sparseDenseCwiseAdd( - spIndices: spIndices, spValues: spValues, spShape: spShape, dense: dense) - } - - } - - /// Component-wise divides a SparseTensor by a dense Tensor. - /// - /// *Limitation*: this Op only broadcasts the dense side to the sparse side, but not - /// the other direction. - /// - /// - Parameters: - /// - sp_indices: 2-D. `N x R` matrix with the indices of non-empty values in a - /// SparseTensor, possibly not in canonical ordering. - /// - sp_values: 1-D. `N` non-empty values corresponding to `sp_indices`. - /// - sp_shape: 1-D. Shape of the input SparseTensor. - /// - dense: `R`-D. The dense Tensor operand. - /// - /// - Output output: 1-D. The `N` values that are operated on. - @inlinable @inline(__always) - public static func sparseDenseCwiseDiv( - spIndices: Tensor, - spValues: Tensor, - spShape: Tensor, - dense: Tensor - ) -> Tensor { - switch commonBackend( - commonBackend( - commonBackend(spIndices.handle.backend, spValues.handle.backend), spShape.handle.backend), - dense.handle.backend) - { - case .XLA: - let output_device = dense.device - let spIndices = Tensor(copying: spIndices, to: .defaultTFEager) - let spValues = Tensor(copying: spValues, to: .defaultTFEager) - let spShape = Tensor(copying: spShape, to: .defaultTFEager) - let dense = Tensor(copying: dense, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.sparseDenseCwiseDiv( - spIndices: spIndices, spValues: spValues, spShape: spShape, dense: dense), - to: output_device) - case .TF_EAGER: - return _RawTFEager.sparseDenseCwiseDiv( - spIndices: spIndices, spValues: spValues, spShape: spShape, dense: dense) - } - - } - - /// Component-wise multiplies a SparseTensor by a dense Tensor. - /// - /// The output locations corresponding to the implicitly zero elements in the sparse - /// tensor will be zero (i.e., will not take up storage space), regardless of the - /// contents of the dense tensor (even if it's +/-INF and that INF*0 == NaN). - /// - /// *Limitation*: this Op only broadcasts the dense side to the sparse side, but not - /// the other direction. - /// - /// - Parameters: - /// - sp_indices: 2-D. `N x R` matrix with the indices of non-empty values in a - /// SparseTensor, possibly not in canonical ordering. - /// - sp_values: 1-D. `N` non-empty values corresponding to `sp_indices`. - /// - sp_shape: 1-D. Shape of the input SparseTensor. - /// - dense: `R`-D. The dense Tensor operand. - /// - /// - Output output: 1-D. The `N` values that are operated on. 
- @inlinable @inline(__always)
- public static func sparseDenseCwiseMul(
- spIndices: Tensor,
- spValues: Tensor,
- spShape: Tensor,
- dense: Tensor
- ) -> Tensor {
- switch commonBackend(
- commonBackend(
- commonBackend(spIndices.handle.backend, spValues.handle.backend), spShape.handle.backend),
- dense.handle.backend)
- {
- case .XLA:
- let output_device = dense.device
- let spIndices = Tensor(copying: spIndices, to: .defaultTFEager)
- let spValues = Tensor(copying: spValues, to: .defaultTFEager)
- let spShape = Tensor(copying: spShape, to: .defaultTFEager)
- let dense = Tensor(copying: dense, to: .defaultTFEager)
- return Tensor(
- copying: _RawTFEager.sparseDenseCwiseMul(
- spIndices: spIndices, spValues: spValues, spShape: spShape, dense: dense),
- to: output_device)
- case .TF_EAGER:
- return _RawTFEager.sparseDenseCwiseMul(
- spIndices: spIndices, spValues: spValues, spShape: spShape, dense: dense)
- }
-
- }
-
- /// Fills empty rows in the input 2-D `SparseTensor` with a default value.
- ///
- /// The input `SparseTensor` is represented via the tuple of inputs
- /// (`indices`, `values`, `dense_shape`). The output `SparseTensor` has the
- /// same `dense_shape` but with indices `output_indices` and values
- /// `output_values`.
- ///
- /// This op inserts a single entry for every row that doesn't have any values.
- /// The index is created as `[row, 0, ..., 0]` and the inserted value
- /// is `default_value`.
- ///
- /// For example, suppose `sp_input` has shape `[5, 6]` and non-empty values:
- ///
- /// [0, 1]: a
- /// [0, 3]: b
- /// [2, 0]: c
- /// [3, 1]: d
- ///
- /// Rows 1 and 4 are empty, so the output will be of shape `[5, 6]` with values:
- ///
- /// [0, 1]: a
- /// [0, 3]: b
- /// [1, 0]: default_value
- /// [2, 0]: c
- /// [3, 1]: d
- /// [4, 0]: default_value
- ///
- /// The output `SparseTensor` will be in row-major order and will have the
- /// same shape as the input.
- ///
- /// This op also returns an indicator vector shaped `[dense_shape[0]]` such that
- ///
- /// empty_row_indicator[i] = True iff row i was an empty row.
- ///
- /// And a reverse index map vector shaped `[indices.shape[0]]` that is used during
- /// backpropagation,
- ///
- /// reverse_index_map[j] = out_j s.t. indices[j, :] == output_indices[out_j, :]
- ///
- /// - Parameters:
- /// - indices: 2-D. the indices of the sparse tensor.
- /// - values: 1-D. the values of the sparse tensor.
- /// - dense_shape: 1-D. the shape of the sparse tensor.
- /// - default_value: 0-D. default value to insert into location `[row, 0, ..., 0]`
- /// for rows missing from the input sparse tensor.
- ///
- /// - Outputs:
- /// - output_indices: 2-D. the indices of the filled sparse tensor.
- /// - output_values: 1-D. the values of the filled sparse tensor.
- /// - empty_row_indicator: 1-D. whether the dense row was missing in the
- /// input sparse tensor.
- /// - reverse_index_map: 1-D. a map from the input indices to the output indices.
- @inlinable @inline(__always)
- public static func sparseFillEmptyRows(
- indices: Tensor,
- _ values: Tensor,
- denseShape: Tensor,
- defaultValue: Tensor
- ) -> (
- outputIndices: Tensor, outputValues: Tensor, emptyRowIndicator: Tensor,
- reverseIndexMap: Tensor
- ) {
- _RawTFEager.sparseFillEmptyRows(
- indices: indices, values, denseShape: denseShape, defaultValue: defaultValue)
- }
-
- /// The gradient of SparseFillEmptyRows.
- /// - /// Takes vectors reverse_index_map, shaped `[N]`, and grad_values, - /// shaped `[N_full]`, where `N_full >= N` and copies data into either - /// `d_values` or `d_default_value`. Here `d_values` is shaped `[N]` and - /// `d_default_value` is a scalar. - /// - /// d_values[j] = grad_values[reverse_index_map[j]] - /// d_default_value = sum_{k : 0 .. N_full - 1} ( - /// grad_values[k] * 1{k not in reverse_index_map}) - /// - /// - Parameters: - /// - reverse_index_map: 1-D. The reverse index map from SparseFillEmptyRows. - /// - grad_values: 1-D. The gradients from backprop. - /// - /// - Outputs: - /// - d_values: 1-D. The backprop into values. - /// - d_default_value: 0-D. The backprop into default_value. - @inlinable @inline(__always) - public static func sparseFillEmptyRowsGrad( - reverseIndexMap: Tensor, - gradValues: Tensor - ) -> (dValues: Tensor, dDefaultValue: Tensor) { - _RawTFEager.sparseFillEmptyRowsGrad(reverseIndexMap: reverseIndexMap, gradValues: gradValues) - } - - /// Multiply matrix "a" by matrix "b". - /// - /// The inputs must be two-dimensional matrices and the inner dimension of "a" must - /// match the outer dimension of "b". Both "a" and "b" must be `Tensor`s not - /// `SparseTensor`s. This op is optimized for the case where at least one of "a" or - /// "b" is sparse, in the sense that they have a large proportion of zero values. - /// The breakeven for using this versus a dense matrix multiply on one platform was - /// 30% zero values in the sparse matrix. - /// - /// The gradient computation of this operation will only take advantage of sparsity - /// in the input gradient when that gradient comes from a Relu. - @inlinable @inline(__always) - public static func sparseMatMul< - Ta: FloatingPoint & TensorFlowScalar, - Tb: FloatingPoint & TensorFlowScalar - >( - _ a: Tensor, - _ b: Tensor, - transposeA: Bool = false, - transposeB: Bool = false, - aIsSparse: Bool = false, - bIsSparse: Bool = false - ) -> Tensor { - switch commonBackend(a.handle.backend, b.handle.backend) { - case .XLA: - let output_device = b.device - let a = Tensor(copying: a, to: .defaultTFEager) - let b = Tensor(copying: b, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.sparseMatMul( - a, b, transposeA: transposeA, transposeB: transposeB, aIsSparse: aIsSparse, - bIsSparse: bIsSparse), to: output_device) - case .TF_EAGER: - return _RawTFEager.sparseMatMul( - a, b, transposeA: transposeA, transposeB: transposeB, aIsSparse: aIsSparse, - bIsSparse: bIsSparse) - } - - } - - /// Sparse addition of two CSR matrices, C = alpha * A + beta * B. - /// - /// The gradients of SparseMatrixAdd outputs with respect to alpha and beta are not - /// currently defined (TensorFlow will return zeros for these entries). - /// - /// - Parameters: - /// - a: A CSRSparseMatrix. - /// - b: A CSRSparseMatrix. - /// - alpha: A constant scalar. - /// - beta: A constant scalar. - /// - /// - Output c: A CSRSparseMatrix. - @inlinable @inline(__always) - public static func sparseMatrixAdd( - _ a: VariantHandle, - _ b: VariantHandle, - alpha: Tensor, - beta: Tensor - ) -> VariantHandle { - _RawTFEager.sparseMatrixAdd(a, b, alpha: alpha, beta: beta) - } - - /// Matrix-multiplies a sparse matrix with a dense matrix. - /// - /// Returns a dense matrix. - /// For inputs A and B, where A is CSR and B is dense; this op returns a dense C; - /// - /// If transpose_output is false, returns: - /// ``` - /// C = A . B - /// ``` - /// - /// If transpose_output is `true`, returns: - /// ``` - /// C = transpose(A . 
B) = transpose(B) . transpose(A) - /// ``` - /// where the transposition is performed along the two innermost (matrix) - /// dimensions. - /// - /// If conjugate_output is `true`, returns: - /// ``` - /// C = conjugate(A . B) = conjugate(A) . conjugate(B) - /// ``` - /// - /// If both conjugate_output and transpose_output are `true`, returns: - /// ``` - /// C = conjugate(transpose(A . B)) = conjugate(transpose(B)) . - /// conjugate(transpose(A)) - /// ``` - /// - /// - Parameters: - /// - a: A CSRSparseMatrix. - /// - b: A dense tensor. - /// - /// - Attrs: - /// - transpose_a: Indicates whether `a` should be transposed. - /// - transpose_b: Indicates whether `b` should be transposed. - /// - adjoint_a: Indicates whether `a` should be conjugate-transposed. - /// - adjoint_b: Indicates whether `b` should be conjugate-transposed. - /// - transpose_output: Transposes the product of `a` and `b`. - /// - conjugate_output: Conjugates the product of `a` and `b`. - /// - /// - Output output: A dense output tensor. - @inlinable @inline(__always) - public static func sparseMatrixMatMul( - _ a: VariantHandle, - _ b: Tensor, - transposeA: Bool = false, - transposeB: Bool = false, - adjointA: Bool = false, - adjointB: Bool = false, - transposeOutput: Bool = false, - conjugateOutput: Bool = false - ) -> Tensor { - switch b.handle.backend { - case .XLA: - let output_device = b.device - let b = Tensor(copying: b, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.sparseMatrixMatMul( - a, b, transposeA: transposeA, transposeB: transposeB, adjointA: adjointA, - adjointB: adjointB, transposeOutput: transposeOutput, conjugateOutput: conjugateOutput), - to: output_device) - case .TF_EAGER: - return _RawTFEager.sparseMatrixMatMul( - a, b, transposeA: transposeA, transposeB: transposeB, adjointA: adjointA, - adjointB: adjointB, transposeOutput: transposeOutput, conjugateOutput: conjugateOutput) - } - - } - - /// Element-wise multiplication of a sparse matrix with a dense tensor. - /// - /// Returns a sparse matrix. - /// - /// The dense tensor `b` may be either a scalar; otherwise `a` must be a rank-3 - /// `SparseMatrix`; in this case `b` must be shaped `[batch_size, 1, 1]` and the - /// multiply operation broadcasts. - /// - /// **NOTE** even if `b` is zero, the sparsity structure of the output does not - /// change. - /// - /// - Parameters: - /// - a: A CSRSparseMatrix. - /// - b: A dense tensor. - /// - /// - Output output: A dense output tensor. - @inlinable @inline(__always) - public static func sparseMatrixMul( - _ a: VariantHandle, - _ b: Tensor - ) -> VariantHandle { - _RawTFEager.sparseMatrixMul(a, b) - } - - /// Returns the number of nonzeroes of `sparse_matrix`. - /// - /// - Parameter sparse_matrix: A CSRSparseMatrix. - /// - /// - Output nnz: The number of nonzeroes of `sparse_matrix`. - @inlinable @inline(__always) - public static func sparseMatrixNNZ( - sparseMatrix: VariantHandle - ) -> Tensor { - _RawTFEager.sparseMatrixNNZ(sparseMatrix: sparseMatrix) - } - - /// Computes the Approximate Minimum Degree (AMD) ordering of `input`. - /// - /// Computes the Approximate Minimum Degree (AMD) ordering for a sparse matrix. - /// - /// The returned permutation may be used to permute the rows and columns of the - /// given sparse matrix. This typically results in permuted sparse matrix's sparse - /// Cholesky (or other decompositions) in having fewer zero fill-in compared to - /// decomposition of the original matrix. - /// - /// The input sparse matrix may have rank 2 or rank 3. 
The output Tensor,
- /// representing the permutation, would then have rank 1 or 2 respectively, with
- /// the same batch shape as the input.
- ///
- /// Each component of the input sparse matrix must represent a square symmetric
- /// matrix; only the lower triangular part of the matrix is read. The values of the
- /// sparse matrix do not affect the returned permutation; only the sparsity
- /// pattern of the sparse matrix is used. Hence, a single AMD ordering may be
- /// reused for the Cholesky decompositions of sparse matrices with the same sparsity
- /// pattern but with possibly different values.
- ///
- /// Each batch component of the output permutation represents a permutation of `N`
- /// elements, where the input sparse matrix components each have `N` rows. That is,
- /// the component contains each of the integers `{0, .. N-1}` exactly once. The
- /// `i`th element represents the row index that the `i`th row maps to.
- ///
- /// Usage example:
- ///
- /// ```python
- /// from tensorflow.python.ops.linalg.sparse import sparse_csr_matrix_ops
- ///
- /// a_indices = np.array([[0, 0], [1, 1], [2, 1], [2, 2], [3, 3]])
- /// a_values = np.array([1.0, 2.0, 1.0, 3.0, 4.0], np.float32)
- /// a_dense_shape = [4, 4]
- ///
- /// with tf.Session() as sess:
- /// # Define (COO format) SparseTensor over Numpy array.
- /// a_st = tf.SparseTensor(a_indices, a_values, a_dense_shape)
- ///
- /// # Convert SparseTensors to CSR SparseMatrix.
- /// a_sm = sparse_csr_matrix_ops.sparse_tensor_to_csr_sparse_matrix(
- /// a_st.indices, a_st.values, a_st.dense_shape)
- ///
- /// # Obtain the AMD Ordering for the CSR SparseMatrix.
- /// ordering_amd = sparse_csr_matrix_ops.sparse_matrix_ordering_amd(a_sm)
- ///
- /// ordering_amd_value = sess.run(ordering_amd)
- /// ```
- ///
- /// `ordering_amd_value` stores the AMD ordering: `[1 2 3 0]`.
- ///
- /// - Parameter input: A `CSRSparseMatrix`.
- ///
- /// - Output output: The Approximate Minimum Degree (AMD) ordering of `input`.
- @inlinable @inline(__always)
- public static func sparseMatrixOrderingAMD(
- _ input: VariantHandle
- ) -> Tensor {
- _RawTFEager.sparseMatrixOrderingAMD(input)
- }
-
- /// Calculates the softmax of a CSRSparseMatrix.
- ///
- /// Calculate the softmax of the innermost dimensions of a SparseMatrix.
- ///
- /// Missing values are treated as `-inf` (i.e., logits of zero probability); and
- /// the output has the same sparsity structure as the input (though missing values
- /// in the output may now be treated as having probability zero).
- ///
- /// - Parameter logits: A CSRSparseMatrix.
- ///
- /// - Output softmax: A CSRSparseMatrix.
- @inlinable @inline(__always)
- public static func sparseMatrixSoftmax(
- logits: VariantHandle,
- type: TensorDataType
- ) -> VariantHandle {
- _RawTFEager.sparseMatrixSoftmax(logits: logits, type: type)
- }
-
- /// Calculates the gradient of the SparseMatrixSoftmax op.
- ///
- /// - Parameters:
- /// - softmax: A CSRSparseMatrix.
- /// - grad_softmax: The gradient of `softmax`.
- ///
- /// - Output gradient: The output gradient.
- @inlinable @inline(__always)
- public static func sparseMatrixSoftmaxGrad(
- softmax: VariantHandle,
- gradSoftmax: VariantHandle,
- type: TensorDataType
- ) -> VariantHandle {
- _RawTFEager.sparseMatrixSoftmaxGrad(softmax: softmax, gradSoftmax: gradSoftmax, type: type)
- }
-
- /// Computes the sparse Cholesky decomposition of `input`.
- ///
- /// Computes the Sparse Cholesky decomposition of a sparse matrix, with the given
- /// fill-in reducing permutation.
- ///
- /// The input sparse matrix and the fill-in reducing permutation `permutation` must
- /// have compatible shapes. If the sparse matrix has rank 3, with the batch
- /// dimension `B`, then the `permutation` must be of rank 2, with the same batch
- /// dimension `B`. There is no support for broadcasting.
- ///
- /// Furthermore, each component vector of `permutation` must be of length `N`,
- /// containing each of the integers {0, 1, ..., N - 1} exactly once, where `N` is
- /// the number of rows of each component of the sparse matrix.
- ///
- /// Each component of the input sparse matrix must represent a symmetric positive
- /// definite (SPD) matrix, although only the lower triangular part of the matrix is
- /// read. If any individual component is not SPD, then an InvalidArgument error is
- /// thrown.
- ///
- /// The returned sparse matrix has the same dense shape as the input sparse matrix.
- /// For each component `A` of the input sparse matrix, the corresponding output
- /// sparse matrix represents `L`, the lower triangular Cholesky factor satisfying
- /// the following identity:
- ///
- /// ```
- /// A = L * Lt
- /// ```
- ///
- /// where Lt denotes the transpose of L (or its conjugate transpose, if `type` is
- /// `complex64` or `complex128`).
- ///
- /// The `type` parameter denotes the type of the matrix elements. The supported
- /// types are: `float32`, `float64`, `complex64` and `complex128`.
- ///
- /// Usage example:
- ///
- /// ```python
- /// from tensorflow.python.ops.linalg.sparse import sparse_csr_matrix_ops
- ///
- /// a_indices = np.array([[0, 0], [1, 1], [2, 1], [2, 2], [3, 3]])
- /// a_values = np.array([1.0, 2.0, 1.0, 3.0, 4.0], np.float32)
- /// a_dense_shape = [4, 4]
- ///
- /// with tf.Session() as sess:
- /// # Define (COO format) SparseTensor over Numpy array.
- /// a_st = tf.SparseTensor(a_indices, a_values, a_dense_shape)
- ///
- /// # Convert SparseTensors to CSR SparseMatrix.
- /// a_sm = sparse_csr_matrix_ops.sparse_tensor_to_csr_sparse_matrix(
- /// a_st.indices, a_st.values, a_st.dense_shape)
- ///
- /// # Obtain the Sparse Cholesky factor using AMD Ordering for reducing zero
- /// # fill-in (number of structural non-zeros in the sparse Cholesky factor).
- /// ordering_amd = sparse_csr_matrix_ops.sparse_matrix_ordering_amd(a_sm)
- /// cholesky_sparse_matrices = (
- /// sparse_csr_matrix_ops.sparse_matrix_sparse_cholesky(
- /// a_sm, ordering_amd, type=tf.float32))
- ///
- /// # Convert the CSRSparseMatrix Cholesky factor to a dense Tensor
- /// dense_cholesky = sparse_csr_matrix_ops.csr_sparse_matrix_to_dense(
- /// cholesky_sparse_matrices, tf.float32)
- ///
- /// # Evaluate the dense Tensor value.
- /// dense_cholesky_value = sess.run(dense_cholesky)
- /// ```
- ///
- /// `dense_cholesky_value` stores the dense Cholesky factor:
- ///
- /// ```
- /// [[ 1. 0. 0. 0.]
- /// [ 0. 1.41 0. 0.]
- /// [ 0. 0.70 1.58 0.]
- /// [ 0. 0. 0. 2.]]
- /// ```
- ///
- /// - Parameters:
- /// - input: A `CSRSparseMatrix`.
- /// - permutation: A fill-in reducing permutation matrix.
- ///
- /// - Attr type: The type of `input`.
- ///
- /// - Output output: The sparse Cholesky decomposition of `input`.
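For reference, a rough Swift counterpart of the Python example above, written against the wrapper declared just below. The COO-to-CSR conversion wrapper name (`sparseTensorToCSRSparseMatrix`) is an assumption about the generated bindings; the exact spelling may differ:

```swift
// Hedged sketch only; the conversion-wrapper name is an assumption.
let aIndices = Tensor<Int64>(shape: [5, 2], scalars: [0, 0, 1, 1, 2, 1, 2, 2, 3, 3])
let aValues = Tensor<Float>([1.0, 2.0, 1.0, 3.0, 4.0])
let aDenseShape = Tensor<Int64>([4, 4])

// COO -> CSR, then the AMD ordering, then the sparse Cholesky factor.
let aSM = _Raw.sparseTensorToCSRSparseMatrix(
  indices: aIndices, values: aValues, denseShape: aDenseShape)
let orderingAMD = _Raw.sparseMatrixOrderingAMD(aSM)
let choleskySM = _Raw.sparseMatrixSparseCholesky(
  aSM, permutation: orderingAMD, type: Float.tensorFlowDataType)
```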
- @inlinable @inline(__always) - public static func sparseMatrixSparseCholesky( - _ input: VariantHandle, - permutation: Tensor, - type: TensorDataType - ) -> VariantHandle { - _RawTFEager.sparseMatrixSparseCholesky(input, permutation: permutation, type: type) - } - - /// Sparse-matrix-multiplies two CSR matrices `a` and `b`. - /// - /// Performs a matrix multiplication of a sparse matrix `a` with a sparse matrix - /// `b`; returns a sparse matrix `a * b`, unless either `a` or `b` is transposed or - /// adjointed. - /// - /// Each matrix may be transposed or adjointed (conjugated and transposed) - /// according to the Boolean parameters `transpose_a`, `adjoint_a`, `transpose_b` - /// and `adjoint_b`. At most one of `transpose_a` or `adjoint_a` may be True. - /// Similarly, at most one of `transpose_b` or `adjoint_b` may be True. - /// - /// The inputs must have compatible shapes. That is, the inner dimension of `a` - /// must be equal to the outer dimension of `b`. This requirement is adjusted - /// according to whether either `a` or `b` is transposed or adjointed. - /// - /// The `type` parameter denotes the type of the matrix elements. Both `a` and `b` - /// must have the same type. The supported types are: `float32`, `float64`, - /// `complex64` and `complex128`. - /// - /// Both `a` and `b` must have the same rank. Broadcasting is not supported. If they - /// have rank 3, each batch of 2D CSRSparseMatrices within `a` and `b` must have the - /// same dense shape. - /// - /// The sparse matrix product may have numeric (non-structural) zeros. - /// TODO(anudhyan): Consider adding a boolean attribute to control whether to prune - /// zeros. - /// - /// Usage example: - /// - /// ```python - /// from tensorflow.python.ops.linalg.sparse import sparse_csr_matrix_ops - /// - /// a_indices = np.array([[0, 0], [2, 3], [2, 4], [3, 0]]) - /// a_values = np.array([1.0, 5.0, -1.0, -2.0], np.float32) - /// a_dense_shape = [4, 5] - /// - /// b_indices = np.array([[0, 0], [3, 0], [3, 1]]) - /// b_values = np.array([2.0, 7.0, 8.0], np.float32) - /// b_dense_shape = [5, 3] - /// - /// with tf.Session() as sess: - /// # Define (COO format) Sparse Tensors over Numpy arrays - /// a_st = tf.SparseTensor(a_indices, a_values, a_dense_shape) - /// b_st = tf.SparseTensor(b_indices, b_values, b_dense_shape) - /// - /// # Convert SparseTensors to CSR SparseMatrix - /// a_sm = sparse_csr_matrix_ops.sparse_tensor_to_csr_sparse_matrix( - /// a_st.indices, a_st.values, a_st.dense_shape) - /// b_sm = sparse_csr_matrix_ops.sparse_tensor_to_csr_sparse_matrix( - /// b_st.indices, b_st.values, b_st.dense_shape) - /// - /// # Compute the CSR SparseMatrix matrix multiplication - /// c_sm = sparse_csr_matrix_ops.sparse_matrix_sparse_mat_mul( - /// a=a_sm, b=b_sm, type=tf.float32) - /// - /// # Convert the CSR SparseMatrix product to a dense Tensor - /// c_sm_dense = sparse_csr_matrix_ops.csr_sparse_matrix_to_dense( - /// c_sm, tf.float32) - /// # Evaluate the dense Tensor value - /// c_sm_dense_value = sess.run(c_sm_dense) - /// ``` - /// - /// `c_sm_dense_value` stores the dense matrix product: - /// - /// ``` - /// [[ 2. 0. 0.] - /// [ 0. 0. 0.] - /// [ 35. 40. 0.] - /// [ -4. 0. 0.]] - /// ``` - /// - /// a: A `CSRSparseMatrix`. - /// b: A `CSRSparseMatrix` with the same type and rank as `a`. - /// type: The type of both `a` and `b`. - /// transpose_a: If True, `a` transposed before multiplication. - /// transpose_b: If True, `b` transposed before multiplication. 
- /// adjoint_a: If True, `a` adjointed before multiplication. - /// adjoint_b: If True, `b` adjointed before multiplication. - /// - /// - Parameters: - /// - a: A CSRSparseMatrix. - /// - b: A CSRSparseMatrix. - /// - /// - Attrs: - /// - transpose_a: Indicates whether `a` should be transposed. - /// - transpose_b: Indicates whether `b` should be transposed. - /// - adjoint_a: Indicates whether `a` should be conjugate-transposed. - /// - adjoint_b: Indicates whether `b` should be conjugate-transposed. - /// - /// - Output c: A CSRSparseMatrix. - @inlinable @inline(__always) - public static func sparseMatrixSparseMatMul( - _ a: VariantHandle, - _ b: VariantHandle, - type: TensorDataType, - transposeA: Bool = false, - transposeB: Bool = false, - adjointA: Bool = false, - adjointB: Bool = false - ) -> VariantHandle { - _RawTFEager.sparseMatrixSparseMatMul( - a, b, type: type, transposeA: transposeA, transposeB: transposeB, adjointA: adjointA, - adjointB: adjointB) - } + /// Receives the named tensor from another XLA computation. Wraps the XLA Recv + /// + /// operator documented at + /// https://www.tensorflow.org/performance/xla/operation_semantics#recv . + /// + /// - Attrs: + /// - dtype: The type of the tensor. + /// - tensor_name: A string key that identifies the channel. + /// - shape: The shape of the tensor. + /// + /// - Output tensor: The tensor to receive. + @inlinable @inline(__always) + public static func xlaRecv( + tensorName: String, + shape: TensorShape? + ) -> Tensor { + _RawTFEager.xlaRecv(tensorName: tensorName, shape: shape) + } - /// Transposes the inner (matrix) dimensions of a CSRSparseMatrix. - /// - /// Transposes the inner (matrix) dimensions of a SparseMatrix and optionally - /// conjugates its values. - /// - /// - Parameter input: A CSRSparseMatrix. - /// - /// - Attr conjugate: Indicates whether `input` should be conjugated. - /// - /// - Output output: A CSRSparseMatrix. - @inlinable @inline(__always) - public static func sparseMatrixTranspose( - _ input: VariantHandle, - conjugate: Bool = false, - type: TensorDataType - ) -> VariantHandle { - _RawTFEager.sparseMatrixTranspose(input, conjugate: conjugate, type: type) - } - - /// Creates an all-zeros CSRSparseMatrix with shape `dense_shape`. - /// - /// - Parameter dense_shape: The desired matrix shape. - /// - /// - Output sparse_matrix: An empty CSR matrix with shape `dense_shape`. - @inlinable @inline(__always) - public static func sparseMatrixZeros( - denseShape: Tensor, - type: TensorDataType - ) -> VariantHandle { - _RawTFEager.sparseMatrixZeros(denseShape: denseShape, type: type) - } - - /// Computes the max of elements across dimensions of a SparseTensor. - /// - /// This Op takes a SparseTensor and is the sparse counterpart to - /// `tf.reduce_max()`. In particular, this Op also returns a dense `Tensor` - /// instead of a sparse one. - /// - /// Reduces `sp_input` along the dimensions given in `reduction_axes`. Unless - /// `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in - /// `reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained - /// with length 1. - /// - /// If `reduction_axes` has no entries, all dimensions are reduced, and a tensor - /// with a single element is returned. Additionally, the axes can be negative, - /// which are interpreted according to the indexing rules in Python. - /// - /// - Parameters: - /// - input_indices: 2-D. 
`N x R` matrix with the indices of non-empty values in a - /// SparseTensor, possibly not in canonical ordering. - /// - input_values: 1-D. `N` non-empty values corresponding to `input_indices`. - /// - input_shape: 1-D. Shape of the input SparseTensor. - /// - reduction_axes: 1-D. Length-`K` vector containing the reduction axes. - /// - /// - Attr keep_dims: If true, retain reduced dimensions with length 1. - /// - /// - Output output: `R-K`-D. The reduced Tensor. - @inlinable @inline(__always) - public static func sparseReduceMax( - inputIndices: Tensor, - inputValues: Tensor, - inputShape: Tensor, - reductionAxes: Tensor, - keepDims: Bool = false - ) -> Tensor { - switch commonBackend( - commonBackend( - commonBackend(inputIndices.handle.backend, inputValues.handle.backend), - inputShape.handle.backend), reductionAxes.handle.backend) - { - case .XLA: - let output_device = reductionAxes.device - let inputIndices = Tensor(copying: inputIndices, to: .defaultTFEager) - let inputValues = Tensor(copying: inputValues, to: .defaultTFEager) - let inputShape = Tensor(copying: inputShape, to: .defaultTFEager) - let reductionAxes = Tensor(copying: reductionAxes, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.sparseReduceMax( - inputIndices: inputIndices, inputValues: inputValues, inputShape: inputShape, - reductionAxes: reductionAxes, keepDims: keepDims), to: output_device) - case .TF_EAGER: - return _RawTFEager.sparseReduceMax( - inputIndices: inputIndices, inputValues: inputValues, inputShape: inputShape, - reductionAxes: reductionAxes, keepDims: keepDims) - } - - } - - /// Computes the max of elements across dimensions of a SparseTensor. - /// - /// This Op takes a SparseTensor and is the sparse counterpart to - /// `tf.reduce_max()`. In contrast to SparseReduceMax, this Op returns a - /// SparseTensor. - /// - /// Reduces `sp_input` along the dimensions given in `reduction_axes`. Unless - /// `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in - /// `reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained - /// with length 1. - /// - /// If `reduction_axes` has no entries, all dimensions are reduced, and a tensor - /// with a single element is returned. Additionally, the axes can be negative, - /// which are interpreted according to the indexing rules in Python. - /// - /// - Parameters: - /// - input_indices: 2-D. `N x R` matrix with the indices of non-empty values in a - /// SparseTensor, possibly not in canonical ordering. - /// - input_values: 1-D. `N` non-empty values corresponding to `input_indices`. - /// - input_shape: 1-D. Shape of the input SparseTensor. - /// - reduction_axes: 1-D. Length-`K` vector containing the reduction axes. - /// - /// - Attr keep_dims: If true, retain reduced dimensions with length 1. - @inlinable @inline(__always) - public static func sparseReduceMaxSparse( - inputIndices: Tensor, - inputValues: Tensor, - inputShape: Tensor, - reductionAxes: Tensor, - keepDims: Bool = false - ) -> (outputIndices: Tensor, outputValues: Tensor, outputShape: Tensor) { - _RawTFEager.sparseReduceMaxSparse( - inputIndices: inputIndices, inputValues: inputValues, inputShape: inputShape, - reductionAxes: reductionAxes, keepDims: keepDims) + /// Wraps the XLA Reduce operator, documented at + /// + /// https://www.tensorflow.org/performance/xla/operation_semantics#reduce . 
+ /// + /// - Parameters: + /// - input: the input tensor + /// - init_value: a scalar representing the initial value for the reduction + /// + /// - Attrs: + /// - dimensions_to_reduce: dimension numbers over which to reduce + /// - reducer: a reducer function to apply + @inlinable @inline(__always) + public static func xlaReduce< + T: TensorFlowNumeric, + ReducerIn: TensorGroup, + ReducerOut: TensorGroup + >( + _ input: Tensor, + initValue: Tensor, + dimensionsToReduce: [Int32], + reducer: (ReducerIn) -> ReducerOut + ) -> Tensor { + switch commonBackend(input.handle.backend, initValue.handle.backend) { + case .XLA: + let output_device = initValue.device + let input = Tensor(copying: input, to: .defaultTFEager) + let initValue = Tensor(copying: initValue, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.xlaReduce( + input, initValue: initValue, dimensionsToReduce: dimensionsToReduce, reducer: reducer), + to: output_device) + case .TF_EAGER: + return _RawTFEager.xlaReduce( + input, initValue: initValue, dimensionsToReduce: dimensionsToReduce, reducer: reducer) } - /// Computes the sum of elements across dimensions of a SparseTensor. - /// - /// This Op takes a SparseTensor and is the sparse counterpart to - /// `tf.reduce_sum()`. In particular, this Op also returns a dense `Tensor` - /// instead of a sparse one. - /// - /// Reduces `sp_input` along the dimensions given in `reduction_axes`. Unless - /// `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in - /// `reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained - /// with length 1. - /// - /// If `reduction_axes` has no entries, all dimensions are reduced, and a tensor - /// with a single element is returned. Additionally, the axes can be negative, - /// which are interpreted according to the indexing rules in Python. - /// - /// - Parameters: - /// - input_indices: 2-D. `N x R` matrix with the indices of non-empty values in a - /// SparseTensor, possibly not in canonical ordering. - /// - input_values: 1-D. `N` non-empty values corresponding to `input_indices`. - /// - input_shape: 1-D. Shape of the input SparseTensor. - /// - reduction_axes: 1-D. Length-`K` vector containing the reduction axes. - /// - /// - Attr keep_dims: If true, retain reduced dimensions with length 1. - /// - /// - Output output: `R-K`-D. The reduced Tensor. 
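A hedged usage sketch for `sparseReduceSum`, whose wrapper follows; the concrete values are only illustrative:

```swift
// Dense view of the input SparseTensor (shape [2, 3]):
//   [[1, 0, 2],
//    [0, 3, 0]]
let inputIndices = Tensor<Int64>(shape: [3, 2], scalars: [0, 0, 0, 2, 1, 1])
let inputValues = Tensor<Float>([1, 2, 3])
let inputShape = Tensor<Int64>([2, 3])

// Sum along axis 1, producing a dense rank-1 result.
let rowSums = _Raw.sparseReduceSum(
  inputIndices: inputIndices, inputValues: inputValues,
  inputShape: inputShape, reductionAxes: Tensor<Int32>([1]))
// rowSums == [3.0, 3.0]
```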
- @inlinable @inline(__always) - public static func sparseReduceSum( - inputIndices: Tensor, - inputValues: Tensor, - inputShape: Tensor, - reductionAxes: Tensor, - keepDims: Bool = false - ) -> Tensor { - switch commonBackend( - commonBackend( - commonBackend(inputIndices.handle.backend, inputValues.handle.backend), - inputShape.handle.backend), reductionAxes.handle.backend) - { - case .XLA: - let output_device = reductionAxes.device - let inputIndices = Tensor(copying: inputIndices, to: .defaultTFEager) - let inputValues = Tensor(copying: inputValues, to: .defaultTFEager) - let inputShape = Tensor(copying: inputShape, to: .defaultTFEager) - let reductionAxes = Tensor(copying: reductionAxes, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.sparseReduceSum( - inputIndices: inputIndices, inputValues: inputValues, inputShape: inputShape, - reductionAxes: reductionAxes, keepDims: keepDims), to: output_device) - case .TF_EAGER: - return _RawTFEager.sparseReduceSum( - inputIndices: inputIndices, inputValues: inputValues, inputShape: inputShape, - reductionAxes: reductionAxes, keepDims: keepDims) - } - - } - - /// Computes the sum of elements across dimensions of a SparseTensor. - /// - /// This Op takes a SparseTensor and is the sparse counterpart to - /// `tf.reduce_sum()`. In contrast to SparseReduceSum, this Op returns a - /// SparseTensor. - /// - /// Reduces `sp_input` along the dimensions given in `reduction_axes`. Unless - /// `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in - /// `reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained - /// with length 1. - /// - /// If `reduction_axes` has no entries, all dimensions are reduced, and a tensor - /// with a single element is returned. Additionally, the axes can be negative, - /// which are interpreted according to the indexing rules in Python. - /// - /// - Parameters: - /// - input_indices: 2-D. `N x R` matrix with the indices of non-empty values in a - /// SparseTensor, possibly not in canonical ordering. - /// - input_values: 1-D. `N` non-empty values corresponding to `input_indices`. - /// - input_shape: 1-D. Shape of the input SparseTensor. - /// - reduction_axes: 1-D. Length-`K` vector containing the reduction axes. - /// - /// - Attr keep_dims: If true, retain reduced dimensions with length 1. - @inlinable @inline(__always) - public static func sparseReduceSumSparse( - inputIndices: Tensor, - inputValues: Tensor, - inputShape: Tensor, - reductionAxes: Tensor, - keepDims: Bool = false - ) -> (outputIndices: Tensor, outputValues: Tensor, outputShape: Tensor) { - _RawTFEager.sparseReduceSumSparse( - inputIndices: inputIndices, inputValues: inputValues, inputShape: inputShape, - reductionAxes: reductionAxes, keepDims: keepDims) - } + } - /// Reorders a SparseTensor into the canonical, row-major ordering. - /// - /// Note that by convention, all sparse ops preserve the canonical ordering along - /// increasing dimension number. The only time ordering can be violated is during - /// manual manipulation of the indices and values vectors to add entries. - /// - /// Reordering does not affect the shape of the SparseTensor. - /// - /// If the tensor has rank `R` and `N` non-empty values, `input_indices` has - /// shape `[N, R]`, input_values has length `N`, and input_shape has length `R`. - /// - /// - Parameters: - /// - input_indices: 2-D. `N x R` matrix with the indices of non-empty values in a - /// SparseTensor, possibly not in canonical ordering. - /// - input_values: 1-D. 
`N` non-empty values corresponding to `input_indices`. - /// - input_shape: 1-D. Shape of the input SparseTensor. - /// - /// - Outputs: - /// - output_indices: 2-D. `N x R` matrix with the same indices as input_indices, but - /// in canonical row-major ordering. - /// - output_values: 1-D. `N` non-empty values corresponding to `output_indices`. - @inlinable @inline(__always) - public static func sparseReorder( - inputIndices: Tensor, - inputValues: Tensor, - inputShape: Tensor - ) -> (outputIndices: Tensor, outputValues: Tensor) { - _RawTFEager.sparseReorder( - inputIndices: inputIndices, inputValues: inputValues, inputShape: inputShape) - } - - /// Reshapes a SparseTensor to represent values in a new dense shape. - /// - /// This operation has the same semantics as reshape on the represented dense - /// tensor. The `input_indices` are recomputed based on the requested `new_shape`. - /// - /// If one component of `new_shape` is the special value -1, the size of that - /// dimension is computed so that the total dense size remains constant. At - /// most one component of `new_shape` can be -1. The number of dense elements - /// implied by `new_shape` must be the same as the number of dense elements - /// originally implied by `input_shape`. - /// - /// Reshaping does not affect the order of values in the SparseTensor. - /// - /// If the input tensor has rank `R_in` and `N` non-empty values, and `new_shape` - /// has length `R_out`, then `input_indices` has shape `[N, R_in]`, - /// `input_shape` has length `R_in`, `output_indices` has shape `[N, R_out]`, and - /// `output_shape` has length `R_out`. - /// - /// - Parameters: - /// - input_indices: 2-D. `N x R_in` matrix with the indices of non-empty values in a - /// SparseTensor. - /// - input_shape: 1-D. `R_in` vector with the input SparseTensor's dense shape. - /// - new_shape: 1-D. `R_out` vector with the requested new dense shape. - /// - /// - Outputs: - /// - output_indices: 2-D. `N x R_out` matrix with the updated indices of non-empty - /// values in the output SparseTensor. - /// - output_shape: 1-D. `R_out` vector with the full dense shape of the output - /// SparseTensor. This is the same as `new_shape` but with any -1 dimensions - /// filled in. - @inlinable @inline(__always) - public static func sparseReshape( - inputIndices: Tensor, - inputShape: Tensor, - newShape: Tensor - ) -> (outputIndices: Tensor, outputShape: Tensor) { - _RawTFEager.sparseReshape( - inputIndices: inputIndices, inputShape: inputShape, newShape: newShape) - } - - /// Computes the mean along sparse segments of a tensor. - /// - /// See `tf.sparse.segment_sum` for usage examples. - /// - /// Like `SegmentMean`, but `segment_ids` can have rank less than `data`'s first - /// dimension, selecting a subset of dimension 0, specified by `indices`. - /// - /// - Parameters: - /// - indices: A 1-D tensor. Has same rank as `segment_ids`. - /// - segment_ids: A 1-D tensor. Values should be sorted and can be repeated. - /// - /// - Output output: Has same shape as data, except for dimension 0 which - /// has size `k`, the number of segments. 
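A hedged sketch of `sparseSegmentMean` on illustrative values, ahead of the wrapper below:

```swift
// data has three rows; rows 0 and 1 average into segment 0, row 2 into segment 1.
let data = Tensor<Float>(shape: [3, 2], scalars: [1, 2, 3, 4, 5, 6])
let indices = Tensor<Int32>([0, 1, 2])    // which rows of `data` participate
let segmentIds = Tensor<Int32>([0, 0, 1]) // sorted segment id per selected row

let means = _Raw.sparseSegmentMean(data: data, indices: indices, segmentIds: segmentIds)
// means == [[2.0, 3.0],   // mean of rows 0 and 1
//           [5.0, 6.0]]   // mean of row 2 alone
```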
- @inlinable @inline(__always)
- public static func sparseSegmentMean<
- T: FloatingPoint & TensorFlowScalar,
- Tidx: TensorFlowIndex
- >(
- data: Tensor,
- indices: Tensor,
- segmentIds: Tensor
- ) -> Tensor {
- switch commonBackend(
- commonBackend(data.handle.backend, indices.handle.backend), segmentIds.handle.backend)
- {
- case .XLA:
- let output_device = segmentIds.device
- let data = Tensor(copying: data, to: .defaultTFEager)
- let indices = Tensor(copying: indices, to: .defaultTFEager)
- let segmentIds = Tensor(copying: segmentIds, to: .defaultTFEager)
- return Tensor(
- copying: _RawTFEager.sparseSegmentMean(
- data: data, indices: indices, segmentIds: segmentIds), to: output_device)
- case .TF_EAGER:
- return _RawTFEager.sparseSegmentMean(data: data, indices: indices, segmentIds: segmentIds)
- }
-
- }
-
- /// Computes gradients for SparseSegmentMean.
- ///
- /// Returns tensor "output" with same shape as grad, except for dimension 0 whose
- /// value is output_dim0.
- ///
- /// - Parameters:
- /// - grad: gradient propagated to the SparseSegmentMean op.
- /// - indices: indices passed to the corresponding SparseSegmentMean op.
- /// - segment_ids: segment_ids passed to the corresponding SparseSegmentMean op.
- /// - output_dim0: dimension 0 of "data" passed to SparseSegmentMean op.
- @inlinable @inline(__always)
- public static func sparseSegmentMeanGrad<
- T: FloatingPoint & TensorFlowScalar,
- Tidx: TensorFlowIndex
- >(
- grad: Tensor,
- indices: Tensor,
- segmentIds: Tensor,
- outputDim0: Tensor
- ) -> Tensor {
- switch commonBackend(
- commonBackend(
- commonBackend(grad.handle.backend, indices.handle.backend), segmentIds.handle.backend),
- outputDim0.handle.backend)
- {
- case .XLA:
- let output_device = outputDim0.device
- let grad = Tensor(copying: grad, to: .defaultTFEager)
- let indices = Tensor(copying: indices, to: .defaultTFEager)
- let segmentIds = Tensor(copying: segmentIds, to: .defaultTFEager)
- let outputDim0 = Tensor(copying: outputDim0, to: .defaultTFEager)
- return Tensor(
- copying: _RawTFEager.sparseSegmentMeanGrad(
- grad: grad, indices: indices, segmentIds: segmentIds, outputDim0: outputDim0),
- to: output_device)
- case .TF_EAGER:
- return _RawTFEager.sparseSegmentMeanGrad(
- grad: grad, indices: indices, segmentIds: segmentIds, outputDim0: outputDim0)
- }
-
- }
-
- /// Computes the mean along sparse segments of a tensor.
- ///
- /// Like `SparseSegmentMean`, but allows missing ids in `segment_ids`. If an id is
- /// missing, the `output` tensor at that position will be zeroed.
- ///
- /// Read
- /// [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation)
- /// for an explanation of segments.
- ///
- /// - Parameters:
- /// - indices: A 1-D tensor. Has same rank as `segment_ids`.
- /// - segment_ids: A 1-D tensor. Values should be sorted and can be repeated.
- /// - num_segments: Should equal the number of distinct segment IDs.
- ///
- /// - Output output: Has same shape as data, except for dimension 0 which has size
- /// `num_segments`.
- @inlinable @inline(__always) - public static func sparseSegmentMeanWithNumSegments< - T: FloatingPoint & TensorFlowScalar, - Tidx: TensorFlowIndex, - Tnumsegments: TensorFlowIndex - >( - data: Tensor, - indices: Tensor, - segmentIds: Tensor, - numSegments: Tensor - ) -> Tensor { - switch commonBackend( - commonBackend( - commonBackend(data.handle.backend, indices.handle.backend), segmentIds.handle.backend), - numSegments.handle.backend) - { - case .XLA: - let output_device = numSegments.device - let data = Tensor(copying: data, to: .defaultTFEager) - let indices = Tensor(copying: indices, to: .defaultTFEager) - let segmentIds = Tensor(copying: segmentIds, to: .defaultTFEager) - let numSegments = Tensor(copying: numSegments, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.sparseSegmentMeanWithNumSegments( - data: data, indices: indices, segmentIds: segmentIds, numSegments: numSegments), - to: output_device) - case .TF_EAGER: - return _RawTFEager.sparseSegmentMeanWithNumSegments( - data: data, indices: indices, segmentIds: segmentIds, numSegments: numSegments) - } - - } - - /// Computes the sum along sparse segments of a tensor divided by the sqrt of N. - /// - /// N is the size of the segment being reduced. - /// - /// See `tf.sparse.segment_sum` for usage examples. - /// - /// - /// - Parameters: - /// - indices: A 1-D tensor. Has same rank as `segment_ids`. - /// - segment_ids: A 1-D tensor. Values should be sorted and can be repeated. - /// - /// - Output output: Has same shape as data, except for dimension 0 which - /// has size `k`, the number of segments. - @inlinable @inline(__always) - public static func sparseSegmentSqrtN< - T: FloatingPoint & TensorFlowScalar, - Tidx: TensorFlowIndex - >( - data: Tensor, - indices: Tensor, - segmentIds: Tensor - ) -> Tensor { - switch commonBackend( - commonBackend(data.handle.backend, indices.handle.backend), segmentIds.handle.backend) - { - case .XLA: - let output_device = segmentIds.device - let data = Tensor(copying: data, to: .defaultTFEager) - let indices = Tensor(copying: indices, to: .defaultTFEager) - let segmentIds = Tensor(copying: segmentIds, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.sparseSegmentSqrtN( - data: data, indices: indices, segmentIds: segmentIds), to: output_device) - case .TF_EAGER: - return _RawTFEager.sparseSegmentSqrtN(data: data, indices: indices, segmentIds: segmentIds) - } - - } - - /// Computes gradients for SparseSegmentSqrtN. - /// - /// Returns tensor "output" with same shape as grad, except for dimension 0 whose - /// value is output_dim0. - /// - /// - Parameters: - /// - grad: gradient propagated to the SparseSegmentSqrtN op. - /// - indices: indices passed to the corresponding SparseSegmentSqrtN op. - /// - segment_ids: segment_ids passed to the corresponding SparseSegmentSqrtN op. - /// - output_dim0: dimension 0 of "data" passed to SparseSegmentSqrtN op. 
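To make the 1/sqrt(N) scaling concrete, a hedged sketch of `sparseSegmentSqrtN` (implemented above) on illustrative values:

```swift
// Both selected rows land in segment 0, so N = 2 and the sum is scaled by 1/sqrt(2).
let data = Tensor<Float>(shape: [2, 2], scalars: [3, 3, 1, 1])
let out = _Raw.sparseSegmentSqrtN(
  data: data, indices: Tensor<Int32>([0, 1]), segmentIds: Tensor<Int32>([0, 0]))
// out == [[(3 + 1) / sqrt(2), (3 + 1) / sqrt(2)]] ≈ [[2.828, 2.828]]
```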
- @inlinable @inline(__always)
- public static func sparseSegmentSqrtNGrad<
- T: FloatingPoint & TensorFlowScalar,
- Tidx: TensorFlowIndex
- >(
- grad: Tensor,
- indices: Tensor,
- segmentIds: Tensor,
- outputDim0: Tensor
- ) -> Tensor {
- switch commonBackend(
- commonBackend(
- commonBackend(grad.handle.backend, indices.handle.backend), segmentIds.handle.backend),
- outputDim0.handle.backend)
- {
- case .XLA:
- let output_device = outputDim0.device
- let grad = Tensor(copying: grad, to: .defaultTFEager)
- let indices = Tensor(copying: indices, to: .defaultTFEager)
- let segmentIds = Tensor(copying: segmentIds, to: .defaultTFEager)
- let outputDim0 = Tensor(copying: outputDim0, to: .defaultTFEager)
- return Tensor(
- copying: _RawTFEager.sparseSegmentSqrtNGrad(
- grad: grad, indices: indices, segmentIds: segmentIds, outputDim0: outputDim0),
- to: output_device)
- case .TF_EAGER:
- return _RawTFEager.sparseSegmentSqrtNGrad(
- grad: grad, indices: indices, segmentIds: segmentIds, outputDim0: outputDim0)
- }
-
- }
-
- /// Computes the sum along sparse segments of a tensor divided by the sqrt of N.
- ///
- /// N is the size of the segment being reduced.
- ///
- /// Like `SparseSegmentSqrtN`, but allows missing ids in `segment_ids`. If an id is
- /// missing, the `output` tensor at that position will be zeroed.
- ///
- /// Read
- /// [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation)
- /// for an explanation of segments.
- ///
- /// - Parameters:
- /// - indices: A 1-D tensor. Has same rank as `segment_ids`.
- /// - segment_ids: A 1-D tensor. Values should be sorted and can be repeated.
- /// - num_segments: Should equal the number of distinct segment IDs.
- ///
- /// - Output output: Has same shape as data, except for dimension 0 which
- /// has size `k`, the number of segments.
- @inlinable @inline(__always)
- public static func sparseSegmentSqrtNWithNumSegments<
- T: FloatingPoint & TensorFlowScalar,
- Tidx: TensorFlowIndex,
- Tnumsegments: TensorFlowIndex
- >(
- data: Tensor,
- indices: Tensor,
- segmentIds: Tensor,
- numSegments: Tensor
- ) -> Tensor {
- switch commonBackend(
+ /// Wraps the XLA ReduceWindow operator, documented at
+ ///
+ /// https://www.tensorflow.org/performance/xla/operation_semantics#reducewindow .
+ ///
+ /// - Parameters:
+ /// - input: the input tensor
+ /// - init_value: a scalar representing the initial value for the reduction
+ /// - window_dimensions: the shape of the window
+ /// - window_strides: the inter-window strides
+ /// - padding: the padding to apply at the start and end of each input dimension
+ ///
+ /// - Attr computation: a reducer function to apply
+ @inlinable @inline(__always)
+ public static func xlaReduceWindow<
+ T: TensorFlowNumeric,
+ Tindices: TensorFlowIndex,
+ ComputationIn: TensorGroup,
+ ComputationOut: TensorGroup
+ >(
+ _ input: Tensor,
+ initValue: Tensor,
+ windowDimensions: Tensor,
+ windowStrides: Tensor,
+ baseDilations: Tensor,
+ windowDilations: Tensor,
+ padding: Tensor,
+ computation: (ComputationIn) -> ComputationOut
+ ) -> Tensor {
+ switch commonBackend(
+ commonBackend(
- commonBackend(
- commonBackend(data.handle.backend, indices.handle.backend), segmentIds.handle.backend),
- numSegments.handle.backend)
- {
- case .XLA:
- let output_device = numSegments.device
- let data = Tensor(copying: data, to: .defaultTFEager)
- let indices = Tensor(copying: indices, to: .defaultTFEager)
- let segmentIds = Tensor(copying: segmentIds, to: .defaultTFEager)
- let numSegments = Tensor(copying: numSegments, to: .defaultTFEager)
- return Tensor(
- copying: _RawTFEager.sparseSegmentSqrtNWithNumSegments(
- data: data, indices: indices, segmentIds: segmentIds, numSegments: numSegments),
- to: output_device)
- case .TF_EAGER:
- return _RawTFEager.sparseSegmentSqrtNWithNumSegments(
- data: data, indices: indices, segmentIds: segmentIds, numSegments: numSegments)
- }
-
- }
-
- /// Computes the sum along sparse segments of a tensor.
- ///
- /// Read
- /// [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation)
- /// for an explanation of segments.
- ///
- /// Like `SegmentSum`, but `segment_ids` can have rank less than `data`'s first
- /// dimension, selecting a subset of dimension 0, specified by `indices`.
- ///
- /// For example:
- ///
- /// ```python
- /// c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])
- ///
- /// # Select two rows, one segment.
- /// tf.sparse_segment_sum(c, tf.constant([0, 1]), tf.constant([0, 0]))
- /// # => [[0 0 0 0]]
- ///
- /// # Select two rows, two segments.
- /// tf.sparse_segment_sum(c, tf.constant([0, 1]), tf.constant([0, 1]))
- /// # => [[ 1 2 3 4]
- /// # [-1 -2 -3 -4]]
- ///
- /// # Select all rows, two segments.
- /// tf.sparse_segment_sum(c, tf.constant([0, 1, 2]), tf.constant([0, 0, 1]))
- /// # => [[0 0 0 0]
- /// # [5 6 7 8]]
- ///
- /// # Which is equivalent to:
- /// tf.segment_sum(c, tf.constant([0, 0, 1]))
- /// ```
- ///
- /// - Parameters:
- /// - indices: A 1-D tensor. Has same rank as `segment_ids`.
- /// - segment_ids: A 1-D tensor. Values should be sorted and can be repeated.
- ///
- /// - Output output: Has same shape as data, except for dimension 0 which
- /// has size `k`, the number of segments.
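A hedged Swift counterpart of the first Python example above, using the wrapper that follows:

```swift
let c = Tensor<Int32>(shape: [3, 4], scalars: [1, 2, 3, 4, -1, -2, -3, -4, 5, 6, 7, 8])

// Select two rows, one segment: rows 0 and 1 cancel out in segment 0.
let summed = _Raw.sparseSegmentSum(
  data: c, indices: Tensor<Int32>([0, 1]), segmentIds: Tensor<Int32>([0, 0]))
// summed == [[0, 0, 0, 0]]
```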
- @inlinable @inline(__always)
- public static func sparseSegmentSum<
- T: TensorFlowNumeric,
- Tidx: TensorFlowIndex
- >(
- data: Tensor,
- indices: Tensor,
- segmentIds: Tensor
- ) -> Tensor {
- switch commonBackend(
- commonBackend(data.handle.backend, indices.handle.backend), segmentIds.handle.backend)
- {
- case .XLA:
- let output_device = segmentIds.device
- let data = Tensor(copying: data, to: .defaultTFEager)
- let indices = Tensor(copying: indices, to: .defaultTFEager)
- let segmentIds = Tensor(copying: segmentIds, to: .defaultTFEager)
- return Tensor(
- copying: _RawTFEager.sparseSegmentSum(
- data: data, indices: indices, segmentIds: segmentIds), to: output_device)
- case .TF_EAGER:
- return _RawTFEager.sparseSegmentSum(data: data, indices: indices, segmentIds: segmentIds)
- }
-
- }
-
- /// Computes the sum along sparse segments of a tensor.
- ///
- /// Like `SparseSegmentSum`, but allows missing ids in `segment_ids`. If an id is
- /// missing, the `output` tensor at that position will be zeroed.
- ///
- /// Read
- /// [the section on segmentation](https://tensorflow.org/api_docs/python/tf/sparse#Segmentation)
- /// for an explanation of segments.
- ///
- /// For example:
- ///
- /// ```python
- /// c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])
- ///
- /// tf.sparse_segment_sum_with_num_segments(
- /// c, tf.constant([0, 1]), tf.constant([0, 0]), num_segments=3)
- /// # => [[0 0 0 0]
- /// # [0 0 0 0]
- /// # [0 0 0 0]]
- ///
- /// tf.sparse_segment_sum_with_num_segments(c,
- /// tf.constant([0, 1]),
- /// tf.constant([0, 2]),
- /// num_segments=4)
- /// # => [[ 1 2 3 4]
- /// # [ 0 0 0 0]
- /// # [-1 -2 -3 -4]
- /// # [ 0 0 0 0]]
- /// ```
- ///
- /// - Parameters:
- /// - indices: A 1-D tensor. Has same rank as `segment_ids`.
- /// - segment_ids: A 1-D tensor. Values should be sorted and can be repeated.
- /// - num_segments: Should equal the number of distinct segment IDs.
- ///
- /// - Output output: Has same shape as data, except for dimension 0 which
- /// has size `num_segments`.
- @inlinable @inline(__always)
- public static func sparseSegmentSumWithNumSegments<
- T: TensorFlowNumeric,
- Tidx: TensorFlowIndex,
- Tnumsegments: TensorFlowIndex
- >(
- data: Tensor,
- indices: Tensor,
- segmentIds: Tensor,
- numSegments: Tensor
- ) -> Tensor {
- switch commonBackend(
- commonBackend(
- commonBackend(data.handle.backend, indices.handle.backend), segmentIds.handle.backend),
- numSegments.handle.backend)
- {
- case .XLA:
- let output_device = numSegments.device
- let data = Tensor(copying: data, to: .defaultTFEager)
- let indices = Tensor(copying: indices, to: .defaultTFEager)
- let segmentIds = Tensor(copying: segmentIds, to: .defaultTFEager)
- let numSegments = Tensor(copying: numSegments, to: .defaultTFEager)
- return Tensor(
- copying: _RawTFEager.sparseSegmentSumWithNumSegments(
- data: data, indices: indices, segmentIds: segmentIds, numSegments: numSegments),
- to: output_device)
- case .TF_EAGER:
- return _RawTFEager.sparseSegmentSumWithNumSegments(
- data: data, indices: indices, segmentIds: segmentIds, numSegments: numSegments)
- }
-
- }
-
- /// Slice a `SparseTensor` based on the `start` and `size`.
- /// - /// For example, if the input is - /// - /// input_tensor = shape = [2, 7] - /// [ a d e ] - /// [b c ] - /// - /// Graphically the output tensors are: - /// - /// sparse_slice([0, 0], [2, 4]) = shape = [2, 4] - /// [ a ] - /// [b c ] - /// - /// sparse_slice([0, 4], [2, 3]) = shape = [2, 3] - /// [ d e ] - /// [ ] - /// - /// - Parameters: - /// - indices: 2-D tensor represents the indices of the sparse tensor. - /// - values: 1-D tensor represents the values of the sparse tensor. - /// - shape: 1-D. tensor represents the shape of the sparse tensor. - /// - start: 1-D. tensor represents the start of the slice. - /// - size: 1-D. tensor represents the size of the slice. - /// output indices: A list of 1-D tensors represents the indices of the output - /// sparse tensors. - /// - /// - Outputs: - /// - output_values: A list of 1-D tensors represents the values of the output sparse - /// tensors. - /// - output_shape: A list of 1-D tensors represents the shape of the output sparse - /// tensors. - @inlinable @inline(__always) - public static func sparseSlice( - indices: Tensor, - _ values: Tensor, - shape: Tensor, - start: Tensor, - size: Tensor - ) -> (outputIndices: Tensor, outputValues: Tensor, outputShape: Tensor) { - _RawTFEager.sparseSlice(indices: indices, values, shape: shape, start: start, size: size) - } - - /// The gradient operator for the SparseSlice op. - /// - /// This op takes in the upstream gradient w.r.t. non-empty values of - /// the sliced `SparseTensor`, and outputs the gradients w.r.t. - /// the non-empty values of input `SparseTensor`. - /// - /// - Parameters: - /// - backprop_val_grad: 1-D. The gradient with respect to - /// the non-empty values of the sliced `SparseTensor`. - /// - input_indices: 2-D. The `indices` of the input `SparseTensor`. - /// - input_start: 1-D. tensor represents the start of the slice. - /// - output_indices: 2-D. The `indices` of the sliced `SparseTensor`. - /// - /// - Output val_grad: 1-D. The gradient with respect to the non-empty values of input `SparseTensor`. - @inlinable @inline(__always) - public static func sparseSliceGrad( - backpropValGrad: Tensor, - inputIndices: Tensor, - inputStart: Tensor, - outputIndices: Tensor - ) -> Tensor { - switch commonBackend( - commonBackend( - commonBackend(backpropValGrad.handle.backend, inputIndices.handle.backend), - inputStart.handle.backend), outputIndices.handle.backend) - { - case .XLA: - let output_device = outputIndices.device - let backpropValGrad = Tensor(copying: backpropValGrad, to: .defaultTFEager) - let inputIndices = Tensor(copying: inputIndices, to: .defaultTFEager) - let inputStart = Tensor(copying: inputStart, to: .defaultTFEager) - let outputIndices = Tensor(copying: outputIndices, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.sparseSliceGrad( - backpropValGrad: backpropValGrad, inputIndices: inputIndices, inputStart: inputStart, - outputIndices: outputIndices), to: output_device) - case .TF_EAGER: - return _RawTFEager.sparseSliceGrad( - backpropValGrad: backpropValGrad, inputIndices: inputIndices, inputStart: inputStart, - outputIndices: outputIndices) - } - - } - - /// Applies softmax to a batched N-D `SparseTensor`. - /// - /// The inputs represent an N-D SparseTensor with logical shape `[..., B, C]` - /// (where `N >= 2`), and with indices sorted in the canonical lexicographic order. 
- /// - /// This op is equivalent to applying the normal `tf.nn.softmax()` to each innermost - /// logical submatrix with shape `[B, C]`, but with the catch that *the implicitly - /// zero elements do not participate*. Specifically, the algorithm is equivalent - /// to the following: - /// - /// (1) Applies `tf.nn.softmax()` to a densified view of each innermost submatrix - /// with shape `[B, C]`, along the size-C dimension; - /// (2) Masks out the original implicitly-zero locations; - /// (3) Renormalizes the remaining elements. - /// - /// Hence, the `SparseTensor` result has exactly the same non-zero indices and - /// shape. - /// - /// - Parameters: - /// - sp_indices: 2-D. `NNZ x R` matrix with the indices of non-empty values in a - /// SparseTensor, in canonical ordering. - /// - sp_values: 1-D. `NNZ` non-empty values corresponding to `sp_indices`. - /// - sp_shape: 1-D. Shape of the input SparseTensor. - /// - /// - Output output: 1-D. The `NNZ` values for the result `SparseTensor`. - @inlinable @inline(__always) - public static func sparseSoftmax( - spIndices: Tensor, - spValues: Tensor, - spShape: Tensor - ) -> Tensor { - switch commonBackend( - commonBackend(spIndices.handle.backend, spValues.handle.backend), spShape.handle.backend) - { - case .XLA: - let output_device = spShape.device - let spIndices = Tensor(copying: spIndices, to: .defaultTFEager) - let spValues = Tensor(copying: spValues, to: .defaultTFEager) - let spShape = Tensor(copying: spShape, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.sparseSoftmax( - spIndices: spIndices, spValues: spValues, spShape: spShape), to: output_device) - case .TF_EAGER: - return _RawTFEager.sparseSoftmax(spIndices: spIndices, spValues: spValues, spShape: spShape) - } - - } - - /// Computes softmax cross entropy cost and gradients to backpropagate. - /// - /// Unlike `SoftmaxCrossEntropyWithLogits`, this operation does not accept - /// a matrix of label probabilities, but rather a single label per row - /// of features. This label is considered to have probability 1.0 for the - /// given row. - /// - /// Inputs are the logits, not probabilities. - /// - /// - Parameters: - /// - features: batch_size x num_classes matrix - /// - labels: batch_size vector with values in [0, num_classes). - /// This is the label for the given minibatch entry. - /// - /// - Outputs: - /// - loss: Per example loss (batch_size vector). - /// - backprop: backpropagated gradients (batch_size x num_classes matrix). - @inlinable @inline(__always) - public static func sparseSoftmaxCrossEntropyWithLogits< - T: FloatingPoint & TensorFlowScalar, - Tlabels: TensorFlowIndex - >( - features: Tensor, - labels: Tensor - ) -> (loss: Tensor, backprop: Tensor) { - switch commonBackend(features.handle.backend, labels.handle.backend) { - case .XLA: - return _RawXLA.sparseSoftmaxCrossEntropyWithLogits(features: features, labels: labels) - case .TF_EAGER: - return _RawTFEager.sparseSoftmaxCrossEntropyWithLogits(features: features, labels: labels) - } - - } - - /// Returns the element-wise max of two SparseTensors. - /// - /// Assumes the two SparseTensors have the same shape, i.e., no broadcasting. - /// - /// - Parameters: - /// - a_indices: 2-D. `N x R` matrix with the indices of non-empty values in a - /// SparseTensor, in the canonical lexicographic ordering. - /// - a_values: 1-D. `N` non-empty values corresponding to `a_indices`. - /// - a_shape: 1-D. Shape of the input SparseTensor. - /// - b_indices: counterpart to `a_indices` for the other operand. 
- /// - b_values: counterpart to `a_values` for the other operand; must be of the same dtype.
- /// - b_shape: counterpart to `a_shape` for the other operand; the two shapes must be equal.
- ///
- /// - Outputs:
- /// - output_indices: 2-D. The indices of the output SparseTensor.
- /// - output_values: 1-D. The values of the output SparseTensor.
- @inlinable @inline(__always)
- public static func sparseSparseMaximum(
- aIndices: Tensor,
- aValues: Tensor,
- aShape: Tensor,
- bIndices: Tensor,
- bValues: Tensor,
- bShape: Tensor
- ) -> (outputIndices: Tensor, outputValues: Tensor) {
- _RawTFEager.sparseSparseMaximum(
- aIndices: aIndices, aValues: aValues, aShape: aShape, bIndices: bIndices, bValues: bValues,
- bShape: bShape)
- }
-
- /// Returns the element-wise min of two SparseTensors.
- ///
- /// Assumes the two SparseTensors have the same shape, i.e., no broadcasting.
- ///
- /// - Parameters:
- /// - a_indices: 2-D. `N x R` matrix with the indices of non-empty values in a
- /// SparseTensor, in the canonical lexicographic ordering.
- /// - a_values: 1-D. `N` non-empty values corresponding to `a_indices`.
- /// - a_shape: 1-D. Shape of the input SparseTensor.
- /// - b_indices: counterpart to `a_indices` for the other operand.
- /// - b_values: counterpart to `a_values` for the other operand; must be of the same dtype.
- /// - b_shape: counterpart to `a_shape` for the other operand; the two shapes must be equal.
- ///
- /// - Outputs:
- /// - output_indices: 2-D. The indices of the output SparseTensor.
- /// - output_values: 1-D. The values of the output SparseTensor.
- @inlinable @inline(__always)
- public static func sparseSparseMinimum(
- aIndices: Tensor,
- aValues: Tensor,
- aShape: Tensor,
- bIndices: Tensor,
- bValues: Tensor,
- bShape: Tensor
- ) -> (outputIndices: Tensor, outputValues: Tensor) {
- _RawTFEager.sparseSparseMinimum(
- aIndices: aIndices, aValues: aValues, aShape: aShape, bIndices: bIndices, bValues: bValues,
- bShape: bShape)
- }
-
- /// Split a `SparseTensor` into `num_split` tensors along one dimension.
- ///
- /// If `shape[split_dim]` is not an integer multiple of `num_split`, slices
- /// `[0 : shape[split_dim] % num_split]` each get one extra element along the
- /// split dimension.
- /// For example, if `split_dim = 1` and `num_split = 2` and the input is
- ///
- /// input_tensor = shape = [2, 7]
- /// [ a d e ]
- /// [b c ]
- ///
- /// Graphically the output tensors are:
- ///
- /// output_tensor[0] = shape = [2, 4]
- /// [ a ]
- /// [b c ]
- ///
- /// output_tensor[1] = shape = [2, 3]
- /// [ d e ]
- /// [ ]
- ///
- /// - Parameters:
- /// - split_dim: 0-D. The dimension along which to split. Must be in the range
- /// `[0, rank(shape))`.
- /// - indices: 2-D tensor representing the indices of the sparse tensor.
- /// - values: 1-D tensor representing the values of the sparse tensor.
- /// - shape: 1-D tensor representing the shape of the sparse tensor.
- ///
- /// - Attr num_split: The number of ways to split.
- ///
- /// - Outputs:
- /// - output_indices: A list of 1-D tensors representing the indices of the output
- /// sparse tensors.
- /// - output_values: A list of 1-D tensors representing the values of the output sparse
- /// tensors.
- /// - output_shape: A list of 1-D tensors representing the shape of the output sparse
- /// tensors.
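The uneven-split rule in the `SparseSplit` doc above is easiest to see as plain arithmetic. A minimal sketch, independent of the bindings (the helper name is illustrative):

```swift
// Per-slice extents for SparseSplit along one dimension: the first
// `dimSize % numSplit` slices each receive one extra element.
func splitExtents(dimSize: Int, numSplit: Int) -> [Int] {
  let base = dimSize / numSplit
  let extra = dimSize % numSplit
  return (0..<numSplit).map { $0 < extra ? base + 1 : base }
}

print(splitExtents(dimSize: 7, numSplit: 2))  // [4, 3], matching the [2, 4] / [2, 3] example
```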
- @inlinable @inline(__always) - public static func sparseSplit( - splitDim: Tensor, - indices: Tensor, - _ values: Tensor, - shape: Tensor, - numSplit: Int64 - ) -> (outputIndices: [Tensor], outputValues: [Tensor], outputShape: [Tensor]) { - _RawTFEager.sparseSplit( - splitDim: splitDim, indices: indices, values, shape: shape, numSplit: numSplit) - } - - /// Adds up a `SparseTensor` and a dense `Tensor`, producing a dense `Tensor`. - /// - /// This Op does not require `a_indices` be sorted in standard lexicographic order. - /// - /// - Parameters: - /// - a_indices: 2-D. The `indices` of the `SparseTensor`, with shape `[nnz, ndims]`. - /// - a_values: 1-D. The `values` of the `SparseTensor`, with shape `[nnz]`. - /// - a_shape: 1-D. The `shape` of the `SparseTensor`, with shape `[ndims]`. - /// - b: `ndims`-D Tensor. With shape `a_shape`. - @inlinable @inline(__always) - public static func sparseTensorDenseAdd< - T: TensorFlowNumeric, - Tindices: TensorFlowIndex - >( - aIndices: Tensor, - aValues: Tensor, - aShape: Tensor, - _ b: Tensor - ) -> Tensor { - switch commonBackend( - commonBackend( - commonBackend(aIndices.handle.backend, aValues.handle.backend), aShape.handle.backend), - b.handle.backend) - { - case .XLA: - let output_device = b.device - let aIndices = Tensor(copying: aIndices, to: .defaultTFEager) - let aValues = Tensor(copying: aValues, to: .defaultTFEager) - let aShape = Tensor(copying: aShape, to: .defaultTFEager) - let b = Tensor(copying: b, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.sparseTensorDenseAdd( - aIndices: aIndices, aValues: aValues, aShape: aShape, b), to: output_device) - case .TF_EAGER: - return _RawTFEager.sparseTensorDenseAdd( - aIndices: aIndices, aValues: aValues, aShape: aShape, b) - } - - } - - /// Multiply SparseTensor (of rank 2) "A" by dense matrix "B". - /// - /// No validity checking is performed on the indices of A. However, the following - /// input format is recommended for optimal behavior: - /// - /// if adjoint_a == false: - /// A should be sorted in lexicographically increasing order. Use SparseReorder - /// if you're not sure. - /// if adjoint_a == true: - /// A should be sorted in order of increasing dimension 1 (i.e., "column major" - /// order instead of "row major" order). - /// - /// - Parameters: - /// - a_indices: 2-D. The `indices` of the `SparseTensor`, size `[nnz, 2]` Matrix. - /// - a_values: 1-D. The `values` of the `SparseTensor`, size `[nnz]` Vector. - /// - a_shape: 1-D. The `shape` of the `SparseTensor`, size `[2]` Vector. - /// - b: 2-D. A dense Matrix. - /// - /// - Attrs: - /// - adjoint_a: Use the adjoint of A in the matrix multiply. If A is complex, this - /// is transpose(conj(A)). Otherwise it's transpose(A). - /// - adjoint_b: Use the adjoint of B in the matrix multiply. If B is complex, this - /// is transpose(conj(B)). Otherwise it's transpose(B). 
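To make the contraction concrete, here is a plain-Swift sketch of what `SparseTensorDenseMatMul` computes for a rank-2 COO operand, ignoring the adjoint flags; arrays stand in for tensors, and the helper is illustrative only, not the op's implementation:

```swift
// Multiply a rank-2 COO sparse matrix A by a dense matrix B:
// out[i, j] += A[i, k] * B[k, j] for every stored (i, k, value) triple.
func sparseDenseMatMul(
  aIndices: [[Int]], aValues: [Float], aShape: [Int], b: [[Float]]
) -> [[Float]] {
  var out = [[Float]](
    repeating: [Float](repeating: 0, count: b[0].count), count: aShape[0])
  for (idx, v) in zip(aIndices, aValues) {
    let (i, k) = (idx[0], idx[1])
    for j in 0..<b[0].count { out[i][j] += v * b[k][j] }
  }
  return out
}

let identity: [[Float]] = [[1, 0], [0, 1]]
print(sparseDenseMatMul(aIndices: [[0, 1], [1, 0]], aValues: [2, 3],
                        aShape: [2, 2], b: identity))
// [[0.0, 2.0], [3.0, 0.0]] -- the densified A itself
```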
- @inlinable @inline(__always) - public static func sparseTensorDenseMatMul< - T: TensorFlowScalar, - Tindices: TensorFlowIndex - >( - aIndices: Tensor, - aValues: Tensor, - aShape: Tensor, - _ b: Tensor, - adjointA: Bool = false, - adjointB: Bool = false - ) -> Tensor { - switch commonBackend( - commonBackend( - commonBackend(aIndices.handle.backend, aValues.handle.backend), aShape.handle.backend), - b.handle.backend) - { - case .XLA: - let output_device = b.device - let aIndices = Tensor(copying: aIndices, to: .defaultTFEager) - let aValues = Tensor(copying: aValues, to: .defaultTFEager) - let aShape = Tensor(copying: aShape, to: .defaultTFEager) - let b = Tensor(copying: b, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.sparseTensorDenseMatMul( - aIndices: aIndices, aValues: aValues, aShape: aShape, b, adjointA: adjointA, - adjointB: adjointB), to: output_device) - case .TF_EAGER: - return _RawTFEager.sparseTensorDenseMatMul( - aIndices: aIndices, aValues: aValues, aShape: aShape, b, adjointA: adjointA, - adjointB: adjointB) - } - - } - - /// Creates a dataset that splits a SparseTensor into elements row-wise. - @inlinable @inline(__always) - public static func sparseTensorSliceDataset( - indices: Tensor, - _ values: Tensor, - denseShape: Tensor - ) -> VariantHandle { - _RawTFEager.sparseTensorSliceDataset(indices: indices, values, denseShape: denseShape) - } - - /// Converts a SparseTensor to a (possibly batched) CSRSparseMatrix. - /// - /// - Parameters: - /// - indices: SparseTensor indices. - /// - values: SparseTensor values. - /// - dense_shape: SparseTensor dense shape. - /// - /// - Output sparse_matrix: A (possibly batched) CSRSparseMatrix. - @inlinable @inline(__always) - public static func sparseTensorToCSRSparseMatrix( - indices: Tensor, - _ values: Tensor, - denseShape: Tensor - ) -> VariantHandle { - _RawTFEager.sparseTensorToCSRSparseMatrix(indices: indices, values, denseShape: denseShape) - } - - /// Converts a sparse representation into a dense tensor. - /// - /// Builds an array `dense` with shape `output_shape` such that - /// - /// ``` - /// # If sparse_indices is scalar - /// dense[i] = (i == sparse_indices ? sparse_values : default_value) - /// - /// # If sparse_indices is a vector, then for each i - /// dense[sparse_indices[i]] = sparse_values[i] - /// - /// # If sparse_indices is an n by d matrix, then for each i in [0, n) - /// dense[sparse_indices[i][0], ..., sparse_indices[i][d-1]] = sparse_values[i] - /// ``` - /// - /// All other values in `dense` are set to `default_value`. If `sparse_values` is a - /// scalar, all sparse indices are set to this single value. - /// - /// Indices should be sorted in lexicographic order, and indices must not - /// contain any repeats. If `validate_indices` is true, these properties - /// are checked during execution. - /// - /// - Parameters: - /// - sparse_indices: 0-D, 1-D, or 2-D. `sparse_indices[i]` contains the complete - /// index where `sparse_values[i]` will be placed. - /// - output_shape: 1-D. Shape of the dense output tensor. - /// - sparse_values: 1-D. Values corresponding to each row of `sparse_indices`, - /// or a scalar value to be used for all sparse indices. - /// - default_value: Scalar value to set for indices not specified in - /// `sparse_indices`. - /// - /// - Attr validate_indices: If true, indices are checked to make sure they are sorted in - /// lexicographic order and that there are no repeats. - /// - /// - Output dense: Dense output tensor of shape `output_shape`. 
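In the 1-D vector case the fill rule above reduces to "start from `default_value`, then overwrite the listed indices". A minimal sketch under that assumption:

```swift
// SparseToDense, 1-D case: every position starts at `defaultValue`,
// and each listed index is overwritten with its sparse value.
func sparseToDense(
  sparseIndices: [Int], outputSize: Int, sparseValues: [Float], defaultValue: Float
) -> [Float] {
  var dense = [Float](repeating: defaultValue, count: outputSize)
  for (i, idx) in sparseIndices.enumerated() {
    dense[idx] = sparseValues[i]  // a scalar `sparseValues` would broadcast instead
  }
  return dense
}

print(sparseToDense(sparseIndices: [0, 3], outputSize: 5,
                    sparseValues: [9, 8], defaultValue: -1))
// [9.0, -1.0, -1.0, 8.0, -1.0]
```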
- @inlinable @inline(__always) - public static func sparseToDense< - T: TensorFlowScalar, - Tindices: TensorFlowIndex - >( - sparseIndices: Tensor, - outputShape: Tensor, - sparseValues: Tensor, - defaultValue: Tensor, - validateIndices: Bool = true - ) -> Tensor { - switch commonBackend( - commonBackend( - commonBackend(sparseIndices.handle.backend, outputShape.handle.backend), - sparseValues.handle.backend), defaultValue.handle.backend) - { - case .XLA: - let output_device = defaultValue.device - let sparseIndices = Tensor(copying: sparseIndices, to: .defaultTFEager) - let outputShape = Tensor(copying: outputShape, to: .defaultTFEager) - let sparseValues = Tensor(copying: sparseValues, to: .defaultTFEager) - let defaultValue = Tensor(copying: defaultValue, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.sparseToDense( - sparseIndices: sparseIndices, outputShape: outputShape, sparseValues: sparseValues, - defaultValue: defaultValue, validateIndices: validateIndices), to: output_device) - case .TF_EAGER: - return _RawTFEager.sparseToDense( - sparseIndices: sparseIndices, outputShape: outputShape, sparseValues: sparseValues, - defaultValue: defaultValue, validateIndices: validateIndices) - } - - } - - /// Applies set operation along last dimension of 2 `SparseTensor` inputs. - /// - /// See SetOperationOp::SetOperationFromContext for values of `set_operation`. - /// - /// If `validate_indices` is `True`, `SparseToSparseSetOperation` validates the - /// order and range of `set1` and `set2` indices. - /// - /// Input `set1` is a `SparseTensor` represented by `set1_indices`, `set1_values`, - /// and `set1_shape`. For `set1` ranked `n`, 1st `n-1` dimensions must be the same - /// as `set2`. Dimension `n` contains values in a set, duplicates are allowed but - /// ignored. - /// - /// Input `set2` is a `SparseTensor` represented by `set2_indices`, `set2_values`, - /// and `set2_shape`. For `set2` ranked `n`, 1st `n-1` dimensions must be the same - /// as `set1`. Dimension `n` contains values in a set, duplicates are allowed but - /// ignored. - /// - /// If `validate_indices` is `True`, this op validates the order and range of `set1` - /// and `set2` indices. - /// - /// Output `result` is a `SparseTensor` represented by `result_indices`, - /// `result_values`, and `result_shape`. For `set1` and `set2` ranked `n`, this - /// has rank `n` and the same 1st `n-1` dimensions as `set1` and `set2`. The `nth` - /// dimension contains the result of `set_operation` applied to the corresponding - /// `[0...n-1]` dimension of `set`. - /// - /// - Parameters: - /// - set1_indices: 2D `Tensor`, indices of a `SparseTensor`. Must be in row-major - /// order. - /// - set1_values: 1D `Tensor`, values of a `SparseTensor`. Must be in row-major - /// order. - /// - set1_shape: 1D `Tensor`, shape of a `SparseTensor`. `set1_shape[0...n-1]` must - /// be the same as `set2_shape[0...n-1]`, `set1_shape[n]` is the - /// max set size across `0...n-1` dimensions. - /// - set2_indices: 2D `Tensor`, indices of a `SparseTensor`. Must be in row-major - /// order. - /// - set2_values: 1D `Tensor`, values of a `SparseTensor`. Must be in row-major - /// order. - /// - set2_shape: 1D `Tensor`, shape of a `SparseTensor`. `set2_shape[0...n-1]` must - /// be the same as `set1_shape[0...n-1]`, `set2_shape[n]` is the - /// max set size across `0...n-1` dimensions. - /// - /// - Outputs: - /// - result_indices: 2D indices of a `SparseTensor`. - /// - result_values: 1D values of a `SparseTensor`. 
- /// - result_shape: 1D `Tensor` shape of a `SparseTensor`. `result_shape[0...n-1]` is - /// the same as the 1st `n-1` dimensions of `set1` and `set2`, `result_shape[n]` - /// is the max result set size across all `0...n-1` dimensions. - @inlinable @inline(__always) - public static func sparseToSparseSetOperation( - set1Indices: Tensor, - set1Values: Tensor, - set1Shape: Tensor, - set2Indices: Tensor, - set2Values: Tensor, - set2Shape: Tensor, - setOperation: String, - validateIndices: Bool = true - ) -> (resultIndices: Tensor, resultValues: Tensor, resultShape: Tensor) { - _RawTFEager.sparseToSparseSetOperation( - set1Indices: set1Indices, set1Values: set1Values, set1Shape: set1Shape, - set2Indices: set2Indices, set2Values: set2Values, set2Shape: set2Shape, - setOperation: setOperation, validateIndices: validateIndices) - } - - /// Applies set operation along last dimension of 2 `SparseTensor` inputs. - /// - /// See SetOperationOp::SetOperationFromContext for values of `set_operation`. - /// - /// If `validate_indices` is `True`, `SparseToSparseSetOperation` validates the - /// order and range of `set1` and `set2` indices. - /// - /// Input `set1` is a `SparseTensor` represented by `set1_indices`, `set1_values`, - /// and `set1_shape`. For `set1` ranked `n`, 1st `n-1` dimensions must be the same - /// as `set2`. Dimension `n` contains values in a set, duplicates are allowed but - /// ignored. - /// - /// Input `set2` is a `SparseTensor` represented by `set2_indices`, `set2_values`, - /// and `set2_shape`. For `set2` ranked `n`, 1st `n-1` dimensions must be the same - /// as `set1`. Dimension `n` contains values in a set, duplicates are allowed but - /// ignored. - /// - /// If `validate_indices` is `True`, this op validates the order and range of `set1` - /// and `set2` indices. - /// - /// Output `result` is a `SparseTensor` represented by `result_indices`, - /// `result_values`, and `result_shape`. For `set1` and `set2` ranked `n`, this - /// has rank `n` and the same 1st `n-1` dimensions as `set1` and `set2`. The `nth` - /// dimension contains the result of `set_operation` applied to the corresponding - /// `[0...n-1]` dimension of `set`. - /// - /// - Parameters: - /// - set1_indices: 2D `Tensor`, indices of a `SparseTensor`. Must be in row-major - /// order. - /// - set1_values: 1D `Tensor`, values of a `SparseTensor`. Must be in row-major - /// order. - /// - set1_shape: 1D `Tensor`, shape of a `SparseTensor`. `set1_shape[0...n-1]` must - /// be the same as `set2_shape[0...n-1]`, `set1_shape[n]` is the - /// max set size across `0...n-1` dimensions. - /// - set2_indices: 2D `Tensor`, indices of a `SparseTensor`. Must be in row-major - /// order. - /// - set2_values: 1D `Tensor`, values of a `SparseTensor`. Must be in row-major - /// order. - /// - set2_shape: 1D `Tensor`, shape of a `SparseTensor`. `set2_shape[0...n-1]` must - /// be the same as `set1_shape[0...n-1]`, `set2_shape[n]` is the - /// max set size across `0...n-1` dimensions. - /// - /// - Outputs: - /// - result_indices: 2D indices of a `SparseTensor`. - /// - result_values: 1D values of a `SparseTensor`. - /// - result_shape: 1D `Tensor` shape of a `SparseTensor`. `result_shape[0...n-1]` is - /// the same as the 1st `n-1` dimensions of `set1` and `set2`, `result_shape[n]` - /// is the max result set size across all `0...n-1` dimensions. 
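The per-group step both overloads perform is an ordinary set operation on the values along the last dimension, duplicates ignored. A plain-Swift sketch; the `set_operation` strings are the ones TensorFlow's set ops accept to my knowledge, but treat them as assumptions here:

```swift
// One group's worth of work: apply `set_operation` to two collections
// of values, ignoring duplicates (the sort just makes output stable).
func setOperation(_ op: String, _ a: [Int], _ b: [Int]) -> [Int] {
  let s1 = Set(a), s2 = Set(b)
  switch op {
  case "intersection": return s1.intersection(s2).sorted()
  case "union": return s1.union(s2).sorted()
  case "a-b": return s1.subtracting(s2).sorted()
  case "b-a": return s2.subtracting(s1).sorted()
  default: fatalError("unknown set_operation \(op)")
  }
}

print(setOperation("intersection", [1, 2, 2, 3], [2, 3, 4]))  // [2, 3]
```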
- @inlinable @inline(__always) - public static func sparseToSparseSetOperation( - set1Indices: Tensor, - set1Values: StringTensor, - set1Shape: Tensor, - set2Indices: Tensor, - set2Values: StringTensor, - set2Shape: Tensor, - setOperation: String, - validateIndices: Bool = true - ) -> (resultIndices: Tensor, resultValues: StringTensor, resultShape: Tensor) { - _RawTFEager.sparseToSparseSetOperation( - set1Indices: set1Indices, set1Values: set1Values, set1Shape: set1Shape, - set2Indices: set2Indices, set2Values: set2Values, set2Shape: set2Shape, - setOperation: setOperation, validateIndices: validateIndices) - } - - /// Splits a tensor into `num_split` tensors along one dimension. - /// - /// - Parameters: - /// - split_dim: 0-D. The dimension along which to split. Must be in the range - /// `[-rank(value), rank(value))`. - /// - value: The tensor to split. - /// - /// - Attr num_split: The number of ways to split. Must evenly divide - /// `value.shape[split_dim]`. - /// - /// - Output output: They are identically shaped tensors, whose shape matches that of `value` - /// except along `axis`, where their sizes are - /// `values.shape[split_dim] / num_split`. - @inlinable @inline(__always) - public static func split( - splitDim: Tensor, - value: Tensor, - numSplit: Int64 - ) -> [Tensor] { - switch commonBackend(splitDim.handle.backend, value.handle.backend) { - case .XLA: - return _RawXLA.split(splitDim: splitDim, value: value, numSplit: numSplit) - case .TF_EAGER: - return _RawTFEager.split(splitDim: splitDim, value: value, numSplit: numSplit) - } - - } - - /// Splits a tensor into `num_split` tensors along one dimension. - /// - /// - Parameters: - /// - value: The tensor to split. - /// - size_splits: list containing the sizes of each output tensor along the split - /// dimension. Must sum to the dimension of value along split_dim. - /// Can contain one -1 indicating that dimension is to be inferred. - /// - split_dim: 0-D. The dimension along which to split. Must be in the range - /// `[-rank(value), rank(value))`. - /// - /// - Output output: Tensors whose shape matches that of `value` - /// except along `axis`, where their sizes are - /// `size_splits[i]`. - @inlinable @inline(__always) - public static func splitV< - T: TensorFlowScalar, - Tlen: TensorFlowIndex - >( - value: Tensor, - sizeSplits: Tensor, - splitDim: Tensor, - numSplit: Int64 - ) -> [Tensor] { - switch commonBackend( - commonBackend(value.handle.backend, sizeSplits.handle.backend), splitDim.handle.backend) - { - case .XLA: - return _RawXLA.splitV( - value: value, sizeSplits: sizeSplits, splitDim: splitDim, numSplit: numSplit) - case .TF_EAGER: - return _RawTFEager.splitV( - value: value, sizeSplits: sizeSplits, splitDim: splitDim, numSplit: numSplit) - } - - } - - /// Creates a dataset that executes a SQL query and emits rows of the result set. - /// - /// - Parameters: - /// - driver_name: The database type. Currently, the only supported type is 'sqlite'. - /// - data_source_name: A connection string to connect to the database. - /// - query: A SQL query to execute. - @inlinable @inline(__always) - public static func sqlDataset( - driverName: StringTensor, - dataSourceName: StringTensor, - query: StringTensor, - outputTypes: [TensorDataType], - outputShapes: [TensorShape?] - ) -> VariantHandle { - _RawTFEager.sqlDataset( - driverName: driverName, dataSourceName: dataSourceName, query: query, - outputTypes: outputTypes, outputShapes: outputShapes) - } - - /// Computes square root of x element-wise. 
- /// - /// I.e., \\(y = \sqrt{x} = x^{1/2}\\). - @inlinable @inline(__always) - public static func sqrt( - _ x: Tensor - ) -> Tensor { - switch x.handle.backend { - case .XLA: - return _RawXLA.sqrt(x) - case .TF_EAGER: - return _RawTFEager.sqrt(x) - } - - } - - /// Computes the gradient for the sqrt of `x` wrt its input. - /// - /// Specifically, `grad = dy * 0.5 / y`, where `y = sqrt(x)`, and `dy` - /// is the corresponding input gradient. - @inlinable @inline(__always) - public static func sqrtGrad( - _ y: Tensor, - dy: Tensor - ) -> Tensor { - switch commonBackend(y.handle.backend, dy.handle.backend) { - case .XLA: - let output_device = dy.device - let y = Tensor(copying: y, to: .defaultTFEager) - let dy = Tensor(copying: dy, to: .defaultTFEager) - return Tensor(copying: _RawTFEager.sqrtGrad(y, dy: dy), to: output_device) - case .TF_EAGER: - return _RawTFEager.sqrtGrad(y, dy: dy) - } - - } - - /// Computes square of x element-wise. - /// - /// I.e., \\(y = x * x = x^2\\). - @inlinable @inline(__always) - public static func square( - _ x: Tensor - ) -> Tensor { - switch x.handle.backend { - case .XLA: - return _RawXLA.square(x) - case .TF_EAGER: - return _RawTFEager.square(x) - } - - } - - /// Returns (x - y)(x - y) element-wise. - /// - /// *NOTE*: `SquaredDifference` supports broadcasting. More about broadcasting - /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) - @inlinable @inline(__always) - public static func squaredDifference( - _ x: Tensor, - _ y: Tensor - ) -> Tensor { - switch commonBackend(x.handle.backend, y.handle.backend) { - case .XLA: - return _RawXLA.squaredDifference(x, y) - case .TF_EAGER: - return _RawTFEager.squaredDifference(x, y) - } - - } - - /// Removes dimensions of size 1 from the shape of a tensor. - /// - /// Given a tensor `input`, this operation returns a tensor of the same type with - /// all dimensions of size 1 removed. If you don't want to remove all size 1 - /// dimensions, you can remove specific size 1 dimensions by specifying - /// `axis`. - /// - /// For example: - /// - /// ``` - /// # 't' is a tensor of shape [1, 2, 1, 3, 1, 1] - /// shape(squeeze(t)) ==> [2, 3] - /// ``` - /// - /// Or, to remove specific size 1 dimensions: - /// - /// ``` - /// # 't' is a tensor of shape [1, 2, 1, 3, 1, 1] - /// shape(squeeze(t, [2, 4])) ==> [1, 2, 3, 1] - /// ``` - /// - /// - Parameter input: The `input` to squeeze. - /// - /// - Attr squeeze_dims: If specified, only squeezes the dimensions listed. The dimension - /// index starts at 0. It is an error to squeeze a dimension that is not 1. Must - /// be in the range `[-rank(input), rank(input))`. - /// - /// - Output output: Contains the same data as `input`, but has one or more dimensions of - /// size 1 removed. - @inlinable @inline(__always) - public static func squeeze( - _ input: Tensor, - squeezeDims: [Int32] - ) -> Tensor { - switch input.handle.backend { - case .XLA: - return _RawXLA.squeeze(input, squeezeDims: squeezeDims) - case .TF_EAGER: - return _RawTFEager.squeeze(input, squeezeDims: squeezeDims) - } - - } - - /// Delete the stack from its resource container. - /// - /// - Parameter handle: The handle to a stack. - @inlinable @inline(__always) - public static func stackCloseV2( - handle: ResourceHandle - ) { - _RawTFEager.stackCloseV2(handle: handle) - } - - /// Pop the element at the top of the stack. - /// - /// - Parameter handle: The handle to a stack. - /// - /// - Attr elem_type: The type of the elem that is popped. 
- /// - /// - Output elem: The tensor that is popped from the top of the stack. - @inlinable @inline(__always) - public static func stackPopV2( - handle: ResourceHandle - ) -> Tensor { - _RawTFEager.stackPopV2(handle: handle) - } - - /// Push an element onto the stack. - /// - /// - Parameters: - /// - handle: The handle to a stack. - /// - elem: The tensor to be pushed onto the stack. - /// - /// - Attr swap_memory: Swap `elem` to CPU. Default to false. - /// - /// - Output output: The same tensor as the input 'elem'. - @inlinable @inline(__always) - public static func stackPushV2( - handle: ResourceHandle, - elem: Tensor, - swapMemory: Bool = false - ) -> Tensor { - switch elem.handle.backend { - case .XLA: - let output_device = elem.device - let elem = Tensor(copying: elem, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.stackPushV2(handle: handle, elem: elem, swapMemory: swapMemory), - to: output_device) - case .TF_EAGER: - return _RawTFEager.stackPushV2(handle: handle, elem: elem, swapMemory: swapMemory) - } - - } - - /// A stack that produces elements in first-in last-out order. - /// - /// - Parameter max_size: The maximum size of the stack if non-negative. If negative, the stack - /// size is unlimited. - /// - /// - Attrs: - /// - elem_type: The type of the elements on the stack. - /// - stack_name: Overrides the name used for the temporary stack resource. Default - /// value is the name of the 'Stack' op (which is guaranteed unique). - /// - /// - Output handle: The handle to the stack. - @inlinable @inline(__always) - public static func stackV2( - maxSize: Tensor, - elemType: TensorDataType, - stackName: String - ) -> ResourceHandle { - _RawTFEager.stackV2(maxSize: maxSize, elemType: elemType, stackName: stackName) - } - - /// Stage values similar to a lightweight Enqueue. - /// - /// The basic functionality of this Op is similar to a queue with many - /// fewer capabilities and options. This Op is optimized for performance. - /// - /// - Parameter values: a list of tensors - /// dtypes A list of data types that inserted values should adhere to. - /// - /// - Attrs: - /// - capacity: Maximum number of elements in the Staging Area. If > 0, inserts - /// on the container will block when the capacity is reached. - /// - memory_limit: The maximum number of bytes allowed for Tensors in the Staging Area. - /// If > 0, inserts will block until sufficient space is available. - /// - container: If non-empty, this queue is placed in the given container. Otherwise, - /// a default container is used. - /// - shared_name: It is necessary to match this name to the matching Unstage Op. - @inlinable @inline(__always) - public static func stage( - _ values: Dtypes, - capacity: Int64 = 0, - memoryLimit: Int64 = 0, - container: String, - sharedName: String - ) { - _RawTFEager.stage( - values, capacity: capacity, memoryLimit: memoryLimit, container: container, - sharedName: sharedName) - } - - /// Op removes all elements in the underlying container. - @inlinable @inline(__always) - public static func stageClear( - capacity: Int64 = 0, - memoryLimit: Int64 = 0, - dtypes: [TensorDataType], - container: String, - sharedName: String - ) { - _RawTFEager.stageClear( - capacity: capacity, memoryLimit: memoryLimit, dtypes: dtypes, container: container, - sharedName: sharedName) - } - - /// Op peeks at the values at the specified index. If the - /// - /// underlying container does not contain sufficient elements - /// this op will block until it does. This Op is optimized for - /// performance. 
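The Staging Area ops above (`stage`, `stagePeek`, `stageClear`, `stageSize`) amount to a shared buffer with non-destructive indexed reads. A toy model in plain Swift; unlike the real ops it never blocks on a missing element and ignores `capacity` and `memory_limit`:

```swift
// Toy Staging Area: Stage appends, StagePeek reads without removing,
// StageSize and StageClear are what they sound like.
struct StagingArea<Element> {
  var buffer: [Element] = []
  mutating func stage(_ values: Element) { buffer.append(values) }
  func peek(index: Int) -> Element { buffer[index] }  // real op blocks until present
  var size: Int { buffer.count }
  mutating func clear() { buffer.removeAll() }
}

var area = StagingArea<[Float]>()
area.stage([1, 2])
area.stage([3, 4])
print(area.peek(index: 1), area.size)  // [3.0, 4.0] 2
```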
- @inlinable @inline(__always) - public static func stagePeek( - index: Tensor, - capacity: Int64 = 0, - memoryLimit: Int64 = 0, - container: String, - sharedName: String - ) -> Dtypes { - _RawTFEager.stagePeek( - index: index, capacity: capacity, memoryLimit: memoryLimit, container: container, - sharedName: sharedName) - } - - /// Op returns the number of elements in the underlying container. - @inlinable @inline(__always) - public static func stageSize( - capacity: Int64 = 0, - memoryLimit: Int64 = 0, - dtypes: [TensorDataType], - container: String, - sharedName: String - ) -> Tensor { - _RawTFEager.stageSize( - capacity: capacity, memoryLimit: memoryLimit, dtypes: dtypes, container: container, - sharedName: sharedName) - } - - /// returns `f(inputs)`, where `f`'s body is placed and partitioned. - /// - /// - Parameter args: A list of input tensors. - /// - /// - Attrs: - /// - Tin: A list of input types. - /// - Tout: A list of output types. - /// - f: A function that takes 'args', a list of tensors, and returns 'output', - /// another list of tensors. Input and output types are specified by 'Tin' - /// and 'Tout'. The function body of f will be placed and partitioned across - /// devices, setting this op apart from the regular Call op. This op is - /// stateful. - /// - /// - Output output: A list of return values. - @inlinable @inline(__always) - public static func statefulPartitionedCall< - Tin: TensorArrayProtocol, - Tout: TensorGroup, - FIn: TensorGroup, - FOut: TensorGroup - >( - args: Tin, - f: (FIn) -> FOut, - config: String, - configProto: String, - executorType: String - ) -> Tout { - _RawTFEager.statefulPartitionedCall( - args: args, f: f, config: config, configProto: configProto, executorType: executorType) - } - - @inlinable @inline(__always) - public static func statefulRandomBinomial< - S: TensorFlowIndex, - T: TensorFlowNumeric, - Dtype: TensorFlowNumeric - >( - resource: ResourceHandle, - algorithm: Tensor, - shape: Tensor, - counts: Tensor, - probs: Tensor - ) -> Tensor { - switch commonBackend( - commonBackend( - commonBackend(algorithm.handle.backend, shape.handle.backend), counts.handle.backend), - probs.handle.backend) - { - case .XLA: - let output_device = probs.device - let algorithm = Tensor(copying: algorithm, to: .defaultTFEager) - let shape = Tensor(copying: shape, to: .defaultTFEager) - let counts = Tensor(copying: counts, to: .defaultTFEager) - let probs = Tensor(copying: probs, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.statefulRandomBinomial( - resource: resource, algorithm: algorithm, shape: shape, counts: counts, probs: probs), - to: output_device) - case .TF_EAGER: - return _RawTFEager.statefulRandomBinomial( - resource: resource, algorithm: algorithm, shape: shape, counts: counts, probs: probs) - } - - } - - /// Outputs random values from a normal distribution. This op is deprecated in favor of op 'StatefulStandardNormalV2' - /// - /// The generated values will have mean 0 and standard deviation 1. - /// - /// - Parameters: - /// - resource: The handle of the resource variable that stores the state of the RNG. - /// - shape: The shape of the output tensor. - /// - /// - Attr dtype: The type of the output. - /// - /// - Output output: A tensor of the specified shape filled with random normal values. 
- @inlinable @inline(__always) - public static func statefulStandardNormal< - Dtype: TensorFlowScalar, - ShapeDtype: TensorFlowScalar - >( - resource: ResourceHandle, - shape: Tensor - ) -> Tensor { - switch shape.handle.backend { - case .XLA: - let output_device = shape.device - let shape = Tensor(copying: shape, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.statefulStandardNormal(resource: resource, shape: shape), - to: output_device) - case .TF_EAGER: - return _RawTFEager.statefulStandardNormal(resource: resource, shape: shape) - } - - } - - /// Outputs random values from a normal distribution. - /// - /// The generated values will have mean 0 and standard deviation 1. - /// - /// - Parameters: - /// - resource: The handle of the resource variable that stores the state of the RNG. - /// - algorithm: The RNG algorithm. - /// - shape: The shape of the output tensor. - /// - /// - Attr dtype: The type of the output. - /// - /// - Output output: A tensor of the specified shape filled with random normal values. - @inlinable @inline(__always) - public static func statefulStandardNormalV2< - Dtype: TensorFlowScalar, - ShapeDtype: TensorFlowScalar - >( - resource: ResourceHandle, - algorithm: Tensor, - shape: Tensor - ) -> Tensor { - switch commonBackend(algorithm.handle.backend, shape.handle.backend) { - case .XLA: - let output_device = shape.device - let algorithm = Tensor(copying: algorithm, to: .defaultTFEager) - let shape = Tensor(copying: shape, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.statefulStandardNormalV2( - resource: resource, algorithm: algorithm, shape: shape), to: output_device) - case .TF_EAGER: - return _RawTFEager.statefulStandardNormalV2( - resource: resource, algorithm: algorithm, shape: shape) - } - - } - - /// Outputs random values from a truncated normal distribution. - /// - /// The generated values follow a normal distribution with mean 0 and standard - /// deviation 1, except that values whose magnitude is more than 2 standard - /// deviations from the mean are dropped and re-picked. - /// - /// - Parameters: - /// - resource: The handle of the resource variable that stores the state of the RNG. - /// - algorithm: The RNG algorithm. - /// - shape: The shape of the output tensor. - /// - /// - Attr dtype: The type of the output. - /// - /// - Output output: Random values with specified shape. - @inlinable @inline(__always) - public static func statefulTruncatedNormal< - Dtype: TensorFlowScalar, - ShapeDtype: TensorFlowScalar - >( - resource: ResourceHandle, - algorithm: Tensor, - shape: Tensor - ) -> Tensor { - switch commonBackend(algorithm.handle.backend, shape.handle.backend) { - case .XLA: - let output_device = shape.device - let algorithm = Tensor(copying: algorithm, to: .defaultTFEager) - let shape = Tensor(copying: shape, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.statefulTruncatedNormal( - resource: resource, algorithm: algorithm, shape: shape), to: output_device) - case .TF_EAGER: - return _RawTFEager.statefulTruncatedNormal( - resource: resource, algorithm: algorithm, shape: shape) - } - - } - - /// Outputs random values from a uniform distribution. - /// - /// The generated values follow a uniform distribution in the range `[0, 1)`. The - /// lower bound 0 is included in the range, while the upper bound 1 is excluded. - /// - /// - Parameters: - /// - resource: The handle of the resource variable that stores the state of the RNG. - /// - algorithm: The RNG algorithm. 
- /// - shape: The shape of the output tensor. - /// - /// - Attr dtype: The type of the output. - /// - /// - Output output: Random values with specified shape. - @inlinable @inline(__always) - public static func statefulUniform< - Dtype: TensorFlowScalar, - ShapeDtype: TensorFlowScalar - >( - resource: ResourceHandle, - algorithm: Tensor, - shape: Tensor - ) -> Tensor { - switch commonBackend(algorithm.handle.backend, shape.handle.backend) { - case .XLA: - let output_device = shape.device - let algorithm = Tensor(copying: algorithm, to: .defaultTFEager) - let shape = Tensor(copying: shape, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.statefulUniform( - resource: resource, algorithm: algorithm, shape: shape), to: output_device) - case .TF_EAGER: - return _RawTFEager.statefulUniform(resource: resource, algorithm: algorithm, shape: shape) - } - - } - - /// Outputs random integers from a uniform distribution. - /// - /// The generated values are uniform integers covering the whole range of `dtype`. - /// - /// - Parameters: - /// - resource: The handle of the resource variable that stores the state of the RNG. - /// - algorithm: The RNG algorithm. - /// - shape: The shape of the output tensor. - /// - /// - Attr dtype: The type of the output. - /// - /// - Output output: Random values with specified shape. - @inlinable @inline(__always) - public static func statefulUniformFullInt< - Dtype: TensorFlowScalar, - ShapeDtype: TensorFlowScalar - >( - resource: ResourceHandle, - algorithm: Tensor, - shape: Tensor - ) -> Tensor { - switch commonBackend(algorithm.handle.backend, shape.handle.backend) { - case .XLA: - let output_device = shape.device - let algorithm = Tensor(copying: algorithm, to: .defaultTFEager) - let shape = Tensor(copying: shape, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.statefulUniformFullInt( - resource: resource, algorithm: algorithm, shape: shape), to: output_device) - case .TF_EAGER: - return _RawTFEager.statefulUniformFullInt( - resource: resource, algorithm: algorithm, shape: shape) - } - - } - - /// Outputs random integers from a uniform distribution. - /// - /// The generated values are uniform integers in the range `[minval, maxval)`. - /// The lower bound `minval` is included in the range, while the upper bound - /// `maxval` is excluded. - /// - /// The random integers are slightly biased unless `maxval - minval` is an exact - /// power of two. The bias is small for values of `maxval - minval` significantly - /// smaller than the range of the output (either `2^32` or `2^64`). - /// - /// - Parameters: - /// - resource: The handle of the resource variable that stores the state of the RNG. - /// - algorithm: The RNG algorithm. - /// - shape: The shape of the output tensor. - /// - minval: Minimum value (inclusive, scalar). - /// - maxval: Maximum value (exclusive, scalar). - /// - /// - Attr dtype: The type of the output. - /// - /// - Output output: Random values with specified shape. 
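The bias caveat in the `statefulUniformInt` doc is just modular arithmetic: mapping a fixed-width random word into a range that does not divide 2^w over-represents the low residues. A small demonstration, with an 8-bit word standing in for the generator's output:

```swift
// Count how often each residue appears when a uniform 8-bit word is
// reduced modulo a range of 6: the low residues win by one.
let wordValues = 256           // pretend the generator emits 8-bit words
let range = 6                  // maxval - minval, not a power of two
var counts = [Int](repeating: 0, count: range)
for word in 0..<wordValues { counts[word % range] += 1 }
print(counts)  // [43, 43, 43, 43, 42, 42] -- residues 0...3 appear once more
```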
- @inlinable @inline(__always) - public static func statefulUniformInt< - Dtype: TensorFlowScalar, - ShapeDtype: TensorFlowScalar - >( - resource: ResourceHandle, - algorithm: Tensor, - shape: Tensor, - minval: Tensor, - maxval: Tensor - ) -> Tensor { - switch commonBackend( - commonBackend( - commonBackend(algorithm.handle.backend, shape.handle.backend), minval.handle.backend), - maxval.handle.backend) - { - case .XLA: - let output_device = maxval.device - let algorithm = Tensor(copying: algorithm, to: .defaultTFEager) - let shape = Tensor(copying: shape, to: .defaultTFEager) - let minval = Tensor(copying: minval, to: .defaultTFEager) - let maxval = Tensor(copying: maxval, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.statefulUniformInt( - resource: resource, algorithm: algorithm, shape: shape, minval: minval, maxval: maxval), - to: output_device) - case .TF_EAGER: - return _RawTFEager.statefulUniformInt( - resource: resource, algorithm: algorithm, shape: shape, minval: minval, maxval: maxval) - } - - } - - /// output = cond ? then_branch(input) : else_branch(input) - /// - /// - Parameters: - /// - cond: A Tensor. If the tensor is a scalar of non-boolean type, the - /// scalar is converted to a boolean according to the - /// following rule: if the scalar is a numerical value, non-zero means - /// `True` and zero means False; if the scalar is a string, non-empty - /// means `True` and empty means `False`. If the tensor is not a scalar, - /// being empty means False and being non-empty means True. - /// - /// This should only be used when the if then/else body functions do not - /// have stateful ops. - /// - input: A list of input tensors. - /// - /// - Attrs: - /// - Tin: A list of input types. - /// - Tout: A list of output types. - /// - then_branch: A function that takes 'inputs' and returns a list of tensors, whose - /// types are the same as what else_branch returns. - /// - else_branch: A function that takes 'inputs' and returns a list of tensors, whose - /// types are the same as what then_branch returns. - /// - /// - Output output: A list of return values. - @inlinable @inline(__always) - public static func statelessIf< - Tcond: TensorFlowScalar, - Tin: TensorArrayProtocol, - Tout: TensorGroup, - ThenbranchIn: TensorGroup, - ThenbranchOut: TensorGroup, - ElsebranchIn: TensorGroup, - ElsebranchOut: TensorGroup - >( - cond: Tensor, - _ input: Tin, - thenBranch: (ThenbranchIn) -> ThenbranchOut, - elseBranch: (ElsebranchIn) -> ElsebranchOut, - outputShapes: [TensorShape?] - ) -> Tout { - _RawTFEager.statelessIf( - cond: cond, input, thenBranch: thenBranch, elseBranch: elseBranch, - outputShapes: outputShapes) - } - - /// Draws samples from a multinomial distribution. - /// - /// - Parameters: - /// - logits: 2-D Tensor with shape `[batch_size, num_classes]`. Each slice `[i, :]` - /// represents the unnormalized log probabilities for all classes. - /// - num_samples: 0-D. Number of independent samples to draw for each row slice. - /// - seed: 2 seeds (shape [2]). - /// - /// - Output output: 2-D Tensor with shape `[batch_size, num_samples]`. Each slice `[i, :]` - /// contains the drawn class labels with range `[0, num_classes)`. 
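As a semantics sketch for one multinomial draw: exponentiate the unnormalized log-probabilities, normalize, and invert the CDF. The real op derives its uniform variates deterministically from `seed`; here the variate is a parameter so the sketch stays self-contained:

```swift
import Foundation

// One draw from a row of unnormalized log-probabilities via inverse-CDF
// sampling. `uniform` stands in for the op's seed-derived randomness.
func multinomialDraw(logits: [Double], uniform: Double) -> Int {
  let weights = logits.map { exp($0) }      // unnormalized probabilities
  let total = weights.reduce(0, +)
  var cumulative = 0.0
  for (label, w) in weights.enumerated() {
    cumulative += w / total
    if uniform < cumulative { return label }
  }
  return logits.count - 1                   // guard against rounding
}

print(multinomialDraw(logits: [0.0, 0.0, 10.0], uniform: 0.5))  // almost surely 2
```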
- @inlinable @inline(__always) - public static func statelessMultinomial< - T: TensorFlowNumeric, - Tseed: TensorFlowIndex, - OutputDtype: TensorFlowIndex - >( - logits: Tensor, - numSamples: Tensor, - seed: Tensor - ) -> Tensor { - switch commonBackend( - commonBackend(logits.handle.backend, numSamples.handle.backend), seed.handle.backend) - { - case .XLA: - return _RawXLA.statelessMultinomial(logits: logits, numSamples: numSamples, seed: seed) - case .TF_EAGER: - return _RawTFEager.statelessMultinomial(logits: logits, numSamples: numSamples, seed: seed) - } - - } - - /// Outputs deterministic pseudorandom values from a normal distribution. - /// - /// The generated values will have mean 0 and standard deviation 1. - /// - /// The outputs are a deterministic function of `shape` and `seed`. - /// - /// - Parameters: - /// - shape: The shape of the output tensor. - /// - seed: 2 seeds (shape [2]). - /// - /// - Attr dtype: The type of the output. - /// - /// - Output output: Random values with specified shape. - @inlinable @inline(__always) - public static func statelessRandomNormal< - Dtype: FloatingPoint & TensorFlowScalar, - T: TensorFlowIndex, - Tseed: TensorFlowIndex - >( - shape: Tensor, - seed: Tensor - ) -> Tensor { - switch commonBackend(shape.handle.backend, seed.handle.backend) { - case .XLA: - return _RawXLA.statelessRandomNormal(shape: shape, seed: seed) - case .TF_EAGER: - return _RawTFEager.statelessRandomNormal(shape: shape, seed: seed) - } - - } - - /// Outputs deterministic pseudorandom random values from a uniform distribution. - /// - /// The generated values follow a uniform distribution in the range `[0, 1)`. The - /// lower bound 0 is included in the range, while the upper bound 1 is excluded. - /// - /// The outputs are a deterministic function of `shape` and `seed`. - /// - /// - Parameters: - /// - shape: The shape of the output tensor. - /// - seed: 2 seeds (shape [2]). - /// - /// - Attr dtype: The type of the output. - /// - /// - Output output: Random values with specified shape. - @inlinable @inline(__always) - public static func statelessRandomUniform< - Dtype: FloatingPoint & TensorFlowScalar, - T: TensorFlowIndex, - Tseed: TensorFlowIndex - >( - shape: Tensor, - seed: Tensor - ) -> Tensor { - switch commonBackend(shape.handle.backend, seed.handle.backend) { - case .XLA: - return _RawXLA.statelessRandomUniform(shape: shape, seed: seed) - case .TF_EAGER: - return _RawTFEager.statelessRandomUniform(shape: shape, seed: seed) - } - - } - - /// Outputs deterministic pseudorandom random integers from a uniform distribution. - /// - /// The generated values follow a uniform distribution in the range `[minval, maxval)`. - /// - /// The outputs are a deterministic function of `shape`, `seed`, `minval`, and `maxval`. - /// - /// - Parameters: - /// - shape: The shape of the output tensor. - /// - seed: 2 seeds (shape [2]). - /// - minval: Minimum value (inclusive, scalar). - /// - maxval: Maximum value (exclusive, scalar). - /// - /// - Attr dtype: The type of the output. - /// - /// - Output output: Random values with specified shape. 
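"Deterministic function of `shape` and `seed`" means equal inputs always reproduce equal outputs, with no hidden RNG state. A toy counter-based generator illustrating the contract; this is emphatically not the algorithm TensorFlow uses, and the seed mixing here is arbitrary:

```swift
// Toy stateless uniform sampler: value i is a pure hash of (seed, i),
// so the whole output is reproducible from shape and seed alone.
func toyStatelessUniform(count: Int, seed: (UInt64, UInt64)) -> [Double] {
  (0..<count).map { i in
    var z = seed.0 &+ seed.1 &* 0x9E37_79B9_7F4A_7C15 &+ UInt64(i)
    z = (z ^ (z >> 30)) &* 0xBF58_476D_1CE4_E5B9   // splitmix64-style mixing
    z = (z ^ (z >> 27)) &* 0x94D0_49BB_1331_11EB
    z ^= z >> 31
    return Double(z >> 11) * (1.0 / 9_007_199_254_740_992.0)  // top 53 bits -> [0, 1)
  }
}

let a = toyStatelessUniform(count: 3, seed: (1, 2))
let b = toyStatelessUniform(count: 3, seed: (1, 2))
print(a == b)  // true: same shape and seed always yield the same values
```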
- @inlinable @inline(__always) - public static func statelessRandomUniformInt< - Dtype: TensorFlowIndex, - T: TensorFlowIndex, - Tseed: TensorFlowIndex - >( - shape: Tensor, - seed: Tensor, - minval: Tensor, - maxval: Tensor - ) -> Tensor { - switch commonBackend( - commonBackend( - commonBackend(shape.handle.backend, seed.handle.backend), minval.handle.backend), - maxval.handle.backend) - { - case .XLA: - return _RawXLA.statelessRandomUniformInt( - shape: shape, seed: seed, minval: minval, maxval: maxval) - case .TF_EAGER: - return _RawTFEager.statelessRandomUniformInt( - shape: shape, seed: seed, minval: minval, maxval: maxval) - } - - } - - /// Outputs deterministic pseudorandom values from a truncated normal distribution. - /// - /// The generated values follow a normal distribution with mean 0 and standard - /// deviation 1, except that values whose magnitude is more than 2 standard - /// deviations from the mean are dropped and re-picked. - /// - /// The outputs are a deterministic function of `shape` and `seed`. - /// - /// - Parameters: - /// - shape: The shape of the output tensor. - /// - seed: 2 seeds (shape [2]). - /// - /// - Attr dtype: The type of the output. - /// - /// - Output output: Random values with specified shape. - @inlinable @inline(__always) - public static func statelessTruncatedNormal< - Dtype: FloatingPoint & TensorFlowScalar, - T: TensorFlowIndex, - Tseed: TensorFlowIndex - >( - shape: Tensor, - seed: Tensor - ) -> Tensor { - switch commonBackend(shape.handle.backend, seed.handle.backend) { - case .XLA: - return _RawXLA.statelessTruncatedNormal(shape: shape, seed: seed) - case .TF_EAGER: - return _RawTFEager.statelessTruncatedNormal(shape: shape, seed: seed) - } - - } - - /// output = input; While (Cond(output)) { output = Body(output) } - /// - /// - Parameter input: A list of input tensors whose types are T. - /// - /// - Attrs: - /// - T: dtype in use. - /// - cond: A function takes 'input' and returns a tensor. If the tensor is - /// a scalar of non-boolean, the scalar is converted to a boolean - /// according to the following rule: if the scalar is a numerical - /// value, non-zero means True and zero means False; if the scalar is - /// a string, non-empty means True and empty means False. If the - /// tensor is not a scalar, non-emptiness means True and False - /// otherwise. - /// - /// This should only be used when the while condition and body functions - /// do not have stateful ops. - /// - body: A function that takes a list of tensors and returns another - /// list of tensors. Both lists have the same types as specified - /// by T. - /// - /// - Output output: A list of output tensors whose types are T. 
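The `output = input; While (Cond(output)) { output = Body(output) }` contract above, written out in plain Swift:

```swift
// StatelessWhile semantics: thread a loop-carried value through `body`
// while `cond` holds, then return it.
func whileLoop<T>(_ input: T, cond: (T) -> Bool, body: (T) -> T) -> T {
  var output = input
  while cond(output) { output = body(output) }
  return output
}

print(whileLoop(0, cond: { $0 < 5 }, body: { $0 + 1 }))  // 5
```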
- @inlinable @inline(__always) - public static func statelessWhile< - T: TensorArrayProtocol, - CondIn: TensorGroup, - CondOut: TensorGroup, - BodyIn: TensorGroup, - BodyOut: TensorGroup - >( - _ input: T, - cond: (CondIn) -> CondOut, - body: (BodyIn) -> BodyOut, - outputShapes: [TensorShape?], - parallelIterations: Int64 = 10 - ) -> T { - _RawTFEager.statelessWhile( - input, cond: cond, body: body, outputShapes: outputShapes, - parallelIterations: parallelIterations) + commonBackend( + commonBackend( + commonBackend(input.handle.backend, initValue.handle.backend), + windowDimensions.handle.backend), windowStrides.handle.backend), + baseDilations.handle.backend), windowDilations.handle.backend), padding.handle.backend) + { + case .XLA: + let output_device = padding.device + let input = Tensor(copying: input, to: .defaultTFEager) + let initValue = Tensor(copying: initValue, to: .defaultTFEager) + let windowDimensions = Tensor(copying: windowDimensions, to: .defaultTFEager) + let windowStrides = Tensor(copying: windowStrides, to: .defaultTFEager) + let baseDilations = Tensor(copying: baseDilations, to: .defaultTFEager) + let windowDilations = Tensor(copying: windowDilations, to: .defaultTFEager) + let padding = Tensor(copying: padding, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.xlaReduceWindow( + input, initValue: initValue, windowDimensions: windowDimensions, + windowStrides: windowStrides, baseDilations: baseDilations, + windowDilations: windowDilations, padding: padding, computation: computation), + to: output_device) + case .TF_EAGER: + return _RawTFEager.xlaReduceWindow( + input, initValue: initValue, windowDimensions: windowDimensions, + windowStrides: windowStrides, baseDilations: baseDilations, + windowDilations: windowDilations, padding: padding, computation: computation) } - /// Check if the input matches the regex pattern. - /// - /// The input is a string tensor of any shape. The pattern is the - /// regular expression to be matched with every element of the input tensor. - /// The boolean values (True or False) of the output tensor indicate - /// if the input matches the regex pattern provided. - /// - /// The pattern follows the re2 syntax (https://github.com/google/re2/wiki/Syntax) - /// - /// - Parameter input: A string tensor of the text to be processed. - /// - /// - Attr pattern: The regular expression to match the input. - /// - /// - Output output: A bool tensor with the same shape as `input`. - @inlinable @inline(__always) - public static func staticRegexFullMatch( - _ input: StringTensor, - pattern: String - ) -> Tensor { - _RawTFEager.staticRegexFullMatch(input, pattern: pattern) - } - - /// Replaces the match of pattern in input with rewrite. - /// - /// It follows the re2 syntax (https://github.com/google/re2/wiki/Syntax) - /// - /// - Parameter input: The text to be processed. - /// - /// - Attrs: - /// - pattern: The regular expression to match the input. - /// - rewrite: The rewrite to be applied to the matched expression. - /// - replace_global: If True, the replacement is global, otherwise the replacement - /// is done only on the first match. - /// - /// - Output output: The text after applying pattern and rewrite. 
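The `replace_global` attribute is the only subtle part of `staticRegexReplace`. A sketch of the first-match-versus-all-matches distinction, using Foundation's `NSRegularExpression` (ICU syntax) as a stand-in for the op's RE2 engine:

```swift
import Foundation

// replace_global = true rewrites every match; false rewrites only the first.
let pattern = try! NSRegularExpression(pattern: "[0-9]+")
let input = "a1 b22 c333"
let all = NSRange(input.startIndex..., in: input)

// Global replacement, as with replaceGlobal: true.
print(pattern.stringByReplacingMatches(
  in: input, options: [], range: all, withTemplate: "#"))     // a# b# c#

// First match only, as with replaceGlobal: false.
if let first = pattern.firstMatch(in: input, options: [], range: all) {
  var s = input
  s.replaceSubrange(Range(first.range, in: s)!, with: "#")
  print(s)                                                    // a# b22 c333
}
```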
- @inlinable @inline(__always)
- public static func staticRegexReplace(
- _ input: StringTensor,
- pattern: String,
- rewrite: String,
- replaceGlobal: Bool = true
- ) -> StringTensor {
- _RawTFEager.staticRegexReplace(
- input, pattern: pattern, rewrite: rewrite, replaceGlobal: replaceGlobal)
- }
-
- /// Creates a statistics manager resource.
- @inlinable @inline(__always)
- public static func statsAggregatorHandle(
- container: String,
- sharedName: String
- ) -> ResourceHandle {
- _RawTFEager.statsAggregatorHandle(container: container, sharedName: sharedName)
- }
-
- @inlinable @inline(__always)
- public static func statsAggregatorHandleV2(
- container: String,
- sharedName: String
- ) -> ResourceHandle {
- _RawTFEager.statsAggregatorHandleV2(container: container, sharedName: sharedName)
- }
-
- /// Set a summary_writer_interface to record statistics using given stats_aggregator.
- @inlinable @inline(__always)
- public static func statsAggregatorSetSummaryWriter(
- statsAggregator: ResourceHandle,
- summary: ResourceHandle
- ) {
- _RawTFEager.statsAggregatorSetSummaryWriter(
- statsAggregator: statsAggregator, summary: summary)
- }
-
- /// Produces a summary of any statistics recorded by the given statistics manager.
- @inlinable @inline(__always)
- public static func statsAggregatorSummary(
- iterator: ResourceHandle
- ) -> StringTensor {
- _RawTFEager.statsAggregatorSummary(iterator: iterator)
- }
-
- /// Stops gradient computation.
- ///
- /// When executed in a graph, this op outputs its input tensor as-is.
- ///
- /// When building ops to compute gradients, this op prevents the contribution of
- /// its inputs from being taken into account. Normally, the gradient generator adds ops
- /// to a graph to compute the derivatives of a specified 'loss' by recursively
- /// finding out inputs that contributed to its computation. If you insert this op
- /// in the graph, its inputs are masked from the gradient generator. They are not
- /// taken into account for computing gradients.
- ///
- /// This is useful any time you want to compute a value with TensorFlow but need
- /// to pretend that the value was a constant. Some examples include:
- ///
- /// * The *EM* algorithm where the *M-step* should not involve backpropagation
- /// through the output of the *E-step*.
- /// * Contrastive divergence training of Boltzmann machines where, when
- /// differentiating the energy function, the training must not backpropagate
- /// through the graph that generated the samples from the model.
- /// * Adversarial training, where no backprop should happen through the adversarial
- /// example generation process.
- @inlinable @inline(__always)
- public static func stopGradient(
- _ input: Tensor
- ) -> Tensor {
- switch input.handle.backend {
- case .XLA:
- let output_device = input.device
- let input = Tensor(copying: input, to: .defaultTFEager)
- return Tensor(copying: _RawTFEager.stopGradient(input), to: output_device)
- case .TF_EAGER:
- return _RawTFEager.stopGradient(input)
- }
-
- }
-
- /// Return a strided slice from `input`.
- ///
- /// Note, most python users will want to use the Python `Tensor.__getitem__`
- /// or `Variable.__getitem__` rather than this op directly.
- ///
- /// The goal of this op is to produce a new tensor with a subset of
- /// the elements from the `n` dimensional `input` tensor. The subset is chosen using
- /// a sequence of `m` sparse range specifications encoded into the arguments
- /// of this function. Note, in some cases
- /// `m` could be equal to `n`, but this need not be the case. Each
- /// range specification entry can be one of the following:
- ///
- /// - An ellipsis (...). Ellipses are used to imply zero or more
- /// dimensions of full-dimension selection and are produced using
- /// `ellipsis_mask`. For example, `foo[...]` is the identity slice.
- ///
- /// - A new axis. This is used to insert a new shape=1 dimension and is
- /// produced using `new_axis_mask`. For example, `foo[tf.newaxis, :, ...]` where
- /// `foo` is shape `(3, 4)` produces a `(1, 3, 4)` tensor.
- ///
- ///
- /// - A range `begin:end:stride`. This is used to specify how much to choose from
- /// a given dimension. `stride` can be any integer but 0. `begin` is an integer
- /// which represents the index of the first value to select while `end` represents
- /// the index of the last value to select. The number of values selected in each
- /// dimension is `end - begin` if `stride > 0` and `begin - end` if `stride < 0`.
- /// `begin` and `end` can be negative where `-1` is the last element, `-2` is
- /// the second to last. `begin_mask` controls whether to replace the explicitly
- /// given `begin` with an implicit effective value of `0` if `stride > 0` and
- /// `-1` if `stride < 0`. `end_mask` is analogous but produces the number
- /// required to create the largest open interval. For example, given a shape
- /// `(3,)` tensor `foo[:]`, the effective `begin` and `end` are `0` and `3`. Do
- /// not assume this is equivalent to `foo[0:-1]` which has an effective `begin`
- /// and `end` of `0` and `2`. Another example is `foo[-2::-1]` which reverses the
- /// first dimension of a tensor while dropping the last element (in the original
- /// order). For example `foo = [1,2,3,4]; foo[-2::-1]` is `[3,2,1]`.
- ///
- /// - A single index. This is used to keep only elements that have a given
- /// index. For example, `foo[2, :]` on a shape `(5,6)` tensor produces a
- /// shape `(6,)` tensor. This is encoded in `begin` and `end` and
- /// `shrink_axis_mask`.
- ///
- /// Each conceptual range specification is encoded in the op's argument. This
- /// encoding is best understood by considering a non-trivial example. In
- /// particular,
- /// `foo[1, 2:4, None, ..., :-3:-1, :]` will be encoded as
- ///
- /// ```
- /// begin = [1, 2, x, x, 0, x] # x denotes don't care (usually 0)
- /// end = [2, 4, x, x, -3, x]
- /// strides = [1, 1, x, x, -1, 1]
- /// begin_mask = 1<<4 | 1 << 5 = 48
- /// end_mask = 1<<5 = 32
- /// ellipsis_mask = 1<<3 = 8
- /// new_axis_mask = 1<<2 = 4
- /// shrink_axis_mask = 1<<0 = 1
- /// ```
- ///
- /// In this case if `foo.shape` is (5, 5, 5, 5, 5, 5) the final shape of
- /// the slice becomes (2, 1, 5, 5, 2, 5).
- /// Let us walk step by step through each argument specification.
- ///
- /// 1. The first argument in the example slice is turned into `begin = 1` and
- /// `end = begin + 1 = 2`. To disambiguate from the original spec `2:4` we
- /// also set the appropriate bit in `shrink_axis_mask`.
- ///
- /// 2. `2:4` contributes 2, 4, 1 to begin, end, and stride. All masks have
- /// zero bits contributed.
- ///
- /// 3. None is a synonym for `tf.newaxis`. This means insert a size-1
- /// dimension in the final shape. Dummy values are contributed to begin,
- /// end and stride, while the new_axis_mask bit is set.
- ///
- /// 4. `...` grabs the full ranges from as many dimensions as needed to
- /// fully specify a slice for every dimension of the input shape.
- ///
- /// 5. `:-3:-1` shows the use of negative indices. A negative index `i` associated
- /// 5. `:-3:-1` shows the use of negative indices. A negative index `i` associated
- /// with a dimension that has shape `s` is converted to a positive index
- /// `s + i`. So `-1` becomes `s-1` (i.e. the last element). This conversion
- /// is done internally so begin, end and strides receive x, -3, and -1.
- /// The appropriate begin_mask bit is set to indicate the start range is the
- /// full range (ignoring the x).
- ///
- /// 6. `:` indicates that the entire contents of the corresponding dimension
- /// is selected. This is equivalent to `::` or `0::1`. begin, end, and strides
- /// receive 0, 0, and 1, respectively. The appropriate bits in `begin_mask` and
- /// `end_mask` are also set.
- ///
- /// *Requirements*:
- /// `0 != strides[i] for i in [0, m)`
- /// `ellipsis_mask must be a power of two (only one ellipsis)`
- ///
- /// - Parameters:
- /// - begin: `begin[k]` specifies the offset into the `k`th range specification.
- /// The exact dimension this corresponds to will be determined by context.
- /// Out-of-bounds values will be silently clamped. If the `k`th bit of
- /// `begin_mask` is set, then `begin[k]` is ignored and the full range of the
- /// appropriate dimension is used instead. Negative values cause indexing
- /// to start from the highest element, e.g. if `foo==[1,2,3]` then `foo[-1]==3`.
- /// - end: `end[i]` is like `begin` with the exception that `end_mask` is
- /// used to determine full ranges.
- /// - strides: `strides[i]` specifies the increment in the `i`th specification
- /// after extracting a given element. Negative indices will reverse
- /// the original order. Out-of-range values are
- /// clamped to `[0,dim[i]) if slice[i]>0` or `[-1,dim[i]-1] if slice[i] < 0`
- ///
- /// - Attrs:
- /// - begin_mask: a bitmask where a bit i being 1 means to ignore the begin
- /// value and instead use the largest interval possible. At runtime
- /// begin[i] will be replaced with `[0, n-1)` if `stride[i] > 0` or
- /// `[-1, n-1]` if `stride[i] < 0`
- /// - end_mask: analogous to `begin_mask`
- /// - ellipsis_mask: a bitmask where bit `i` being 1 means the `i`th
- /// position is actually an ellipsis. One bit at most can be 1.
- /// If `ellipsis_mask == 0`, then an implicit ellipsis mask of `1 << (m+1)`
- /// is provided. This means that `foo[3:5] == foo[3:5, ...]`. An ellipsis
- /// implicitly creates as many range specifications as necessary to fully
- /// specify the sliced range for every dimension. For example for a 4-dimensional
- /// tensor `foo` the slice `foo[2, ..., 5:8]` implies `foo[2, :, :, 5:8]`.
- /// - new_axis_mask: a bitmask where bit `i` being 1 means the `i`th
- /// specification creates a new shape 1 dimension. For example
- /// `foo[:4, tf.newaxis, :2]` would produce a shape `(4, 1, 2)` tensor.
- /// - shrink_axis_mask: a bitmask where bit `i` implies that the `i`th
- /// specification should shrink the dimensionality. begin and end
- /// must imply a slice of size 1 in the dimension. For example in
- /// python one might do `foo[:, 3, :]` which would result in
- /// `shrink_axis_mask` being 2.
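The mask arithmetic in the worked example above can be checked directly. A minimal plain-Swift sketch (no TensorFlow dependency; the constant names are illustrative only):

```swift
// Bit k of each mask refers to the k-th range specification in
// foo[1, 2:4, None, ..., :-3:-1, :].
let beginMask: Int64 = 1 << 4 | 1 << 5  // specs 4 and 5 use the implicit begin
let endMask: Int64 = 1 << 5             // spec 5 uses the implicit end
let ellipsisMask: Int64 = 1 << 3        // spec 3 is `...`
let newAxisMask: Int64 = 1 << 2         // spec 2 is None / tf.newaxis
let shrinkAxisMask: Int64 = 1 << 0      // spec 0 is the single index `1`

assert(beginMask == 48 && endMask == 32 && ellipsisMask == 8)
assert(newAxisMask == 4 && shrinkAxisMask == 1)
```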
- @inlinable @inline(__always)
- public static func stridedSlice<
- T: TensorFlowScalar,
- Index: TensorFlowIndex
- >(
- _ input: Tensor,
- begin: Tensor,
- end: Tensor,
- strides: Tensor,
- beginMask: Int64 = 0,
- endMask: Int64 = 0,
- ellipsisMask: Int64 = 0,
- newAxisMask: Int64 = 0,
- shrinkAxisMask: Int64 = 0
- ) -> Tensor {
- switch commonBackend(
- commonBackend(
- commonBackend(input.handle.backend, begin.handle.backend), end.handle.backend),
- strides.handle.backend)
- {
- case .XLA:
- return _RawXLA.stridedSlice(
- input, begin: begin, end: end, strides: strides, beginMask: beginMask, endMask: endMask,
- ellipsisMask: ellipsisMask, newAxisMask: newAxisMask, shrinkAxisMask: shrinkAxisMask)
- case .TF_EAGER:
- return _RawTFEager.stridedSlice(
- input, begin: begin, end: end, strides: strides, beginMask: beginMask, endMask: endMask,
- ellipsisMask: ellipsisMask, newAxisMask: newAxisMask, shrinkAxisMask: shrinkAxisMask)
- }
-
- }
-
- /// Returns the gradient of `StridedSlice`.
- ///
- /// Since `StridedSlice` cuts out pieces of its `input` which is of size
- /// `shape`, its gradient will have the same shape (which is passed here
- /// as `shape`). The gradient will be zero in any element that the slice
- /// does not select.
- ///
- /// Arguments are the same as `StridedSlice` with the exception that
- /// `dy` is the input gradient to be propagated and `shape` is the
- /// shape of `StridedSlice`'s `input`.
- @inlinable @inline(__always)
- public static func stridedSliceGrad<
- T: TensorFlowScalar,
- Index: TensorFlowIndex
- >(
- shape: Tensor,
- begin: Tensor,
- end: Tensor,
- strides: Tensor,
- dy: Tensor,
- beginMask: Int64 = 0,
- endMask: Int64 = 0,
- ellipsisMask: Int64 = 0,
- newAxisMask: Int64 = 0,
- shrinkAxisMask: Int64 = 0
- ) -> Tensor {
- switch commonBackend(
- commonBackend(
- commonBackend(
- commonBackend(shape.handle.backend, begin.handle.backend), end.handle.backend),
- strides.handle.backend), dy.handle.backend)
- {
- case .XLA:
- return _RawXLA.stridedSliceGrad(
- shape: shape, begin: begin, end: end, strides: strides, dy: dy, beginMask: beginMask,
- endMask: endMask, ellipsisMask: ellipsisMask, newAxisMask: newAxisMask,
- shrinkAxisMask: shrinkAxisMask)
- case .TF_EAGER:
- return _RawTFEager.stridedSliceGrad(
- shape: shape, begin: begin, end: end, strides: strides, dy: dy, beginMask: beginMask,
- endMask: endMask, ellipsisMask: ellipsisMask, newAxisMask: newAxisMask,
- shrinkAxisMask: shrinkAxisMask)
- }
-
- }
-
- /// Formats a string template using a list of tensors.
- ///
- /// Formats a string template using a list of tensors, pretty-printing tensor summaries.
- ///
- /// - Parameter inputs: The list of tensors to format into the placeholder string.
- ///
- /// - Attrs:
- /// - template: A string, the template to format tensor summaries into.
- /// - placeholder: A string, at each placeholder in the template a subsequent tensor summary will be inserted.
- /// - summarize: When formatting the tensor summaries print the first and last summarize entries of each tensor dimension.
- ///
- /// - Output output: The resulting string scalar.
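Because the input and the three index tensors are folded through `commonBackend`, a call only succeeds when all operands share a backend. A hedged usage sketch of the dispatching entry point (shapes and values are illustrative only, assuming the standard `Tensor` initializers):

```swift
// Slice rows 0..<2 and columns 1..<3 of a [2, 3] tensor; all masks keep
// their default of 0.
let input = Tensor<Float>(rangeFrom: 0, to: 6, stride: 1).reshaped(to: [2, 3])
let sliced = _Raw.stridedSlice(
  input,
  begin: Tensor<Int32>([0, 1]),
  end: Tensor<Int32>([2, 3]),
  strides: Tensor<Int32>([1, 1]))
// sliced == [[1.0, 2.0], [4.0, 5.0]]
```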
- @inlinable @inline(__always) - public static func stringFormat( - inputs: T, - template: String = "%s", - placeholder: String = "%s", - summarize: Int64 = 3 - ) -> StringTensor { - _RawTFEager.stringFormat( - inputs: inputs, template: template, placeholder: placeholder, summarize: summarize) - } - - /// Joins the strings in the given list of string tensors into one tensor; - /// - /// with the given separator (default is an empty separator). - /// - /// Examples: - /// - /// >>> s = ["hello", "world", "tensorflow"] - /// >>> tf.strings.join(s, " ") - /// - /// - /// - Parameter inputs: A list of string tensors. The tensors must all have the same shape, - /// or be scalars. Scalars may be mixed in; these will be broadcast to the shape - /// of non-scalar inputs. - /// - /// - Attr separator: string, an optional join separator. - @inlinable @inline(__always) - public static func stringJoin( - inputs: [StringTensor], - separator: String - ) -> StringTensor { - _RawTFEager.stringJoin(inputs: inputs, separator: separator) - } - - /// String lengths of `input`. - /// - /// Computes the length of each string given in the input tensor. - /// - /// >>> strings = tf.constant(['Hello','TensorFlow', '\U0001F642']) - /// >>> tf.strings.length(strings).numpy() # default counts bytes - /// array([ 5, 10, 4], dtype=int32) - /// >>> tf.strings.length(strings, unit="UTF8_CHAR").numpy() - /// array([ 5, 10, 1], dtype=int32) - /// - /// - /// - Parameter input: The strings for which to compute the length for each element. - /// - /// - Attr unit: The unit that is counted to compute string length. One of: `"BYTE"` (for - /// the number of bytes in each string) or `"UTF8_CHAR"` (for the number of UTF-8 - /// encoded Unicode code points in each string). Results are undefined - /// if `unit=UTF8_CHAR` and the `input` strings do not contain structurally - /// valid UTF-8. - /// - /// - Output output: Integer tensor that has the same shape as `input`. The output contains the - /// element-wise string lengths of `input`. - @inlinable @inline(__always) - public static func stringLength( - _ input: StringTensor, - unit: Unit = .byte - ) -> Tensor { - _RawTFEager.stringLength(input, unit: unit) - } - - @inlinable @inline(__always) - public static func stringListAttr( - _ a: [String], - _ b: String - ) { - _RawTFEager.stringListAttr(a, b) - } - - /// Converts all uppercase characters into their respective lowercase replacements. - /// - /// Example: - /// - /// >>> tf.strings.lower("CamelCase string and ALL CAPS") - /// - /// - @inlinable @inline(__always) - public static func stringLower( - _ input: StringTensor, - encoding: String - ) -> StringTensor { - _RawTFEager.stringLower(input, encoding: encoding) - } - - /// Creates ngrams from ragged string data. - /// - /// This op accepts a ragged tensor with 1 ragged dimension containing only - /// strings and outputs a ragged tensor with 1 ragged dimension containing ngrams - /// of that string, joined along the innermost axis. - /// - /// - Parameters: - /// - data: The values tensor of the ragged string tensor to make ngrams out of. Must be a - /// 1D string tensor. - /// - data_splits: The splits tensor of the ragged string tensor to make ngrams out of. - /// - /// - Attrs: - /// - separator: The string to append between elements of the token. Use "" for no separator. - /// - ngram_widths: The sizes of the ngrams to create. - /// - left_pad: The string to use to pad the left side of the ngram sequence. Only used if - /// pad_width != 0. 
- /// - right_pad: The string to use to pad the right side of the ngram sequence. Only used if
- /// pad_width != 0.
- /// - pad_width: The number of padding elements to add to each side of each
- /// sequence. Note that padding will never be greater than 'ngram_widths'-1
- /// regardless of this value. If `pad_width=-1`, then add `max(ngram_widths)-1`
- /// elements.
- ///
- /// - Outputs:
- /// - ngrams: The values tensor of the output ngrams ragged tensor.
- /// - ngrams_splits: The splits tensor of the output ngrams ragged tensor.
- @inlinable @inline(__always)
- public static func stringNGrams(
- data: StringTensor,
- dataSplits: Tensor,
- separator: String,
- ngramWidths: [Int32],
- leftPad: String,
- rightPad: String,
- padWidth: Int64,
- preserveShortSequences: Bool
- ) -> (ngrams: StringTensor, ngramsSplits: Tensor) {
- _RawTFEager.stringNGrams(
- data: data, dataSplits: dataSplits, separator: separator, ngramWidths: ngramWidths,
- leftPad: leftPad, rightPad: rightPad, padWidth: padWidth,
- preserveShortSequences: preserveShortSequences)
- }
-
- /// Split elements of `input` based on `delimiter` into a `SparseTensor`.
- ///
- /// Let N be the size of source (typically N will be the batch size). Split each
- /// element of `input` based on `delimiter` and return a `SparseTensor`
- /// containing the split tokens. Empty tokens are ignored.
- ///
- /// `delimiter` can be empty, or a string of split characters. If `delimiter` is an
- /// empty string, each element of `input` is split into individual single-byte
- /// character strings, including splitting of UTF-8 multibyte sequences. Otherwise
- /// every character of `delimiter` is a potential split point.
- ///
- /// For example:
- /// N = 2, input[0] is 'hello world' and input[1] is 'a b c', then the output
- /// will be
- ///
- /// indices = [0, 0;
- /// 0, 1;
- /// 1, 0;
- /// 1, 1;
- /// 1, 2]
- /// shape = [2, 3]
- /// values = ['hello', 'world', 'a', 'b', 'c']
- ///
- /// - Parameters:
- /// - input: 1-D. Strings to split.
- /// - delimiter: 0-D. Delimiter characters (bytes), or empty string.
- ///
- /// - Attr skip_empty: A `bool`. If `True`, skip the empty strings from the result.
- ///
- /// - Outputs:
- /// - indices: A dense matrix of int64 representing the indices of the sparse tensor.
- /// - values: A vector of strings corresponding to the split values.
- /// - shape: a length-2 vector of int64 representing the shape of the sparse
- /// tensor, where the first value is N and the second value is the maximum number
- /// of tokens in a single input entry.
- @inlinable @inline(__always)
- public static func stringSplit(
- _ input: StringTensor,
- delimiter: StringTensor,
- skipEmpty: Bool = true
- ) -> (indices: Tensor, values: StringTensor, shape: Tensor) {
- _RawTFEager.stringSplit(input, delimiter: delimiter, skipEmpty: skipEmpty)
- }
-
- /// Split elements of `source` based on `sep` into a `SparseTensor`.
- ///
- /// Let N be the size of source (typically N will be the batch size). Split each
- /// element of `source` based on `sep` and return a `SparseTensor`
- /// containing the split tokens. Empty tokens are ignored.
- ///
- /// For example, N = 2, source[0] is 'hello world' and source[1] is 'a b c',
- /// then the output will be
- /// ```
- /// st.indices = [0, 0;
- /// 0, 1;
- /// 1, 0;
- /// 1, 1;
- /// 1, 2]
- /// st.shape = [2, 3]
- /// st.values = ['hello', 'world', 'a', 'b', 'c']
- /// ```
- ///
- /// If `sep` is given, consecutive delimiters are not grouped together and are
- /// deemed to delimit empty strings. For example, source of `"1<>2<><>3"` and
- /// sep of `"<>"` returns `["1", "2", "", "3"]`. If `sep` is None or an empty
- /// string, consecutive whitespace is regarded as a single separator, and the
- /// result will contain no empty strings at the start or end if the string has
- /// leading or trailing whitespace.
- ///
- /// Note that the above-mentioned behavior matches Python's str.split.
- ///
- /// - Parameters:
- /// - input: `1-D` string `Tensor`, the strings to split.
- /// - sep: `0-D` string `Tensor`, the delimiter character.
- ///
- /// - Attr maxsplit: An `int`. If `maxsplit > 0`, limits the number of splits in the result.
- @inlinable @inline(__always)
- public static func stringSplitV2(
- _ input: StringTensor,
- sep: StringTensor,
- maxsplit: Int64 = -1
- ) -> (indices: Tensor, values: StringTensor, shape: Tensor) {
- _RawTFEager.stringSplitV2(input, sep: sep, maxsplit: maxsplit)
- }
-
- /// Strip leading and trailing whitespaces from the Tensor.
- ///
- /// - Parameter input: A string `Tensor` of any shape.
- ///
- /// - Output output: A string `Tensor` of the same shape as the input.
- ///
- /// Examples:
- ///
- /// >>> tf.strings.strip(["\nTensorFlow", " The python library "]).numpy()
- /// array([b'TensorFlow', b'The python library'], dtype=object)
- @inlinable @inline(__always)
- public static func stringStrip(
- _ input: StringTensor
- ) -> StringTensor {
- _RawTFEager.stringStrip(input)
- }
-
- /// Converts each string in the input Tensor to its hash mod by a number of buckets.
- ///
- /// The hash function is deterministic on the content of the string within the
- /// process.
- ///
- /// Note that the hash function may change from time to time.
- /// This functionality will be deprecated and it's recommended to use
- /// `tf.string_to_hash_bucket_fast()` or `tf.string_to_hash_bucket_strong()`.
- ///
- /// - Attr num_buckets: The number of buckets.
- ///
- /// - Output output: A Tensor of the same shape as the input `string_tensor`.
- @inlinable @inline(__always)
- public static func stringToHashBucket(
- stringTensor: StringTensor,
- numBuckets: Int64
- ) -> Tensor {
- _RawTFEager.stringToHashBucket(stringTensor: stringTensor, numBuckets: numBuckets)
- }
-
- /// Converts each string in the input Tensor to its hash mod by a number of buckets.
- ///
- /// The hash function is deterministic on the content of the string within the
- /// process and will never change. However, it is not suitable for cryptography.
- /// This function may be used when CPU time is scarce and inputs are trusted or
- /// unimportant. There is a risk of adversaries constructing inputs that all hash
- /// to the same bucket. To prevent this problem, use a strong hash function with
- /// `tf.string_to_hash_bucket_strong`.
- ///
- /// Examples:
- ///
- /// >>> tf.strings.to_hash_bucket_fast(["Hello", "TensorFlow", "2.x"], 3).numpy()
- /// array([0, 2, 2])
- ///
- /// - Parameter input: The strings to assign a hash bucket.
- ///
- /// - Attr num_buckets: The number of buckets.
- ///
- /// - Output output: A Tensor of the same shape as the input `string_tensor`.
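The V2 split semantics above (empty `sep` collapses whitespace runs; an explicit `sep` preserves empty tokens) mirror Python's `str.split`. A plain-Swift sketch of just those semantics for a single element, as an illustration only, not the op's implementation:

```swift
import Foundation

// Mimics the documented stringSplitV2 tokenization for one string.
func splitV2(_ source: String, sep: String?) -> [String] {
  guard let sep = sep, !sep.isEmpty else {
    // nil/empty separator: whitespace runs collapse and yield no empty tokens.
    return source.split(whereSeparator: { $0.isWhitespace }).map(String.init)
  }
  // Explicit separator: consecutive delimiters produce empty strings.
  return source.components(separatedBy: sep)
}

splitV2("1<>2<><>3", sep: "<>")        // ["1", "2", "", "3"]
splitV2("  hello   world ", sep: nil)  // ["hello", "world"]
```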
- @inlinable @inline(__always) - public static func stringToHashBucketFast( - _ input: StringTensor, - numBuckets: Int64 - ) -> Tensor { - _RawTFEager.stringToHashBucketFast(input, numBuckets: numBuckets) - } - - /// Converts each string in the input Tensor to its hash mod by a number of buckets. - /// - /// The hash function is deterministic on the content of the string within the - /// process. The hash function is a keyed hash function, where attribute `key` - /// defines the key of the hash function. `key` is an array of 2 elements. - /// - /// A strong hash is important when inputs may be malicious, e.g. URLs with - /// additional components. Adversaries could try to make their inputs hash to the - /// same bucket for a denial-of-service attack or to skew the results. A strong - /// hash can be used to make it difficult to find inputs with a skewed hash value - /// distribution over buckets. This requires that the hash function is - /// seeded by a high-entropy (random) "key" unknown to the adversary. - /// - /// The additional robustness comes at a cost of roughly 4x higher compute - /// time than `tf.string_to_hash_bucket_fast`. - /// - /// Examples: - /// - /// >>> tf.strings.to_hash_bucket_strong(["Hello", "TF"], 3, [1, 2]).numpy() - /// array([2, 0]) - /// - /// - Parameter input: The strings to assign a hash bucket. - /// - /// - Attrs: - /// - num_buckets: The number of buckets. - /// - key: The key used to seed the hash function, passed as a list of two uint64 - /// elements. - /// - /// - Output output: A Tensor of the same shape as the input `string_tensor`. - @inlinable @inline(__always) - public static func stringToHashBucketStrong( - _ input: StringTensor, - numBuckets: Int64, - key: [Int32] - ) -> Tensor { - _RawTFEager.stringToHashBucketStrong(input, numBuckets: numBuckets, key: key) - } - - /// Converts each string in the input Tensor to the specified numeric type. - /// - /// (Note that int32 overflow results in an error while float overflow - /// results in a rounded value.) - /// - /// Example: - /// - /// >>> strings = ["5.0", "3.0", "7.0"] - /// >>> tf.strings.to_number(strings) - /// - /// - /// - /// - Attr out_type: The numeric type to interpret each string in `string_tensor` as. - /// - /// - Output output: A Tensor of the same shape as the input `string_tensor`. - @inlinable @inline(__always) - public static func stringToNumber( - stringTensor: StringTensor - ) -> Tensor { - _RawTFEager.stringToNumber(stringTensor: stringTensor) - } - - /// Converts all lowercase characters into their respective uppercase replacements. - /// - /// Example: - /// - /// >>> tf.strings.upper("CamelCase string and ALL CAPS") - /// - /// - @inlinable @inline(__always) - public static func stringUpper( - _ input: StringTensor, - encoding: String - ) -> StringTensor { - _RawTFEager.stringUpper(input, encoding: encoding) - } - - @inlinable @inline(__always) - public static func stubResourceHandleOp( - container: String, - sharedName: String - ) -> ResourceHandle { - _RawTFEager.stubResourceHandleOp(container: container, sharedName: sharedName) - } - - /// Returns x - y element-wise. - /// - /// *NOTE*: `Subtract` supports broadcasting. 
- /// More about broadcasting
- /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
- @inlinable @inline(__always)
- public static func sub(
- _ x: Tensor,
- _ y: Tensor
- ) -> Tensor {
- switch commonBackend(x.handle.backend, y.handle.backend) {
- case .XLA:
- return _RawXLA.sub(x, y)
- case .TF_EAGER:
- return _RawTFEager.sub(x, y)
- }
-
- }
-
- /// Return substrings from `Tensor` of strings.
- ///
- /// For each string in the input `Tensor`, creates a substring starting at index
- /// `pos` with a total length of `len`.
- ///
- /// If `len` defines a substring that would extend beyond the length of the input
- /// string, or if `len` is negative, then as many characters as possible are used.
- ///
- /// A negative `pos` indicates distance within the string backwards from the end.
- ///
- /// If `pos` specifies an index which is out of range for any of the input strings,
- /// then an `InvalidArgumentError` is thrown.
- ///
- /// `pos` and `len` must have the same shape, otherwise a `ValueError` is thrown on
- /// Op creation.
- ///
- /// *NOTE*: `Substr` supports broadcasting up to two dimensions. More about
- /// broadcasting
- /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
- ///
- /// ---
- ///
- /// Examples
- ///
- /// Using scalar `pos` and `len`:
- ///
- /// ```python
- /// input = [b'Hello', b'World']
- /// position = 1
- /// length = 3
- ///
- /// output = [b'ell', b'orl']
- /// ```
- ///
- /// Using `pos` and `len` with same shape as `input`:
- ///
- /// ```python
- /// input = [[b'ten', b'eleven', b'twelve'],
- /// [b'thirteen', b'fourteen', b'fifteen'],
- /// [b'sixteen', b'seventeen', b'eighteen']]
- /// position = [[1, 2, 3],
- /// [1, 2, 3],
- /// [1, 2, 3]]
- /// length = [[2, 3, 4],
- /// [4, 3, 2],
- /// [5, 5, 5]]
- ///
- /// output = [[b'en', b'eve', b'lve'],
- /// [b'hirt', b'urt', b'te'],
- /// [b'ixtee', b'vente', b'hteen']]
- /// ```
- ///
- /// Broadcasting `pos` and `len` onto `input`:
- ///
- /// ```
- /// input = [[b'ten', b'eleven', b'twelve'],
- /// [b'thirteen', b'fourteen', b'fifteen'],
- /// [b'sixteen', b'seventeen', b'eighteen'],
- /// [b'nineteen', b'twenty', b'twentyone']]
- /// position = [1, 2, 3]
- /// length = [1, 2, 3]
- ///
- /// output = [[b'e', b'ev', b'lve'],
- /// [b'h', b'ur', b'tee'],
- /// [b'i', b've', b'hte'],
- /// [b'i', b'en', b'nty']]
- /// ```
- ///
- /// Broadcasting `input` onto `pos` and `len`:
- ///
- /// ```
- /// input = b'thirteen'
- /// position = [1, 5, 7]
- /// length = [3, 2, 1]
- ///
- /// output = [b'hir', b'ee', b'n']
- /// ```
- ///
- /// Raises:
- ///
- /// * `ValueError`: If the first argument cannot be converted to a
- /// Tensor of `dtype string`.
- /// * `InvalidArgumentError`: If indices are out of range.
- /// * `ValueError`: If `pos` and `len` are not the same shape.
- ///
- ///
- /// - Parameters:
- /// - input: Tensor of strings
- /// - pos: Scalar defining the position of the first character in each substring
- /// - len: Scalar defining the number of characters to include in each substring
- ///
- /// - Attr unit: The unit that is used to create the substring. One of: `"BYTE"` (for
- /// defining position and length by bytes) or `"UTF8_CHAR"` (for the UTF-8
- /// encoded Unicode code points). The default is `"BYTE"`. Results are undefined if
- /// `unit=UTF8_CHAR` and the `input` strings do not contain structurally valid
- /// UTF-8.
- ///
- /// - Output output: Tensor of substrings
- @inlinable @inline(__always)
- public static func substr(
- _ input: StringTensor,
- pos: Tensor,
- len: Tensor,
- unit: Unit = .byte
- ) -> StringTensor {
- _RawTFEager.substr(input, pos: pos, len: len, unit: unit)
- }
-
- /// Computes the sum of elements across dimensions of a tensor.
- ///
- /// Reduces `input` along the dimensions given in `axis`. Unless
- /// `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
- /// `axis`. If `keep_dims` is true, the reduced dimensions are
- /// retained with length 1.
- ///
- /// - Parameters:
- /// - input: The tensor to reduce.
- /// - reduction_indices: The dimensions to reduce. Must be in the range
- /// `[-rank(input), rank(input))`.
- ///
- /// - Attr keep_dims: If true, retain reduced dimensions with length 1.
- ///
- /// - Output output: The reduced tensor.
- @inlinable @inline(__always)
- public static func sum<
- T: TensorFlowNumeric,
- Tidx: TensorFlowIndex
- >(
- _ input: Tensor,
- reductionIndices: Tensor,
- keepDims: Bool = false
- ) -> Tensor {
- switch commonBackend(input.handle.backend, reductionIndices.handle.backend) {
- case .XLA:
- return _RawXLA.sum(input, reductionIndices: reductionIndices, keepDims: keepDims)
- case .TF_EAGER:
- return _RawTFEager.sum(input, reductionIndices: reductionIndices, keepDims: keepDims)
- }
-
- }
-
- @inlinable @inline(__always)
- public static func summaryWriter(
- sharedName: String,
- container: String
- ) -> ResourceHandle {
- _RawTFEager.summaryWriter(sharedName: sharedName, container: container)
- }
-
- /// Computes the singular value decompositions of one or more matrices.
- ///
- /// Computes the SVD of each inner matrix in `input` such that
- /// `input[..., :, :] = u[..., :, :] * diag(s[..., :, :]) * transpose(v[..., :, :])`
- ///
- /// ```python
- /// # a is a tensor containing a batch of matrices.
- /// # s is a tensor of singular values for each matrix.
- /// # u is the tensor containing the left singular vectors for each matrix.
- /// # v is the tensor containing the right singular vectors for each matrix.
- /// s, u, v = svd(a)
- /// s, _, _ = svd(a, compute_uv=False)
- /// ```
- ///
- /// - Parameter input: A tensor of shape `[..., M, N]` whose inner-most 2 dimensions
- /// form matrices of size `[M, N]`. Let `P` be the minimum of `M` and `N`.
- ///
- /// - Attrs:
- /// - compute_uv: If true, left and right singular vectors will be
- /// computed and returned in `u` and `v`, respectively.
- /// If false, `u` and `v` are not set and should never be referenced.
- /// - full_matrices: If true, compute full-sized `u` and `v`. If false
- /// (the default), compute only the leading `P` singular vectors.
- /// Ignored if `compute_uv` is `False`.
- ///
- /// - Outputs:
- /// - s: Singular values. Shape is `[..., P]`.
- /// - u: Left singular vectors. If `full_matrices` is `False` then shape is
- /// `[..., M, P]`; if `full_matrices` is `True` then shape is
- /// `[..., M, M]`. Undefined if `compute_uv` is `False`.
- /// - v: Right singular vectors. If `full_matrices` is `False` then shape is
- /// `[..., N, P]`. If `full_matrices` is `True` then shape is `[..., N, N]`.
- /// Undefined if `compute_uv` is false.
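A hedged usage sketch of the op documented above, checking the shapes it promises (matrix values illustrative only, assuming the standard `Tensor` literal initializer):

```swift
// SVD of a single 2x2 diagonal matrix. With fullMatrices: false,
// s: [..., P], u: [..., M, P], v: [..., N, P] with P = min(M, N).
let a = Tensor<Float>([[3, 0], [0, 2]])
let (s, u, v) = _Raw.svd(a, computeUv: true, fullMatrices: false)
// s ≈ [3, 2]; here P == M == N == 2, so u and v are both 2x2.
```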
- @inlinable @inline(__always)
- public static func svd(
- _ input: Tensor,
- computeUv: Bool = true,
- fullMatrices: Bool = false
- ) -> (s: Tensor, u: Tensor, v: Tensor) {
- switch input.handle.backend {
- case .XLA:
- return _RawXLA.svd(input, computeUv: computeUv, fullMatrices: fullMatrices)
- case .TF_EAGER:
- return _RawTFEager.svd(input, computeUv: computeUv, fullMatrices: fullMatrices)
- }
- }
-
- /// Forwards `data` to the output port determined by `pred`.
- ///
- /// If `pred` is true, the `data` input is forwarded to `output_true`. Otherwise,
- /// the data goes to `output_false`.
- ///
- /// See also `RefSwitch` and `Merge`.
- ///
- /// - Parameters:
- /// - data: The tensor to be forwarded to the appropriate output.
- /// - pred: A scalar that specifies which output port will receive data.
- ///
- /// - Outputs:
- /// - output_false: If `pred` is false, data will be forwarded to this output.
- /// - output_true: If `pred` is true, data will be forwarded to this output.
- @inlinable @inline(__always)
- public static func switch_(
- data: Tensor,
- pred: Tensor
- ) -> (outputFalse: Tensor, outputTrue: Tensor) {
- _RawTFEager.switch_(data: data, pred: pred)
- }
-
- /// Computes the gradient function for function f via backpropagation.
- ///
- /// - Parameter input: a list of input tensors of size N + M;
- ///
- /// - Attrs:
- /// - Tin: the type list for the input list.
- /// - Tout: the type list for the output list.
- /// - f: The function we want to compute the gradient for.
- ///
- /// The function 'f' must be a numerical function which takes N inputs and
- /// produces M outputs. Its gradient function 'g', which is computed by
- /// this SymbolicGradient op, is a function taking N + M inputs and
- /// producing N outputs.
- ///
- /// I.e. if we have
- /// (y1, y2, ..., y_M) = f(x1, x2, ..., x_N),
- /// then, g is
- /// (dL/dx1, dL/dx2, ..., dL/dx_N) = g(x1, x2, ..., x_N,
- /// dL/dy1, dL/dy2, ..., dL/dy_M),
- ///
- /// where L is a scalar-valued function of (x1, x2, ..., xN) (e.g., the
- /// loss function). dL/dx_i is the partial derivative of L with respect
- /// to x_i.
- ///
- /// - Output output: a list of output tensors of size N;
- @inlinable @inline(__always)
- public static func symbolicGradient<
- Tin: TensorArrayProtocol,
- Tout: TensorGroup,
- FIn: TensorGroup,
- FOut: TensorGroup
- >(
- _ input: Tin,
- f: (FIn) -> FOut
- ) -> Tout {
- _RawTFEager.symbolicGradient(input, f: f)
- }
-
- /// Creates a dataset that emits the records from one or more TFRecord files.
- ///
- /// - Parameters:
- /// - filenames: A scalar or vector containing the name(s) of the file(s) to be
- /// read.
- /// - compression_type: A scalar containing either (i) the empty string (no
- /// compression), (ii) "ZLIB", or (iii) "GZIP".
- /// - buffer_size: A scalar representing the number of bytes to buffer. A value of
- /// 0 means no buffering will be performed.
- @inlinable @inline(__always)
- public static func tFRecordDataset(
- filenames: StringTensor,
- compressionType: StringTensor,
- bufferSize: Tensor
- ) -> VariantHandle {
- _RawTFEager.tFRecordDataset(
- filenames: filenames, compressionType: compressionType, bufferSize: bufferSize)
- }
-
- /// A Reader that outputs the records from a TensorFlow Records file.
- ///
- /// - Attrs:
- /// - container: If non-empty, this reader is placed in the given container.
- /// Otherwise, a default container is used.
- /// - shared_name: If non-empty, this reader is named in the given bucket
- /// with this shared_name. Otherwise, the node name is used instead.
- ///
- /// - Output reader_handle: The handle to reference the Reader.
- @inlinable @inline(__always)
- public static func tFRecordReaderV2(
- container: String,
- sharedName: String,
- compressionType: String
- ) -> ResourceHandle {
- _RawTFEager.tFRecordReaderV2(
- container: container, sharedName: sharedName, compressionType: compressionType)
- }
-
- /// Returns the result of a TPU compilation.
- ///
- /// This operation returns the result of a TPU compilation as a serialized
- /// CompilationResultProto, which holds a status and an error message if an error
- /// occurred during compilation.
- @inlinable @inline(__always)
- public static func tPUCompilationResult() -> StringTensor {
- _RawTFEager.tPUCompilationResult()
- }
-
- /// An op enabling differentiation of TPU Embeddings.
- ///
- /// This op simply returns its first input, which is assumed to have been sliced
- /// from the Tensors returned by TPUEmbeddingDequeueActivations. The presence of
- /// this op, and its first argument being a trainable Variable, enables automatic
- /// differentiation of graphs containing embeddings via the TPU Embedding Python
- /// libraries.
- ///
- /// - Parameters:
- /// - embedding_variable: A trainable variable, enabling optimizers to find this op.
- /// - sliced_activations: The embedding activations Tensor to return.
- ///
- /// - Attrs:
- /// - table_id: The id of the table in the embedding layer configuration from which
- /// these activations were computed.
- /// - lookup_id: Identifier of the set of embedding indices which produced these
- /// activations.
- @inlinable @inline(__always)
- public static func tPUEmbeddingActivations(
- embeddingVariable: Tensor,
- slicedActivations: Tensor,
- tableId: Int64,
- lookupId: Int64
- ) -> Tensor {
- switch commonBackend(embeddingVariable.handle.backend, slicedActivations.handle.backend) {
- case .XLA:
- let output_device = slicedActivations.device
- let embeddingVariable = Tensor(copying: embeddingVariable, to: .defaultTFEager)
- let slicedActivations = Tensor(copying: slicedActivations, to: .defaultTFEager)
- return Tensor(
- copying: _RawTFEager.tPUEmbeddingActivations(
- embeddingVariable: embeddingVariable, slicedActivations: slicedActivations,
- tableId: tableId, lookupId: lookupId), to: output_device)
- case .TF_EAGER:
- return _RawTFEager.tPUEmbeddingActivations(
- embeddingVariable: embeddingVariable, slicedActivations: slicedActivations,
- tableId: tableId, lookupId: lookupId)
- }
-
- }
-
- /// A TPU core selector Op.
- ///
- /// This Op produces a set of TPU cores (for warm-up) or a single TPU core
- /// (for regular inference) to execute the TPU program on. The output is
- /// consumed by TPUPartitionedCall.
- ///
- /// - Output device_ordinals: A vector of 1 or more TPU cores.
- @inlinable @inline(__always)
- public static func tPUOrdinalSelector() -> Tensor {
- _RawTFEager.tPUOrdinalSelector()
- }
-
- /// Calls a function placed on a specified TPU device.
- ///
- /// - Parameters:
- /// - args: The arguments to the function.
- /// - device_ordinal: The TPU device ordinal to run the function on.
- ///
- /// - Attrs:
- /// - Tin: The types of the arguments to the function.
- /// - Tout: The types of the outputs of the function.
- /// - f: The function to call.
- ///
- /// - Output output: The output of the function call.
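Ops like `tPUEmbeddingActivations` above have no XLA lowering, so their XLA case falls back to the eager kernel by copying operands across backends and copying the result back. A minimal sketch of that pattern in isolation (the helper name is hypothetical, not part of the library):

```swift
// Copy an XLA-resident tensor to the eager backend, run an eager-only
// kernel, then return the result on the original device.
func runOnEagerAndCopyBack<T: TensorFlowFloatingPoint>(
  _ x: Tensor<T>, _ eagerOp: (Tensor<T>) -> Tensor<T>
) -> Tensor<T> {
  let outputDevice = x.device
  let eagerX = Tensor(copying: x, to: .defaultTFEager)
  return Tensor(copying: eagerOp(eagerX), to: outputDevice)
}
```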
- @inlinable @inline(__always) - public static func tPUPartitionedCall< - Tin: TensorArrayProtocol, - Tout: TensorGroup, - FIn: TensorGroup, - FOut: TensorGroup - >( - args: Tin, - deviceOrdinal: Tensor, - f: (FIn) -> FOut, - autotunerThresh: Int64 = 0 - ) -> Tout { - _RawTFEager.tPUPartitionedCall( - args: args, deviceOrdinal: deviceOrdinal, f: f, autotunerThresh: autotunerThresh) - } - - /// Metadata indicating how the TPU computation should be replicated. - /// - /// This operation holds the metadata common to operations of a `tpu.replicate()` computation subgraph. - /// - /// - Attrs: - /// - num_replicas: Number of replicas of the computation - /// - num_cores_per_replica: Number of cores per replica. Used for model parallelism. - /// - topology: TopologyProto indicating the topology of the TPU pod slice. - /// - use_tpu: Whether to place the computation on the TPU. - /// - device_assignment: The assignment of devices for the computation. - /// - computation_shape: DEPRECATED. Use num_cores_per_replica instead. - @inlinable @inline(__always) - public static func tPUReplicateMetadata( - numReplicas: Int64, - numCoresPerReplica: Int64 = 1, - topology: String, - useTpu: Bool = true, - deviceAssignment: [Int32], - computationShape: [Int32], - hostComputeCore: [String], - paddingMap: [String], - stepMarkerLocation: String = "STEP_MARK_AT_ENTRY", - allowSoftPlacement: Bool = false - ) { - _RawTFEager.tPUReplicateMetadata( - numReplicas: numReplicas, numCoresPerReplica: numCoresPerReplica, topology: topology, - useTpu: useTpu, deviceAssignment: deviceAssignment, computationShape: computationShape, - hostComputeCore: hostComputeCore, paddingMap: paddingMap, - stepMarkerLocation: stepMarkerLocation, allowSoftPlacement: allowSoftPlacement) - } - - /// Connects N inputs to an N-way replicated TPU computation. - /// - /// This operation holds a replicated input to a `tpu.replicate()` computation subgraph. - /// Each replicated input has the same shape and type alongside the output. - /// - /// For example: - /// ``` - /// %a = "tf.opA"() - /// %b = "tf.opB"() - /// %replicated_input = "tf.TPUReplicatedInput"(%a, %b) - /// %computation = "tf.Computation"(%replicated_input) - /// ``` - /// The above computation has a replicated input of two replicas. - @inlinable @inline(__always) - public static func tPUReplicatedInput( - inputs: [Tensor], - isMirroredVariable: Bool = false, - index: Int64 = -1 - ) -> Tensor { - _RawTFEager.tPUReplicatedInput( - inputs: inputs, isMirroredVariable: isMirroredVariable, index: index) - } - - /// Connects N outputs from an N-way replicated TPU computation. - /// - /// This operation holds a replicated output from a `tpu.replicate()` computation subgraph. - /// Each replicated output has the same shape and type alongside the input. - /// - /// For example: - /// ``` - /// %computation = "tf.Computation"() - /// %replicated_output:2 = "tf.TPUReplicatedOutput"(%computation) - /// ``` - /// The above computation has a replicated output of two replicas. 
- @inlinable @inline(__always) - public static func tPUReplicatedOutput( - _ input: Tensor, - numReplicas: Int64 - ) -> [Tensor] { - switch input.handle.backend { - case .XLA: - let output_device = input.device - let input = Tensor(copying: input, to: .defaultTFEager) - return [Tensor]( - copying: _RawTFEager.tPUReplicatedOutput(input, numReplicas: numReplicas), - to: output_device) - case .TF_EAGER: - return _RawTFEager.tPUReplicatedOutput(input, numReplicas: numReplicas) - } - - } - - @inlinable @inline(__always) - public static func tRTEngineOp< - SegmentfuncIn: TensorGroup, - SegmentfuncOut: TensorGroup, - Intt: TensorArrayProtocol, - Outt: TensorGroup - >( - inTensor: Intt, - serializedSegment: String, - segmentFunc: (SegmentfuncIn) -> SegmentfuncOut, - maxCachedEnginesCount: Int64 = 1, - workspaceSizeBytes: Int64, - precisionMode: PrecisionMode, - calibrationData: String, - useCalibration: Bool = true, - segmentFuncdefName: String, - cachedEngineBatches: [Int32], - fixedInputSize: Bool = true, - inputShapes: [TensorShape?], - outputShapes: [TensorShape?], - staticEngine: Bool = true - ) -> Outt { - _RawTFEager.tRTEngineOp( - inTensor: inTensor, serializedSegment: serializedSegment, segmentFunc: segmentFunc, - maxCachedEnginesCount: maxCachedEnginesCount, workspaceSizeBytes: workspaceSizeBytes, - precisionMode: precisionMode, calibrationData: calibrationData, - useCalibration: useCalibration, segmentFuncdefName: segmentFuncdefName, - cachedEngineBatches: cachedEngineBatches, fixedInputSize: fixedInputSize, - inputShapes: inputShapes, outputShapes: outputShapes, staticEngine: staticEngine) - } - - /// Creates a dataset that contains `count` elements from the `input_dataset`. - /// - /// - Parameter count: A scalar representing the number of elements from the `input_dataset` - /// that should be taken. A value of `-1` indicates that all of `input_dataset` - /// is taken. - @inlinable @inline(__always) - public static func takeDataset( - inputDataset: VariantHandle, - count: Tensor, - outputTypes: [TensorDataType], - outputShapes: [TensorShape?] - ) -> VariantHandle { - _RawTFEager.takeDataset( - inputDataset: inputDataset, count: count, outputTypes: outputTypes, - outputShapes: outputShapes) - } - - /// Read `SparseTensors` from a `SparseTensorsMap` and concatenate them. - /// - /// The input `sparse_handles` must be an `int64` matrix of shape `[N, 1]` where - /// `N` is the minibatch size and the rows correspond to the output handles of - /// `AddSparseToTensorsMap` or `AddManySparseToTensorsMap`. The ranks of the - /// original `SparseTensor` objects that went into the given input ops must all - /// match. When the final `SparseTensor` is created, it has rank one - /// higher than the ranks of the incoming `SparseTensor` objects - /// (they have been concatenated along a new row dimension on the left). - /// - /// The output `SparseTensor` object's shape values for all dimensions but the - /// first are the max across the input `SparseTensor` objects' shape values - /// for the corresponding dimensions. Its first shape value is `N`, the minibatch - /// size. - /// - /// The input `SparseTensor` objects' indices are assumed ordered in - /// standard lexicographic order. If this is not the case, after this - /// step run `SparseReorder` to restore index ordering. 
- ///
- /// For example, if the handles represent an input, which is a `[2, 3]` matrix
- /// representing two original `SparseTensor` objects:
- ///
- /// ```
- /// index = [ 0]
- /// [10]
- /// [20]
- /// values = [1, 2, 3]
- /// shape = [50]
- /// ```
- ///
- /// and
- ///
- /// ```
- /// index = [ 2]
- /// [10]
- /// values = [4, 5]
- /// shape = [30]
- /// ```
- ///
- /// then the final `SparseTensor` will be:
- ///
- /// ```
- /// index = [0 0]
- /// [0 10]
- /// [0 20]
- /// [1 2]
- /// [1 10]
- /// values = [1, 2, 3, 4, 5]
- /// shape = [2 50]
- /// ```
- ///
- /// - Parameter sparse_handles: 1-D. The `N` serialized `SparseTensor` objects.
- /// Shape: `[N]`.
- ///
- /// - Attrs:
- /// - dtype: The `dtype` of the `SparseTensor` objects stored in the
- /// `SparseTensorsMap`.
- /// - container: The container name for the `SparseTensorsMap` read by this op.
- /// - shared_name: The shared name for the `SparseTensorsMap` read by this op.
- /// It should not be blank; rather the `shared_name` or unique Operation name
- /// of the Op that created the original `SparseTensorsMap` should be used.
- ///
- /// - Outputs:
- /// - sparse_indices: 2-D. The `indices` of the minibatch `SparseTensor`.
- /// - sparse_values: 1-D. The `values` of the minibatch `SparseTensor`.
- /// - sparse_shape: 1-D. The `shape` of the minibatch `SparseTensor`.
- @inlinable @inline(__always)
- public static func takeManySparseFromTensorsMap(
- sparseHandles: Tensor,
- container: String,
- sharedName: String
- ) -> (sparseIndices: Tensor, sparseValues: Tensor, sparseShape: Tensor) {
- _RawTFEager.takeManySparseFromTensorsMap(
- sparseHandles: sparseHandles, container: container, sharedName: sharedName)
- }
-
- /// Creates a dataset that stops iteration when `predicate` is false.
- ///
- /// The `predicate` function must return a scalar boolean and accept the
- /// following arguments:
- ///
- /// * One tensor for each component of an element of `input_dataset`.
- /// * One tensor for each value in `other_arguments`.
- ///
- /// - Parameter other_arguments: A list of tensors, typically values that were captured when
- /// building a closure for `predicate`.
- ///
- /// - Attr predicate: A function returning a scalar boolean.
- @inlinable @inline(__always)
- public static func takeWhileDataset<
- PredicateIn: TensorGroup,
- PredicateOut: TensorGroup,
- Targuments: TensorArrayProtocol
- >(
- inputDataset: VariantHandle,
- otherArguments: Targuments,
- predicate: (PredicateIn) -> PredicateOut,
- outputTypes: [TensorDataType],
- outputShapes: [TensorShape?]
- ) -> VariantHandle {
- _RawTFEager.takeWhileDataset(
- inputDataset: inputDataset, otherArguments: otherArguments, predicate: predicate,
- outputTypes: outputTypes, outputShapes: outputShapes)
- }
-
- /// Computes tan of x element-wise.
- ///
- /// Given an input tensor, this function computes the tangent of every
- /// element in the tensor. Input range is `(-inf, inf)` and
- /// output range is `(-inf, inf)`. If input lies outside the boundary, `nan`
- /// is returned.
- ///
- /// ```python
- /// x = tf.constant([-float("inf"), -9, -0.5, 1, 1.2, 200, 10000, float("inf")])
- /// tf.math.tan(x) ==> [nan 0.45231566 -0.5463025 1.5574077 2.572152 -1.7925274 0.32097113 nan]
- /// ```
- @inlinable @inline(__always)
- public static func tan(
- _ x: Tensor
- ) -> Tensor {
- switch x.handle.backend {
- case .XLA:
- return _RawXLA.tan(x)
- case .TF_EAGER:
- return _RawTFEager.tan(x)
- }
-
- }
-
- /// Computes hyperbolic tangent of `x` element-wise.
- /// - /// Given an input tensor, this function computes hyperbolic tangent of every - /// element in the tensor. Input range is `[-inf, inf]` and - /// output range is `[-1,1]`. - /// - /// ```python - /// x = tf.constant([-float("inf"), -5, -0.5, 1, 1.2, 2, 3, float("inf")]) - /// tf.math.tanh(x) ==> [-1. -0.99990916 -0.46211717 0.7615942 0.8336547 0.9640276 0.9950547 1.] - /// ``` - @inlinable @inline(__always) - public static func tanh( - _ x: Tensor - ) -> Tensor { - switch x.handle.backend { - case .XLA: - return _RawXLA.tanh(x) - case .TF_EAGER: - return _RawTFEager.tanh(x) - } - - } - - /// Computes the gradient for the tanh of `x` wrt its input. - /// - /// Specifically, `grad = dy * (1 - y*y)`, where `y = tanh(x)`, and `dy` - /// is the corresponding input gradient. - @inlinable @inline(__always) - public static func tanhGrad( - _ y: Tensor, - dy: Tensor - ) -> Tensor { - switch commonBackend(y.handle.backend, dy.handle.backend) { - case .XLA: - let output_device = dy.device - let y = Tensor(copying: y, to: .defaultTFEager) - let dy = Tensor(copying: dy, to: .defaultTFEager) - return Tensor(copying: _RawTFEager.tanhGrad(y, dy: dy), to: output_device) - case .TF_EAGER: - return _RawTFEager.tanhGrad(y, dy: dy) - } - - } - - /// Deprecated. Use TensorArrayCloseV3 - @inlinable @inline(__always) - public static func tensorArrayCloseV2( - handle: StringTensor - ) { - _RawTFEager.tensorArrayCloseV2(handle: handle) - } - - /// Delete the TensorArray from its resource container. - /// - /// This enables the user to close and release the resource in the middle - /// of a step/run. - /// - /// - Parameter handle: The handle to a TensorArray (output of TensorArray or TensorArrayGrad). - @inlinable @inline(__always) - public static func tensorArrayCloseV3( - handle: ResourceHandle - ) { - _RawTFEager.tensorArrayCloseV3(handle: handle) - } - - /// Deprecated. Use TensorArrayConcatV3 - @inlinable @inline(__always) - public static func tensorArrayConcatV2( - handle: StringTensor, - flowIn: Tensor, - elementShapeExcept0: TensorShape? - ) -> (value: Tensor, lengths: Tensor) { - _RawTFEager.tensorArrayConcatV2( - handle: handle, flowIn: flowIn, elementShapeExcept0: elementShapeExcept0) - } - - /// Concat the elements from the TensorArray into value `value`. - /// - /// Takes `T` elements of shapes - /// - /// ``` - /// (n0 x d0 x d1 x ...), (n1 x d0 x d1 x ...), ..., (n(T-1) x d0 x d1 x ...) - /// ``` - /// - /// and concatenates them into a Tensor of shape: - /// - /// ```(n0 + n1 + ... + n(T-1) x d0 x d1 x ...)``` - /// - /// All elements must have the same shape (excepting the first dimension). - /// - /// - Parameters: - /// - handle: The handle to a TensorArray. - /// - flow_in: A float scalar that enforces proper chaining of operations. - /// - /// - Attrs: - /// - dtype: The type of the elem that is returned. - /// - element_shape_except0: The expected shape of an element, if known, - /// excluding the first dimension. Used to validate the shapes of - /// TensorArray elements. If this shape is not fully specified, concatenating - /// zero-size TensorArrays is an error. - /// - /// - Outputs: - /// - value: All of the elements in the TensorArray, concatenated along the first - /// axis. - /// - lengths: A vector of the row sizes of the original T elements in the - /// value output. In the example above, this would be the values: - /// `(n1, n2, ..., n(T-1))`. 
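The `tanhGrad` dispatch above implements the documented identity `grad = dy * (1 - y*y)` with `y = tanh(x)`. A hedged numerical check through the dispatching entry points (values illustrative only):

```swift
// Verify the documented gradient identity elementwise.
let x = Tensor<Float>([-1, 0, 1])
let y = _Raw.tanh(x)
let dy = Tensor<Float>(ones: [3])
let grad = _Raw.tanhGrad(y, dy: dy)
// grad should match dy * (1 - y * y).
```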
- @inlinable @inline(__always) - public static func tensorArrayConcatV3( - handle: ResourceHandle, - flowIn: Tensor, - elementShapeExcept0: TensorShape? - ) -> (value: Tensor, lengths: Tensor) { - _RawTFEager.tensorArrayConcatV3( - handle: handle, flowIn: flowIn, elementShapeExcept0: elementShapeExcept0) - } - - /// Deprecated. Use TensorArrayGatherV3 - @inlinable @inline(__always) - public static func tensorArrayGatherV2( - handle: StringTensor, - indices: Tensor, - flowIn: Tensor, - elementShape: TensorShape? - ) -> Tensor { - switch commonBackend(indices.handle.backend, flowIn.handle.backend) { - case .XLA: - let output_device = flowIn.device - let indices = Tensor(copying: indices, to: .defaultTFEager) - let flowIn = Tensor(copying: flowIn, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.tensorArrayGatherV2( - handle: handle, indices: indices, flowIn: flowIn, elementShape: elementShape), - to: output_device) - case .TF_EAGER: - return _RawTFEager.tensorArrayGatherV2( - handle: handle, indices: indices, flowIn: flowIn, elementShape: elementShape) - } - - } - - /// Gather specific elements from the TensorArray into output `value`. - /// - /// All elements selected by `indices` must have the same shape. - /// - /// - Parameters: - /// - handle: The handle to a TensorArray. - /// - indices: The locations in the TensorArray from which to read tensor elements. - /// - flow_in: A float scalar that enforces proper chaining of operations. - /// - /// - Attrs: - /// - dtype: The type of the elem that is returned. - /// - element_shape: The expected shape of an element, if known. Used to - /// validate the shapes of TensorArray elements. If this shape is not - /// fully specified, gathering zero-size TensorArrays is an error. - /// - /// - Output value: All of the elements in the TensorArray, concatenated along a new - /// axis (the new dimension 0). - @inlinable @inline(__always) - public static func tensorArrayGatherV3( - handle: ResourceHandle, - indices: Tensor, - flowIn: Tensor, - elementShape: TensorShape? - ) -> Tensor { - switch commonBackend(indices.handle.backend, flowIn.handle.backend) { - case .XLA: - let output_device = flowIn.device - let indices = Tensor(copying: indices, to: .defaultTFEager) - let flowIn = Tensor(copying: flowIn, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.tensorArrayGatherV3( - handle: handle, indices: indices, flowIn: flowIn, elementShape: elementShape), - to: output_device) - case .TF_EAGER: - return _RawTFEager.tensorArrayGatherV3( - handle: handle, indices: indices, flowIn: flowIn, elementShape: elementShape) - } - - } - - /// Deprecated. Use TensorArrayGradV3 - @inlinable @inline(__always) - public static func tensorArrayGradV2( - handle: StringTensor, - flowIn: Tensor, - source: String - ) -> StringTensor { - _RawTFEager.tensorArrayGradV2(handle: handle, flowIn: flowIn, source: source) - } - - /// Creates a TensorArray for storing the gradients of values in the given handle. - /// - /// If the given TensorArray gradient already exists, returns a reference to it. - /// - /// Locks the size of the original TensorArray by disabling its dynamic size flag. - /// - /// **A note about the input flow_in:** - /// - /// The handle flow_in forces the execution of the gradient lookup to occur - /// only after certain other operations have occurred. For example, when - /// the forward TensorArray is dynamically sized, writes to this TensorArray - /// may resize the object. 
The gradient TensorArray is statically sized based - /// on the size of the forward TensorArray when this operation executes. - /// Furthermore, the size of the forward TensorArray is frozen by this call. - /// As a result, the flow is used to ensure that the call to generate the gradient - /// TensorArray only happens after all writes are executed. - /// - /// In the case of dynamically sized TensorArrays, gradient computation should - /// only be performed on read operations that have themselves been chained via - /// flow to occur only after all writes have executed. That way the final size - /// of the forward TensorArray is known when this operation is called. - /// - /// **A note about the source attribute:** - /// - /// TensorArray gradient calls use an accumulator TensorArray object. If - /// multiple gradients are calculated and run in the same session, the multiple - /// gradient nodes may accidentally flow through the same accumulator TensorArray. - /// This double counts and generally breaks the TensorArray gradient flow. - /// - /// The solution is to identify which gradient call this particular - /// TensorArray gradient is being called in. This is performed by identifying - /// a unique string (e.g. "gradients", "gradients_1", ...) from the input - /// gradient Tensor's name. This string is used as a suffix when creating - /// the TensorArray gradient object here (the attribute `source`). - /// - /// The attribute `source` is added as a suffix to the forward TensorArray's - /// name when performing the creation / lookup, so that each separate gradient - /// calculation gets its own TensorArray accumulator. - /// - /// - Parameters: - /// - handle: The handle to the forward TensorArray. - /// - flow_in: A float scalar that enforces proper chaining of operations. - /// - /// - Attr source: The gradient source string, used to decide which gradient TensorArray - /// to return. - @inlinable @inline(__always) - public static func tensorArrayGradV3( - handle: ResourceHandle, - flowIn: Tensor, - source: String - ) -> (gradHandle: ResourceHandle, flowOut: Tensor) { - _RawTFEager.tensorArrayGradV3(handle: handle, flowIn: flowIn, source: source) - } - - /// Creates a TensorArray for storing multiple gradients of values in the given handle. - /// - /// Similar to TensorArrayGradV3. However it creates an accumulator with an - /// expanded shape compared to the input TensorArray whose gradient is being - /// computed. This enables multiple gradients for the same TensorArray to be - /// calculated using the same accumulator. - /// - /// - Parameters: - /// - handle: The handle to the forward TensorArray. - /// - flow_in: A float scalar that enforces proper chaining of operations. - /// - shape_to_prepend: An int32 vector representing a shape. Elements in the gradient accumulator will - /// have shape which is this shape_to_prepend value concatenated with shape of the - /// elements in the TensorArray corresponding to the input handle. - /// - /// - Attr source: The gradient source string, used to decide which gradient TensorArray - /// to return. - @inlinable @inline(__always) - public static func tensorArrayGradWithShape( - handle: ResourceHandle, - flowIn: Tensor, - shapeToPrepend: Tensor, - source: String - ) -> (gradHandle: ResourceHandle, flowOut: Tensor) { - _RawTFEager.tensorArrayGradWithShape( - handle: handle, flowIn: flowIn, shapeToPrepend: shapeToPrepend, source: source) - } - - /// Deprecated. 
Use TensorArrayReadV3 - @inlinable @inline(__always) - public static func tensorArrayReadV2( - handle: StringTensor, - index: Tensor, - flowIn: Tensor - ) -> Tensor { - switch commonBackend(index.handle.backend, flowIn.handle.backend) { - case .XLA: - let output_device = flowIn.device - let index = Tensor(copying: index, to: .defaultTFEager) - let flowIn = Tensor(copying: flowIn, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.tensorArrayReadV2(handle: handle, index: index, flowIn: flowIn), - to: output_device) - case .TF_EAGER: - return _RawTFEager.tensorArrayReadV2(handle: handle, index: index, flowIn: flowIn) - } - - } - - /// Read an element from the TensorArray into output `value`. - /// - /// - Parameters: - /// - handle: The handle to a TensorArray. - /// - flow_in: A float scalar that enforces proper chaining of operations. - /// - /// - Attr dtype: The type of the elem that is returned. - /// - /// - Output value: The tensor that is read from the TensorArray. - @inlinable @inline(__always) - public static func tensorArrayReadV3( - handle: ResourceHandle, - index: Tensor, - flowIn: Tensor - ) -> Tensor { - switch commonBackend(index.handle.backend, flowIn.handle.backend) { - case .XLA: - let output_device = flowIn.device - let index = Tensor(copying: index, to: .defaultTFEager) - let flowIn = Tensor(copying: flowIn, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.tensorArrayReadV3(handle: handle, index: index, flowIn: flowIn), - to: output_device) - case .TF_EAGER: - return _RawTFEager.tensorArrayReadV3(handle: handle, index: index, flowIn: flowIn) - } - - } - - /// Deprecated. Use TensorArrayScatterV3 - @inlinable @inline(__always) - public static func tensorArrayScatterV2( - handle: StringTensor, - indices: Tensor, - value: Tensor, - flowIn: Tensor - ) -> Tensor { - switch commonBackend( - commonBackend(indices.handle.backend, value.handle.backend), flowIn.handle.backend) - { - case .XLA: - let output_device = flowIn.device - let indices = Tensor(copying: indices, to: .defaultTFEager) - let value = Tensor(copying: value, to: .defaultTFEager) - let flowIn = Tensor(copying: flowIn, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.tensorArrayScatterV2( - handle: handle, indices: indices, value: value, flowIn: flowIn), to: output_device) - case .TF_EAGER: - return _RawTFEager.tensorArrayScatterV2( - handle: handle, indices: indices, value: value, flowIn: flowIn) - } - - } - - /// Scatter the data from the input value into specific TensorArray elements. - /// - /// `indices` must be a vector, its length must match the first dim of `value`. - /// - /// - Parameters: - /// - handle: The handle to a TensorArray. - /// - indices: The locations at which to write the tensor elements. - /// - value: The concatenated tensor to write to the TensorArray. - /// - flow_in: A float scalar that enforces proper chaining of operations. - /// - /// - Output flow_out: A float scalar that enforces proper chaining of operations. 
- @inlinable @inline(__always) - public static func tensorArrayScatterV3( - handle: ResourceHandle, - indices: Tensor, - value: Tensor, - flowIn: Tensor - ) -> Tensor { - switch commonBackend( - commonBackend(indices.handle.backend, value.handle.backend), flowIn.handle.backend) - { - case .XLA: - let output_device = flowIn.device - let indices = Tensor(copying: indices, to: .defaultTFEager) - let value = Tensor(copying: value, to: .defaultTFEager) - let flowIn = Tensor(copying: flowIn, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.tensorArrayScatterV3( - handle: handle, indices: indices, value: value, flowIn: flowIn), to: output_device) - case .TF_EAGER: - return _RawTFEager.tensorArrayScatterV3( - handle: handle, indices: indices, value: value, flowIn: flowIn) - } - - } - - /// Deprecated. Use TensorArraySizeV3 - @inlinable @inline(__always) - public static func tensorArraySizeV2( - handle: StringTensor, - flowIn: Tensor - ) -> Tensor { - switch flowIn.handle.backend { - case .XLA: - let output_device = flowIn.device - let flowIn = Tensor(copying: flowIn, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.tensorArraySizeV2(handle: handle, flowIn: flowIn), to: output_device) - case .TF_EAGER: - return _RawTFEager.tensorArraySizeV2(handle: handle, flowIn: flowIn) - } - - } - - /// Get the current size of the TensorArray. - /// - /// - Parameters: - /// - handle: The handle to a TensorArray (output of TensorArray or TensorArrayGrad). - /// - flow_in: A float scalar that enforces proper chaining of operations. - /// - /// - Output size: The current size of the TensorArray. - @inlinable @inline(__always) - public static func tensorArraySizeV3( - handle: ResourceHandle, - flowIn: Tensor - ) -> Tensor { - switch flowIn.handle.backend { - case .XLA: - let output_device = flowIn.device - let flowIn = Tensor(copying: flowIn, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.tensorArraySizeV3(handle: handle, flowIn: flowIn), to: output_device) - case .TF_EAGER: - return _RawTFEager.tensorArraySizeV3(handle: handle, flowIn: flowIn) - } - - } - - /// Deprecated. Use TensorArraySplitV3 - @inlinable @inline(__always) - public static func tensorArraySplitV2( - handle: StringTensor, - value: Tensor, - lengths: Tensor, - flowIn: Tensor - ) -> Tensor { - switch commonBackend( - commonBackend(value.handle.backend, lengths.handle.backend), flowIn.handle.backend) - { - case .XLA: - let output_device = flowIn.device - let value = Tensor(copying: value, to: .defaultTFEager) - let lengths = Tensor(copying: lengths, to: .defaultTFEager) - let flowIn = Tensor(copying: flowIn, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.tensorArraySplitV2( - handle: handle, value: value, lengths: lengths, flowIn: flowIn), to: output_device) - case .TF_EAGER: - return _RawTFEager.tensorArraySplitV2( - handle: handle, value: value, lengths: lengths, flowIn: flowIn) - } - - } - - /// Split the data from the input value into TensorArray elements. - /// - /// Assuming that `lengths` takes on values - /// - /// ```(n0, n1, ..., n(T-1))``` - /// - /// and that `value` has shape - /// - /// ```(n0 + n1 + ... + n(T-1) x d0 x d1 x ...)```, - /// - /// this splits values into a TensorArray with T tensors. - /// - /// TensorArray index t will be the subtensor of values with starting position - /// - /// ```(n0 + n1 + ... + n(t-1), 0, 0, ...)``` - /// - /// and having size - /// - /// ```nt x d0 x d1 x ...``` - /// - /// - Parameters: - /// - handle: The handle to a TensorArray. 
- /// - value: The concatenated tensor to write to the TensorArray. - /// - lengths: The vector of lengths, how to split the rows of value into the - /// TensorArray. - /// - flow_in: A float scalar that enforces proper chaining of operations. - /// - /// - Output flow_out: A float scalar that enforces proper chaining of operations. - @inlinable @inline(__always) - public static func tensorArraySplitV3( - handle: ResourceHandle, - value: Tensor, - lengths: Tensor, - flowIn: Tensor - ) -> Tensor { - switch commonBackend( - commonBackend(value.handle.backend, lengths.handle.backend), flowIn.handle.backend) - { - case .XLA: - let output_device = flowIn.device - let value = Tensor(copying: value, to: .defaultTFEager) - let lengths = Tensor(copying: lengths, to: .defaultTFEager) - let flowIn = Tensor(copying: flowIn, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.tensorArraySplitV3( - handle: handle, value: value, lengths: lengths, flowIn: flowIn), to: output_device) - case .TF_EAGER: - return _RawTFEager.tensorArraySplitV3( - handle: handle, value: value, lengths: lengths, flowIn: flowIn) - } - - } - - /// Deprecated. Use TensorArrayV3 - @inlinable @inline(__always) - public static func tensorArrayV2( - size: Tensor, - dtype: TensorDataType, - elementShape: TensorShape?, - dynamicSize: Bool = false, - clearAfterRead: Bool = true, - tensorArrayName: String - ) -> StringTensor { - _RawTFEager.tensorArrayV2( - size: size, dtype: dtype, elementShape: elementShape, dynamicSize: dynamicSize, - clearAfterRead: clearAfterRead, tensorArrayName: tensorArrayName) - } - - /// An array of Tensors of given size. - /// - /// Write data via Write and read via Read or Pack. - /// - /// - Parameter size: The size of the array. - /// - /// - Attrs: - /// - dtype: The type of the elements of the tensor_array. - /// - element_shape: The expected shape of an element, if known. Used to - /// validate the shapes of TensorArray elements. If this shape is not - /// fully specified, gathering zero-size TensorArrays is an error. - /// - dynamic_size: A boolean that determines whether writes to the TensorArray - /// are allowed to grow the size. By default, this is not allowed. - /// - clear_after_read: If true (default), Tensors in the TensorArray are cleared - /// after being read. This disables multiple read semantics but allows early - /// release of memory. - /// - identical_element_shapes: If true (default is false), then all - /// elements in the TensorArray will be expected to have identical shapes. - /// This allows certain behaviors, like dynamically checking for - /// consistent shapes on write, and being able to fill in properly - /// shaped zero tensors on stack -- even if the element_shape attribute - /// is not fully defined. - /// - tensor_array_name: Overrides the name used for the temporary tensor_array - /// resource. Default value is the name of the 'TensorArray' op (which - /// is guaranteed unique). - /// - /// - Outputs: - /// - handle: The handle to the TensorArray. - /// - flow: A scalar used to control gradient flow.
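As a usage sketch for the V3 TensorArray ops documented here and declared just below: every write returns a fresh `flow` scalar, and threading it through each call is what the docs mean by "enforces proper chaining of operations". Values and shapes are illustrative only:

```swift
import TensorFlow

// A sketch (not from the patch) of flow chaining with the V3 TensorArray ops.
let (handle, flow) = _Raw.tensorArrayV3(
  size: Tensor<Int32>(2),
  dtype: Float.tensorFlowDataType,
  elementShape: TensorShape([3]),
  tensorArrayName: "example")
// Each write consumes the previous flow scalar and returns a new one.
let flow1 = _Raw.tensorArrayWriteV3(
  handle: handle, index: Tensor<Int32>(0), value: Tensor<Float>([1, 2, 3]), flowIn: flow)
let flow2 = _Raw.tensorArrayWriteV3(
  handle: handle, index: Tensor<Int32>(1), value: Tensor<Float>([4, 5, 6]), flowIn: flow1)
// Reads are generic over the element dtype; the annotation selects Float.
let element: Tensor<Float> = _Raw.tensorArrayReadV3(
  handle: handle, index: Tensor<Int32>(1), flowIn: flow2)
```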
- @inlinable @inline(__always) - public static func tensorArrayV3( - size: Tensor, - dtype: TensorDataType, - elementShape: TensorShape?, - dynamicSize: Bool = false, - clearAfterRead: Bool = true, - identicalElementShapes: Bool = false, - tensorArrayName: String - ) -> (handle: ResourceHandle, flow: Tensor) { - _RawTFEager.tensorArrayV3( - size: size, dtype: dtype, elementShape: elementShape, dynamicSize: dynamicSize, - clearAfterRead: clearAfterRead, identicalElementShapes: identicalElementShapes, - tensorArrayName: tensorArrayName) - } - - /// Deprecated. Use TensorArrayWriteV3 - @inlinable @inline(__always) - public static func tensorArrayWriteV2( - handle: StringTensor, - index: Tensor, - value: Tensor, - flowIn: Tensor - ) -> Tensor { - switch commonBackend( - commonBackend(index.handle.backend, value.handle.backend), flowIn.handle.backend) - { - case .XLA: - let output_device = flowIn.device - let index = Tensor(copying: index, to: .defaultTFEager) - let value = Tensor(copying: value, to: .defaultTFEager) - let flowIn = Tensor(copying: flowIn, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.tensorArrayWriteV2( - handle: handle, index: index, value: value, flowIn: flowIn), to: output_device) - case .TF_EAGER: - return _RawTFEager.tensorArrayWriteV2( - handle: handle, index: index, value: value, flowIn: flowIn) - } - - } - - /// Push an element onto the tensor_array. - /// - /// - Parameters: - /// - handle: The handle to a TensorArray. - /// - index: The position to write to inside the TensorArray. - /// - value: The tensor to write to the TensorArray. - /// - flow_in: A float scalar that enforces proper chaining of operations. - /// - /// - Output flow_out: A float scalar that enforces proper chaining of operations. - @inlinable @inline(__always) - public static func tensorArrayWriteV3( - handle: ResourceHandle, - index: Tensor, - value: Tensor, - flowIn: Tensor - ) -> Tensor { - switch commonBackend( - commonBackend(index.handle.backend, value.handle.backend), flowIn.handle.backend) - { - case .XLA: - let output_device = flowIn.device - let index = Tensor(copying: index, to: .defaultTFEager) - let value = Tensor(copying: value, to: .defaultTFEager) - let flowIn = Tensor(copying: flowIn, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.tensorArrayWriteV3( - handle: handle, index: index, value: value, flowIn: flowIn), to: output_device) - case .TF_EAGER: - return _RawTFEager.tensorArrayWriteV3( - handle: handle, index: index, value: value, flowIn: flowIn) - } - - } - - /// Creates a dataset that emits `components` as a tuple of tensors once. - @inlinable @inline(__always) - public static func tensorDataset( - components: ToutputTypes, - outputShapes: [TensorShape?] - ) -> VariantHandle { - _RawTFEager.tensorDataset(components: components, outputShapes: outputShapes) - } - - /// Creates a tree resource and returns a handle to it. - /// - /// - Parameters: - /// - tree_handle: Handle to the tree resource to be created. - /// - tree_config: Serialized proto string of the boosted_trees.Tree. - @inlinable @inline(__always) - public static func tensorForestCreateTreeVariable( - treeHandle: ResourceHandle, - treeConfig: StringTensor - ) { - _RawTFEager.tensorForestCreateTreeVariable(treeHandle: treeHandle, treeConfig: treeConfig) - } - - /// Deserializes a proto into the tree handle - /// - /// - Parameters: - /// - tree_handle: Handle to the tree resource to be restored. - /// - tree_config: Serialized proto string of the boosted_trees.Tree proto.
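A hedged sketch of the tensor-forest resource lifecycle these ops describe; the empty `treeConfig` string is only a placeholder for a real serialized `boosted_trees.Tree` proto and would not describe a useful tree:

```swift
import TensorFlow

// Resource lifecycle: create a handle, install a tree, check initialization.
// The empty config string is a stand-in for real boosted_trees.Tree bytes.
let treeHandle = _Raw.tensorForestTreeResourceHandleOp(
  container: "", sharedName: "demo_tree")
_Raw.tensorForestCreateTreeVariable(
  treeHandle: treeHandle, treeConfig: StringTensor(""))
let initialized: Tensor<Bool> = _Raw.tensorForestTreeIsInitializedOp(
  treeHandle: treeHandle)
```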
- @inlinable @inline(__always) - public static func tensorForestTreeDeserialize( - treeHandle: ResourceHandle, - treeConfig: StringTensor - ) { - _RawTFEager.tensorForestTreeDeserialize(treeHandle: treeHandle, treeConfig: treeConfig) - } - - /// Checks whether a tree has been initialized. - /// - /// - Parameter tree_handle: Handle to the tree. - /// - /// - Output is_initialized: Whether the tree is initialized. - @inlinable @inline(__always) - public static func tensorForestTreeIsInitializedOp( - treeHandle: ResourceHandle - ) -> Tensor { - _RawTFEager.tensorForestTreeIsInitializedOp(treeHandle: treeHandle) - } - - /// Output the logits for the given input data - /// - /// - Parameters: - /// - tree_handle: Handle to the tree resource. - /// - dense_features: Rank 2 dense features tensor. - /// - /// - Attr logits_dimension: Scalar, dimension of the logits. - /// - /// - Output logits: The logits predictions from the tree for each instance in the batch. - @inlinable @inline(__always) - public static func tensorForestTreePredict( - treeHandle: ResourceHandle, - denseFeatures: Tensor, - logitsDimension: Int64 - ) -> Tensor { - switch denseFeatures.handle.backend { - case .XLA: - let output_device = denseFeatures.device - let denseFeatures = Tensor(copying: denseFeatures, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.tensorForestTreePredict( - treeHandle: treeHandle, denseFeatures: denseFeatures, logitsDimension: logitsDimension), - to: output_device) - case .TF_EAGER: - return _RawTFEager.tensorForestTreePredict( - treeHandle: treeHandle, denseFeatures: denseFeatures, logitsDimension: logitsDimension) - } - - } - - /// Creates a handle to a TensorForestTreeResource - @inlinable @inline(__always) - public static func tensorForestTreeResourceHandleOp( - container: String, - sharedName: String - ) -> ResourceHandle { - _RawTFEager.tensorForestTreeResourceHandleOp(container: container, sharedName: sharedName) - } - - /// Serializes the tree handle to a proto - /// - /// - Parameter tree_handle: Handle to the tree resource to be serialized. - /// - /// - Output tree_config: Serialized proto string of the tree resource. - @inlinable @inline(__always) - public static func tensorForestTreeSerialize( - treeHandle: ResourceHandle - ) -> StringTensor { - _RawTFEager.tensorForestTreeSerialize(treeHandle: treeHandle) - } - - /// Get the number of nodes in a tree - /// - /// - Parameter tree_handle: Handle to the tree resource. - /// - /// - Output tree_size: The size of the tree. - @inlinable @inline(__always) - public static func tensorForestTreeSize( - treeHandle: ResourceHandle - ) -> Tensor { - _RawTFEager.tensorForestTreeSize(treeHandle: treeHandle) - } - - /// Concats all tensors in the list along the 0th dimension. - /// - /// Requires that all tensors have the same shape except the first dimension. - /// - /// input_handle: The input list. - /// tensor: The concatenated result. - /// lengths: Output tensor containing sizes of the 0th dimension of tensors in the list, used for computing the gradient. - /// - @inlinable @inline(__always) - public static func tensorListConcat( - inputHandle: VariantHandle, - elementShape: TensorShape?
- ) -> (tensor: Tensor, lengths: Tensor) { - _RawTFEager.tensorListConcat(inputHandle: inputHandle, elementShape: elementShape) - } - - @inlinable @inline(__always) - public static func tensorListConcatLists( - inputA: VariantHandle, - inputB: VariantHandle, - elementDtype: TensorDataType - ) -> VariantHandle { - _RawTFEager.tensorListConcatLists(inputA: inputA, inputB: inputB, elementDtype: elementDtype) - } - - /// Concats all tensors in the list along the 0th dimension. - /// - /// Requires that all tensors have the same shape except the first dimension. - /// - /// input_handle: The input list. - /// element_shape: The shape of the uninitialized elements in the list. If the first - /// dimension is not -1, it is assumed that all list elements have the same - /// leading dim. - /// leading_dims: The list of leading dims of uninitialized list elements. Used if - /// the leading dim of input_handle.element_shape or the element_shape input arg - /// is not already set. - /// tensor: The concatenated result. - /// lengths: Output tensor containing sizes of the 0th dimension of tensors in the list, used for computing the gradient. - /// - @inlinable @inline(__always) - public static func tensorListConcatV2< - ElementDtype: TensorFlowScalar, - ShapeType: TensorFlowIndex - >( - inputHandle: VariantHandle, - elementShape: Tensor, - leadingDims: Tensor - ) -> (tensor: Tensor, lengths: Tensor) { - _RawTFEager.tensorListConcatV2( - inputHandle: inputHandle, elementShape: elementShape, leadingDims: leadingDims) - } - - /// The shape of the elements of the given list, as a tensor. - /// - /// input_handle: the list - /// element_shape: the shape of elements of the list - @inlinable @inline(__always) - public static func tensorListElementShape( - inputHandle: VariantHandle - ) -> Tensor { - _RawTFEager.tensorListElementShape(inputHandle: inputHandle) - } - - /// Creates a TensorList which, when stacked, has the value of `tensor`. - /// - /// Each tensor in the result list corresponds to one row of the input tensor. - /// - /// tensor: The input tensor. - /// output_handle: The list. - @inlinable @inline(__always) - public static func tensorListFromTensor< - ElementDtype: TensorFlowScalar, - ShapeType: TensorFlowIndex - >( - _ tensor: Tensor, - elementShape: Tensor - ) -> VariantHandle { - _RawTFEager.tensorListFromTensor(tensor, elementShape: elementShape) - } - - /// Creates a Tensor by indexing into the TensorList. - /// - /// Each row in the produced Tensor corresponds to the element in the TensorList - /// specified by the given index (see `tf.gather`). - /// - /// input_handle: The input tensor list. - /// indices: The indices used to index into the list. - /// values: The tensor.
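To make the gather path concrete, here is a small sketch (values illustrative) that builds a list from the rows of a tensor and gathers two of them back out:

```swift
import TensorFlow

// Build a list from the rows of a [3, 2] tensor, then gather rows 2 and 0.
let rows = Tensor<Float>([[1, 2], [3, 4], [5, 6]])
let list = _Raw.tensorListFromTensor(rows, elementShape: Tensor<Int32>([2]))
let gathered: Tensor<Float> = _Raw.tensorListGather(
  inputHandle: list,
  indices: Tensor<Int32>([2, 0]),
  elementShape: Tensor<Int32>([2]))
// gathered == [[5.0, 6.0], [1.0, 2.0]]
```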
- @inlinable @inline(__always) - public static func tensorListGather( - inputHandle: VariantHandle, - indices: Tensor, - elementShape: Tensor - ) -> Tensor { - switch commonBackend(indices.handle.backend, elementShape.handle.backend) { - case .XLA: - let output_device = elementShape.device - let indices = Tensor(copying: indices, to: .defaultTFEager) - let elementShape = Tensor(copying: elementShape, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.tensorListGather( - inputHandle: inputHandle, indices: indices, elementShape: elementShape), - to: output_device) - case .TF_EAGER: - return _RawTFEager.tensorListGather( - inputHandle: inputHandle, indices: indices, elementShape: elementShape) - } - - } - - @inlinable @inline(__always) - public static func tensorListGetItem( - inputHandle: VariantHandle, - index: Tensor, - elementShape: Tensor - ) -> Tensor { - switch commonBackend(index.handle.backend, elementShape.handle.backend) { - case .XLA: - let output_device = elementShape.device - let index = Tensor(copying: index, to: .defaultTFEager) - let elementShape = Tensor(copying: elementShape, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.tensorListGetItem( - inputHandle: inputHandle, index: index, elementShape: elementShape), to: output_device) - case .TF_EAGER: - return _RawTFEager.tensorListGetItem( - inputHandle: inputHandle, index: index, elementShape: elementShape) - } - - } - - /// Returns the number of tensors in the input tensor list. - /// - /// input_handle: the input list - /// length: the number of tensors in the list - @inlinable @inline(__always) - public static func tensorListLength( - inputHandle: VariantHandle - ) -> Tensor { - _RawTFEager.tensorListLength(inputHandle: inputHandle) - } - - /// Returns the last element of the input list as well as a list with all but that element. - /// - /// Fails if the list is empty. - /// - /// input_handle: the input list - /// tensor: the withdrawn last element of the list - /// element_dtype: the type of elements in the list - /// element_shape: the shape of the output tensor - @inlinable @inline(__always) - public static func tensorListPopBack( - inputHandle: VariantHandle, - elementShape: Tensor - ) -> (outputHandle: VariantHandle, tensor: Tensor) { - _RawTFEager.tensorListPopBack(inputHandle: inputHandle, elementShape: elementShape) - } - - /// Returns a list which has the passed-in `Tensor` as last element and the other elements of the given list in `input_handle`. - /// - /// tensor: The tensor to put on the list. - /// input_handle: The old list. - /// output_handle: A list with the elements of the old list followed by tensor. - /// element_dtype: the type of elements in the list. - /// element_shape: a shape compatible with that of elements in the list. - @inlinable @inline(__always) - public static func tensorListPushBack( - inputHandle: VariantHandle, - _ tensor: Tensor - ) -> VariantHandle { - _RawTFEager.tensorListPushBack(inputHandle: inputHandle, tensor) - } - - @inlinable @inline(__always) - public static func tensorListPushBackBatch( - inputHandles: VariantHandle, - _ tensor: Tensor - ) -> VariantHandle { - _RawTFEager.tensorListPushBackBatch(inputHandles: inputHandles, tensor) - } - - /// List of the given size with empty elements. - /// - /// element_shape: the shape of the future elements of the list - /// num_elements: the number of elements to reserve - /// handle: the output list - /// element_dtype: the desired type of elements in the list. 
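A companion sketch for the reserve/set/stack cycle implied by the docs here and just below; all shapes and values are illustrative:

```swift
import TensorFlow

// Reserve a two-element list of shape-[2] elements, fill both slots,
// then stack the list back into a [2, 2] tensor.
let reserved = _Raw.tensorListReserve(
  elementShape: Tensor<Int32>([2]),
  numElements: Tensor<Int32>(2),
  elementDtype: Float.tensorFlowDataType)
let afterFirst = _Raw.tensorListSetItem(
  inputHandle: reserved, index: Tensor<Int32>(0), item: Tensor<Float>([1, 2]))
let afterSecond = _Raw.tensorListSetItem(
  inputHandle: afterFirst, index: Tensor<Int32>(1), item: Tensor<Float>([3, 4]))
let stacked: Tensor<Float> = _Raw.tensorListStack(
  inputHandle: afterSecond, elementShape: Tensor<Int32>([2]))
// stacked == [[1.0, 2.0], [3.0, 4.0]]
```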
- @inlinable @inline(__always) - public static func tensorListReserve( - elementShape: Tensor, - numElements: Tensor, - elementDtype: TensorDataType - ) -> VariantHandle { - _RawTFEager.tensorListReserve( - elementShape: elementShape, numElements: numElements, elementDtype: elementDtype) - } - - /// Resizes the list. - /// - /// - /// input_handle: the input list - /// size: size of the output list - /// - @inlinable @inline(__always) - public static func tensorListResize( - inputHandle: VariantHandle, - size: Tensor - ) -> VariantHandle { - _RawTFEager.tensorListResize(inputHandle: inputHandle, size: size) - } - - /// Creates a TensorList by indexing into a Tensor. - /// - /// Each member of the TensorList corresponds to one row of the input tensor, - /// specified by the given index (see `tf.gather`). - /// - /// tensor: The input tensor. - /// indices: The indices used to index into the list. - /// element_shape: The shape of the elements in the list (can be less specified than - /// the shape of the tensor). - /// output_handle: The TensorList. - @inlinable @inline(__always) - public static func tensorListScatter< - ElementDtype: TensorFlowScalar, - ShapeType: TensorFlowIndex - >( - _ tensor: Tensor, - indices: Tensor, - elementShape: Tensor - ) -> VariantHandle { - _RawTFEager.tensorListScatter(tensor, indices: indices, elementShape: elementShape) - } - - /// Scatters tensor at indices in an input list. - /// - /// Each member of the TensorList corresponds to one row of the input tensor, - /// specified by the given index (see `tf.gather`). - /// - /// input_handle: The list to scatter into. - /// tensor: The input tensor. - /// indices: The indices used to index into the list. - /// output_handle: The TensorList. - @inlinable @inline(__always) - public static func tensorListScatterIntoExistingList( - inputHandle: VariantHandle, - _ tensor: Tensor, - indices: Tensor - ) -> VariantHandle { - _RawTFEager.tensorListScatterIntoExistingList( - inputHandle: inputHandle, tensor, indices: indices) - } - - /// Creates a TensorList by indexing into a Tensor. - /// - /// Each member of the TensorList corresponds to one row of the input tensor, - /// specified by the given index (see `tf.gather`). - /// - /// tensor: The input tensor. - /// indices: The indices used to index into the list. - /// element_shape: The shape of the elements in the list (can be less specified than - /// the shape of the tensor). - /// num_elements: The size of the output list. Must be large enough to accommodate - /// the largest index in indices. If -1, the list is just large enough to include - /// the largest index in indices. - /// output_handle: The TensorList. - @inlinable @inline(__always) - public static func tensorListScatterV2< - ElementDtype: TensorFlowScalar, - ShapeType: TensorFlowIndex - >( - _ tensor: Tensor, - indices: Tensor, - elementShape: Tensor, - numElements: Tensor - ) -> VariantHandle { - _RawTFEager.tensorListScatterV2( - tensor, indices: indices, elementShape: elementShape, numElements: numElements) - } - - @inlinable @inline(__always) - public static func tensorListSetItem( - inputHandle: VariantHandle, - index: Tensor, - item: Tensor - ) -> VariantHandle { - _RawTFEager.tensorListSetItem(inputHandle: inputHandle, index: index, item: item) - } - - /// Splits a tensor into a list. - /// - /// list[i] corresponds to lengths[i] tensors from the input tensor. - /// The tensor must have rank at least 1 and contain exactly sum(lengths) elements. - /// - /// tensor: The input tensor. 
- /// element_shape: A shape compatible with that of elements in the tensor. - /// lengths: Vector of sizes of the 0th dimension of tensors in the list. - /// output_handle: The list. - @inlinable @inline(__always) - public static func tensorListSplit< - ElementDtype: TensorFlowScalar, - ShapeType: TensorFlowIndex - >( - _ tensor: Tensor, - elementShape: Tensor, - lengths: Tensor - ) -> VariantHandle { - _RawTFEager.tensorListSplit(tensor, elementShape: elementShape, lengths: lengths) - } - - /// Stacks all tensors in the list. - /// - /// Requires that all tensors have the same shape. - /// - /// input_handle: the input list - /// tensor: the gathered result - /// num_elements: optional. If not -1, the number of elements in the list. - /// - @inlinable @inline(__always) - public static func tensorListStack( - inputHandle: VariantHandle, - elementShape: Tensor, - numElements: Int64 = -1 - ) -> Tensor { - switch elementShape.handle.backend { - case .XLA: - let output_device = elementShape.device - let elementShape = Tensor(copying: elementShape, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.tensorListStack( - inputHandle: inputHandle, elementShape: elementShape, numElements: numElements), - to: output_device) - case .TF_EAGER: - return _RawTFEager.tensorListStack( - inputHandle: inputHandle, elementShape: elementShape, numElements: numElements) - } - - } - - /// Adds sparse `updates` to an existing tensor according to `indices`. - /// - /// This operation creates a new tensor by adding sparse `updates` to the passed - /// in `tensor`. - /// This operation is very similar to `tf.scatter_nd_add`, except that the updates - /// are added onto an existing tensor (as opposed to a variable). If the memory - /// for the existing tensor cannot be re-used, a copy is made and updated. - /// - /// `indices` is an integer tensor containing indices into a new tensor of shape - /// `shape`. The last dimension of `indices` can be at most the rank of `shape`: - /// - /// indices.shape[-1] <= shape.rank - /// - /// The last dimension of `indices` corresponds to indices into elements - /// (if `indices.shape[-1] = shape.rank`) or slices - /// (if `indices.shape[-1] < shape.rank`) along dimension `indices.shape[-1]` of - /// `shape`. `updates` is a tensor with shape - /// - /// indices.shape[:-1] + shape[indices.shape[-1]:] - /// - /// The simplest form of tensor_scatter_add is to add individual elements to a - /// tensor by index. For example, say we want to add 4 elements in a rank-1 - /// tensor with 8 elements. - /// - /// In Python, this scatter add operation would look like this: - /// - /// ```python - /// indices = tf.constant([[4], [3], [1], [7]]) - /// updates = tf.constant([9, 10, 11, 12]) - /// tensor = tf.ones([8], dtype=tf.int32) - /// updated = tf.tensor_scatter_nd_add(tensor, indices, updates) - /// print(updated) - /// ``` - /// - /// The resulting tensor would look like this: - /// - /// [1, 12, 1, 11, 10, 1, 1, 13] - /// - /// We can also insert entire slices of a higher rank tensor all at once. For - /// example, if we wanted to insert two slices in the first dimension of a - /// rank-3 tensor with two matrices of new values.
- /// - /// In Python, this scatter add operation would look like this: - /// - /// ```python - /// indices = tf.constant([[0], [2]]) - /// updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6], - /// [7, 7, 7, 7], [8, 8, 8, 8]], - /// [[5, 5, 5, 5], [6, 6, 6, 6], - /// [7, 7, 7, 7], [8, 8, 8, 8]]]) - /// tensor = tf.ones([4, 4, 4],dtype=tf.int32) - /// updated = tf.tensor_scatter_nd_add(tensor, indices, updates) - /// print(updated) - /// ``` - /// - /// The resulting tensor would look like this: - /// - /// [[[6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8], [9, 9, 9, 9]], - /// [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]], - /// [[6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8], [9, 9, 9, 9]], - /// [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]] - /// - /// Note that on CPU, if an out of bound index is found, an error is returned. - /// On GPU, if an out of bound index is found, the index is ignored. - /// - /// - Parameters: - /// - tensor: Tensor to copy/update. - /// - indices: Index tensor. - /// - updates: Updates to scatter into output. - /// - /// - Output output: A new tensor copied from tensor and updates added according to the indices. - @inlinable @inline(__always) - public static func tensorScatterAdd< - T: TensorFlowScalar, - Tindices: TensorFlowIndex - >( - _ tensor: Tensor, - indices: Tensor, - updates: Tensor - ) -> Tensor { - switch commonBackend( - commonBackend(tensor.handle.backend, indices.handle.backend), updates.handle.backend) - { - case .XLA: - let output_device = updates.device - let tensor = Tensor(copying: tensor, to: .defaultTFEager) - let indices = Tensor(copying: indices, to: .defaultTFEager) - let updates = Tensor(copying: updates, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.tensorScatterAdd(tensor, indices: indices, updates: updates), - to: output_device) - case .TF_EAGER: - return _RawTFEager.tensorScatterAdd(tensor, indices: indices, updates: updates) - } - - } - - /// Subtracts sparse `updates` from an existing tensor according to `indices`. - /// - /// This operation creates a new tensor by subtracting sparse `updates` from the - /// passed in `tensor`. - /// This operation is very similar to `tf.scatter_nd_sub`, except that the updates - /// are subtracted from an existing tensor (as opposed to a variable). If the memory - /// for the existing tensor cannot be re-used, a copy is made and updated. - /// - /// `indices` is an integer tensor containing indices into a new tensor of shape - /// `shape`. The last dimension of `indices` can be at most the rank of `shape`: - /// - /// indices.shape[-1] <= shape.rank - /// - /// The last dimension of `indices` corresponds to indices into elements - /// (if `indices.shape[-1] = shape.rank`) or slices - /// (if `indices.shape[-1] < shape.rank`) along dimension `indices.shape[-1]` of - /// `shape`. `updates` is a tensor with shape - /// - /// indices.shape[:-1] + shape[indices.shape[-1]:] - /// - /// The simplest form of tensor_scatter_sub is to subtract individual elements - /// from a tensor by index. For example, say we want to insert 4 scattered elements - /// in a rank-1 tensor with 8 elements. 
- /// - /// In Python, this scatter subtract operation would look like this: - /// - /// ```python - /// indices = tf.constant([[4], [3], [1], [7]]) - /// updates = tf.constant([9, 10, 11, 12]) - /// tensor = tf.ones([8], dtype=tf.int32) - /// updated = tf.tensor_scatter_nd_sub(tensor, indices, updates) - /// print(updated) - /// ``` - /// - /// The resulting tensor would look like this: - /// - /// [1, -10, 1, -9, -8, 1, 1, -11] - /// - /// We can also insert entire slices of a higher rank tensor all at once. For - /// example, if we wanted to insert two slices in the first dimension of a - /// rank-3 tensor with two matrices of new values. - /// - /// In Python, this scatter subtract operation would look like this: - /// - /// ```python - /// indices = tf.constant([[0], [2]]) - /// updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6], - /// [7, 7, 7, 7], [8, 8, 8, 8]], - /// [[5, 5, 5, 5], [6, 6, 6, 6], - /// [7, 7, 7, 7], [8, 8, 8, 8]]]) - /// tensor = tf.ones([4, 4, 4],dtype=tf.int32) - /// updated = tf.tensor_scatter_nd_sub(tensor, indices, updates) - /// print(updated) - /// ``` - /// - /// The resulting tensor would look like this: - /// - /// [[[-4, -4, -4, -4], [-5, -5, -5, -5], [-6, -6, -6, -6], [-7, -7, -7, -7]], - /// [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]], - /// [[-4, -4, -4, -4], [-5, -5, -5, -5], [-6, -6, -6, -6], [-7, -7, -7, -7]], - /// [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]] - /// - /// Note that on CPU, if an out of bound index is found, an error is returned. - /// On GPU, if an out of bound index is found, the index is ignored. - /// - /// - Parameters: - /// - tensor: Tensor to copy/update. - /// - indices: Index tensor. - /// - updates: Updates to scatter into output. - /// - /// - Output output: A new tensor copied from tensor and updates subtracted according to the indices. - @inlinable @inline(__always) - public static func tensorScatterSub< - T: TensorFlowScalar, - Tindices: TensorFlowIndex - >( - _ tensor: Tensor, - indices: Tensor, - updates: Tensor - ) -> Tensor { - switch commonBackend( - commonBackend(tensor.handle.backend, indices.handle.backend), updates.handle.backend) - { - case .XLA: - let output_device = updates.device - let tensor = Tensor(copying: tensor, to: .defaultTFEager) - let indices = Tensor(copying: indices, to: .defaultTFEager) - let updates = Tensor(copying: updates, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.tensorScatterSub(tensor, indices: indices, updates: updates), - to: output_device) - case .TF_EAGER: - return _RawTFEager.tensorScatterSub(tensor, indices: indices, updates: updates) - } - - } - - /// Scatter `updates` into an existing tensor according to `indices`. - /// - /// This operation creates a new tensor by applying sparse `updates` to the passed - /// in `tensor`. - /// This operation is very similar to `tf.scatter_nd`, except that the updates are - /// scattered onto an existing tensor (as opposed to a zero-tensor). If the memory - /// for the existing tensor cannot be re-used, a copy is made and updated. - /// - /// If `indices` contains duplicates, then their updates are accumulated (summed). - /// - /// **WARNING**: The order in which updates are applied is nondeterministic, so the - /// output will be nondeterministic if `indices` contains duplicates -- because - /// of some numerical approximation issues, numbers summed in different order - /// may yield different results.
- /// - /// `indices` is an integer tensor containing indices into a new tensor of shape - /// `shape`. The last dimension of `indices` can be at most the rank of `shape`: - /// - /// indices.shape[-1] <= shape.rank - /// - /// The last dimension of `indices` corresponds to indices into elements - /// (if `indices.shape[-1] = shape.rank`) or slices - /// (if `indices.shape[-1] < shape.rank`) along dimension `indices.shape[-1]` of - /// `shape`. `updates` is a tensor with shape - /// - /// indices.shape[:-1] + shape[indices.shape[-1]:] - /// - /// The simplest form of scatter is to insert individual elements in a tensor by - /// index. For example, say we want to insert 4 scattered elements in a rank-1 - /// tensor with 8 elements. - /// - ///
- /// - /// In Python, this scatter operation would look like this: - /// - /// >>> indices = tf.constant([[4], [3], [1], [7]]) - /// >>> updates = tf.constant([9, 10, 11, 12]) - /// >>> tensor = tf.ones([8], dtype=tf.int32) - /// >>> print(tf.tensor_scatter_nd_update(tensor, indices, updates)) - /// tf.Tensor([ 1 11 1 10 9 1 1 12], shape=(8,), dtype=int32) - /// - /// We can also insert entire slices of a higher rank tensor all at once. For - /// example, if we wanted to insert two slices in the first dimension of a - /// rank-3 tensor with two matrices of new values. - /// - /// In Python, this scatter operation would look like this: - /// - /// >>> indices = tf.constant([[0], [2]]) - /// >>> updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6], - /// ... [7, 7, 7, 7], [8, 8, 8, 8]], - /// ... [[5, 5, 5, 5], [6, 6, 6, 6], - /// ... [7, 7, 7, 7], [8, 8, 8, 8]]]) - /// >>> tensor = tf.ones([4, 4, 4], dtype=tf.int32) - /// >>> print(tf.tensor_scatter_nd_update(tensor, indices, updates).numpy()) - /// [[[5 5 5 5] - /// [6 6 6 6] - /// [7 7 7 7] - /// [8 8 8 8]] - /// [[1 1 1 1] - /// [1 1 1 1] - /// [1 1 1 1] - /// [1 1 1 1]] - /// [[5 5 5 5] - /// [6 6 6 6] - /// [7 7 7 7] - /// [8 8 8 8]] - /// [[1 1 1 1] - /// [1 1 1 1] - /// [1 1 1 1] - /// [1 1 1 1]]] - /// - /// Note that on CPU, if an out of bound index is found, an error is returned. - /// On GPU, if an out of bound index is found, the index is ignored. - /// - /// - Parameters: - /// - tensor: Tensor to copy/update. - /// - indices: Index tensor. - /// - updates: Updates to scatter into output. - /// - /// - Output output: A new tensor with the given shape and updates applied according - /// to the indices. - @inlinable @inline(__always) - public static func tensorScatterUpdate< - T: TensorFlowScalar, - Tindices: TensorFlowIndex - >( - _ tensor: Tensor, - indices: Tensor, - updates: Tensor - ) -> Tensor { - switch commonBackend( - commonBackend(tensor.handle.backend, indices.handle.backend), updates.handle.backend) - { - case .XLA: - let output_device = updates.device - let tensor = Tensor(copying: tensor, to: .defaultTFEager) - let indices = Tensor(copying: indices, to: .defaultTFEager) - let updates = Tensor(copying: updates, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.tensorScatterUpdate(tensor, indices: indices, updates: updates), - to: output_device) - case .TF_EAGER: - return _RawTFEager.tensorScatterUpdate(tensor, indices: indices, updates: updates) - } - - } - - /// Creates a dataset that emits each dim-0 slice of `components` once. - @inlinable @inline(__always) - public static func tensorSliceDataset( - components: ToutputTypes, - outputShapes: [TensorShape?] - ) -> VariantHandle { - _RawTFEager.tensorSliceDataset(components: components, outputShapes: outputShapes) - } - - /// Assign `value` to the sliced l-value reference of `input`. - /// - /// The values of `value` are assigned to the positions in the tensor `input` that - /// are selected by the slice parameters. The slice parameters `begin` `end` - /// `strides` etc. work exactly as in `StridedSlice`. - /// - /// NOTE this op currently does not support broadcasting and so `value`'s shape - /// must be exactly the shape produced by the slice of `input`.
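A short sketch of the sliced assignment described above, writing into positions 1..<3 of a rank-1 tensor with all masks left at their zero defaults:

```swift
import TensorFlow

// Strided-slice assignment: base[1..<3] = [9, 9]. `value` must match the
// slice shape exactly, since the op does not broadcast.
let base = Tensor<Float>([0, 0, 0, 0, 0])
let updated = _Raw.tensorStridedSliceUpdate(
  base,
  begin: Tensor<Int32>([1]),
  end: Tensor<Int32>([3]),
  strides: Tensor<Int32>([1]),
  value: Tensor<Float>([9, 9]))
// updated == [0.0, 9.0, 9.0, 0.0, 0.0]
```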
- @inlinable @inline(__always) - public static func tensorStridedSliceUpdate< - T: TensorFlowScalar, - Index: TensorFlowIndex - >( - _ input: Tensor, - begin: Tensor, - end: Tensor, - strides: Tensor, - value: Tensor, - beginMask: Int64 = 0, - endMask: Int64 = 0, - ellipsisMask: Int64 = 0, - newAxisMask: Int64 = 0, - shrinkAxisMask: Int64 = 0 - ) -> Tensor { - switch commonBackend( + } + + /// Replica ID. + @inlinable @inline(__always) + public static func xlaReplicaId() -> Tensor { + _RawTFEager.xlaReplicaId() + } + + /// Wraps the XLA SelectAndScatter operator, documented at + /// + /// https://www.tensorflow.org/performance/xla/operation_semantics#selectandscatter + /// . + /// + /// - Parameters: + /// - operand: the input tensor + /// - window_dimensions: the shape of the window + /// - window_strides: the inter-window strides + /// - padding: the padding to apply at the start and end of each input dimensions + /// - source: a tensor of values to scatter + /// - init_value: a scalar representing the initial value for the output tensor + /// + /// - Attrs: + /// - select: a selection function to apply + /// - scatter: a scatter function to apply + @inlinable @inline(__always) + public static func xlaSelectAndScatter< + T: TensorFlowNumeric, + Tindices: TensorFlowIndex, + SelectIn: TensorGroup, + SelectOut: TensorGroup, + ScatterIn: TensorGroup, + ScatterOut: TensorGroup + >( + operand: Tensor, + windowDimensions: Tensor, + windowStrides: Tensor, + padding: Tensor, + source: Tensor, + initValue: Tensor, + select: (SelectIn) -> SelectOut, + scatter: (ScatterIn) -> ScatterOut + ) -> Tensor { + switch commonBackend( + commonBackend( commonBackend( commonBackend( - commonBackend(input.handle.backend, begin.handle.backend), end.handle.backend), - strides.handle.backend), value.handle.backend) - { - case .XLA: - return _RawXLA.tensorStridedSliceUpdate( - input, begin: begin, end: end, strides: strides, value: value, beginMask: beginMask, - endMask: endMask, ellipsisMask: ellipsisMask, newAxisMask: newAxisMask, - shrinkAxisMask: shrinkAxisMask) - case .TF_EAGER: - return _RawTFEager.tensorStridedSliceUpdate( - input, begin: begin, end: end, strides: strides, value: value, beginMask: beginMask, - endMask: endMask, ellipsisMask: ellipsisMask, newAxisMask: newAxisMask, - shrinkAxisMask: shrinkAxisMask) - } - - } - - /// Outputs a `Summary` protocol buffer with a tensor. - /// - /// This op is being phased out in favor of TensorSummaryV2, which lets callers pass - /// a tag as well as a serialized SummaryMetadata proto string that contains - /// plugin-specific data. We will keep this op to maintain backwards compatibility. - /// - /// - Parameter tensor: A tensor to serialize. - /// - /// - Attrs: - /// - description: A json-encoded SummaryDescription proto. - /// - labels: An unused list of strings. - /// - display_name: An unused string. - @inlinable @inline(__always) - public static func tensorSummary( - _ tensor: Tensor, - description: String, - labels: [String], - displayName: String - ) -> StringTensor { - _RawTFEager.tensorSummary( - tensor, description: description, labels: labels, displayName: displayName) - } - - /// Outputs a `Summary` protocol buffer with a tensor and per-plugin data. - /// - /// - Parameters: - /// - tag: A string attached to this summary. Used for organization in TensorBoard. - /// - tensor: A tensor to serialize. - /// - serialized_summary_metadata: A serialized SummaryMetadata proto. Contains plugin - /// data. 
- @inlinable @inline(__always) - public static func tensorSummaryV2( - tag: StringTensor, - _ tensor: Tensor, - serializedSummaryMetadata: StringTensor - ) -> StringTensor { - _RawTFEager.tensorSummaryV2( - tag: tag, tensor, serializedSummaryMetadata: serializedSummaryMetadata) - } - - @inlinable @inline(__always) - public static func testAttr() -> Tensor { - _RawTFEager.testAttr() - } - - @inlinable @inline(__always) - public static func testStringOutput( - _ input: Tensor - ) -> (output1: Tensor, output2: StringTensor) { - _RawTFEager.testStringOutput(input) - } - - /// Creates a dataset that emits the lines of one or more text files. - /// - /// - Parameters: - /// - filenames: A scalar or a vector containing the name(s) of the file(s) to be - /// read. - /// - compression_type: A scalar containing either (i) the empty string (no - /// compression), (ii) "ZLIB", or (iii) "GZIP". - /// - buffer_size: A scalar containing the number of bytes to buffer. - @inlinable @inline(__always) - public static func textLineDataset( - filenames: StringTensor, - compressionType: StringTensor, - bufferSize: Tensor - ) -> VariantHandle { - _RawTFEager.textLineDataset( - filenames: filenames, compressionType: compressionType, bufferSize: bufferSize) - } - - /// A Reader that outputs the lines of a file delimited by '\n'. - /// - /// - Attrs: - /// - skip_header_lines: Number of lines to skip from the beginning of every file. - /// - container: If non-empty, this reader is placed in the given container. - /// Otherwise, a default container is used. - /// - shared_name: If non-empty, this reader is named in the given bucket - /// with this shared_name. Otherwise, the node name is used instead. - /// - /// - Output reader_handle: The handle to reference the Reader. - @inlinable @inline(__always) - public static func textLineReaderV2( - skipHeaderLines: Int64 = 0, - container: String, - sharedName: String - ) -> ResourceHandle { - _RawTFEager.textLineReaderV2( - skipHeaderLines: skipHeaderLines, container: container, sharedName: sharedName) - } - - /// Creates a dataset that uses a custom thread pool to compute `input_dataset`. - /// - /// - Parameter thread_pool: A resource produced by the ThreadPoolHandle op. - @inlinable @inline(__always) - public static func threadPoolDataset( - inputDataset: VariantHandle, - threadPool: ResourceHandle, - outputTypes: [TensorDataType], - outputShapes: [TensorShape?] - ) -> VariantHandle { - _RawTFEager.threadPoolDataset( - inputDataset: inputDataset, threadPool: threadPool, outputTypes: outputTypes, - outputShapes: outputShapes) - } - - /// Creates a dataset that uses a custom thread pool to compute `input_dataset`. - /// - /// - Attrs: - /// - num_threads: The number of threads in the thread pool. - /// - max_intra_op_parallelism: The maximum degree of parallelism to use within operations that execute on this - /// threadpool. - /// - display_name: A human-readable name for the threads that may be visible in some - /// visualizations. - /// - /// - Output handle: A resource that can be consumed by one or more ExperimentalThreadPoolDataset - /// ops.
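A hedged sketch of wiring the two thread-pool ops together with `tensorSliceDataset` from earlier in this file; names, shapes, and types are illustrative only:

```swift
import TensorFlow

// Route a toy dataset through a custom four-thread pool.
let pool = _Raw.threadPoolHandle(
  numThreads: 4, displayName: "demo_pool", container: "", sharedName: "demo_pool")
let source = _Raw.tensorSliceDataset(
  components: [Tensor<Float>([1, 2, 3])],  // emits three scalar elements
  outputShapes: [TensorShape([])])
let withPool = _Raw.threadPoolDataset(
  inputDataset: source,
  threadPool: pool,
  outputTypes: [Float.tensorFlowDataType],
  outputShapes: [TensorShape([])])
```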
- @inlinable @inline(__always) - public static func threadPoolHandle( - numThreads: Int64, - maxIntraOpParallelism: Int64 = 1, - displayName: String, - container: String, - sharedName: String - ) -> ResourceHandle { - _RawTFEager.threadPoolHandle( - numThreads: numThreads, maxIntraOpParallelism: maxIntraOpParallelism, - displayName: displayName, container: container, sharedName: sharedName) - } - - /// Generates labels for candidate sampling with a learned unigram distribution. - /// - /// See explanations of candidate sampling and the data formats at - /// go/candidate-sampling. - /// - /// For each batch, this op picks a single set of sampled candidate labels. - /// - /// The advantages of sampling candidates per-batch are simplicity and the - /// possibility of efficient dense matrix multiplication. The disadvantage is that - /// the sampled candidates must be chosen independently of the context and of the - /// true labels. - /// - /// - Parameter true_classes: A batch_size * num_true matrix, in which each row contains the - /// IDs of the num_true target_classes in the corresponding original label. - /// - /// - Attrs: - /// - num_true: Number of true labels per context. - /// - num_sampled: Number of candidates to randomly sample. - /// - unique: If unique is true, we sample with rejection, so that all sampled - /// candidates in a batch are unique. This requires some approximation to - /// estimate the post-rejection sampling probabilities. - /// - range_max: The sampler will sample integers from the interval [0, range_max). - /// - seed: If either seed or seed2 are set to be non-zero, the random number - /// generator is seeded by the given seed. Otherwise, it is seeded by a - /// random seed. - /// - seed2: A second seed to avoid seed collision. - /// - /// - Outputs: - /// - sampled_candidates: A vector of length num_sampled, in which each element is - /// the ID of a sampled candidate. - /// - true_expected_count: A batch_size * num_true matrix, representing - /// the number of times each candidate is expected to occur in a batch - /// of sampled candidates. If unique=true, then this is a probability. - /// - sampled_expected_count: A vector of length num_sampled, for each sampled - /// candidate representing the number of times the candidate is expected - /// to occur in a batch of sampled candidates. If unique=true, then this is a - /// probability. - @inlinable @inline(__always) - public static func threadUnsafeUnigramCandidateSampler( - trueClasses: Tensor, - numTrue: Int64, - numSampled: Int64, - unique: Bool, - rangeMax: Int64, - seed: Int64 = 0, - seed2: Int64 = 0 - ) -> ( - sampledCandidates: Tensor, trueExpectedCount: Tensor, - sampledExpectedCount: Tensor - ) { - _RawTFEager.threadUnsafeUnigramCandidateSampler( - trueClasses: trueClasses, numTrue: numTrue, numSampled: numSampled, unique: unique, - rangeMax: rangeMax, seed: seed, seed2: seed2) - } - - /// Constructs a tensor by tiling a given tensor. - /// - /// This operation creates a new tensor by replicating `input` `multiples` times. - /// The output tensor's i'th dimension has `input.dims(i) * multiples[i]` elements, - /// and the values of `input` are replicated `multiples[i]` times along the 'i'th - /// dimension. For example, tiling `[a b c d]` by `[2]` produces - /// `[a b c d a b c d]`.
- /// - /// >>> a = tf.constant([[1,2,3],[4,5,6]], tf.int32) - /// >>> b = tf.constant([1,2], tf.int32) - /// >>> tf.tile(a, b) - /// - /// >>> c = tf.constant([2,1], tf.int32) - /// >>> tf.tile(a, c) - /// - /// >>> d = tf.constant([2,2], tf.int32) - /// >>> tf.tile(a, d) - /// - /// - /// - Parameters: - /// - input: 1-D or higher. - /// - multiples: 1-D. Length must be the same as the number of dimensions in `input` - @inlinable @inline(__always) - public static func tile< - T: TensorFlowScalar, - Tmultiples: TensorFlowIndex - >( - _ input: Tensor, - multiples: Tensor - ) -> Tensor { - switch commonBackend(input.handle.backend, multiples.handle.backend) { - case .XLA: - return _RawXLA.tile(input, multiples: multiples) - case .TF_EAGER: - return _RawTFEager.tile(input, multiples: multiples) - } - - } - - /// Returns the gradient of `Tile`. - /// - /// Since `Tile` takes an input and repeats the input `multiples` times - /// along each dimension, `TileGrad` takes in `multiples` and aggregates - /// each repeated tile of `input` into `output`. - @inlinable @inline(__always) - public static func tileGrad( - _ input: Tensor, - multiples: Tensor - ) -> Tensor { - switch commonBackend(input.handle.backend, multiples.handle.backend) { - case .XLA: - let output_device = multiples.device - let input = Tensor(copying: input, to: .defaultTFEager) - let multiples = Tensor(copying: multiples, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.tileGrad(input, multiples: multiples), to: output_device) - case .TF_EAGER: - return _RawTFEager.tileGrad(input, multiples: multiples) - } - - } - - /// Provides the time since epoch in seconds. - /// - /// Returns the timestamp as a `float64` for seconds since the Unix epoch. - /// - /// Note: the timestamp is computed when the op is executed, not when it is added - /// to the graph. - @inlinable @inline(__always) - public static func timestamp() -> Tensor { - _RawTFEager.timestamp() - } - - /// Finds values and indices of the `k` largest elements for the last dimension. - /// - /// If the input is a vector (rank-1), finds the `k` largest entries in the vector - /// and outputs their values and indices as vectors. Thus `values[j]` is the - /// `j`-th largest entry in `input`, and its index is `indices[j]`. - /// - /// For matrices (resp. higher rank input), computes the top `k` entries in each - /// row (resp. vector along the last dimension). Thus, - /// - /// values.shape = indices.shape = input.shape[:-1] + [k] - /// - /// If two elements are equal, the lower-index element appears first. - /// - /// If `k` varies dynamically, use `TopKV2` below. - /// - /// - Parameter input: 1-D or higher with last dimension at least `k`. - /// - /// - Attrs: - /// - k: Number of top elements to look for along the last dimension (along each - /// row for matrices). - /// - sorted: If true the resulting `k` elements will be sorted by the values in - /// descending order. - /// - /// - Outputs: - /// - values: The `k` largest elements along each last dimensional slice. - /// - indices: The indices of `values` within the last dimension of `input`. - @inlinable @inline(__always) - public static func topK( - _ input: Tensor, - k: Int64, - sorted: Bool = true - ) -> (values: Tensor, indices: Tensor) { - _RawTFEager.topK(input, k: k, sorted: sorted) - } - - /// Finds values and indices of the `k` largest elements for the last dimension. 
- /// - /// If the input is a vector (rank-1), finds the `k` largest entries in the vector - /// and outputs their values and indices as vectors. Thus `values[j]` is the - /// `j`-th largest entry in `input`, and its index is `indices[j]`. - /// - /// For matrices (resp. higher rank input), computes the top `k` entries in each - /// row (resp. vector along the last dimension). Thus, - /// - /// values.shape = indices.shape = input.shape[:-1] + [k] - /// - /// If two elements are equal, the lower-index element appears first. - /// - /// - Parameters: - /// - input: 1-D or higher with last dimension at least `k`. - /// - k: 0-D. Number of top elements to look for along the last dimension (along each - /// row for matrices). - /// - /// - Attr sorted: If true the resulting `k` elements will be sorted by the values in - /// descending order. - /// - /// - Outputs: - /// - values: The `k` largest elements along each last dimensional slice. - /// - indices: The indices of `values` within the last dimension of `input`. - @inlinable @inline(__always) - public static func topKV2( - _ input: Tensor, - k: Tensor, - sorted: Bool = true - ) -> (values: Tensor, indices: Tensor) { - _RawTFEager.topKV2(input, k: k, sorted: sorted) - } - - /// Shuffle dimensions of x according to a permutation. - /// - /// The output `y` has the same rank as `x`. The shapes of `x` and `y` satisfy: - /// `y.shape[i] == x.shape[perm[i]] for i in [0, 1, ..., rank(x) - 1]` - @inlinable @inline(__always) - public static func transpose< - T: TensorFlowScalar, - Tperm: TensorFlowIndex - >( - _ x: Tensor, - perm: Tensor - ) -> Tensor { - switch commonBackend(x.handle.backend, perm.handle.backend) { - case .XLA: - return _RawXLA.transpose(x, perm: perm) - case .TF_EAGER: - return _RawTFEager.transpose(x, perm: perm) - } - - } - - /// Calculate product with tridiagonal matrix. - /// - /// Calculates product of two matrices, where left matrix is a tridiagonal matrix. - /// - /// - Parameters: - /// - superdiag: Tensor of shape `[..., 1, M]`, representing superdiagonals of - /// tri-diagonal matrices to the left of multiplication. Last element is ignored. - /// - maindiag: Tensor of shape `[..., 1, M]`, representing main diagonals of tri-diagonal - /// matrices to the left of multiplication. - /// - subdiag: Tensor of shape `[..., 1, M]`, representing subdiagonals of tri-diagonal - /// matrices to the left of multiplication. First element is ignored. - /// - rhs: Tensor of shape `[..., M, N]`, representing MxN matrices to the right of - /// multiplication. - /// - /// - Output output: Tensor of shape `[..., M, N]` containing the product. 
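To make the `[..., 1, M]` diagonal layout concrete, a small worked sketch multiplying a 3x3 tridiagonal matrix by a 3x1 right-hand side (values chosen by hand):

```swift
import TensorFlow

// Encodes the tridiagonal matrix [[2, 1, 0], [1, 2, 1], [0, 1, 2]] in the
// [1, M] diagonal layout; the last superdiag entry and the first subdiag
// entry are ignored, per the docs above.
let superdiag = Tensor<Float>([[1, 1, 0]])
let maindiag = Tensor<Float>([[2, 2, 2]])
let subdiag = Tensor<Float>([[0, 1, 1]])
let rhs = Tensor<Float>([[1], [1], [1]])  // shape [3, 1]
let product = _Raw.tridiagonalMatMul(
  superdiag: superdiag, maindiag: maindiag, subdiag: subdiag, rhs: rhs)
// product == [[3.0], [4.0], [3.0]]
```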
- @inlinable @inline(__always) - public static func tridiagonalMatMul( - superdiag: Tensor, - maindiag: Tensor, - subdiag: Tensor, - rhs: Tensor - ) -> Tensor { - switch commonBackend( - commonBackend( - commonBackend(superdiag.handle.backend, maindiag.handle.backend), subdiag.handle.backend), - rhs.handle.backend) - { - case .XLA: - let output_device = rhs.device - let superdiag = Tensor(copying: superdiag, to: .defaultTFEager) - let maindiag = Tensor(copying: maindiag, to: .defaultTFEager) - let subdiag = Tensor(copying: subdiag, to: .defaultTFEager) - let rhs = Tensor(copying: rhs, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.tridiagonalMatMul( - superdiag: superdiag, maindiag: maindiag, subdiag: subdiag, rhs: rhs), to: output_device - ) - case .TF_EAGER: - return _RawTFEager.tridiagonalMatMul( - superdiag: superdiag, maindiag: maindiag, subdiag: subdiag, rhs: rhs) - } - - } - - /// Solves tridiagonal systems of equations. - /// - /// Solves tridiagonal systems of equations. - /// Supports batch dimensions and multiple right-hand sides per each left-hand - /// side. - /// On CPU, solution is computed via Gaussian elimination with or without partial - /// pivoting, depending on `partial_pivoting` attribute. On GPU, Nvidia's cuSPARSE - /// library is used: https://docs.nvidia.com/cuda/cusparse/index.html#gtsv - /// - /// - Parameters: - /// - diagonals: Tensor of shape `[..., 3, M]` whose innermost 2 dimensions represent the - /// tridiagonal matrices with three rows being the superdiagonal, diagonals, and - /// subdiagonals, in order. The last element of the superdiagonal and the first - /// element of the subdiagonal is ignored. - /// - rhs: Tensor of shape `[..., M, K]`, representing K right-hand sides per each - /// left-hand side. - /// - /// - Attr partial_pivoting: Whether to apply partial pivoting. Partial pivoting makes the procedure more - /// stable, but slower. - /// - /// - Output output: Tensor of shape `[..., M, K]` containing the solutions - @inlinable @inline(__always) - public static func tridiagonalSolve( - diagonals: Tensor, - rhs: Tensor, - partialPivoting: Bool = true - ) -> Tensor { - switch commonBackend(diagonals.handle.backend, rhs.handle.backend) { - case .XLA: - let output_device = rhs.device - let diagonals = Tensor(copying: diagonals, to: .defaultTFEager) - let rhs = Tensor(copying: rhs, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.tridiagonalSolve( - diagonals: diagonals, rhs: rhs, partialPivoting: partialPivoting), to: output_device) - case .TF_EAGER: - return _RawTFEager.tridiagonalSolve( - diagonals: diagonals, rhs: rhs, partialPivoting: partialPivoting) - } - - } - - /// Returns x / y element-wise for integer types. - /// - /// Truncation designates that negative numbers will round fractional quantities - /// toward zero. I.e. -7 / 5 = -1. This matches C semantics but it is different - /// than Python semantics. See `FloorDiv` for a division function that matches - /// Python Semantics. - /// - /// *NOTE*: `TruncateDiv` supports broadcasting. 
More about broadcasting - /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) - @inlinable @inline(__always) - public static func truncateDiv( - _ x: Tensor, - _ y: Tensor - ) -> Tensor { - switch commonBackend(x.handle.backend, y.handle.backend) { - case .XLA: - let output_device = y.device - let x = Tensor(copying: x, to: .defaultTFEager) - let y = Tensor(copying: y, to: .defaultTFEager) - return Tensor(copying: _RawTFEager.truncateDiv(x, y), to: output_device) - case .TF_EAGER: - return _RawTFEager.truncateDiv(x, y) - } - - } - - /// Returns element-wise remainder of division. This emulates C semantics in that - /// - /// the result here is consistent with a truncating divide. E.g. `truncate(x / y) * - /// y + truncate_mod(x, y) = x`. - /// - /// *NOTE*: `TruncateMod` supports broadcasting. More about broadcasting - /// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) - @inlinable @inline(__always) - public static func truncateMod( - _ x: Tensor, - _ y: Tensor - ) -> Tensor { - switch commonBackend(x.handle.backend, y.handle.backend) { - case .XLA: - let output_device = y.device - let x = Tensor(copying: x, to: .defaultTFEager) - let y = Tensor(copying: y, to: .defaultTFEager) - return Tensor(copying: _RawTFEager.truncateMod(x, y), to: output_device) - case .TF_EAGER: - return _RawTFEager.truncateMod(x, y) - } - - } - - /// Outputs random values from a truncated normal distribution. - /// - /// The generated values follow a normal distribution with mean 0 and standard - /// deviation 1, except that values whose magnitude is more than 2 standard - /// deviations from the mean are dropped and re-picked. - /// - /// - Parameter shape: The shape of the output tensor. - /// - /// - Attrs: - /// - seed: If either `seed` or `seed2` are set to be non-zero, the random number - /// generator is seeded by the given seed. Otherwise, it is seeded by a - /// random seed. - /// - seed2: A second seed to avoid seed collision. - /// - dtype: The type of the output. - /// - /// - Output output: A tensor of the specified shape filled with random truncated normal - /// values. - @inlinable @inline(__always) - public static func truncatedNormal< - Dtype: FloatingPoint & TensorFlowScalar, - T: TensorFlowIndex - >( - shape: Tensor, - seed: Int64 = 0, - seed2: Int64 = 0 - ) -> Tensor { - switch shape.handle.backend { - case .XLA: - let output_device = shape.device - let shape = Tensor(copying: shape, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.truncatedNormal(shape: shape, seed: seed, seed2: seed2), - to: output_device) - case .TF_EAGER: - return _RawTFEager.truncatedNormal(shape: shape, seed: seed, seed2: seed2) - } - - } - - /// Perform batches of RPC requests. - /// - /// This op asynchronously performs either a single RPC request, or a batch - /// of requests. RPC requests are defined by three main parameters: - /// - /// - `address` (the host+port or BNS address of the request) - /// - `method` (the method name for the request) - /// - `request` (the serialized proto string, or vector of strings, - /// of the RPC request argument). 
- /// - /// For example, if you have an RPC service running on port localhost:2345, - /// and its interface is configured with the following proto declaration: - /// - /// ``` - /// service MyService { - /// rpc MyMethod(MyRequestProto) returns (MyResponseProto) { - /// } - /// }; - /// ``` - /// - /// then call this op with arguments: - /// - /// ``` - /// address = "localhost:2345" - /// method = "MyService/MyMethod" - /// ``` - /// - /// The `request` tensor is a string tensor representing serialized `MyRequestProto` - /// strings; and the output string tensor `response` will have the same shape - /// and contain (upon successful completion) corresponding serialized - /// `MyResponseProto` strings. - /// - /// For example, to send a single, empty, `MyRequestProto`, call - /// this op with `request = ""`. To send 5 **parallel** empty requests, - /// call this op with `request = ["", "", "", "", ""]`. - /// - /// More generally, one can create a batch of `MyRequestProto` serialized protos - /// from regular batched tensors using the `encode_proto` op, and convert - /// the response `MyResponseProto` serialized protos to batched tensors - /// using the `decode_proto` op. - /// - /// **NOTE** Working with serialized proto strings is faster than instantiating - /// actual proto objects in memory, so no performance degradation is expected - /// compared to writing custom kernels for this workflow. - /// - /// Unlike the standard `Rpc` op, if the connection fails or the remote worker - /// returns an error status, this op does **not** reraise the exception. - /// Instead, the `status_code` and `status_message` entry for the corresponding RPC - /// call is set with the error returned from the RPC call. The `response` tensor - /// will contain valid response values for those minibatch entries whose RPCs did - /// not fail; the rest of the entries will have empty strings. - /// - /// - Parameters: - /// - address: `0-D` or `1-D`. The address (i.e. host_name:port) of the RPC server. - /// If this tensor has more than 1 element, then multiple parallel rpc requests - /// are sent. This argument broadcasts with `method` and `request`. - /// - method: `0-D` or `1-D`. The method address on the RPC server. - /// If this tensor has more than 1 element, then multiple parallel rpc requests - /// are sent. This argument broadcasts with `address` and `request`. - /// - request: `0-D` or `1-D`. Serialized proto strings: the rpc request argument. - /// If this tensor has more than 1 element, then multiple parallel rpc requests - /// are sent. This argument broadcasts with `address` and `method`. - /// - /// - Attrs: - /// - protocol: RPC protocol to use. Empty string means use the default protocol. - /// Options include 'grpc'. - /// - fail_fast: `boolean`. If `true` (default), then failures to connect - /// (i.e., the server does not immediately respond) cause an RPC failure. - /// - timeout_in_ms: `int`. If `0` (default), then the kernel will run the RPC - /// request and only time out if the RPC deadline passes or the session times out. - /// If this value is greater than `0`, then the op will raise an exception if - /// the RPC takes longer than `timeout_in_ms`. - /// - /// - Outputs: - /// - response: Same shape as `request`. Serialized proto strings: the rpc responses. - /// - status_code: Same shape as `request`. Values correspond to tensorflow Status enum codes. - /// - status_message: Same shape as `request`. Values correspond to Status messages - /// returned from the RPC calls. 
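A sketch of a single `TryRpc` call using the example endpoint from the docs above; `localhost:2345` and `MyService/MyMethod` come from that example and do not refer to a running service:

```swift
import TensorFlow

// One empty request to the example endpoint from the docs above.
let (response, statusCode, statusMessage) = _Raw.tryRpc(
  address: StringTensor("localhost:2345"),
  method: StringTensor("MyService/MyMethod"),
  request: StringTensor(""),  // a single, empty, serialized request proto
  protocol_: "",              // empty string selects the default protocol
  failFast: true,
  timeoutInMs: 0)
// Unlike Rpc, failures do not trap: statusCode/statusMessage carry the
// per-request Status, and failed entries of `response` are empty strings.
```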
- @inlinable @inline(__always) - public static func tryRpc( - address: StringTensor, - method: StringTensor, - request: StringTensor, - protocol_: String, - failFast: Bool = true, - timeoutInMs: Int64 = 0 - ) -> (response: StringTensor, statusCode: Tensor, statusMessage: StringTensor) { - _RawTFEager.tryRpc( - address: address, method: method, request: request, protocol_: protocol_, - failFast: failFast, timeoutInMs: timeoutInMs) - } - - @inlinable @inline(__always) - public static func twoFloatInputs( - _ a: Tensor, - _ b: Tensor - ) { - _RawTFEager.twoFloatInputs(a, b) - } - - @inlinable @inline(__always) - public static func twoFloatInputsFloatOutput( - _ a: Tensor, - _ b: Tensor - ) -> Tensor { - switch commonBackend(a.handle.backend, b.handle.backend) { - case .XLA: - let output_device = b.device - let a = Tensor(copying: a, to: .defaultTFEager) - let b = Tensor(copying: b, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.twoFloatInputsFloatOutput(a, b), to: output_device) - case .TF_EAGER: - return _RawTFEager.twoFloatInputsFloatOutput(a, b) - } - - } - - @inlinable @inline(__always) - public static func twoFloatInputsIntOutput( - _ a: Tensor, - _ b: Tensor - ) -> Tensor { - switch commonBackend(a.handle.backend, b.handle.backend) { - case .XLA: - let output_device = b.device - let a = Tensor(copying: a, to: .defaultTFEager) - let b = Tensor(copying: b, to: .defaultTFEager) - return Tensor(copying: _RawTFEager.twoFloatInputsIntOutput(a, b), to: output_device) - case .TF_EAGER: - return _RawTFEager.twoFloatInputsIntOutput(a, b) - } - - } - - @inlinable @inline(__always) - public static func twoFloatOutputs() -> (a: Tensor, b: Tensor) { - _RawTFEager.twoFloatOutputs() - } - - @inlinable @inline(__always) - public static func twoIntInputs( - _ a: Tensor, - _ b: Tensor - ) { - _RawTFEager.twoIntInputs(a, b) + commonBackend(operand.handle.backend, windowDimensions.handle.backend), + windowStrides.handle.backend), padding.handle.backend), source.handle.backend), + initValue.handle.backend) + { + case .XLA: + let output_device = initValue.device + let operand = Tensor(copying: operand, to: .defaultTFEager) + let windowDimensions = Tensor(copying: windowDimensions, to: .defaultTFEager) + let windowStrides = Tensor(copying: windowStrides, to: .defaultTFEager) + let padding = Tensor(copying: padding, to: .defaultTFEager) + let source = Tensor(copying: source, to: .defaultTFEager) + let initValue = Tensor(copying: initValue, to: .defaultTFEager) + return Tensor( + copying: _RawTFEager.xlaSelectAndScatter( + operand: operand, windowDimensions: windowDimensions, windowStrides: windowStrides, + padding: padding, source: source, initValue: initValue, select: select, scatter: scatter + ), to: output_device) + case .TF_EAGER: + return _RawTFEager.xlaSelectAndScatter( + operand: operand, windowDimensions: windowDimensions, windowStrides: windowStrides, + padding: padding, source: source, initValue: initValue, select: select, scatter: scatter) } - @inlinable @inline(__always) - public static func twoIntOutputs() -> (a: Tensor, b: Tensor) { - _RawTFEager.twoIntOutputs() - } + } - @inlinable @inline(__always) - public static func typeList( - _ a: T - ) { - _RawTFEager.typeList(a) - } + /// Computes the eigen decomposition of a batch of self-adjoint matrices + /// + /// (Note: Only real inputs are supported). 
+ /// + /// Computes the eigenvalues and eigenvectors of the innermost N-by-N matrices in + /// tensor such that tensor[...,:,:] * v[..., :,i] = e[..., i] * v[...,:,i], for + /// i=0...N-1. + /// + /// - Parameter a: the input tensor. + /// + /// - Attrs: + /// - lower: a boolean specifying whether the calculation is done with the lower + /// triangular part or the upper triangular part. + /// - max_iter: maximum number of sweep updates, i.e., the whole lower triangular + /// part or upper triangular part based on parameter lower. Heuristically, it has + /// been argued that approximately log(N) sweeps are needed in practice (Ref: Golub & + /// van Loan, "Matrix Computations"). + /// - epsilon: the tolerance ratio. + /// + /// - Outputs: + /// - w: The eigenvalues in ascending order, each repeated according to its + /// multiplicity. + /// - v: The column v[..., :, i] is the normalized eigenvector corresponding to the + /// eigenvalue w[..., i]. + @inlinable @inline(__always) + public static func xlaSelfAdjointEig<T: TensorFlowNumeric>( + _ a: Tensor<T>, + lower: Bool, + maxIter: Int64, + epsilon: Double + ) -> (w: Tensor<T>, v: Tensor<T>) { + _RawTFEager.xlaSelfAdjointEig(a, lower: lower, maxIter: maxIter, epsilon: epsilon) + } + + /// Sends the named tensor to another XLA computation. Wraps the XLA Send operator + /// + /// documented at + /// https://www.tensorflow.org/performance/xla/operation_semantics#send . + /// + /// - Parameter tensor: The tensor to send. + /// + /// - Attr tensor_name: A string key that identifies the channel. + @inlinable @inline(__always) + public static func xlaSend<T: TensorFlowScalar>( + _ tensor: Tensor<T>, + tensorName: String + ) { + _RawTFEager.xlaSend(tensor, tensorName: tensorName) + } - @inlinable @inline(__always) - public static func typeListTwice<T: TensorArrayProtocol>( - _ a: T, - _ b: T - ) { - _RawTFEager.typeListTwice(a, b) - } - - @inlinable @inline(__always) - public static func unary<T: TensorFlowScalar>( - _ a: Tensor<T> - ) -> Tensor<T> { - switch a.handle.backend { - case .XLA: - let output_device = a.device - let a = Tensor(copying: a, to: .defaultTFEager) - return Tensor(copying: _RawTFEager.unary(a), to: output_device) - case .TF_EAGER: - return _RawTFEager.unary(a) - } - - } - - /// Reverses the operation of Batch for a single output Tensor. - /// - /// An instance of Unbatch either receives an empty batched_tensor, in which case it - /// asynchronously waits until the values become available from a concurrently - /// running instance of Unbatch with the same container and shared_name, or receives - /// a non-empty batched_tensor in which case it finalizes all other concurrently - /// running instances and outputs its own element from the batch. - /// - /// batched_tensor: The possibly transformed output of Batch. The size of the first - /// dimension should remain unchanged by the transformations for the operation to - /// work. - /// batch_index: The matching batch_index obtained from Batch. - /// id: The id scalar emitted by Batch. - /// unbatched_tensor: The Tensor corresponding to this execution. - /// timeout_micros: Maximum amount of time (in microseconds) to wait to receive the - /// batched input tensor associated with a given invocation of the op. - /// container: Container to control resource sharing. - /// shared_name: Instances of Unbatch with the same container and shared_name are - /// assumed to possibly belong to the same batch. If left empty, the op name will - /// be used as the shared name.
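The `unbatch` wrapper below has three tensor operands, so it nests two `commonBackend` calls to agree on a single backend; for N operands the generator emits N-1 nested calls. The same fold can be written as a reduction. The helper below is hypothetical and only spells out the emitted pattern:

```swift
// Hypothetical, not part of the generated bindings: fold the pairwise
// backend check over any number of operand backends. `_Raw.commonBackend`
// traps when two operands disagree, so this either returns the single
// shared backend or stops with a diagnostic.
func commonBackend(of backends: [Device.Backend]) -> Device.Backend {
  precondition(!backends.isEmpty, "commonBackend(of:) needs at least one operand")
  return backends.dropFirst().reduce(backends[0]) { _Raw.commonBackend($0, $1) }
}
```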
- @inlinable @inline(__always) - public static func unbatch( - batchedTensor: Tensor, - batchIndex: Tensor, - id: Tensor, - timeoutMicros: Int64, - container: String, - sharedName: String - ) -> Tensor { - switch commonBackend( - commonBackend(batchedTensor.handle.backend, batchIndex.handle.backend), id.handle.backend) - { - case .XLA: - let output_device = id.device - let batchedTensor = Tensor(copying: batchedTensor, to: .defaultTFEager) - let batchIndex = Tensor(copying: batchIndex, to: .defaultTFEager) - let id = Tensor(copying: id, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.unbatch( - batchedTensor: batchedTensor, batchIndex: batchIndex, id: id, - timeoutMicros: timeoutMicros, container: container, sharedName: sharedName), - to: output_device) - case .TF_EAGER: - return _RawTFEager.unbatch( - batchedTensor: batchedTensor, batchIndex: batchIndex, id: id, - timeoutMicros: timeoutMicros, container: container, sharedName: sharedName) - } - - } - - /// A dataset that splits the elements of its input into multiple elements. - @inlinable @inline(__always) - public static func unbatchDataset( - inputDataset: VariantHandle, - outputTypes: [TensorDataType], - outputShapes: [TensorShape?] - ) -> VariantHandle { - _RawTFEager.unbatchDataset( - inputDataset: inputDataset, outputTypes: outputTypes, outputShapes: outputShapes) - } - - /// Gradient of Unbatch. - /// - /// Acts like Batch but using the given batch_index index of batching things as they - /// become available. This ensures that the gradients are propagated back in the - /// same session which did the forward pass. - /// - /// original_input: The input to the Unbatch operation this is the gradient of. - /// batch_index: The batch_index given to the Unbatch operation this is the gradient - /// of. - /// grad: The downstream gradient. - /// id: The id scalar emitted by Batch. - /// batched_grad: The return value, either an empty tensor or the batched gradient. - /// container: Container to control resource sharing. - /// shared_name: Instances of UnbatchGrad with the same container and shared_name - /// are assumed to possibly belong to the same batch. If left empty, the op name - /// will be used as the shared name. - @inlinable @inline(__always) - public static func unbatchGrad( - originalInput: Tensor, - batchIndex: Tensor, - grad: Tensor, - id: Tensor, - container: String, - sharedName: String - ) -> Tensor { - switch commonBackend( - commonBackend( - commonBackend(originalInput.handle.backend, batchIndex.handle.backend), - grad.handle.backend), id.handle.backend) - { - case .XLA: - let output_device = id.device - let originalInput = Tensor(copying: originalInput, to: .defaultTFEager) - let batchIndex = Tensor(copying: batchIndex, to: .defaultTFEager) - let grad = Tensor(copying: grad, to: .defaultTFEager) - let id = Tensor(copying: id, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.unbatchGrad( - originalInput: originalInput, batchIndex: batchIndex, grad: grad, id: id, - container: container, sharedName: sharedName), to: output_device) - case .TF_EAGER: - return _RawTFEager.unbatchGrad( - originalInput: originalInput, batchIndex: batchIndex, grad: grad, id: id, - container: container, sharedName: sharedName) - } - - } - - /// Decodes each string in `input` into a sequence of Unicode code points. - /// - /// The character codepoints for all strings are returned using a single vector - /// `char_values`, with strings expanded to characters in row-major order. 
- /// - /// The `row_splits` tensor indicates where the codepoints for - /// each input string begin and end within the `char_values` tensor. - /// In particular, the values for the `i`th - /// string (in row-major order) are stored in the slice - /// `[row_splits[i]:row_splits[i+1]]`. Thus: - /// - /// * `char_values[row_splits[i]+j]` is the Unicode codepoint for the `j`th - /// character in the `i`th string (in row-major order). - /// * `row_splits[i+1] - row_splits[i]` is the number of characters in the `i`th - /// string (in row-major order). - /// - /// - Parameter input: The text to be decoded. Can have any shape. Note that the output is flattened - /// to a vector of char values. - /// - /// - Attrs: - /// - input_encoding: Text encoding of the input strings. This is any of the encodings supported - /// by ICU ucnv algorithmic converters. Examples: `"UTF-16", "US ASCII", "UTF-8"`. - /// - errors: Error handling policy when there is invalid formatting found in the input. - /// The value of 'strict' will cause the operation to produce an InvalidArgument - /// error on any invalid input formatting. A value of 'replace' (the default) will - /// cause the operation to replace any invalid formatting in the input with the - /// `replacement_char` codepoint. A value of 'ignore' will cause the operation to - /// skip any invalid formatting in the input and produce no corresponding output - /// character. - /// - replacement_char: The replacement character codepoint to be used in place of any invalid - /// formatting in the input when `errors='replace'`. Any valid unicode codepoint may - /// be used. The default value is the default unicode replacement character, - /// 0xFFFD (65533). - /// - replace_control_characters: Whether to replace the C0 control characters (00-1F) with the - /// `replacement_char`. Default is false. - /// - /// - Outputs: - /// - row_splits: A 1D int32 tensor containing the row splits. - /// - char_values: A 1D int32 Tensor containing the decoded codepoints. - @inlinable @inline(__always) - public static func unicodeDecode<Tsplits: TensorFlowIndex>( - _ input: StringTensor, - inputEncoding: String, - errors: Errors = .replace, - replacementChar: Int64 = 65533, - replaceControlCharacters: Bool = false - ) -> (rowSplits: Tensor<Tsplits>, charValues: Tensor<Int32>) { - _RawTFEager.unicodeDecode( - input, inputEncoding: inputEncoding, errors: errors, replacementChar: replacementChar, - replaceControlCharacters: replaceControlCharacters) - } - - /// Decodes each string in `input` into a sequence of Unicode code points. - /// - /// The character codepoints for all strings are returned using a single vector - /// `char_values`, with strings expanded to characters in row-major order. - /// Similarly, the character start byte offsets are returned using a single vector - /// `char_to_byte_starts`, with strings expanded in row-major order. - /// - /// The `row_splits` tensor indicates where the codepoints and start offsets for - /// each input string begin and end within the `char_values` and - /// `char_to_byte_starts` tensors. In particular, the values for the `i`th - /// string (in row-major order) are stored in the slice - /// `[row_splits[i]:row_splits[i+1]]`. Thus: - /// - /// * `char_values[row_splits[i]+j]` is the Unicode codepoint for the `j`th - /// character in the `i`th string (in row-major order). - /// * `char_to_byte_starts[row_splits[i]+j]` is the start byte offset for the `j`th - /// character in the `i`th string (in row-major order).
- /// * `row_splits[i+1] - row_splits[i]` is the number of characters in the `i`th - /// string (in row-major order). - /// - /// - Parameter input: The text to be decoded. Can have any shape. Note that the output is flattened - /// to a vector of char values. - /// - /// - Attrs: - /// - input_encoding: Text encoding of the input strings. This is any of the encodings supported - /// by ICU ucnv algorithmic converters. Examples: `"UTF-16", "US ASCII", "UTF-8"`. - /// - errors: Error handling policy when there is invalid formatting found in the input. - /// The value of 'strict' will cause the operation to produce an InvalidArgument - /// error on any invalid input formatting. A value of 'replace' (the default) will - /// cause the operation to replace any invalid formatting in the input with the - /// `replacement_char` codepoint. A value of 'ignore' will cause the operation to - /// skip any invalid formatting in the input and produce no corresponding output - /// character. - /// - replacement_char: The replacement character codepoint to be used in place of any invalid - /// formatting in the input when `errors='replace'`. Any valid unicode codepoint may - /// be used. The default value is the default unicode replacement character, - /// 0xFFFD (65533). - /// - replace_control_characters: Whether to replace the C0 control characters (00-1F) with the - /// `replacement_char`. Default is false. - /// - /// - Outputs: - /// - row_splits: A 1D int32 tensor containing the row splits. - /// - char_values: A 1D int32 Tensor containing the decoded codepoints. - /// - char_to_byte_starts: A 1D int32 Tensor containing the byte index in the input string where each - /// character in `char_values` starts. - @inlinable @inline(__always) - public static func unicodeDecodeWithOffsets<Tsplits: TensorFlowIndex>( - _ input: StringTensor, - inputEncoding: String, - errors: Errors = .replace, - replacementChar: Int64 = 65533, - replaceControlCharacters: Bool = false - ) -> (rowSplits: Tensor<Tsplits>, charValues: Tensor<Int32>, charToByteStarts: Tensor<Int32>) { - _RawTFEager.unicodeDecodeWithOffsets( - input, inputEncoding: inputEncoding, errors: errors, replacementChar: replacementChar, - replaceControlCharacters: replaceControlCharacters) - } - - /// Encode a tensor of ints into unicode strings. - /// - /// Returns a vector of strings, where `output[i]` is constructed by encoding the - /// Unicode codepoints in `input_values[input_splits[i]:input_splits[i+1]]` - /// using `output_encoding`. - /// - /// --- - /// - /// Example: - /// - /// ``` - /// input_values = [72, 101, 108, 108, 111, 87, 111, 114, 108, 100] - /// input_splits = [0, 5, 10] - /// output_encoding = 'UTF-8' - /// - /// output = ['Hello', 'World'] - /// ``` - /// - /// - Parameters: - /// - input_values: A 1D tensor containing the unicode codepoints that should be encoded. - /// - input_splits: A 1D tensor specifying how the unicode codepoints should be split into strings. - /// In particular, `output[i]` is constructed by encoding the codepoints in the - /// slice `input_values[input_splits[i]:input_splits[i+1]]`. - /// - /// - Attrs: - /// - errors: Error handling policy when there is invalid formatting found in the input. - /// The value of 'strict' will cause the operation to produce an InvalidArgument - /// error on any invalid input formatting. A value of 'replace' (the default) will - /// cause the operation to replace any invalid formatting in the input with the - /// `replacement_char` codepoint.
A value of 'ignore' will cause the operation to - /// skip any invalid formatting in the input and produce no corresponding output - /// character. - /// - output_encoding: Unicode encoding of the output strings. Valid encodings are: `"UTF-8", - /// "UTF-16-BE", and "UTF-32-BE"`. - /// - replacement_char: The replacement character codepoint to be used in place of any invalid - /// formatting in the input when `errors='replace'`. Any valid unicode codepoint may - /// be used. The default value is the default unicode replacement character, - /// 0xFFFD (65533). - /// - /// - Output output: The 1-D Tensor of strings encoded from the provided unicode codepoints. - @inlinable @inline(__always) - public static func unicodeEncode<Tsplits: TensorFlowIndex>( - inputValues: Tensor<Int32>, - inputSplits: Tensor<Tsplits>, - errors: Errors = .replace, - outputEncoding: OutputEncoding, - replacementChar: Int64 = 65533 - ) -> StringTensor { - _RawTFEager.unicodeEncode( - inputValues: inputValues, inputSplits: inputSplits, errors: errors, - outputEncoding: outputEncoding, replacementChar: replacementChar) - } - - /// Determine the script codes of a given tensor of Unicode integer code points. - /// - /// This operation converts Unicode code points to script codes corresponding to - /// each code point. Script codes correspond to International Components for - /// Unicode (ICU) UScriptCode values. See http://icu-project.org/apiref/icu4c/uscript_8h.html. - /// Returns -1 (USCRIPT_INVALID_CODE) for invalid codepoints. Output shape will - /// match input shape. - /// - /// Examples: - /// - /// >>> tf.strings.unicode_script([1, 31, 38]) - /// - /// - /// - Parameter input: A Tensor of int32 Unicode code points. - /// - /// - Output output: A Tensor of int32 script codes corresponding to each input code point. - @inlinable @inline(__always) - public static func unicodeScript( - _ input: Tensor<Int32> - ) -> Tensor<Int32> { - switch input.handle.backend { - case .XLA: - let output_device = input.device - let input = Tensor(copying: input, to: .defaultTFEager) - return Tensor(copying: _RawTFEager.unicodeScript(input), to: output_device) - case .TF_EAGER: - return _RawTFEager.unicodeScript(input) - } - - } - - /// Transcode the input text from a source encoding to a destination encoding. - /// - /// The input is a string tensor of any shape. The output is a string tensor of - /// the same shape containing the transcoded strings. Output strings are always - /// valid unicode. If the input contains invalid encoding positions, the - /// `errors` attribute sets the policy for how to deal with them. If the default - /// error-handling policy is used, invalid formatting will be substituted in the - /// output by the `replacement_char`. If the errors policy is to `ignore`, any - /// invalid encoding positions in the input are skipped and not included in the - /// output. If it is set to `strict` then any invalid formatting will result in an - /// InvalidArgument error. - /// - /// This operation can be used with `output_encoding = input_encoding` to enforce - /// correct formatting for inputs even if they are already in the desired encoding. - /// - /// If the input is prefixed by a Byte Order Mark needed to determine encoding - /// (e.g. if the encoding is UTF-16 and the BOM indicates big-endian), then that - /// BOM will be consumed and not emitted into the output. If the input encoding - /// is marked with an explicit endianness (e.g. UTF-16-BE), then the BOM is - /// interpreted as a non-breaking-space and is preserved in the output (including - /// always for UTF-8).
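A quick worked call of the `unicodeScript` wrapper defined above (the `unicodeTranscode` documentation continues below). The expected values are an assumption based on ICU's published UScriptCode numbering, where USCRIPT_LATIN is 25:

```swift
let codePoints = Tensor<Int32>([72, 101, 108, 108, 111])  // "Hello"
let scripts = _Raw.unicodeScript(codePoints)
// All five code points are Latin letters, so this should yield
// [25, 25, 25, 25, 25] (USCRIPT_LATIN) under ICU's numbering.
```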
- /// - /// The end result is that if the input is marked as an explicit endianness the - /// transcoding is faithful to all codepoints in the source. If it is not marked - /// with an explicit endianness, the BOM is not considered part of the string itself - /// but as metadata, and so is not preserved in the output. - /// - /// Examples: - /// - /// >>> tf.strings.unicode_transcode(["Hello", "TensorFlow", "2.x"], "UTF-8", "UTF-16-BE") - /// - /// >>> tf.strings.unicode_transcode(["A", "B", "C"], "US ASCII", "UTF-8").numpy() - /// array([b'A', b'B', b'C'], dtype=object) - /// - /// - Parameter input: The text to be processed. Can have any shape. - /// - /// - Attrs: - /// - input_encoding: Text encoding of the input strings. This is any of the encodings supported - /// by ICU ucnv algorithmic converters. Examples: `"UTF-16", "US ASCII", "UTF-8"`. - /// - output_encoding: The unicode encoding to use in the output. Must be one of - /// `"UTF-8", "UTF-16-BE", "UTF-32-BE"`. Multi-byte encodings will be big-endian. - /// - errors: Error handling policy when there is invalid formatting found in the input. - /// The value of 'strict' will cause the operation to produce an InvalidArgument - /// error on any invalid input formatting. A value of 'replace' (the default) will - /// cause the operation to replace any invalid formatting in the input with the - /// `replacement_char` codepoint. A value of 'ignore' will cause the operation to - /// skip any invalid formatting in the input and produce no corresponding output - /// character. - /// - replacement_char: The replacement character codepoint to be used in place of any invalid - /// formatting in the input when `errors='replace'`. Any valid unicode codepoint may - /// be used. The default value is the default unicode replacement character, - /// 0xFFFD (65533). - /// - /// Note that for UTF-8, passing a replacement character expressible in 1 byte, such - /// as ' ', will preserve string alignment to the source since invalid bytes will be - /// replaced with a 1-byte replacement. For UTF-16-BE and UTF-16-LE, any 1 or 2 byte - /// replacement character will preserve byte alignment to the source. - /// - replace_control_characters: Whether to replace the C0 control characters (00-1F) with the - /// `replacement_char`. Default is false. - /// - /// - Output output: A string tensor containing unicode text encoded using `output_encoding`. - @inlinable @inline(__always) - public static func unicodeTranscode( - _ input: StringTensor, - inputEncoding: String, - outputEncoding: OutputEncoding, - errors: Errors = .replace, - replacementChar: Int64 = 65533, - replaceControlCharacters: Bool = false - ) -> StringTensor { - _RawTFEager.unicodeTranscode( - input, inputEncoding: inputEncoding, outputEncoding: outputEncoding, errors: errors, - replacementChar: replacementChar, replaceControlCharacters: replaceControlCharacters) - } - - /// Generates labels for candidate sampling with a uniform distribution. - /// - /// See explanations of candidate sampling and the data formats at - /// go/candidate-sampling. - /// - /// For each batch, this op picks a single set of sampled candidate labels. - /// - /// The advantages of sampling candidates per-batch are simplicity and the - /// possibility of efficient dense matrix multiplication. The disadvantage is that - /// the sampled candidates must be chosen independently of the context and of the - /// true labels.
- /// - /// - Parameter true_classes: A batch_size * num_true matrix, in which each row contains the - /// IDs of the num_true target_classes in the corresponding original label. - /// - /// - Attrs: - /// - num_true: Number of true labels per context. - /// - num_sampled: Number of candidates to randomly sample. - /// - unique: If unique is true, we sample with rejection, so that all sampled - /// candidates in a batch are unique. This requires some approximation to - /// estimate the post-rejection sampling probabilities. - /// - range_max: The sampler will sample integers from the interval [0, range_max). - /// - seed: If either seed or seed2 is set to be non-zero, the random number - /// generator is seeded by the given seed. Otherwise, it is seeded by a - /// random seed. - /// - seed2: A second seed to avoid seed collision. - /// - /// - Outputs: - /// - sampled_candidates: A vector of length num_sampled, in which each element is - /// the ID of a sampled candidate. - /// - true_expected_count: A batch_size * num_true matrix, representing - /// the number of times each candidate is expected to occur in a batch - /// of sampled candidates. If unique=true, then this is a probability. - /// - sampled_expected_count: A vector of length num_sampled, for each sampled - /// candidate representing the number of times the candidate is expected - /// to occur in a batch of sampled candidates. If unique=true, then this is a - /// probability. - @inlinable @inline(__always) - public static func uniformCandidateSampler( - trueClasses: Tensor<Int64>, - numTrue: Int64, - numSampled: Int64, - unique: Bool, - rangeMax: Int64, - seed: Int64 = 0, - seed2: Int64 = 0 - ) -> ( - sampledCandidates: Tensor<Int64>, trueExpectedCount: Tensor<Float>, - sampledExpectedCount: Tensor<Float> - ) { - _RawTFEager.uniformCandidateSampler( - trueClasses: trueClasses, numTrue: numTrue, numSampled: numSampled, unique: unique, - rangeMax: rangeMax, seed: seed, seed2: seed2) - } - - /// Finds unique elements in a 1-D tensor. - /// - /// This operation returns a tensor `y` containing all of the unique elements of `x` - /// sorted in the same order that they occur in `x`; `x` does not need to be sorted. - /// This operation also returns a tensor `idx` the same size as `x` that contains - /// the index of each value of `x` in the unique output `y`. In other words: - /// - /// `y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]` - /// - /// Examples: - /// - /// ``` - /// # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8] - /// y, idx = unique(x) - /// y ==> [1, 2, 4, 7, 8] - /// idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4] - /// ``` - /// - /// ``` - /// # tensor 'x' is [4, 5, 1, 2, 3, 3, 4, 5] - /// y, idx = unique(x) - /// y ==> [4, 5, 1, 2, 3] - /// idx ==> [0, 1, 2, 3, 4, 4, 0, 1] - /// ``` - /// - /// - Parameter x: 1-D. - /// - /// - Outputs: - /// - y: 1-D. - /// - idx: 1-D. - @inlinable @inline(__always) - public static func unique< - T: TensorFlowScalar, - OutIdx: TensorFlowIndex - >( - _ x: Tensor<T> - ) -> (y: Tensor<T>, idx: Tensor<OutIdx>) { - _RawTFEager.unique(x) - } - - /// Creates a dataset that contains the unique elements of `input_dataset`. - @inlinable @inline(__always) - public static func uniqueDataset( - inputDataset: VariantHandle, - outputTypes: [TensorDataType], - outputShapes: [TensorShape?] - ) -> VariantHandle { - _RawTFEager.uniqueDataset( - inputDataset: inputDataset, outputTypes: outputTypes, outputShapes: outputShapes) - } - - /// Finds unique elements along an axis of a tensor.
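Before the axis-aware variant is documented below, a worked call of the flat `unique` wrapper above; the expected values are taken straight from its documentation, and the index type is pinned by the result annotation:

```swift
let x = Tensor<Int32>([1, 1, 2, 4, 4, 4, 7, 8, 8])
let (y, idx): (Tensor<Int32>, Tensor<Int32>) = _Raw.unique(x)
// y   == [1, 2, 4, 7, 8]
// idx == [0, 0, 1, 2, 2, 2, 3, 4, 4]
```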
- /// - /// This operation returns a tensor `y` containing the unique elements - /// along the `axis` of a tensor. The returned unique elements are sorted - /// in the same order as they occur along `axis` in `x`. - /// This operation also returns a tensor `idx` that is the same size as - /// the number of the elements in `x` along the `axis` dimension. It - /// contains the index in the unique output `y`. - /// In other words, for a `1-D` tensor `x` with `axis = None`: - /// - /// `y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]` - /// - /// For example: - /// - /// ``` - /// # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8] - /// y, idx = unique(x) - /// y ==> [1, 2, 4, 7, 8] - /// idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4] - /// ``` - /// - /// For a `2-D` tensor `x` with `axis = 0`: - /// - /// ``` - /// # tensor 'x' is [[1, 0, 0], - /// # [1, 0, 0], - /// # [2, 0, 0]] - /// y, idx = unique(x, axis=0) - /// y ==> [[1, 0, 0], - /// [2, 0, 0]] - /// idx ==> [0, 0, 1] - /// ``` - /// - /// For a `2-D` tensor `x` with `axis = 1`: - /// - /// ``` - /// # tensor 'x' is [[1, 0, 0], - /// # [1, 0, 0], - /// # [2, 0, 0]] - /// y, idx = unique(x, axis=1) - /// y ==> [[1, 0], - /// [1, 0], - /// [2, 0]] - /// idx ==> [0, 1, 1] - /// ``` - /// - /// - Parameters: - /// - x: A `Tensor`. - /// - axis: A `Tensor` of type `int32` (default: None). The axis of the Tensor to - /// find the unique elements. - /// - /// - Outputs: - /// - y: A `Tensor`. Unique elements along the `axis` of `Tensor` x. - /// - idx: A 1-D Tensor. Has the same type as x and contains the index of each - /// value of x in the output y. - @inlinable @inline(__always) - public static func uniqueV2< - T: TensorFlowScalar, - Taxis: TensorFlowIndex, - OutIdx: TensorFlowIndex - >( - _ x: Tensor<T>, - axis: Tensor<Taxis> - ) -> (y: Tensor<T>, idx: Tensor<OutIdx>) { - _RawTFEager.uniqueV2(x, axis: axis) - } - - /// Finds unique elements in a 1-D tensor. - /// - /// This operation returns a tensor `y` containing all of the unique elements of `x` - /// sorted in the same order that they occur in `x`. This operation also returns a - /// tensor `idx` the same size as `x` that contains the index of each value of `x` - /// in the unique output `y`. Finally, it returns a third tensor `count` that - /// contains the count of each element of `y` in `x`. In other words: - /// - /// `y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]` - /// - /// For example: - /// - /// ``` - /// # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8] - /// y, idx, count = unique_with_counts(x) - /// y ==> [1, 2, 4, 7, 8] - /// idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4] - /// count ==> [2, 1, 3, 1, 2] - /// ``` - /// - /// - Parameter x: 1-D. - /// - /// - Outputs: - /// - y: 1-D. - /// - idx: 1-D. - /// - count: 1-D. - @inlinable @inline(__always) - public static func uniqueWithCounts< - T: TensorFlowScalar, - OutIdx: TensorFlowIndex - >( - _ x: Tensor<T> - ) -> (y: Tensor<T>, idx: Tensor<OutIdx>, count: Tensor<OutIdx>) { - _RawTFEager.uniqueWithCounts(x) - } - - /// Finds unique elements along an axis of a tensor. - /// - /// This operation returns a tensor `y` containing the unique elements - /// along the `axis` of a tensor. The returned unique elements are sorted - /// in the same order as they occur along `axis` in `x`. - /// This operation also returns a tensor `idx` and a tensor `count` - /// that are the same size as the number of the elements in `x` along the - /// `axis` dimension. The `idx` contains the index in the unique output `y` - /// and the `count` contains the count in the unique output `y`.
- /// In other words, for a `1-D` tensor `x` with `axis = None`: - /// - /// `y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]` - /// - /// For example: - /// - /// ``` - /// # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8] - /// y, idx, count = unique_with_counts(x) - /// y ==> [1, 2, 4, 7, 8] - /// idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4] - /// count ==> [2, 1, 3, 1, 2] - /// ``` - /// - /// For a `2-D` tensor `x` with `axis = 0`: - /// - /// ``` - /// # tensor 'x' is [[1, 0, 0], - /// # [1, 0, 0], - /// # [2, 0, 0]] - /// y, idx, count = unique_with_counts(x, axis=0) - /// y ==> [[1, 0, 0], - /// [2, 0, 0]] - /// idx ==> [0, 0, 1] - /// count ==> [2, 1] - /// ``` - /// - /// For a `2-D` tensor `x` with `axis = 1`: - /// - /// ``` - /// # tensor 'x' is [[1, 0, 0], - /// # [1, 0, 0], - /// # [2, 0, 0]] - /// y, idx, count = unique_with_counts(x, axis=1) - /// y ==> [[1, 0], - /// [1, 0], - /// [2, 0]] - /// idx ==> [0, 1, 1] - /// count ==> [1, 2] - /// ``` - /// - /// - Parameters: - /// - x: A `Tensor`. - /// - axis: A `Tensor` of type `int32` (default: None). The axis of the Tensor to - /// find the unique elements. - /// - /// - Outputs: - /// - y: A `Tensor`. Unique elements along the `axis` of `Tensor` x. - /// - idx: A 1-D Tensor. Has the same type as x and contains the index of each - /// value of x in the output y. - /// - count: A 1-D Tensor. The count of each value of x in the output y. - @inlinable @inline(__always) - public static func uniqueWithCountsV2< - T: TensorFlowScalar, - Taxis: TensorFlowIndex, - OutIdx: TensorFlowIndex - >( - _ x: Tensor<T>, - axis: Tensor<Taxis> - ) -> (y: Tensor<T>, idx: Tensor<OutIdx>, count: Tensor<OutIdx>) { - _RawTFEager.uniqueWithCountsV2(x, axis: axis) - } - - /// Unpacks a given dimension of a rank-`R` tensor into `num` rank-`(R-1)` tensors. - /// - /// Unpacks `num` tensors from `value` by chipping it along the `axis` dimension. - /// For example, given a tensor of shape `(A, B, C, D)`: - /// - /// If `axis == 0` then the i'th tensor in `output` is the slice `value[i, :, :, :]` - /// and each tensor in `output` will have shape `(B, C, D)`. (Note that the - /// dimension unpacked along is gone, unlike `split`). - /// - /// If `axis == 1` then the i'th tensor in `output` is the slice `value[:, i, :, :]` - /// and each tensor in `output` will have shape `(A, C, D)`. - /// Etc. - /// - /// This is the opposite of `pack`. - /// - /// - Parameter value: 1-D or higher, with `axis` dimension size equal to `num`. - /// - /// - Attr axis: Dimension along which to unpack. Negative values wrap around, so the - /// valid range is `[-R, R)`. - /// - /// - Output output: The list of tensors unpacked from `value`. - @inlinable @inline(__always) - public static func unpack<T: TensorFlowScalar>( - value: Tensor<T>, - num: Int64, - axis: Int64 = 0 - ) -> [Tensor<T>] { - switch value.handle.backend { - case .XLA: - return _RawXLA.unpack(value: value, num: num, axis: axis) - case .TF_EAGER: - return _RawTFEager.unpack(value: value, num: num, axis: axis) - } - - } - - /// Converts an array of flat indices into a tuple of coordinate arrays.
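A short sketch of the `unpack` wrapper just defined, which chips a tensor along `axis` (the `unravelIndex` documentation continues below):

```swift
let value: Tensor<Float> = [[1, 2], [3, 4], [5, 6]]    // shape (3, 2)
let chips = _Raw.unpack(value: value, num: 3, axis: 0)
// Three rank-1 chips: [1, 2], [3, 4], [5, 6]; packing them restores `value`.
```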
- /// - /// - /// Example: - /// - /// ``` - /// y = tf.unravel_index(indices=[2, 5, 7], dims=[3, 3]) - /// # 'dims' represents a hypothetical (3, 3) tensor of indices: - /// # [[0, 1, *2*], - /// # [3, 4, *5*], - /// # [6, *7*, 8]] - /// # For each entry from 'indices', this operation returns - /// # its coordinates (marked with '*'), such as - /// # 2 ==> (0, 2) - /// # 5 ==> (1, 2) - /// # 7 ==> (2, 1) - /// y ==> [[0, 1, 2], [2, 2, 1]] - /// ``` - /// - /// @compatibility(numpy) - /// Equivalent to np.unravel_index - /// @end_compatibility - /// - /// - Parameters: - /// - indices: A 0-D or 1-D `int` Tensor whose elements are indices into the - /// flattened version of an array of dimensions dims. - /// - dims: A 1-D `int` Tensor. The shape of the array to use for unraveling - /// indices. - /// - /// - Output output: A 2-D (or 1-D if indices is 0-D) tensor where each row has the - /// same shape as the indices array. - @inlinable @inline(__always) - public static func unravelIndex<Tidx: TensorFlowIndex>( - indices: Tensor<Tidx>, - dims: Tensor<Tidx> - ) -> Tensor<Tidx> { - switch commonBackend(indices.handle.backend, dims.handle.backend) { - case .XLA: - let output_device = dims.device - let indices = Tensor(copying: indices, to: .defaultTFEager) - let dims = Tensor(copying: dims, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.unravelIndex(indices: indices, dims: dims), to: output_device) - case .TF_EAGER: - return _RawTFEager.unravelIndex(indices: indices, dims: dims) - } - - } - - /// Joins the elements of `inputs` based on `segment_ids`. - /// - /// Computes the string join along segments of a tensor. - /// Given `segment_ids` with rank `N` and `data` with rank `N+M`: - /// - /// `output[i, k1...kM] = strings.join([data[j1...jN, k1...kM]])` - /// - /// where the join is over all [j1...jN] such that segment_ids[j1...jN] = i. - /// Strings are joined in row-major order. - /// - /// For example: - /// - /// ```python - /// inputs = [['Y', 'q', 'c'], ['Y', '6', '6'], ['p', 'G', 'a']] - /// output_array = string_ops.unsorted_segment_join(inputs=inputs, - /// segment_ids=[1, 0, 1], - /// num_segments=2, - /// separator=':') - /// # output_array ==> [['Y', '6', '6'], ['Y:p', 'q:G', 'c:a']] - /// - /// - /// inputs = ['this', 'is', 'a', 'test'] - /// output_array = string_ops.unsorted_segment_join(inputs=inputs, - /// segment_ids=[0, 0, 0, 0], - /// num_segments=1, - /// separator=':') - /// # output_array ==> ['this:is:a:test'] - /// ``` - /// - /// - Parameters: - /// - inputs: The input to be joined. - /// - segment_ids: A tensor whose shape is a prefix of data.shape. Negative segment ids are not - /// supported. - /// - num_segments: A scalar. - /// - /// - Attr separator: The separator to use when joining. - @inlinable @inline(__always) - public static func unsortedSegmentJoin< - Tindices: TensorFlowIndex, - Tnumsegments: TensorFlowIndex - >( - inputs: StringTensor, - segmentIds: Tensor<Tindices>, - numSegments: Tensor<Tnumsegments>, - separator: String - ) -> StringTensor { - _RawTFEager.unsortedSegmentJoin( - inputs: inputs, segmentIds: segmentIds, numSegments: numSegments, separator: separator) - } - - /// Computes the maximum along segments of a tensor. - /// - /// Read - /// [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) - /// for an explanation of segments. - /// - /// This operator is similar to the unsorted segment sum operator found - /// [(here)](../../../api_docs/python/math_ops.md#UnsortedSegmentSum).
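The `unravel_index` example above, replayed through the Swift wrapper (the `unsortedSegmentMax` documentation resumes below):

```swift
let indices = Tensor<Int32>([2, 5, 7])
let dims = Tensor<Int32>([3, 3])
let coords = _Raw.unravelIndex(indices: indices, dims: dims)
// coords == [[0, 1, 2],
//            [2, 2, 1]]  // column k holds the (row, col) of indices[k]
```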
- /// Instead of computing the sum over segments, it computes the maximum such that: - /// - /// \\(output_i = \max_{j...} data[j...]\\) where max is over tuples `j...` such - /// that `segment_ids[j...] == i`. - /// - /// If the maximum is empty for a given segment ID `i`, it outputs the smallest - /// possible value for the specific numeric type, - /// `output[i] = numeric_limits::lowest()`. - /// - /// If the given segment ID `i` is negative, then the corresponding value is - /// dropped, and will not be included in the result.
- /// - /// For example: - /// - /// ``` python - /// c = tf.constant([[1,2,3,4], [5,6,7,8], [4,3,2,1]]) - /// tf.unsorted_segment_max(c, tf.constant([0, 1, 0]), num_segments=2) - /// # ==> [[ 4, 3, 3, 4], - /// # [5, 6, 7, 8]] - /// ``` - /// - /// - /// - Parameter segment_ids: A tensor whose shape is a prefix of `data.shape`. - /// - /// - Output output: Has same shape as data, except for the first `segment_ids.rank` - /// dimensions, which are replaced with a single dimension which has size - /// `num_segments`. - @inlinable @inline(__always) - public static func unsortedSegmentMax< - T: TensorFlowNumeric, - Tindices: TensorFlowIndex, - Tnumsegments: TensorFlowIndex - >( - data: Tensor<T>, - segmentIds: Tensor<Tindices>, - numSegments: Tensor<Tnumsegments> - ) -> Tensor<T> { - switch commonBackend( - commonBackend(data.handle.backend, segmentIds.handle.backend), numSegments.handle.backend) - { - case .XLA: - let output_device = numSegments.device - let data = Tensor(copying: data, to: .defaultTFEager) - let segmentIds = Tensor(copying: segmentIds, to: .defaultTFEager) - let numSegments = Tensor(copying: numSegments, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.unsortedSegmentMax( - data: data, segmentIds: segmentIds, numSegments: numSegments), to: output_device) - case .TF_EAGER: - return _RawTFEager.unsortedSegmentMax( - data: data, segmentIds: segmentIds, numSegments: numSegments) - } - - } - - /// Computes the minimum along segments of a tensor. - /// - /// Read - /// [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) - /// for an explanation of segments. - /// - /// This operator is similar to the unsorted segment sum operator found - /// [(here)](../../../api_docs/python/math_ops.md#UnsortedSegmentSum). - /// Instead of computing the sum over segments, it computes the minimum such that: - /// - /// \\(output_i = \min_{j...} data[j...]\\) where min is over tuples `j...` such - /// that `segment_ids[j...] == i`. - /// - /// If the minimum is empty for a given segment ID `i`, it outputs the largest - /// possible value for the specific numeric type, - /// `output[i] = numeric_limits::max()`. - /// - /// For example: - /// - /// ``` python - /// c = tf.constant([[1,2,3,4], [5,6,7,8], [4,3,2,1]]) - /// tf.unsorted_segment_min(c, tf.constant([0, 1, 0]), num_segments=2) - /// # ==> [[ 1, 2, 2, 1], - /// # [5, 6, 7, 8]] - /// ``` - /// - /// If the given segment ID `i` is negative, then the corresponding value is - /// dropped, and will not be included in the result. - /// - /// - Parameter segment_ids: A tensor whose shape is a prefix of `data.shape`. - /// - /// - Output output: Has same shape as data, except for the first `segment_ids.rank` - /// dimensions, which are replaced with a single dimension which has size - /// `num_segments`.
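The Python example above, replayed through the Swift wrapper (the `unsortedSegmentMin` declaration follows):

```swift
let c: Tensor<Int32> = [[1, 2, 3, 4], [5, 6, 7, 8], [4, 3, 2, 1]]
let ids = Tensor<Int32>([0, 1, 0])
let maxes = _Raw.unsortedSegmentMax(
  data: c, segmentIds: ids, numSegments: Tensor<Int32>(2))
// maxes == [[4, 3, 3, 4],
//           [5, 6, 7, 8]]  // rows 0 and 2 fold into segment 0
```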
- @inlinable @inline(__always) - public static func unsortedSegmentMin< - T: TensorFlowNumeric, - Tindices: TensorFlowIndex, - Tnumsegments: TensorFlowIndex - >( - data: Tensor, - segmentIds: Tensor, - numSegments: Tensor - ) -> Tensor { - switch commonBackend( - commonBackend(data.handle.backend, segmentIds.handle.backend), numSegments.handle.backend) - { - case .XLA: - let output_device = numSegments.device - let data = Tensor(copying: data, to: .defaultTFEager) - let segmentIds = Tensor(copying: segmentIds, to: .defaultTFEager) - let numSegments = Tensor(copying: numSegments, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.unsortedSegmentMin( - data: data, segmentIds: segmentIds, numSegments: numSegments), to: output_device) - case .TF_EAGER: - return _RawTFEager.unsortedSegmentMin( - data: data, segmentIds: segmentIds, numSegments: numSegments) - } - - } - - /// Computes the product along segments of a tensor. - /// - /// Read - /// [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) - /// for an explanation of segments. - /// - /// This operator is similar to the unsorted segment sum operator found - /// [(here)](../../../api_docs/python/math_ops.md#UnsortedSegmentSum). - /// Instead of computing the sum over segments, it computes the product of all - /// entries belonging to a segment such that: - /// - /// \\(output_i = \prod_{j...} data[j...]\\) where the product is over tuples - /// `j...` such that `segment_ids[j...] == i`. - /// - /// For example: - /// - /// ``` python - /// c = tf.constant([[1,2,3,4], [5,6,7,8], [4,3,2,1]]) - /// tf.unsorted_segment_prod(c, tf.constant([0, 1, 0]), num_segments=2) - /// # ==> [[ 4, 6, 6, 4], - /// # [5, 6, 7, 8]] - /// ``` - /// - /// If there is no entry for a given segment ID `i`, it outputs 1. - /// - /// If the given segment ID `i` is negative, then the corresponding value is - /// dropped, and will not be included in the result. - /// - /// - Parameter segment_ids: A tensor whose shape is a prefix of `data.shape`. - /// - /// - Output output: Has same shape as data, except for the first `segment_ids.rank` - /// dimensions, which are replaced with a single dimension which has size - /// `num_segments`. - @inlinable @inline(__always) - public static func unsortedSegmentProd< - T: TensorFlowNumeric, - Tindices: TensorFlowIndex, - Tnumsegments: TensorFlowIndex - >( - data: Tensor, - segmentIds: Tensor, - numSegments: Tensor - ) -> Tensor { - switch commonBackend( - commonBackend(data.handle.backend, segmentIds.handle.backend), numSegments.handle.backend) - { - case .XLA: - let output_device = numSegments.device - let data = Tensor(copying: data, to: .defaultTFEager) - let segmentIds = Tensor(copying: segmentIds, to: .defaultTFEager) - let numSegments = Tensor(copying: numSegments, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.unsortedSegmentProd( - data: data, segmentIds: segmentIds, numSegments: numSegments), to: output_device) - case .TF_EAGER: - return _RawTFEager.unsortedSegmentProd( - data: data, segmentIds: segmentIds, numSegments: numSegments) - } - - } - - /// Computes the sum along segments of a tensor. - /// - /// Read - /// [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) - /// for an explanation of segments. - /// - /// Computes a tensor such that - /// \\(output[i] = \sum_{j...} data[j...]\\) where the sum is over tuples `j...` such - /// that `segment_ids[j...] == i`. 
Unlike `SegmentSum`, `segment_ids` - /// need not be sorted and need not cover all values in the full - /// range of valid values. - /// - /// If the sum is empty for a given segment ID `i`, `output[i] = 0`. - /// If the given segment ID `i` is negative, the value is dropped and will not be - /// added to the sum of the segment. - /// - /// `num_segments` should equal the number of distinct segment IDs.
- /// - /// ``` python - /// c = tf.constant([[1,2,3,4], [5,6,7,8], [4,3,2,1]]) - /// tf.unsorted_segment_sum(c, tf.constant([0, 1, 0]), num_segments=2) - /// # ==> [[ 5, 5, 5, 5], - /// # [5, 6, 7, 8]] - /// ``` - /// - /// - /// - Parameter segment_ids: A tensor whose shape is a prefix of `data.shape`. - /// - /// - Output output: Has same shape as data, except for the first `segment_ids.rank` - /// dimensions, which are replaced with a single dimension which has size - /// `num_segments`. - @inlinable @inline(__always) - public static func unsortedSegmentSum< - T: TensorFlowNumeric, - Tindices: TensorFlowIndex, - Tnumsegments: TensorFlowIndex - >( - data: Tensor<T>, - segmentIds: Tensor<Tindices>, - numSegments: Tensor<Tnumsegments> - ) -> Tensor<T> { - switch commonBackend( - commonBackend(data.handle.backend, segmentIds.handle.backend), numSegments.handle.backend) - { - case .XLA: - return _RawXLA.unsortedSegmentSum( - data: data, segmentIds: segmentIds, numSegments: numSegments) - case .TF_EAGER: - return _RawTFEager.unsortedSegmentSum( - data: data, segmentIds: segmentIds, numSegments: numSegments) - } - - } - - /// Op is similar to a lightweight Dequeue. - /// - /// The basic functionality is similar to dequeue with many fewer - /// capabilities and options. This Op is optimized for performance. - @inlinable @inline(__always) - public static func unstage<Dtypes: TensorGroup>( - capacity: Int64 = 0, - memoryLimit: Int64 = 0, - container: String, - sharedName: String - ) -> Dtypes { - _RawTFEager.unstage( - capacity: capacity, memoryLimit: memoryLimit, container: container, sharedName: sharedName) - } - - @inlinable @inline(__always) - public static func unwrapDatasetVariant( - inputHandle: VariantHandle - ) -> VariantHandle { - _RawTFEager.unwrapDatasetVariant(inputHandle: inputHandle) - } - - /// Applies upper_bound(sorted_search_values, values) along each row. - /// - /// Each set of rows with the same index in (sorted_inputs, values) is treated - /// independently. The resulting row is the equivalent of calling - /// `np.searchsorted(sorted_inputs, values, side='right')`. - /// - /// The result is not a global index to the entire - /// `Tensor`, but rather just the index in the last dimension. - /// - /// A 2-D example: - /// sorted_sequence = [[0, 3, 9, 9, 10], - /// [1, 2, 3, 4, 5]] - /// values = [[2, 4, 9], - /// [0, 2, 6]] - /// - /// result = UpperBound(sorted_sequence, values) - /// - /// result == [[1, 2, 4], - /// [0, 2, 5]] - /// - /// - Parameters: - /// - sorted_inputs: 2-D Tensor where each row is ordered. - /// - values: 2-D Tensor with the same number of rows as `sorted_inputs`. Contains - /// the values that will be searched for in `sorted_inputs`. - /// - /// - Output output: A `Tensor` with the same shape as `values`. It contains the last scalar index - /// into the last dimension where values can be inserted without changing the - /// ordered property.
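The 2-D example from the documentation above, written against the wrapper declared below; the result annotation selects the index type:

```swift
let sortedInputs: Tensor<Int32> = [[0, 3, 9, 9, 10],
                                   [1, 2, 3, 4, 5]]
let values: Tensor<Int32> = [[2, 4, 9],
                             [0, 2, 6]]
let result: Tensor<Int32> = _Raw.upperBound(sortedInputs: sortedInputs, values)
// result == [[1, 2, 4],
//            [0, 2, 5]]  // per-row np.searchsorted(..., side='right')
```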
- @inlinable @inline(__always) - public static func upperBound< - T: TensorFlowScalar, - OutType: TensorFlowIndex - >( - sortedInputs: Tensor, - _ values: Tensor - ) -> Tensor { - switch commonBackend(sortedInputs.handle.backend, values.handle.backend) { - case .XLA: - let output_device = values.device - let sortedInputs = Tensor(copying: sortedInputs, to: .defaultTFEager) - let values = Tensor(copying: values, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.upperBound(sortedInputs: sortedInputs, values), to: output_device) - case .TF_EAGER: - return _RawTFEager.upperBound(sortedInputs: sortedInputs, values) - } - - } - - /// Creates a handle to a Variable resource. - /// - /// - Attrs: - /// - container: the container this variable is placed in. - /// - shared_name: the name by which this variable is referred to. - /// - dtype: the type of this variable. Must agree with the dtypes - /// of all ops using this variable. - /// - shape: The (possibly partially specified) shape of this variable. - @inlinable @inline(__always) - public static func varHandleOp( - container: String, - sharedName: String, - dtype: TensorDataType, - shape: TensorShape? - ) -> ResourceHandle { - _RawTFEager.varHandleOp( - container: container, sharedName: sharedName, dtype: dtype, shape: shape) - } - - /// Checks whether a resource handle-based variable has been initialized. - /// - /// - Parameter resource: the input resource handle. - /// - /// - Output is_initialized: a scalar boolean which is true if the variable has been - /// initialized. - @inlinable @inline(__always) - public static func varIsInitializedOp( - resource: ResourceHandle - ) -> Tensor { - _RawTFEager.varIsInitializedOp(resource: resource) - } - - /// Returns the shape of the variable pointed to by `resource`. - /// - /// This operation returns a 1-D integer tensor representing the shape of `input`. - /// - /// For example: - /// - /// ``` - /// # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]] - /// shape(t) ==> [2, 2, 3] - /// ``` - @inlinable @inline(__always) - public static func variableShape( - _ input: ResourceHandle - ) -> Tensor { - _RawTFEager.variableShape(input) - } - - /// Returns locations of nonzero / true values in a tensor. - /// - /// This operation returns the coordinates of true elements in `condition`. The - /// coordinates are returned in a 2-D tensor where the first dimension (rows) - /// represents the number of true elements, and the second dimension (columns) - /// represents the coordinates of the true elements. Keep in mind, the shape of - /// the output tensor can vary depending on how many true values there are in - /// `condition`. Indices are output in row-major order. - /// - /// For example: - /// - /// ``` - /// # 'input' tensor is [[True, False] - /// # [True, False]] - /// # 'input' has two true values, so output has two coordinates. - /// # 'input' has rank of 2, so coordinates have two indices. - /// where(input) ==> [[0, 0], - /// [1, 0]] - /// - /// # `condition` tensor is [[[True, False] - /// # [True, False]] - /// # [[False, True] - /// # [False, True]] - /// # [[False, False] - /// # [False, True]]] - /// # 'input' has 5 true values, so output has 5 coordinates. - /// # 'input' has rank of 3, so coordinates have three indices. 
- /// where(input) ==> [[0, 0, 0], - /// [0, 1, 0], - /// [1, 0, 1], - /// [1, 1, 1], - /// [2, 1, 1]] - /// - /// # `condition` tensor is [[[1.5, 0.0] - /// # [-0.5, 0.0]] - /// # [[0.0, 0.25] - /// # [0.0, 0.75]] - /// # [[0.0, 0.0] - /// # [0.0, 0.01]]] - /// # 'input' has 5 nonzero values, so output has 5 coordinates. - /// # 'input' has rank of 3, so coordinates have three indices. - /// where(input) ==> [[0, 0, 0], - /// [0, 1, 0], - /// [1, 0, 1], - /// [1, 1, 1], - /// [2, 1, 1]] - /// - /// # `condition` tensor is [[[1.5 + 0.0j, 0.0 + 0.0j] - /// # [0.0 + 0.5j, 0.0 + 0.0j]] - /// # [[0.0 + 0.0j, 0.25 + 1.5j] - /// # [0.0 + 0.0j, 0.75 + 0.0j]] - /// # [[0.0 + 0.0j, 0.0 + 0.0j] - /// # [0.0 + 0.0j, 0.01 + 0.0j]]] - /// # 'input' has 5 nonzero magnitude values, so output has 5 coordinates. - /// # 'input' has rank of 3, so coordinates have three indices. - /// where(input) ==> [[0, 0, 0], - /// [0, 1, 0], - /// [1, 0, 1], - /// [1, 1, 1], - /// [2, 1, 1]] - /// ``` - @inlinable @inline(__always) - public static func where_( - _ input: Tensor - ) -> Tensor { - switch input.handle.backend { - case .XLA: - let output_device = input.device - let input = Tensor(copying: input, to: .defaultTFEager) - return Tensor(copying: _RawTFEager.where_(input), to: output_device) - case .TF_EAGER: - return _RawTFEager.where_(input) - } - - } - - /// output = input; While (Cond(output)) { output = Body(output) } - /// - /// - Parameter input: A list of input tensors whose types are T. - /// - /// - Attrs: - /// - T: dtype in use. - /// - cond: A function takes 'input' and returns a tensor. If the tensor is - /// a scalar of non-boolean, the scalar is converted to a boolean - /// according to the following rule: if the scalar is a numerical - /// value, non-zero means True and zero means False; if the scalar is - /// a string, non-empty means True and empty means False. If the - /// tensor is not a scalar, non-emptiness means True and False - /// otherwise. - /// - body: A function that takes a list of tensors and returns another - /// list of tensors. Both lists have the same types as specified - /// by T. - /// - /// - Output output: A list of output tensors whose types are T. - @inlinable @inline(__always) - public static func while_< - T: TensorArrayProtocol, - CondIn: TensorGroup, - CondOut: TensorGroup, - BodyIn: TensorGroup, - BodyOut: TensorGroup - >( - _ input: T, - cond: (CondIn) -> CondOut, - body: (BodyIn) -> BodyOut, - outputShapes: [TensorShape?], - parallelIterations: Int64 = 10 - ) -> T { - _RawTFEager.while_( - input, cond: cond, body: body, outputShapes: outputShapes, - parallelIterations: parallelIterations) + /// An op which shards the input based on the given sharding attribute. + @inlinable @inline(__always) + public static func xlaSharding( + _ input: Tensor + ) -> Tensor { + switch input.handle.backend { + case .XLA: + let output_device = input.device + let input = Tensor(copying: input, to: .defaultTFEager) + return Tensor(copying: _RawTFEager.xlaSharding(input), to: output_device) + case .TF_EAGER: + return _RawTFEager.xlaSharding(input) } - /// A Reader that outputs the entire contents of a file as a value. - /// - /// To use, enqueue filenames in a Queue. The output of ReaderRead will - /// be a filename (key) and the contents of that file (value). - /// - /// - Attrs: - /// - container: If non-empty, this reader is placed in the given container. - /// Otherwise, a default container is used. 
- /// - shared_name: If non-empty, this reader is named in the given bucket - /// with this shared_name. Otherwise, the node name is used instead. - /// - /// - Output reader_handle: The handle to reference the Reader. - @inlinable @inline(__always) - public static func wholeFileReaderV2( - container: String, - sharedName: String - ) -> ResourceHandle { - _RawTFEager.wholeFileReaderV2(container: container, sharedName: sharedName) - } - - /// Combines (nests of) input elements into a dataset of (nests of) windows. - /// - /// A "window" is a finite dataset of flat elements of size `size` (or possibly - /// fewer if there are not enough input elements to fill the window and - /// `drop_remainder` evaluates to false). - /// - /// The `shift` argument determines the number of input elements by which - /// the window moves on each iteration. The first element in the `k`th window - /// will be element - /// - /// ``` - /// 1 + (k-1) * shift - /// ``` - /// - /// of the input dataset. In particular, the first element of the first window - /// will always be the first element of the input dataset. - /// - /// If the `stride` parameter is greater than 1, then each window will skip - /// `(stride - 1)` input elements between each element that appears in the - /// window. Output windows will still contain `size` elements regardless of - /// the value of `stride`. - /// - /// The `stride` argument determines the stride of the input elements, and the - /// `shift` argument determines the shift of the window. - /// - /// For example, letting `{...}` to represent a Dataset: - /// - /// - `tf.data.Dataset.range(7).window(2)` produces - /// `{{0, 1}, {2, 3}, {4, 5}, {6}}` - /// - `tf.data.Dataset.range(7).window(3, 2, 1, True)` produces - /// `{{0, 1, 2}, {2, 3, 4}, {4, 5, 6}}` - /// - `tf.data.Dataset.range(7).window(3, 1, 2, True)` produces - /// `{{0, 2, 4}, {1, 3, 5}, {2, 4, 6}}` - /// - /// Note that when the `window` transformation is applied to a dataset of - /// nested elements, it produces a dataset of nested windows. - /// - /// For example: - /// - /// - `tf.data.Dataset.from_tensor_slices((range(4), range(4))).window(2)` - /// produces `{({0, 1}, {0, 1}), ({2, 3}, {2, 3})}` - /// - `tf.data.Dataset.from_tensor_slices({"a": range(4)}).window(2)` - /// produces `{{"a": {0, 1}}, {"a": {2, 3}}}` - /// - /// - Parameters: - /// - size: An integer scalar, representing the number of elements - /// of the input dataset to combine into a window. Must be positive. - /// - shift: An integer scalar, representing the number of input elements - /// by which the window moves in each iteration. Defaults to `size`. - /// Must be positive. - /// - stride: An integer scalar, representing the stride of the input elements - /// in the sliding window. Must be positive. The default value of 1 means - /// "retain every input element". - /// - drop_remainder: A Boolean scalar, representing whether the last window should be - /// dropped if its size is smaller than `window_size`. - @inlinable @inline(__always) - public static func windowDataset( - inputDataset: VariantHandle, - size: Tensor, - shift: Tensor, - stride: Tensor, - dropRemainder: Tensor, - outputTypes: [TensorDataType], - outputShapes: [TensorShape?] - ) -> VariantHandle { - _RawTFEager.windowDataset( - inputDataset: inputDataset, size: size, shift: shift, stride: stride, - dropRemainder: dropRemainder, outputTypes: outputTypes, outputShapes: outputShapes) - } - - /// Worker heartbeat op. 
- /// - /// Heartbeats may be sent periodically to indicate the coordinator is still active, - /// to retrieve the current worker status and to expedite shutdown when necessary. - /// - /// - Parameter request: A string tensor containing a serialized WorkerHeartbeatRequest - /// - /// - Output response: A string tensor containing a serialized WorkerHeartbeatResponse - @inlinable @inline(__always) - public static func workerHeartbeat( - request: StringTensor - ) -> StringTensor { - _RawTFEager.workerHeartbeat(request: request) - } - - @inlinable @inline(__always) - public static func wrapDatasetVariant( - inputHandle: VariantHandle - ) -> VariantHandle { - _RawTFEager.wrapDatasetVariant(inputHandle: inputHandle) - } - - @inlinable @inline(__always) - public static func writeAudioSummary( - writer: ResourceHandle, - step: Tensor, - tag: StringTensor, - _ tensor: Tensor, - sampleRate: Tensor, - maxOutputs: Int64 = 3 - ) { - _RawTFEager.writeAudioSummary( - writer: writer, step: step, tag: tag, tensor, sampleRate: sampleRate, maxOutputs: maxOutputs - ) - } + } - /// Writes contents to the file at input filename. Creates file and recursively - /// - /// creates directory if not existing. - /// - /// - Parameters: - /// - filename: scalar. The name of the file to which we write the contents. - /// - contents: scalar. The content to be written to the output file. - @inlinable @inline(__always) - public static func writeFile( - filename: StringTensor, - contents: StringTensor - ) { - _RawTFEager.writeFile(filename: filename, contents: contents) + /// Wraps the XLA Sort operator, documented at + /// + /// https://www.tensorflow.org/performance/xla/operation_semantics#sort + /// . + /// + /// Sorts a tensor. Currently only sorts in ascending order are supported. + /// + /// - Parameter input: A `Tensor` of type T. + /// + /// - Output output: A `Tensor` of type T. + @inlinable @inline(__always) + public static func xlaSort( + _ input: Tensor + ) -> Tensor { + switch input.handle.backend { + case .XLA: + let output_device = input.device + let input = Tensor(copying: input, to: .defaultTFEager) + return Tensor(copying: _RawTFEager.xlaSort(input), to: output_device) + case .TF_EAGER: + return _RawTFEager.xlaSort(input) } - @inlinable @inline(__always) - public static func writeGraphSummary( - writer: ResourceHandle, - step: Tensor, - _ tensor: StringTensor - ) { - _RawTFEager.writeGraphSummary(writer: writer, step: step, tensor) - } + } - @inlinable @inline(__always) - public static func writeHistogramSummary( - writer: ResourceHandle, - step: Tensor, - tag: StringTensor, - _ values: Tensor - ) { - _RawTFEager.writeHistogramSummary(writer: writer, step: step, tag: tag, values) - } + /// Computes the eigen decomposition of a batch of self-adjoint matrices + /// + /// (Note: Only real inputs are supported). + /// + /// Computes the eigenvalues and eigenvectors of the innermost M-by-N matrices in + /// tensor such that tensor[...,:,:] = u[..., :, :] * Diag(s[..., :]) * Transpose(v[...,:,:]). + /// + /// - Parameter a: the input tensor. + /// + /// - Attrs: + /// - max_iter: maximum number of sweep update, i.e., the whole lower triangular + /// part or upper triangular part based on parameter lower. Heuristically, it has + /// been argued that approximatly log(min (M, N)) sweeps are needed in practice + /// (Ref: Golub & van Loan "Matrix Computation"). + /// - epsilon: the tolerance ratio. + /// - precision_config: a serialized xla::PrecisionConfig proto. + /// + /// - Outputs: + /// - s: Singular values. 
The values are sorted in reverse order of magnitude, so
+  ///     s[..., 0] is the largest value, s[..., 1] is the second largest, etc.
+  ///   - u: Left singular vectors.
+  ///   - v: Right singular vectors.
+  @inlinable @inline(__always)
+  public static func xlaSvd<T: TensorFlowNumeric>(
+    _ a: Tensor<T>,
+    maxIter: Int64,
+    epsilon: Double,
+    precisionConfig: String
+  ) -> (s: Tensor<T>, u: Tensor<T>, v: Tensor<T>) {
+    _RawTFEager.xlaSvd(a, maxIter: maxIter, epsilon: epsilon, precisionConfig: precisionConfig)
+  }
+
+  /// output = input; While (Cond(output)) { output = Body(output) }
+  ///
+  /// - Parameter input: A list of input tensors whose types are T.
+  ///
+  /// - Attrs:
+  ///   - cond: A function that takes 'input' and returns a tensor. If the tensor is
+  ///     a non-boolean scalar, the scalar is converted to a boolean
+  ///     according to the following rule: if the scalar is a numerical
+  ///     value, non-zero means True and zero means False; if the scalar is
+  ///     a string, non-empty means True and empty means False. If the
+  ///     tensor is not a scalar, non-emptiness means True and emptiness
+  ///     means False.
+  ///   - body: A function that takes a list of tensors and returns another
+  ///     list of tensors. Both lists have the same types as specified by T.
+  ///
+  /// - Output output: A list of output tensors whose types are T.
+  @inlinable @inline(__always)
+  public static func xlaWhile<
+    T: TensorArrayProtocol,
+    CondIn: TensorGroup,
+    CondOut: TensorGroup,
+    BodyIn: TensorGroup,
+    BodyOut: TensorGroup
+  >(
+    _ input: T,
+    cond: (CondIn) -> CondOut,
+    body: (BodyIn) -> BodyOut
+  ) -> T {
+    _RawTFEager.xlaWhile(input, cond: cond, body: body)
+  }
+
+  /// Returns 0 if x == 0, and x * log(y) otherwise, elementwise.
+  @inlinable @inline(__always)
+  public static func xlogy<T: FloatingPoint & TensorFlowScalar>(
+    _ x: Tensor<T>,
+    _ y: Tensor<T>
+  ) -> Tensor<T> {
+    switch commonBackend(x.handle.backend, y.handle.backend) {
+    case .XLA:
+      let output_device = y.device
+      let x = Tensor(copying: x, to: .defaultTFEager)
+      let y = Tensor(copying: y, to: .defaultTFEager)
+      return Tensor(copying: _RawTFEager.xlogy(x, y), to: output_device)
+    case .TF_EAGER:
+      return _RawTFEager.xlogy(x, y)
+    }
+
+  }
+
+  /// Returns a tensor of zeros with the same shape and type as x.
+  ///
+  /// - Parameter x: a tensor of type T.
+  ///
+  /// - Output y: a tensor of the same shape and type as x but filled with zeros.
+  @inlinable @inline(__always)
+  public static func zerosLike<T: TensorFlowScalar>(
+    _ x: Tensor<T>
+  ) -> Tensor<T> {
+    switch x.handle.backend {
+    case .XLA:
+      return _RawXLA.zerosLike(x)
+    case .TF_EAGER:
+      return _RawTFEager.zerosLike(x)
+    }
+
+  }
+
+  /// Compute the Hurwitz zeta function \\(\zeta(x, q)\\).
+ /// + /// The Hurwitz zeta function is defined as: + /// + /// + /// \\(\zeta(x, q) = \sum_{n=0}^{\infty} (q + n)^{-x}\\) + @inlinable @inline(__always) + public static func zeta( + _ x: Tensor, + q: Tensor + ) -> Tensor { + switch commonBackend(x.handle.backend, q.handle.backend) { + case .XLA: + let output_device = q.device + let x = Tensor(copying: x, to: .defaultTFEager) + let q = Tensor(copying: q, to: .defaultTFEager) + return Tensor(copying: _RawTFEager.zeta(x, q: q), to: output_device) + case .TF_EAGER: + return _RawTFEager.zeta(x, q: q) } - @inlinable @inline(__always) - public static func writeSummary( - writer: ResourceHandle, - step: Tensor, - _ tensor: Tensor, - tag: StringTensor, - summaryMetadata: StringTensor - ) { - _RawTFEager.writeSummary( - writer: writer, step: step, tensor, tag: tag, summaryMetadata: summaryMetadata) - } - - /// Returns 0 if x == 0, and x / y otherwise, elementwise. - @inlinable @inline(__always) - public static func xdivy( - _ x: Tensor, - _ y: Tensor - ) -> Tensor { - switch commonBackend(x.handle.backend, y.handle.backend) { - case .XLA: - return _RawXLA.xdivy(x, y) - case .TF_EAGER: - return _RawTFEager.xdivy(x, y) - } - - } - - /// Helper operator for performing XLA-style broadcasts - /// - /// Broadcasts `lhs` and `rhs` to the same rank, by adding size 1 dimensions to - /// whichever of `lhs` and `rhs` has the lower rank, using XLA's broadcasting rules - /// for binary operators. - /// - /// - Parameters: - /// - lhs: the LHS input tensor - /// - rhs: the RHS input tensor - /// - broadcast_dims: an XLA-style broadcast dimension specification - /// - /// - Outputs: - /// - lhs_output: the broadcasted LHS tensor - /// - rhs_output: the broadcasted RHS tensor - @inlinable @inline(__always) - public static func xlaBroadcastHelper< - T: TensorFlowNumeric, - Tindices: TensorFlowIndex - >( - lhs: Tensor, - rhs: Tensor, - broadcastDims: Tensor - ) -> (lhsOutput: Tensor, rhsOutput: Tensor) { - _RawTFEager.xlaBroadcastHelper(lhs: lhs, rhs: rhs, broadcastDims: broadcastDims) - } - - /// Operator that connects the output of an XLA computation to other consumer graph nodes. - @inlinable @inline(__always) - public static func xlaClusterOutput( - _ input: Tensor - ) -> Tensor { - switch input.handle.backend { - case .XLA: - let output_device = input.device - let input = Tensor(copying: input, to: .defaultTFEager) - return Tensor(copying: _RawTFEager.xlaClusterOutput(input), to: output_device) - case .TF_EAGER: - return _RawTFEager.xlaClusterOutput(input) - } - - } - - /// Wraps the XLA ConvGeneralDilated operator, documented at - /// - /// https://www.tensorflow.org/performance/xla/operation_semantics#conv_convolution - /// . - /// - /// - Parameters: - /// - lhs: the input tensor - /// - rhs: the kernel tensor - /// - window_strides: the inter-window strides - /// - padding: the padding to apply at the start and end of each input dimensions - /// - lhs_dilation: dilation to apply between input elements - /// - rhs_dilation: dilation to apply between kernel elements - /// - feature_group_count: number of feature groups for grouped convolution. - /// - /// - Attrs: - /// - dimension_numbers: a serialized xla::ConvolutionDimensionNumbers proto. - /// - precision_config: a serialized xla::PrecisionConfig proto. 
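Ops like `zeta` above (and `xlogy` before it) have no XLA kernel, so their `.XLA` case falls back to eager mode: operands are copied to `.defaultTFEager`, the eager kernel runs, and the result is copied back to the caller's device. Below is a minimal, self-contained sketch of that dispatch shape; `Backend`, `Value`, and `fallbackToEager` are illustrative names, not library API.

```swift
// Distilled form of the copy-to-eager fallback used by ops without an XLA
// kernel. Pure Swift; names are hypothetical.
enum Backend { case xla, tfEager }

struct Value {
  var backend: Backend
  var payload: Double
}

func fallbackToEager(_ x: Value, kernel: (Value) -> Value) -> Value {
  switch x.backend {
  case .xla:
    // Materialize on the eager backend, run the kernel, then restore placement.
    var eager = x
    eager.backend = .tfEager
    var result = kernel(eager)
    result.backend = .xla
    return result
  case .tfEager:
    return kernel(x)
  }
}
```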
- @inlinable @inline(__always) - public static func xlaConv< - T: TensorFlowNumeric, - Tindices: TensorFlowIndex - >( - lhs: Tensor, - rhs: Tensor, - windowStrides: Tensor, - padding: Tensor, - lhsDilation: Tensor, - rhsDilation: Tensor, - featureGroupCount: Tensor, - dimensionNumbers: String, - precisionConfig: String - ) -> Tensor { - switch commonBackend( - commonBackend( - commonBackend( - commonBackend( - commonBackend( - commonBackend(lhs.handle.backend, rhs.handle.backend), windowStrides.handle.backend), - padding.handle.backend), lhsDilation.handle.backend), rhsDilation.handle.backend), - featureGroupCount.handle.backend) - { - case .XLA: - let output_device = featureGroupCount.device - let lhs = Tensor(copying: lhs, to: .defaultTFEager) - let rhs = Tensor(copying: rhs, to: .defaultTFEager) - let windowStrides = Tensor(copying: windowStrides, to: .defaultTFEager) - let padding = Tensor(copying: padding, to: .defaultTFEager) - let lhsDilation = Tensor(copying: lhsDilation, to: .defaultTFEager) - let rhsDilation = Tensor(copying: rhsDilation, to: .defaultTFEager) - let featureGroupCount = Tensor(copying: featureGroupCount, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.xlaConv( - lhs: lhs, rhs: rhs, windowStrides: windowStrides, padding: padding, - lhsDilation: lhsDilation, rhsDilation: rhsDilation, - featureGroupCount: featureGroupCount, dimensionNumbers: dimensionNumbers, - precisionConfig: precisionConfig), to: output_device) - case .TF_EAGER: - return _RawTFEager.xlaConv( - lhs: lhs, rhs: rhs, windowStrides: windowStrides, padding: padding, - lhsDilation: lhsDilation, rhsDilation: rhsDilation, featureGroupCount: featureGroupCount, - dimensionNumbers: dimensionNumbers, precisionConfig: precisionConfig) - } - - } - - /// Wraps the XLA DotGeneral operator, documented at - /// - /// https://www.tensorflow.org/performance/xla/operation_semantics#dotgeneral - /// . - /// - /// - Parameters: - /// - lhs: the LHS tensor - /// - rhs: the RHS tensor - /// - /// - Attrs: - /// - dimension_numbers: a serialized xla::DotDimensionNumbers proto. - /// - precision_config: a serialized xla::PrecisionConfig proto. - @inlinable @inline(__always) - public static func xlaDot( - lhs: Tensor, - rhs: Tensor, - dimensionNumbers: String, - precisionConfig: String - ) -> Tensor { - switch commonBackend(lhs.handle.backend, rhs.handle.backend) { - case .XLA: - let output_device = rhs.device - let lhs = Tensor(copying: lhs, to: .defaultTFEager) - let rhs = Tensor(copying: rhs, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.xlaDot( - lhs: lhs, rhs: rhs, dimensionNumbers: dimensionNumbers, precisionConfig: precisionConfig - ), to: output_device) - case .TF_EAGER: - return _RawTFEager.xlaDot( - lhs: lhs, rhs: rhs, dimensionNumbers: dimensionNumbers, precisionConfig: precisionConfig) - } - - } - - /// Wraps the XLA DynamicSlice operator, documented at - /// - /// https://www.tensorflow.org/performance/xla/operation_semantics#dynamicslice - /// . - /// - /// DynamicSlice extracts a sub-array from the input array at dynamic - /// start_indices. The size of the slice in each dimension is passed in - /// size_indices, which specify the end point of exclusive slice intervals in each - /// dimension -- [start, start + size). The shape of start_indices must have rank 1, - /// with dimension size equal to the rank of operand. - /// - /// - Parameters: - /// - input: A `Tensor` of type T. - /// - start_indices: List of N integers containing the slice size for each - /// dimension. 
Each value must be strictly greater than zero, and start + size - /// must be less than or equal to the size of the dimension to avoid - /// implementation defined behavior. - @inlinable @inline(__always) - public static func xlaDynamicSlice< - T: TensorFlowScalar, - Tindices: TensorFlowIndex - >( - _ input: Tensor, - startIndices: Tensor, - sizeIndices: Tensor - ) -> Tensor { - switch commonBackend( - commonBackend(input.handle.backend, startIndices.handle.backend), sizeIndices.handle.backend - ) { - case .XLA: - let output_device = sizeIndices.device - let input = Tensor(copying: input, to: .defaultTFEager) - let startIndices = Tensor(copying: startIndices, to: .defaultTFEager) - let sizeIndices = Tensor(copying: sizeIndices, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.xlaDynamicSlice( - input, startIndices: startIndices, sizeIndices: sizeIndices), to: output_device) - case .TF_EAGER: - return _RawTFEager.xlaDynamicSlice( - input, startIndices: startIndices, sizeIndices: sizeIndices) - } - - } - - /// Wraps the XLA DynamicUpdateSlice operator, documented at - /// - /// https://www.tensorflow.org/performance/xla/operation_semantics#dynamicupdateslice - /// . - /// - /// XlaDynamicUpdateSlice generates a result which is the value of the `input` - /// operand, with a slice update overwritten at `indices`. The shape of `update` - /// determines the shape of the sub-array of the result which is updated. The shape - /// of indices must be rank == 1, with dimension size equal to the rank of `input`. - /// - /// Handling of out-of-bounds slice indices is implementation-defined. - /// - /// - Parameters: - /// - input: A `Tensor` of type T. - /// - update: A `Tensor` of type T. Same rank as `input`. - /// - indices: A vector of indices into `input`. Must have length equal to the rank of - /// `input`. - /// - /// - Output output: A `Tensor` of type T. - @inlinable @inline(__always) - public static func xlaDynamicUpdateSlice< - T: TensorFlowScalar, - Tindices: TensorFlowIndex - >( - _ input: Tensor, - update: Tensor, - indices: Tensor - ) -> Tensor { - switch commonBackend( - commonBackend(input.handle.backend, update.handle.backend), indices.handle.backend) - { - case .XLA: - let output_device = indices.device - let input = Tensor(copying: input, to: .defaultTFEager) - let update = Tensor(copying: update, to: .defaultTFEager) - let indices = Tensor(copying: indices, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.xlaDynamicUpdateSlice(input, update: update, indices: indices), - to: output_device) - case .TF_EAGER: - return _RawTFEager.xlaDynamicUpdateSlice(input, update: update, indices: indices) - } - - } - - /// An op which supports basic einsum op with 2 inputs and 1 output. - /// - /// This op has better TPU performnce since it doesn't have explicitly reshape and - /// transpose operations as tf.einsum does. - @inlinable @inline(__always) - public static func xlaEinsum( - _ a: Tensor, - _ b: Tensor, - equation: String - ) -> Tensor { - switch commonBackend(a.handle.backend, b.handle.backend) { - case .XLA: - let output_device = b.device - let a = Tensor(copying: a, to: .defaultTFEager) - let b = Tensor(copying: b, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.xlaEinsum(a, b, equation: equation), to: output_device) - case .TF_EAGER: - return _RawTFEager.xlaEinsum(a, b, equation: equation) - } - - } - - /// output = cond ? then_branch(inputs) : else_branch(inputs). - /// - /// - Parameters: - /// - cond: A boolean scalar. 
- /// - inputs: A list of input tensors. - /// - /// - Attrs: - /// - then_branch: A function takes 'inputs' and returns a list of tensors, - /// whose types are the same as what else_branch returns. - /// - else_branch: A function takes 'inputs' and returns a list of tensors. - /// whose types are the same as what then_branch returns. - /// - /// - Output output: A list of tensors returned by either then_branch(inputs) or - /// else_branch(inputs). The input shapes of the then_branch and - /// else_branch must match. - @inlinable @inline(__always) - public static func xlaIf< - Tcond: TensorFlowScalar, - ThenbranchIn: TensorGroup, - ThenbranchOut: TensorGroup, - ElsebranchIn: TensorGroup, - ElsebranchOut: TensorGroup, - Tin: TensorArrayProtocol, - Tout: TensorGroup - >( - cond: Tensor, - inputs: Tin, - thenBranch: (ThenbranchIn) -> ThenbranchOut, - elseBranch: (ElsebranchIn) -> ElsebranchOut - ) -> Tout { - _RawTFEager.xlaIf(cond: cond, inputs: inputs, thenBranch: thenBranch, elseBranch: elseBranch) - } - - /// Wraps the XLA Sort operator, documented at - /// - /// https://www.tensorflow.org/performance/xla/operation_semantics#sort - /// . - /// - /// Sorts a tensor. Currently only sorts in ascending order are supported. - /// - /// - Parameters: - /// - keys: A `Tensor` of type K. - /// - values: A `Tensor` of type V. - /// - /// - Outputs: - /// - sorted_keys: A `Tensor` of type K. - /// - sorted_values: A `Tensor` of type V. - @inlinable @inline(__always) - public static func xlaKeyValueSort< - K: TensorFlowNumeric, - V: TensorFlowScalar - >( - keys: Tensor, - _ values: Tensor - ) -> (sortedKeys: Tensor, sortedValues: Tensor) { - _RawTFEager.xlaKeyValueSort(keys: keys, values) - } - - /// XLA Launch Op. For use by the XLA JIT only. - @inlinable @inline(__always) - public static func xlaLaunch< - Tconstants: TensorArrayProtocol, - Targs: TensorArrayProtocol, - Tresults: TensorGroup, - FunctionIn: TensorGroup, - FunctionOut: TensorGroup - >( - constants: Tconstants, - args: Targs, - resources: [ResourceHandle], - function: (FunctionIn) -> FunctionOut - ) -> Tresults { - _RawTFEager.xlaLaunch( - constants: constants, args: args, resources: resources, function: function) - } - - /// Wraps the XLA Pad operator, documented at - /// - /// https://www.tensorflow.org/performance/xla/operation_semantics#pad - /// . - /// - /// - Parameters: - /// - input: A `Tensor` of type T. - /// - padding_value: A scalar `Tensor` of type T. - /// - padding_low: the padding to apply at the start of each input dimensions - /// - padding_high: the padding to apply at the end of each input dimension. - /// - padding_interior: the padding to apply between each input element. - /// - /// - Output output: A `Tensor` of type T. 
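The `xlaPad` operator declared next takes low, high, and interior padding: interior padding is inserted between consecutive input elements, then the low and high amounts are prepended and appended. A hand-rolled 1-D illustration of that composition, assuming the semantics described in the XLA Pad documentation; `xlaStylePad` is not library API.

```swift
// 1-D illustration of XLA-style padding: interior padding between elements,
// then low padding in front and high padding at the end. Illustrative only.
func xlaStylePad(_ input: [Int], value: Int, low: Int, high: Int, interior: Int) -> [Int] {
  var body: [Int] = []
  for (i, element) in input.enumerated() {
    if i > 0 {
      // Interior padding goes between consecutive input elements.
      body.append(contentsOf: Array(repeating: value, count: interior))
    }
    body.append(element)
  }
  return Array(repeating: value, count: low) + body + Array(repeating: value, count: high)
}

// [1, 2, 3] with low: 1, high: 2, interior: 1 and padding value 0:
print(xlaStylePad([1, 2, 3], value: 0, low: 1, high: 2, interior: 1))
// [0, 1, 0, 2, 0, 3, 0, 0]
```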
- @inlinable @inline(__always) - public static func xlaPad< - T: TensorFlowScalar, - Tindices: TensorFlowIndex - >( - _ input: Tensor, - paddingValue: Tensor, - paddingLow: Tensor, - paddingHigh: Tensor, - paddingInterior: Tensor - ) -> Tensor { - switch commonBackend( - commonBackend( - commonBackend( - commonBackend(input.handle.backend, paddingValue.handle.backend), - paddingLow.handle.backend), paddingHigh.handle.backend), paddingInterior.handle.backend) - { - case .XLA: - let output_device = paddingInterior.device - let input = Tensor(copying: input, to: .defaultTFEager) - let paddingValue = Tensor(copying: paddingValue, to: .defaultTFEager) - let paddingLow = Tensor(copying: paddingLow, to: .defaultTFEager) - let paddingHigh = Tensor(copying: paddingHigh, to: .defaultTFEager) - let paddingInterior = Tensor(copying: paddingInterior, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.xlaPad( - input, paddingValue: paddingValue, paddingLow: paddingLow, paddingHigh: paddingHigh, - paddingInterior: paddingInterior), to: output_device) - case .TF_EAGER: - return _RawTFEager.xlaPad( - input, paddingValue: paddingValue, paddingLow: paddingLow, paddingHigh: paddingHigh, - paddingInterior: paddingInterior) - } - - } - - /// Receives the named tensor from another XLA computation. Wraps the XLA Recv - /// - /// operator documented at - /// https://www.tensorflow.org/performance/xla/operation_semantics#recv . - /// - /// - Attrs: - /// - dtype: The type of the tensor. - /// - tensor_name: A string key that identifies the channel. - /// - shape: The shape of the tensor. - /// - /// - Output tensor: The tensor to receive. - @inlinable @inline(__always) - public static func xlaRecv( - tensorName: String, - shape: TensorShape? - ) -> Tensor { - _RawTFEager.xlaRecv(tensorName: tensorName, shape: shape) - } - - /// Wraps the XLA Reduce operator, documented at - /// - /// https://www.tensorflow.org/performance/xla/operation_semantics#reduce . - /// - /// - Parameters: - /// - input: the input tensor - /// - init_value: a scalar representing the initial value for the reduction - /// - /// - Attrs: - /// - dimensions_to_reduce: dimension numbers over which to reduce - /// - reducer: a reducer function to apply - @inlinable @inline(__always) - public static func xlaReduce< - T: TensorFlowNumeric, - ReducerIn: TensorGroup, - ReducerOut: TensorGroup - >( - _ input: Tensor, - initValue: Tensor, - dimensionsToReduce: [Int32], - reducer: (ReducerIn) -> ReducerOut - ) -> Tensor { - switch commonBackend(input.handle.backend, initValue.handle.backend) { - case .XLA: - let output_device = initValue.device - let input = Tensor(copying: input, to: .defaultTFEager) - let initValue = Tensor(copying: initValue, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.xlaReduce( - input, initValue: initValue, dimensionsToReduce: dimensionsToReduce, reducer: reducer), - to: output_device) - case .TF_EAGER: - return _RawTFEager.xlaReduce( - input, initValue: initValue, dimensionsToReduce: dimensionsToReduce, reducer: reducer) - } - - } - - /// Wraps the XLA ReduceWindow operator, documented at - /// - /// https://www.tensorflow.org/performance/xla/operation_semantics#reducewindow . 
- /// - /// - Parameters: - /// - input: the input tensor - /// - init_value: a scalar representing the initial value for the reduction - /// - window_dimensions: the shape of the window - /// - window_strides: the inter-window strides - /// - padding: the padding to apply at the start and end of each input dimensions - /// - /// - Attr computation: a reducer function to apply - @inlinable @inline(__always) - public static func xlaReduceWindow< - T: TensorFlowNumeric, - Tindices: TensorFlowIndex, - ComputationIn: TensorGroup, - ComputationOut: TensorGroup - >( - _ input: Tensor, - initValue: Tensor, - windowDimensions: Tensor, - windowStrides: Tensor, - baseDilations: Tensor, - windowDilations: Tensor, - padding: Tensor, - computation: (ComputationIn) -> ComputationOut - ) -> Tensor { - switch commonBackend( - commonBackend( - commonBackend( - commonBackend( - commonBackend( - commonBackend(input.handle.backend, initValue.handle.backend), - windowDimensions.handle.backend), windowStrides.handle.backend), - baseDilations.handle.backend), windowDilations.handle.backend), padding.handle.backend) - { - case .XLA: - let output_device = padding.device - let input = Tensor(copying: input, to: .defaultTFEager) - let initValue = Tensor(copying: initValue, to: .defaultTFEager) - let windowDimensions = Tensor(copying: windowDimensions, to: .defaultTFEager) - let windowStrides = Tensor(copying: windowStrides, to: .defaultTFEager) - let baseDilations = Tensor(copying: baseDilations, to: .defaultTFEager) - let windowDilations = Tensor(copying: windowDilations, to: .defaultTFEager) - let padding = Tensor(copying: padding, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.xlaReduceWindow( - input, initValue: initValue, windowDimensions: windowDimensions, - windowStrides: windowStrides, baseDilations: baseDilations, - windowDilations: windowDilations, padding: padding, computation: computation), - to: output_device) - case .TF_EAGER: - return _RawTFEager.xlaReduceWindow( - input, initValue: initValue, windowDimensions: windowDimensions, - windowStrides: windowStrides, baseDilations: baseDilations, - windowDilations: windowDilations, padding: padding, computation: computation) - } - - } - - /// Replica ID. - @inlinable @inline(__always) - public static func xlaReplicaId() -> Tensor { - _RawTFEager.xlaReplicaId() - } - - /// Wraps the XLA SelectAndScatter operator, documented at - /// - /// https://www.tensorflow.org/performance/xla/operation_semantics#selectandscatter - /// . 
- /// - /// - Parameters: - /// - operand: the input tensor - /// - window_dimensions: the shape of the window - /// - window_strides: the inter-window strides - /// - padding: the padding to apply at the start and end of each input dimensions - /// - source: a tensor of values to scatter - /// - init_value: a scalar representing the initial value for the output tensor - /// - /// - Attrs: - /// - select: a selection function to apply - /// - scatter: a scatter function to apply - @inlinable @inline(__always) - public static func xlaSelectAndScatter< - T: TensorFlowNumeric, - Tindices: TensorFlowIndex, - SelectIn: TensorGroup, - SelectOut: TensorGroup, - ScatterIn: TensorGroup, - ScatterOut: TensorGroup - >( - operand: Tensor, - windowDimensions: Tensor, - windowStrides: Tensor, - padding: Tensor, - source: Tensor, - initValue: Tensor, - select: (SelectIn) -> SelectOut, - scatter: (ScatterIn) -> ScatterOut - ) -> Tensor { - switch commonBackend( - commonBackend( - commonBackend( - commonBackend( - commonBackend(operand.handle.backend, windowDimensions.handle.backend), - windowStrides.handle.backend), padding.handle.backend), source.handle.backend), - initValue.handle.backend) - { - case .XLA: - let output_device = initValue.device - let operand = Tensor(copying: operand, to: .defaultTFEager) - let windowDimensions = Tensor(copying: windowDimensions, to: .defaultTFEager) - let windowStrides = Tensor(copying: windowStrides, to: .defaultTFEager) - let padding = Tensor(copying: padding, to: .defaultTFEager) - let source = Tensor(copying: source, to: .defaultTFEager) - let initValue = Tensor(copying: initValue, to: .defaultTFEager) - return Tensor( - copying: _RawTFEager.xlaSelectAndScatter( - operand: operand, windowDimensions: windowDimensions, windowStrides: windowStrides, - padding: padding, source: source, initValue: initValue, select: select, scatter: scatter - ), to: output_device) - case .TF_EAGER: - return _RawTFEager.xlaSelectAndScatter( - operand: operand, windowDimensions: windowDimensions, windowStrides: windowStrides, - padding: padding, source: source, initValue: initValue, select: select, scatter: scatter) - } - - } - - /// Computes the eigen decomposition of a batch of self-adjoint matrices - /// - /// (Note: Only real inputs are supported). - /// - /// Computes the eigenvalues and eigenvectors of the innermost N-by-N matrices in - /// tensor such that tensor[...,:,:] * v[..., :,i] = e[..., i] * v[...,:,i], for - /// i=0...N-1. - /// - /// - Parameter a: the input tensor. - /// - /// - Attrs: - /// - lower: a boolean specifies whether the calculation is done with the lower - /// triangular part or the upper triangular part. - /// - max_iter: maximum number of sweep update, i.e., the whole lower triangular - /// part or upper triangular part based on parameter lower. Heuristically, it has - /// been argued that approximatly logN sweeps are needed in practice (Ref: Golub & - /// van Loan "Matrix Computation"). - /// - epsilon: the tolerance ratio. - /// - /// - Outputs: - /// - w: The eigenvalues in ascending order, each repeated according to its - /// multiplicity. - /// - v: The column v[..., :, i] is the normalized eigenvector corresponding to the - /// eigenvalue w[..., i]. 
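The eigendecomposition contract stated above for `xlaSelfAdjointEig` (declared next) can be spot-checked numerically: for each `i`, `tensor * v[..., :, i]` should equal `w[..., i] * v[..., :, i]`. A hand-worked 2x2 check in plain Swift, with no library calls:

```swift
// Hand-worked 2x2 verification of A * v[:, i] == w[i] * v[:, i].
let a = [[2.0, 1.0], [1.0, 2.0]]          // symmetric input
let w = [1.0, 3.0]                        // eigenvalues in ascending order
let v = [[-0.7071067811865475, 0.7071067811865475],
         [0.7071067811865475, 0.7071067811865475]]  // columns are eigenvectors

for i in 0..<2 {
  let av = [a[0][0] * v[0][i] + a[0][1] * v[1][i],
            a[1][0] * v[0][i] + a[1][1] * v[1][i]]
  let wv = [w[i] * v[0][i], w[i] * v[1][i]]
  assert(abs(av[0] - wv[0]) < 1e-12 && abs(av[1] - wv[1]) < 1e-12)
}
```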
- @inlinable @inline(__always) - public static func xlaSelfAdjointEig( - _ a: Tensor, - lower: Bool, - maxIter: Int64, - epsilon: Double - ) -> (w: Tensor, v: Tensor) { - _RawTFEager.xlaSelfAdjointEig(a, lower: lower, maxIter: maxIter, epsilon: epsilon) - } - - /// Sends the named tensor to another XLA computation. Wraps the XLA Send operator - /// - /// documented at - /// https://www.tensorflow.org/performance/xla/operation_semantics#send . - /// - /// - Parameter tensor: The tensor to send. - /// - /// - Attr tensor_name: A string key that identifies the channel. - @inlinable @inline(__always) - public static func xlaSend( - _ tensor: Tensor, - tensorName: String - ) { - _RawTFEager.xlaSend(tensor, tensorName: tensorName) - } - - /// An op which shards the input based on the given sharding attribute. - @inlinable @inline(__always) - public static func xlaSharding( - _ input: Tensor - ) -> Tensor { - switch input.handle.backend { - case .XLA: - let output_device = input.device - let input = Tensor(copying: input, to: .defaultTFEager) - return Tensor(copying: _RawTFEager.xlaSharding(input), to: output_device) - case .TF_EAGER: - return _RawTFEager.xlaSharding(input) - } - - } - - /// Wraps the XLA Sort operator, documented at - /// - /// https://www.tensorflow.org/performance/xla/operation_semantics#sort - /// . - /// - /// Sorts a tensor. Currently only sorts in ascending order are supported. - /// - /// - Parameter input: A `Tensor` of type T. - /// - /// - Output output: A `Tensor` of type T. - @inlinable @inline(__always) - public static func xlaSort( - _ input: Tensor - ) -> Tensor { - switch input.handle.backend { - case .XLA: - let output_device = input.device - let input = Tensor(copying: input, to: .defaultTFEager) - return Tensor(copying: _RawTFEager.xlaSort(input), to: output_device) - case .TF_EAGER: - return _RawTFEager.xlaSort(input) - } - - } - - /// Computes the eigen decomposition of a batch of self-adjoint matrices - /// - /// (Note: Only real inputs are supported). - /// - /// Computes the eigenvalues and eigenvectors of the innermost M-by-N matrices in - /// tensor such that tensor[...,:,:] = u[..., :, :] * Diag(s[..., :]) * Transpose(v[...,:,:]). - /// - /// - Parameter a: the input tensor. - /// - /// - Attrs: - /// - max_iter: maximum number of sweep update, i.e., the whole lower triangular - /// part or upper triangular part based on parameter lower. Heuristically, it has - /// been argued that approximatly log(min (M, N)) sweeps are needed in practice - /// (Ref: Golub & van Loan "Matrix Computation"). - /// - epsilon: the tolerance ratio. - /// - precision_config: a serialized xla::PrecisionConfig proto. - /// - /// - Outputs: - /// - s: Singular values. The values are sorted in reverse order of magnitude, so - /// s[..., 0] is the largest value, s[..., 1] is the second largest, etc. - /// - u: Left singular vectors. - /// - v: Right singular vectors. - @inlinable @inline(__always) - public static func xlaSvd( - _ a: Tensor, - maxIter: Int64, - epsilon: Double, - precisionConfig: String - ) -> (s: Tensor, u: Tensor, v: Tensor) { - _RawTFEager.xlaSvd(a, maxIter: maxIter, epsilon: epsilon, precisionConfig: precisionConfig) - } - - /// output = input; While (Cond(output)) { output = Body(output) } - /// - /// - Parameter input: A list of input tensors whose types are T. - /// - /// - Attrs: - /// - cond: A function takes 'input' and returns a tensor. 
If the tensor is - /// a scalar of non-boolean, the scalar is converted to a boolean - /// according to the following rule: if the scalar is a numerical - /// value, non-zero means True and zero means False; if the scalar is - /// a string, non-empty means True and empty means False. If the - /// tensor is not a scalar, non-emptiness means True and False - /// otherwise. - /// - body: A function that takes a list of tensors and returns another - /// list of tensors. Both lists have the same types as specified by T. - /// - /// - Output output: A list of output tensors whose types are T. - @inlinable @inline(__always) - public static func xlaWhile< - T: TensorArrayProtocol, - CondIn: TensorGroup, - CondOut: TensorGroup, - BodyIn: TensorGroup, - BodyOut: TensorGroup - >( - _ input: T, - cond: (CondIn) -> CondOut, - body: (BodyIn) -> BodyOut - ) -> T { - _RawTFEager.xlaWhile(input, cond: cond, body: body) - } - - /// Returns 0 if x == 0, and x * log(y) otherwise, elementwise. - @inlinable @inline(__always) - public static func xlogy( - _ x: Tensor, - _ y: Tensor - ) -> Tensor { - switch commonBackend(x.handle.backend, y.handle.backend) { - case .XLA: - let output_device = y.device - let x = Tensor(copying: x, to: .defaultTFEager) - let y = Tensor(copying: y, to: .defaultTFEager) - return Tensor(copying: _RawTFEager.xlogy(x, y), to: output_device) - case .TF_EAGER: - return _RawTFEager.xlogy(x, y) - } - - } - - /// Returns a tensor of zeros with the same shape and type as x. - /// - /// - Parameter x: a tensor of type T. - /// - /// - Output y: a tensor of the same shape and type as x but filled with zeros. - @inlinable @inline(__always) - public static func zerosLike( - _ x: Tensor - ) -> Tensor { - switch x.handle.backend { - case .XLA: - return _RawXLA.zerosLike(x) - case .TF_EAGER: - return _RawTFEager.zerosLike(x) - } - - } - - /// Compute the Hurwitz zeta function \\(\zeta(x, q)\\). - /// - /// The Hurwitz zeta function is defined as: - /// - /// - /// \\(\zeta(x, q) = \sum_{n=0}^{\infty} (q + n)^{-x}\\) - @inlinable @inline(__always) - public static func zeta( - _ x: Tensor, - q: Tensor - ) -> Tensor { - switch commonBackend(x.handle.backend, q.handle.backend) { - case .XLA: - let output_device = q.device - let x = Tensor(copying: x, to: .defaultTFEager) - let q = Tensor(copying: q, to: .defaultTFEager) - return Tensor(copying: _RawTFEager.zeta(x, q: q), to: output_device) - case .TF_EAGER: - return _RawTFEager.zeta(x, q: q) - } - - } - - /// Creates a dataset that zips together `input_datasets`. - /// - /// The elements of the resulting dataset are created by zipping corresponding - /// elements from each of the input datasets. - /// - /// The size of the resulting dataset will match the size of the smallest input - /// dataset, and no error will be raised if input datasets have different sizes. - /// - /// - Parameter input_datasets: List of `N` variant Tensors representing datasets to be zipped together. - /// - /// - Attr N: The length of `input_datasets` - @inlinable @inline(__always) - public static func zipDataset( - inputDatasets: [VariantHandle], - outputTypes: [TensorDataType], - outputShapes: [TensorShape?] - ) -> VariantHandle { - _RawTFEager.zipDataset( - inputDatasets: inputDatasets, outputTypes: outputTypes, outputShapes: outputShapes) - } - - } -#else - public typealias _Raw = _RawTFEager -#endif + } + + /// Creates a dataset that zips together `input_datasets`. 
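The `output = input; While (Cond(output)) { output = Body(output) }` contract that the while-style ops in this file document is easy to restate in plain Swift; `whileLoop` below is illustrative, not the op itself.

```swift
// The documented control flow of xlaWhile/while_, restated as plain Swift:
// keep rewriting `output` until `cond` rejects it.
func whileLoop<T>(_ input: T, cond: (T) -> Bool, body: (T) -> T) -> T {
  var output = input
  while cond(output) {
    output = body(output)
  }
  return output
}

// Example: starting from 0, add 1 while the value is below 5.
let result = whileLoop(0, cond: { $0 < 5 }, body: { $0 + 1 })
print(result)  // 5
```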
+ /// + /// The elements of the resulting dataset are created by zipping corresponding + /// elements from each of the input datasets. + /// + /// The size of the resulting dataset will match the size of the smallest input + /// dataset, and no error will be raised if input datasets have different sizes. + /// + /// - Parameter input_datasets: List of `N` variant Tensors representing datasets to be zipped together. + /// + /// - Attr N: The length of `input_datasets` + @inlinable @inline(__always) + public static func zipDataset( + inputDatasets: [VariantHandle], + outputTypes: [TensorDataType], + outputShapes: [TensorShape?] + ) -> VariantHandle { + _RawTFEager.zipDataset( + inputDatasets: inputDatasets, outputTypes: outputTypes, outputShapes: outputShapes) + } + +} diff --git a/Sources/TensorFlow/Bindings/generate_wrappers.py b/Sources/TensorFlow/Bindings/generate_wrappers.py index 3218b1fb0..dced84e76 100644 --- a/Sources/TensorFlow/Bindings/generate_wrappers.py +++ b/Sources/TensorFlow/Bindings/generate_wrappers.py @@ -74,11 +74,7 @@ ) public typealias Raw = _Raw -#if USING_X10_BACKEND {raw_dispatching_enum} -#else -public typealias _Raw = _RawTFEager -#endif ''' _OUTPUT_FILE = 'RawOpsGenerated.swift' diff --git a/Sources/TensorFlow/CMakeLists.txt b/Sources/TensorFlow/CMakeLists.txt index 17a582969..da6166fa2 100644 --- a/Sources/TensorFlow/CMakeLists.txt +++ b/Sources/TensorFlow/CMakeLists.txt @@ -88,7 +88,6 @@ if(ENABLE_PYTHON_SUPPORT) Core/PythonConversion.swift) endif() target_compile_definitions(TensorFlow PRIVATE - USING_X10_BACKEND $<$:TENSORFLOW_USE_STANDARD_TOOLCHAIN> DEFAULT_BACKEND_EAGER) target_link_libraries(TensorFlow PRIVATE diff --git a/Sources/TensorFlow/Core/DataTypes.swift b/Sources/TensorFlow/Core/DataTypes.swift index 6e4436fbc..790c3563b 100644 --- a/Sources/TensorFlow/Core/DataTypes.swift +++ b/Sources/TensorFlow/Core/DataTypes.swift @@ -68,11 +68,7 @@ public protocol _TensorFlowDataTypeCompatible { /// `Tensor`. // // This includes all `_TensorFlowDataTypeCompatible` types except `String`. -#if USING_X10_BACKEND - public protocol TensorFlowScalar: XLAScalarType & _TensorFlowDataTypeCompatible {} -#else - public protocol TensorFlowScalar: _TensorFlowDataTypeCompatible {} -#endif +public protocol TensorFlowScalar: XLAScalarType & _TensorFlowDataTypeCompatible {} public typealias TensorFlowNumeric = TensorFlowScalar & Numeric public typealias TensorFlowSignedNumeric = TensorFlowScalar & SignedNumeric diff --git a/Sources/TensorFlow/Core/MixedPrecision.swift b/Sources/TensorFlow/Core/MixedPrecision.swift index cb44733b2..97d5d98ff 100644 --- a/Sources/TensorFlow/Core/MixedPrecision.swift +++ b/Sources/TensorFlow/Core/MixedPrecision.swift @@ -14,148 +14,141 @@ import _Differentiation -#if USING_X10_BACKEND - @_implementationOnly import x10_xla_tensor_wrapper +@_implementationOnly import x10_xla_tensor_wrapper + +/// A type whose nested floating-point tensor properties and elements can be converted from full +/// precision to reduced precision and vice versa. +/// +/// - Note: Do not ever use this API directly. This is an implementation detail to support +/// `KeyPathIterable.convertToReducedPrecision` and `KeyPathIterable.convertToFullPrecision`. +/// +/// - Note: this workaround is necessary because `ReducedPrecisionConvertible` is a protocol with +/// `Self` requirements, so `x as? ReducedPrecisionConvertible` does not work. 
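The note above captures the design constraint: `ReducedPrecisionConvertible` has `Self` requirements, so under the Swift version this code targets a value cannot be tested with `x as? ReducedPrecisionConvertible`; the type-erased `_ReducedPrecisionConvertible` base declared next carries the dynamic dispatch instead. A minimal, self-contained model of the same pattern, with illustrative names:

```swift
// A Self-requiring protocol cannot be the target of `as?` here, so a
// type-erased base protocol does the dynamic dispatch. Names are hypothetical.
protocol ErasedDoubling {
  static func doubleInPlace(_ value: inout Any)
}

protocol Doubling: ErasedDoubling {
  var doubled: Self { get }
}

extension Doubling {
  static func doubleInPlace(_ value: inout Any) {
    // Inside the extension, `Self` is concrete, so `as?` is legal here.
    guard let typed = value as? Self else { return }
    value = typed.doubled
  }
}

extension Int: Doubling {
  var doubled: Int { self * 2 }
}

var boxed: Any = 21
// `boxed as? Doubling` is rejected; dispatch through the erased base instead:
if let erased = type(of: boxed) as? ErasedDoubling.Type {
  erased.doubleInPlace(&boxed)
}
print(boxed)  // 42
```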
+public protocol _ReducedPrecisionConvertible {
+  /// Given an `inout Root` root value and a `PartialKeyPath<Root>` key path, converts the value at
+  /// the key path in the root value to reduced precision.
+  static func _convertToReducedPrecision<Root>(
+    _ root: inout Root, _ rootKeyPath: PartialKeyPath<Root>)
+
+  /// Given an `inout Root` root value and a `PartialKeyPath<Root>` key path, converts the value at
+  /// the key path in the root value to full precision.
+  static func _convertToFullPrecision<Root>(
+    _ root: inout Root, _ rootKeyPath: PartialKeyPath<Root>)
+}
+
+/// A type whose nested floating-point tensor properties and elements can be converted from full
+/// precision to reduced precision and vice versa.
+public protocol ReducedPrecisionConvertible: _ReducedPrecisionConvertible {
+  /// Returns a copy of `self`, converting nested floating-point tensor properties and elements
+  /// from full precision to reduced precision.
+  var toReducedPrecision: Self { get }
 
-  /// A type whose nested floating-point tensor properties and elements can be converted from full
-  /// precision to reduced precision and vice versa.
+  /// Returns a copy of `self`, converting nested floating-point tensor properties and elements
+  /// from reduced precision to full precision.
+  var toFullPrecision: Self { get }
+}
+
+extension ReducedPrecisionConvertible {
+  /// Given an `inout Root` root value and a `PartialKeyPath<Root>` key path, converts the physical
+  /// scalar type of the value at the key path in the root value to `BFloat16`.
   ///
   /// - Note: Do not ever use this API directly. This is an implementation detail to support
   ///   `KeyPathIterable.convertToReducedPrecision` and `KeyPathIterable.convertToFullPrecision`.
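These `_convertTo*Precision` requirements mutate a value through a key path, which means narrowing the incoming `PartialKeyPath<Root>` to a `WritableKeyPath` first — the same `guard let ... as? WritableKeyPath` shape used in the extension that follows. A standalone sketch with illustrative types:

```swift
// In-place mutation through a key path requires a WritableKeyPath; the
// PartialKeyPath handed in must be downcast first. Types are hypothetical.
struct Model {
  var weight: Double = 1.0
}

func scaleInPlace(_ root: inout Model, at keyPath: PartialKeyPath<Model>, by factor: Double) {
  guard let writable = keyPath as? WritableKeyPath<Model, Double> else {
    fatalError("Failed conversion from \(keyPath) to 'WritableKeyPath<Model, Double>'")
  }
  root[keyPath: writable] *= factor
}

var model = Model()
scaleInPlace(&model, at: \Model.weight, by: 0.5)
print(model.weight)  // 0.5
```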
- public static func _convertToReducedPrecision( - _ root: inout Root, _ rootKeyPath: PartialKeyPath - ) { - guard let keyPath = rootKeyPath as? WritableKeyPath else { - fatalError( - "Failed conversion from \(rootKeyPath) to 'WritableKeyPath<\(Root.self), \(Self.self)>'") - } - root[keyPath: keyPath] = root[keyPath: keyPath].toReducedPrecision + public static func _convertToReducedPrecision( + _ root: inout Root, _ rootKeyPath: PartialKeyPath + ) { + guard let keyPath = rootKeyPath as? WritableKeyPath else { + fatalError( + "Failed conversion from \(rootKeyPath) to 'WritableKeyPath<\(Root.self), \(Self.self)>'") } + root[keyPath: keyPath] = root[keyPath: keyPath].toReducedPrecision + } - /// Given an `inout Root` root value and a `PartialKeyPath` key path, converts the physical - /// scalar type of the value at the key path in the root value from `BFloat16` to a different - /// floating-point type. - /// - /// - Note: Do not ever use this API directly. This is an implementation detail to support - /// `KeyPathIterable.convertToReducedPrecision` and `KeyPathIterable.convertToFullPrecision`. - public static func _convertToFullPrecision( - _ root: inout Root, _ rootKeyPath: PartialKeyPath - ) { - guard let keyPath = rootKeyPath as? WritableKeyPath else { - fatalError( - "Failed conversion from \(rootKeyPath) to 'WritableKeyPath<\(Root.self), \(Self.self)>'") - } - root[keyPath: keyPath] = root[keyPath: keyPath].toFullPrecision + /// Given an `inout Root` root value and a `PartialKeyPath` key path, converts the physical + /// scalar type of the value at the key path in the root value from `BFloat16` to a different + /// floating-point type. + /// + /// - Note: Do not ever use this API directly. This is an implementation detail to support + /// `KeyPathIterable.convertToReducedPrecision` and `KeyPathIterable.convertToFullPrecision`. + public static func _convertToFullPrecision( + _ root: inout Root, _ rootKeyPath: PartialKeyPath + ) { + guard let keyPath = rootKeyPath as? WritableKeyPath else { + fatalError( + "Failed conversion from \(rootKeyPath) to 'WritableKeyPath<\(Root.self), \(Self.self)>'") } + root[keyPath: keyPath] = root[keyPath: keyPath].toFullPrecision } +} - extension _KeyPathIterableBase { - /// Recursively converts all `_ReducedPrecisionConvertible`-conforming nested properties and - /// elements in `root` to reduced precision. - public func _convertToReducedPrecision( - _ root: inout Root, _ rootKeyPath: PartialKeyPath - ) { - for kp in _allKeyPathsTypeErased { - let joinedKeyPath = rootKeyPath.appending(path: kp)! - if let valueType = type(of: joinedKeyPath).valueType as? _ReducedPrecisionConvertible.Type { - valueType._convertToReducedPrecision(&root, joinedKeyPath) - } else if let value = self[keyPath: kp], let nested = value as? _KeyPathIterableBase { - nested._convertToReducedPrecision(&root, joinedKeyPath) - } +extension _KeyPathIterableBase { + /// Recursively converts all `_ReducedPrecisionConvertible`-conforming nested properties and + /// elements in `root` to reduced precision. + public func _convertToReducedPrecision( + _ root: inout Root, _ rootKeyPath: PartialKeyPath + ) { + for kp in _allKeyPathsTypeErased { + let joinedKeyPath = rootKeyPath.appending(path: kp)! + if let valueType = type(of: joinedKeyPath).valueType as? _ReducedPrecisionConvertible.Type { + valueType._convertToReducedPrecision(&root, joinedKeyPath) + } else if let value = self[keyPath: kp], let nested = value as? 
_KeyPathIterableBase { + nested._convertToReducedPrecision(&root, joinedKeyPath) } } + } - /// Recursively converts all `_ReducedPrecisionConvertible`-conforming nested properties and - /// elements in `root` to full precision. - public func _convertToFullPrecision( - _ root: inout Root, _ rootKeyPath: PartialKeyPath - ) { - for kp in _allKeyPathsTypeErased { - let joinedKeyPath = rootKeyPath.appending(path: kp)! - if let valueType = type(of: joinedKeyPath).valueType as? _ReducedPrecisionConvertible.Type { - valueType._convertToFullPrecision(&root, joinedKeyPath) - } else if let value = self[keyPath: kp], let nested = value as? _KeyPathIterableBase { - nested._convertToFullPrecision(&root, joinedKeyPath) - } + /// Recursively converts all `_ReducedPrecisionConvertible`-conforming nested properties and + /// elements in `root` to full precision. + public func _convertToFullPrecision( + _ root: inout Root, _ rootKeyPath: PartialKeyPath + ) { + for kp in _allKeyPathsTypeErased { + let joinedKeyPath = rootKeyPath.appending(path: kp)! + if let valueType = type(of: joinedKeyPath).valueType as? _ReducedPrecisionConvertible.Type { + valueType._convertToFullPrecision(&root, joinedKeyPath) + } else if let value = self[keyPath: kp], let nested = value as? _KeyPathIterableBase { + nested._convertToFullPrecision(&root, joinedKeyPath) } } } +} - extension KeyPathIterable { - /// Recursively converts all `_ReducedPrecisionConvertible`-conforming nested properties and elements - /// to reduced precision. - public mutating func convertToReducedPrecision() { - _convertToReducedPrecision(&self, \.self) - } +extension KeyPathIterable { + /// Recursively converts all `_ReducedPrecisionConvertible`-conforming nested properties and elements + /// to reduced precision. + public mutating func convertToReducedPrecision() { + _convertToReducedPrecision(&self, \.self) + } - /// Recursively converts all `_ReducedPrecisionConvertible`-conforming nested properties and elements - /// to full precision. - public mutating func convertToFullPrecision() { - _convertToFullPrecision(&self, \.self) - } + /// Recursively converts all `_ReducedPrecisionConvertible`-conforming nested properties and elements + /// to full precision. + public mutating func convertToFullPrecision() { + _convertToFullPrecision(&self, \.self) + } - /// Returns a copy of `self`, converting all `_ReducedPrecisionConvertible`-conforming nested - /// properties and elements to reduced precision. - public var toReducedPrecision: Self { - var result = self - result.convertToReducedPrecision() - return result - } + /// Returns a copy of `self`, converting all `_ReducedPrecisionConvertible`-conforming nested + /// properties and elements to reduced precision. + public var toReducedPrecision: Self { + var result = self + result.convertToReducedPrecision() + return result + } - /// Returns a copy of `self`, converting all `_ReducedPrecisionConvertible`-conforming nested - /// properties and elements to full precision. - public var toFullPrecision: Self { - var result = self - result.convertToFullPrecision() - return result - } + /// Returns a copy of `self`, converting all `_ReducedPrecisionConvertible`-conforming nested + /// properties and elements to full precision. + public var toFullPrecision: Self { + var result = self + result.convertToFullPrecision() + return result } -#endif +} extension Tensor { /// Returns true if the physical scalar type is reduced precision. /// /// Currently, reduced precision physical scalar types include only `BFloat16`. 
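Combining `isReducedPrecision` (next) with the conversion properties above, the expected round trip on the XLA backend looks as follows. A usage sketch, assuming an XLA device is available:

```swift
// Round trip between full and reduced precision on the XLA backend. Assumes
// an XLA device is available; illustration only.
let device = Device.defaultXLA
let x = Tensor<Float>([1, 2, 3], on: device)
precondition(!x.isReducedPrecision)

let reduced = x.toReducedPrecision        // physical storage becomes BFloat16
precondition(reduced.isReducedPrecision)  // logical Scalar is still Float

let restored = reduced.toFullPrecision
precondition(!restored.isReducedPrecision)
```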
public var isReducedPrecision: Bool { - #if USING_X10_BACKEND - return device.backend == .XLA && xlaTensor.physicalScalarType == XLATensorScalarType_BFloat16 - #else - // TODO: Implement. - return false - #endif + return device.backend == .XLA && xlaTensor.physicalScalarType == XLATensorScalarType_BFloat16 } /// Promotes a scalar to a tensor with the same device and precision as the given tensor. @@ -188,45 +181,29 @@ extension Tensor where Scalar: TensorFlowFloatingPoint { } } -#if USING_X10_BACKEND - extension Tensor: ReducedPrecisionConvertible, _ReducedPrecisionConvertible { - /// Returns a copy of `self` converted to `BFloat16` physical scalar type. - public var toReducedPrecision: Self { - if isReducedPrecision { - fatalError("Must not already have reduced precision") - } - if Scalar.self != Float.self { - fatalError("Reduced precision is only supported for Float tensors") - } - return _Raw.physicalCast(self, destType: BFloat16.self) +extension Tensor: ReducedPrecisionConvertible, _ReducedPrecisionConvertible { + /// Returns a copy of `self` converted to `BFloat16` physical scalar type. + public var toReducedPrecision: Self { + if isReducedPrecision { + fatalError("Must not already have reduced precision") } - - /// Returns a copy of `self` converted to `Scalar` physical scalar type. - public var toFullPrecision: Self { - if !isReducedPrecision { - fatalError("Must have reduced precision") - } - if Scalar.self != Float.self { - fatalError("Reduced precision is only supported for Float tensors") - } - return _Raw.physicalCast(self, destType: Scalar.self) + if Scalar.self != Float.self { + fatalError("Reduced precision is only supported for Float tensors") } + return _Raw.physicalCast(self, destType: BFloat16.self) } -#else - extension Tensor { - /// Returns a copy of `self` converted to `BFloat16` physical scalar type. - public var toReducedPrecision: Self { - // TODO: Implement. - return self - } - /// Returns a copy of `self` converted to `Scalar` physical scalar type. - public var toFullPrecision: Self { - // TODO: Implement. - return self + /// Returns a copy of `self` converted to `Scalar` physical scalar type. + public var toFullPrecision: Self { + if !isReducedPrecision { + fatalError("Must have reduced precision") } + if Scalar.self != Float.self { + fatalError("Reduced precision is only supported for Float tensors") + } + return _Raw.physicalCast(self, destType: Scalar.self) } -#endif +} extension Tensor where Scalar: TensorFlowFloatingPoint { @usableFromInline diff --git a/Sources/TensorFlow/Core/Runtime.swift b/Sources/TensorFlow/Core/Runtime.swift index ef3709de3..54aa67297 100644 --- a/Sources/TensorFlow/Core/Runtime.swift +++ b/Sources/TensorFlow/Core/Runtime.swift @@ -248,14 +248,6 @@ public final class _ExecutionContext { stringAddrs.append(currentStringAddr) currentStringAddr = currentStringAddr?.advanced(by: length) } - - stringAddrs.withUnsafeMutableBufferPointer { stringAddrsBuffer in - #if !USING_X10_BACKEND - var cArgsCount = Int32(args.count) - var cArgs = stringAddrsBuffer.baseAddress.map(UnsafeMutablePointer.init) - TF_InitMain(nil, &cArgsCount, &cArgs) - #endif - } } _RuntimeConfig.tensorFlowRuntimeInitialized = true } @@ -581,17 +573,6 @@ struct DeviceScopes { } } -#if !USING_X10_BACKEND - // Evaluate the pullback on a one. 
- @usableFromInline - func pullbackOfOneLikeY( - y: Tensor, - pullback: (Tensor) -> R - ) -> R { - pullback(Tensor(1)) - } -#endif - @usableFromInline func _TFCOpSetDeviceFromScope(_ op: CTFEOp, _ status: CTFStatus) { if let deviceName = _ExecutionContext.global.currentDeviceName { diff --git a/Sources/TensorFlow/Core/Tensor.swift b/Sources/TensorFlow/Core/Tensor.swift index 631803931..b376aa6de 100644 --- a/Sources/TensorFlow/Core/Tensor.swift +++ b/Sources/TensorFlow/Core/Tensor.swift @@ -77,24 +77,11 @@ extension Tensor { } /// The number of scalars in the `Tensor`. - #if USING_X10_BACKEND - @inlinable - public var scalarCount: Int { - @_semantics("autodiff.nonvarying") - get { shape.contiguousSize } - } - #else - @inlinable - public var scalarCount: Int { - @_semantics("autodiff.nonvarying") - get { - let status = _ExecutionContext.global.status - let size = TFE_TensorHandleNumElements(handle._cTensorHandle, status) - checkOk(status) - return Int(size) - } - } - #endif + @inlinable + public var scalarCount: Int { + @_semantics("autodiff.nonvarying") + get { shape.contiguousSize } + } /// The rank of the tensor, represented as a `Tensor`. @inlinable @@ -181,22 +168,18 @@ extension Tensor { @inlinable public var array: ShapedArray { debugLog("Returning a host copy of array.") - #if USING_X10_BACKEND - if handle.backend == .XLA { - return ShapedArray(shape: shape.dimensions, scalars: scalars) - } - #endif + if handle.backend == .XLA { + return ShapedArray(shape: shape.dimensions, scalars: scalars) + } return handle.makeHostCopy() } @differentiable(where Scalar: TensorFlowFloatingPoint) public var scalars: [Scalar] { - #if USING_X10_BACKEND - if handle.backend == .XLA { - let (storage, _) = xlaTensor.fetchTensorValues(Scalar.self) - return storage - } - #endif + if handle.backend == .XLA { + let (storage, _) = xlaTensor.fetchTensorValues(Scalar.self) + return storage + } return array.scalars } } @@ -222,16 +205,12 @@ extension Tensor { /// Creates a 0-D tensor from a scalar value. @differentiable(where Scalar: TensorFlowFloatingPoint) public init(_ value: Scalar, on device: Device = .default) { - #if USING_X10_BACKEND - switch device.backend { - case .XLA: - self.init(_xla: XLATensor.make(value, on: device)) - case .TF_EAGER: - self.init(shape: [], scalars: [value], on: device) - } - #else + switch device.backend { + case .XLA: + self.init(_xla: XLATensor.make(value, on: device)) + case .TF_EAGER: self.init(shape: [], scalars: [value], on: device) - #endif + } } } @@ -258,20 +237,7 @@ extension Tensor { public init( _ vector: C, on device: Device = .default ) where C.Element == Scalar { - #if USING_X10_BACKEND - self.init([Scalar](vector), on: device) - #else - let handle = TensorHandle( - shape: [vector.count], - scalarsInitializer: { addr in - var currentAddr = addr - for scalar in vector { - currentAddr.initialize(to: scalar) - currentAddr = currentAddr.advanced(by: 1) - } - }) - self.init(handle: handle) - #endif + self.init([Scalar](vector), on: device) } /// Creates a tensor with the specified shape and contiguous scalars in row-major order. @@ -311,85 +277,74 @@ extension Tensor { The shape requires \(shape.contiguousSize) scalars but \(scalars.count) were \ provided. 
""") - #if USING_X10_BACKEND - switch device.backend { - case .XLA: - self.init(_xla: XLATensor.make(scalars, shape.dimensions, on: device)) - case .TF_EAGER: - let handle = TensorHandle( - shape: shape.dimensions, - scalarsInitializer: { address in - address.initialize(from: scalars.baseAddress!, count: shape.contiguousSize) - }) - self.init(handle: handle) - } - #else + switch device.backend { + case .XLA: + self.init(_xla: XLATensor.make(scalars, shape.dimensions, on: device)) + case .TF_EAGER: let handle = TensorHandle( shape: shape.dimensions, scalarsInitializer: { address in address.initialize(from: scalars.baseAddress!, count: shape.contiguousSize) }) self.init(handle: handle) - #endif - } - - #if USING_X10_BACKEND - /// Creates a tensor with the specified shape and contiguous scalars in row-major order. - /// - /// - Parameters: - /// - shape: The shape of the tensor. - /// - scalars: The scalar contents of the tensor. - /// - Precondition: The product of the dimensions of the shape must equal the number of scalars. - @inlinable - public init( - shape: TensorShape, - scalars: [Scalar], - toReducedPrecision: Bool, - directlyOn device: Device - ) { - precondition( - shape.contiguousSize == scalars.count, - """ - The shape requires \(shape.contiguousSize) scalars but \(scalars.count) were \ - provided. - """) - self = scalars.withUnsafeBufferPointer { bufferPointer in - Tensor( - shape: shape, scalars: bufferPointer, toReducedPrecision: toReducedPrecision, - directlyOn: device) - } } + } - /// Creates a tensor with the specified shape and contiguous scalars in row-major order. - /// - /// - Parameters: - /// - shape: The shape of the tensor. - /// - scalars: The scalar contents of the tensor. - /// - Precondition: The product of the dimensions of the shape must equal the number of scalars. - public init( - shape: TensorShape, - scalars: UnsafeBufferPointer, - toReducedPrecision: Bool, - directlyOn device: Device - ) { - precondition( - shape.contiguousSize == scalars.count, - """ - The shape requires \(shape.contiguousSize) scalars but \(scalars.count) were \ - provided. - """) - switch device.backend { - case .XLA: - self.init( - _xla: XLATensor.make( - scalars, shape.dimensions, toReducedPrecision: toReducedPrecision, - directlyOn: device)) - case .TF_EAGER: - precondition(!toReducedPrecision) - self = .init(shape: shape, scalars: scalars, on: device) - } + /// Creates a tensor with the specified shape and contiguous scalars in row-major order. + /// + /// - Parameters: + /// - shape: The shape of the tensor. + /// - scalars: The scalar contents of the tensor. + /// - Precondition: The product of the dimensions of the shape must equal the number of scalars. + @inlinable + public init( + shape: TensorShape, + scalars: [Scalar], + toReducedPrecision: Bool, + directlyOn device: Device + ) { + precondition( + shape.contiguousSize == scalars.count, + """ + The shape requires \(shape.contiguousSize) scalars but \(scalars.count) were \ + provided. + """) + self = scalars.withUnsafeBufferPointer { bufferPointer in + Tensor( + shape: shape, scalars: bufferPointer, toReducedPrecision: toReducedPrecision, + directlyOn: device) } - #endif + } + + /// Creates a tensor with the specified shape and contiguous scalars in row-major order. + /// + /// - Parameters: + /// - shape: The shape of the tensor. + /// - scalars: The scalar contents of the tensor. + /// - Precondition: The product of the dimensions of the shape must equal the number of scalars. 
+ public init( + shape: TensorShape, + scalars: UnsafeBufferPointer, + toReducedPrecision: Bool, + directlyOn device: Device + ) { + precondition( + shape.contiguousSize == scalars.count, + """ + The shape requires \(shape.contiguousSize) scalars but \(scalars.count) were \ + provided. + """) + switch device.backend { + case .XLA: + self.init( + _xla: XLATensor.make( + scalars, shape.dimensions, toReducedPrecision: toReducedPrecision, + directlyOn: device)) + case .TF_EAGER: + precondition(!toReducedPrecision) + self = .init(shape: shape, scalars: scalars, on: device) + } + } /// Creates a tensor with the specified shape and contiguous scalars in row-major order. /// @@ -406,20 +361,7 @@ extension Tensor { The shape requires \(shape.contiguousSize) scalars but \(scalars.count) were \ provided. """) - #if USING_X10_BACKEND - self.init(shape: shape, scalars: [Scalar](scalars), on: device) - #else - let handle = TensorHandle( - shape: shape.dimensions, - scalarsInitializer: { addr in - var currentAddr = addr - for scalar in scalars { - currentAddr.initialize(to: scalar) - currentAddr = currentAddr.advanced(by: 1) - } - }) - self.init(handle: handle) - #endif + self.init(shape: shape, scalars: [Scalar](scalars), on: device) } } @@ -626,9 +568,7 @@ extension Tensor { } } - #if USING_X10_BACKEND - public var irText: String { XLATensor.irText(xlaTensor) } - #endif + public var irText: String { XLATensor.irText(xlaTensor) } } // Xcode Playground display conversion. @@ -676,19 +616,14 @@ extension Tensor: Codable where Scalar: Codable { extension Tensor: AdditiveArithmetic where Scalar: Numeric { /// The scalar zero tensor. - #if USING_X10_BACKEND - public static var zero: Tensor { - var zero = Tensor(0, on: _DeviceThreadLocalState.local.currentDevice) - if _DeviceThreadLocalState.local.isReducedPrecision { - zero = zero.toReducedPrecision - } - zero._isScalarZero = true - return zero + public static var zero: Tensor { + var zero = Tensor(0, on: _DeviceThreadLocalState.local.currentDevice) + if _DeviceThreadLocalState.local.isReducedPrecision { + zero = zero.toReducedPrecision } - #else - @inlinable - public static var zero: Tensor { Tensor(0) } - #endif + zero._isScalarZero = true + return zero + } /// Adds two tensors and produces their sum. /// - Note: `+` supports broadcasting. @@ -780,22 +715,20 @@ extension Tensor: Differentiable & EuclideanDifferentiable where Scalar: TensorF // Multi-device support //===------------------------------------------------------------------------------------------===// -#if USING_X10_BACKEND - extension Tensor { - /// The device on which `self` is allocated. - public var device: Device { - @_semantics("autodiff.nonvarying") - get { - switch handle.backend { - case .XLA: - return xlaTensor.device - case .TF_EAGER: - return Device.defaultTFEager - } +extension Tensor { + /// The device on which `self` is allocated. + public var device: Device { + @_semantics("autodiff.nonvarying") + get { + switch handle.backend { + case .XLA: + return xlaTensor.device + case .TF_EAGER: + return Device.defaultTFEager } } } -#endif +} //===------------------------------------------------------------------------------------------===// // Annotations @@ -819,16 +752,12 @@ where Scalar: TensorFlowFloatingPoint { extension Tensor: TensorProtocol { /// The annotations describing this tensor. 
public var annotations: String { - #if USING_X10_BACKEND - switch handle.backend { - case .XLA: - return XLATensor.annotations(xlaTensor) - case .TF_EAGER: - return Device.defaultTFEager.annotationsAvailable - } - #else - return "Annotations not available in TF_EAGER." - #endif + switch handle.backend { + case .XLA: + return XLATensor.annotations(xlaTensor) + case .TF_EAGER: + return Device.defaultTFEager.annotationsAvailable + } } /// An alias for annotations. @@ -846,16 +775,12 @@ where Scalar: TensorFlowFloatingPoint { /// - Returns: The annotated tensor. @differentiable(wrt: self) public func annotate(_ annotation: String) -> Tensor { - #if USING_X10_BACKEND - switch handle.backend { - case .XLA: - return Tensor(_xla: XLATensor.annotate(xlaTensor, annotation)) - case .TF_EAGER: - return self - } - #else + switch handle.backend { + case .XLA: + return Tensor(_xla: XLATensor.annotate(xlaTensor, annotation)) + case .TF_EAGER: return self - #endif + } } @derivative(of: annotate) diff --git a/Sources/TensorFlow/Core/TensorHandle.swift b/Sources/TensorFlow/Core/TensorHandle.swift index 83ab6a23b..a53a62f8b 100644 --- a/Sources/TensorFlow/Core/TensorHandle.swift +++ b/Sources/TensorFlow/Core/TensorHandle.swift @@ -307,12 +307,8 @@ extension Tensor { array.shape.allSatisfy { $0 <= Int(Int32.max) }, "Conversion to TensorHandle is undefined when shape dimensions exceed `Int32.max`.") if let buffer = array.buffer as? CTensorTensorBuffer { - #if USING_X10_BACKEND - let tmp = Tensor(handle: TensorHandle(copyingFromCTensor: buffer.cTensor)) - self = tmp.device == device ? tmp : Tensor(copying: tmp, to: device) - #else - self = Tensor(handle: TensorHandle(copyingFromCTensor: buffer.cTensor)) - #endif + let tmp = Tensor(handle: TensorHandle(copyingFromCTensor: buffer.cTensor)) + self = tmp.device == device ? tmp : Tensor(copying: tmp, to: device) } else { self = array.buffer.withUnsafeBufferPointer { buffer in return Tensor(shape: TensorShape(array.shape), scalars: buffer, on: device) diff --git a/Sources/TensorFlow/Layer.swift b/Sources/TensorFlow/Layer.swift index e76e53107..44aa6fdca 100644 --- a/Sources/TensorFlow/Layer.swift +++ b/Sources/TensorFlow/Layer.swift @@ -74,12 +74,8 @@ extension Module where Input: TensorProtocol, Output: DifferentiableTensorProtoc /// - Returns: The annotated output. @differentiable public func annotated(_ output: Output) -> Output { - #if USING_X10_BACKEND - let annotated = output.annotate("type=\(Self.self)") - return annotated - #else - return output - #endif + let annotated = output.annotate("type=\(Self.self)") + return annotated } /// Returns the annotations obtained from applying the layer to the given input. @@ -96,56 +92,52 @@ extension Module where Input: TensorProtocol, Output: DifferentiableTensorProtoc /// - Parameter tensor: The output to the layer. /// - Returns: A formatted summary of `tensor.annotations`. private func formatAnnotations(from tensor: Output) -> String { - #if USING_X10_BACKEND - let rawAnnotations = tensor.annotations - if rawAnnotations == Device.defaultTFEager.annotationsAvailable { - return rawAnnotations - } + let rawAnnotations = tensor.annotations + if rawAnnotations == Device.defaultTFEager.annotationsAvailable { + return rawAnnotations + } - let lines = rawAnnotations.components(separatedBy: "\n") + let lines = rawAnnotations.components(separatedBy: "\n") - if lines.count < 3 { - return "" - } + if lines.count < 3 { + return "" + } - // Isolate layers. 
- let pattern = "\\s*shape=(.+)\\s+type=([^\\s]+)(\\s+.+=.+)?$" - let regex = try! NSRegularExpression(pattern: pattern) - let contents = lines.filter { $0.contains("shape=") } - .map { line -> String in - let nsrange = NSRange(line.startIndex..., in: line) - if let match = regex.firstMatch(in: line, range: nsrange) { - var content = "" - if let typeRange = Range(match.range(at: 2), in: line) { - let type = line[typeRange] - content += type - } - content += "\t\t\t" - if let shapeRange = Range(match.range(at: 1), in: line) { - let shape = line[shapeRange] - content += shape - } - content += "\t\t" - if let attributesRange = Range(match.range(at: 3), in: line) { - let attribute = line[attributesRange] - content += attribute - } - return content - } else { - return line + // Isolate layers. + let pattern = "\\s*shape=(.+)\\s+type=([^\\s]+)(\\s+.+=.+)?$" + let regex = try! NSRegularExpression(pattern: pattern) + let contents = lines.filter { $0.contains("shape=") } + .map { line -> String in + let nsrange = NSRange(line.startIndex..., in: line) + if let match = regex.firstMatch(in: line, range: nsrange) { + var content = "" + if let typeRange = Range(match.range(at: 2), in: line) { + let type = line[typeRange] + content += type } + content += "\t\t\t" + if let shapeRange = Range(match.range(at: 1), in: line) { + let shape = line[shapeRange] + content += shape + } + content += "\t\t" + if let attributesRange = Range(match.range(at: 3), in: line) { + let attribute = line[attributesRange] + content += attribute + } + return content + } else { + return line } + } - let formattedAnnotations = """ - Layer Output Shape Attributes - =============================== ==================== ====================== - \(contents.joined(separator: "\n")) - """ + let formattedAnnotations = """ + Layer Output Shape Attributes + =============================== ==================== ====================== + \(contents.joined(separator: "\n")) + """ - return formattedAnnotations - #else - return tensor.annotations - #endif + return formattedAnnotations } }
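
Every hunk in this patch follows the same shape: a `#if USING_X10_BACKEND` / `#else` / `#endif` guard is deleted and the previously conditional branch is kept unconditionally, so backend selection now happens only at runtime via `handle.backend` rather than at compile time. Below is a minimal usage sketch of the public surface touched above, restricted to APIs that appear verbatim in the hunks; it assumes `Device.default` has been configured for whichever backend the build selects, and is illustrative rather than part of the change itself.

import TensorFlow

// Construct directly on a device. The initializer (added unconditionally in
// Tensor.swift above) switches on device.backend internally: XLATensor.make
// for .XLA, a TensorHandle copy for .TF_EAGER.
let device = Device.default
let t = Tensor<Float>(
  shape: [2, 2], scalars: [1, 2, 3, 4],
  toReducedPrecision: false, directlyOn: device)

// These accessors are now compiled on every build; each dispatches on
// handle.backend (.XLA vs. .TF_EAGER) instead of on a build flag.
print(t.device)       // xlaTensor.device for .XLA, Device.defaultTFEager otherwise
print(t.scalarCount)  // shape.contiguousSize on all backends
print(t.annotations)  // XLA annotation log, or the TF_EAGER availability marker

// `zero` likewise always allocates on the thread-local current device and
// honors reduced precision, with no eager-only fallback path remaining.
let z = Tensor<Float>.zero

Note that on `.TF_EAGER`, `init(shape:scalars:toReducedPrecision:directlyOn:)` traps if `toReducedPrecision` is true (the `precondition(!toReducedPrecision)` in the hunk above), so reduced-precision construction remains an X10-only capability even though the code path is now unconditional.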