@tensorflow/tfjs-core
- Version 4.22.0
- Published
- 37.1 MB
- 7 dependencies
- Apache-2.0 license
Install
npm i @tensorflow/tfjs-core
yarn add @tensorflow/tfjs-core
pnpm add @tensorflow/tfjs-core
Overview
Hardware-accelerated JavaScript library for machine intelligence
Index
Variables
- abs
- Abs
- acos
- Acos
- acosh
- Acosh
- add
- Add
- addN
- AddN
- all
- All
- any
- Any
- argMax
- ArgMax
- argMin
- ArgMin
- asin
- Asin
- asinh
- Asinh
- atan
- Atan
- atan2
- Atan2
- atanh
- Atanh
- avgPool
- AvgPool
- avgPool3d
- AvgPool3D
- AvgPool3DGrad
- AvgPoolGrad
- basicLSTMCell
- BatchMatMul
- batchNorm
- batchNorm2d
- batchNorm3d
- batchNorm4d
- batchToSpaceND
- BatchToSpaceND
- bincount
- Bincount
- bitwiseAnd
- BitwiseAnd
- booleanMaskAsync
- broadcastArgs
- BroadcastArgs
- broadcastTo
- BroadcastTo
- cast
- Cast
- ceil
- Ceil
- clipByValue
- ClipByValue
- clone
- complex
- Complex
- ComplexAbs
- concat
- Concat
- concat1d
- concat2d
- concat3d
- concat4d
- conv1d
- conv2d
- Conv2D
- Conv2DBackpropFilter
- Conv2DBackpropInput
- conv2dTranspose
- conv3d
- Conv3D
- Conv3DBackpropFilterV2
- Conv3DBackpropInputV2
- conv3dTranspose
- cos
- Cos
- cosh
- Cosh
- CropAndResize
- cumprod
- Cumprod
- cumsum
- Cumsum
- denseBincount
- DenseBincount
- depthToSpace
- DepthToSpace
- depthwiseConv2d
- DepthwiseConv2dNative
- DepthwiseConv2dNativeBackpropFilter
- DepthwiseConv2dNativeBackpropInput
- diag
- Diag
- dilation2d
- Dilation2D
- Dilation2DBackpropFilter
- Dilation2DBackpropInput
- div
- divNoNan
- dot
- Draw
- dropout
- einsum
- Einsum
- elu
- Elu
- EluGrad
- ensureShape
- ENV
- equal
- Equal
- erf
- Erf
- euclideanNorm
- exp
- Exp
- expandDims
- ExpandDims
- expm1
- Expm1
- eye
- fft
- FFT
- Fill
- FlipLeftRight
- floor
- Floor
- floorDiv
- FloorDiv
- FromPixels
- FusedBatchNorm
- FusedConv2D
- FusedDepthwiseConv2D
- gather
- gatherND
- GatherNd
- GatherV2
- greater
- Greater
- greaterEqual
- GreaterEqual
- Identity
- ifft
- IFFT
- imag
- Imag
- image
- inTopKAsync
- irfft
- isFinite
- IsFinite
- isInf
- IsInf
- isNaN
- IsNan
- leakyRelu
- LeakyRelu
- less
- Less
- lessEqual
- LessEqual
- linalg
- LinSpace
- localResponseNormalization
- log
- Log
- log1p
- Log1p
- logicalAnd
- LogicalAnd
- logicalNot
- LogicalNot
- logicalOr
- LogicalOr
- logicalXor
- LogicalXor
- logSigmoid
- logSoftmax
- LogSoftmax
- logSumExp
- losses
- LowerBound
- LRN
- LRNGrad
- matMul
- MatrixBandPart
- max
- Max
- maximum
- Maximum
- maxPool
- MaxPool
- maxPool3d
- MaxPool3D
- MaxPool3DGrad
- MaxPoolGrad
- maxPoolWithArgmax
- MaxPoolWithArgmax
- mean
- Mean
- min
- Min
- minimum
- Minimum
- mirrorPad
- MirrorPad
- mod
- Mod
- moments
- movingAverage
- mul
- multinomial
- Multinomial
- Multiply
- multiRNNCell
- neg
- Neg
- NonMaxSuppressionV3
- NonMaxSuppressionV4
- NonMaxSuppressionV5
- norm
- notEqual
- NotEqual
- oneHot
- OneHot
- onesLike
- OnesLike
- OP_SCOPE_SUFFIX
- outerProduct
- Pack
- pad
- pad1d
- pad2d
- pad3d
- pad4d
- PadV2
- pool
- Pool
- pow
- Pow
- prelu
- Prelu
- prod
- Prod
- raggedGather
- RaggedGather
- raggedRange
- RaggedRange
- raggedTensorToTensor
- RaggedTensorToTensor
- rand
- randomGamma
- randomNormal
- randomStandardNormal
- randomUniform
- randomUniformInt
- Range
- real
- Real
- RealDiv
- reciprocal
- Reciprocal
- relu
- Relu
- relu6
- Relu6
- reshape
- Reshape
- ResizeBilinear
- ResizeBilinearGrad
- ResizeNearestNeighbor
- ResizeNearestNeighborGrad
- reverse
- Reverse
- reverse1d
- reverse2d
- reverse3d
- reverse4d
- rfft
- RotateWithOffset
- round
- Round
- rsqrt
- Rsqrt
- scatterND
- ScatterNd
- searchSorted
- SearchSorted
- Select
- selu
- Selu
- separableConv2d
- setdiff1dAsync
- sigmoid
- Sigmoid
- sign
- Sign
- signal
- sin
- Sin
- sinh
- Sinh
- slice
- Slice
- slice1d
- slice2d
- slice3d
- slice4d
- softmax
- Softmax
- softplus
- Softplus
- spaceToBatchND
- SpaceToBatchND
- sparse
- SparseFillEmptyRows
- SparseReshape
- SparseSegmentMean
- SparseSegmentSum
- sparseToDense
- SparseToDense
- spectral
- split
- SplitV
- sqrt
- Sqrt
- square
- Square
- squaredDifference
- SquaredDifference
- squeeze
- stack
- StaticRegexReplace
- step
- Step
- stridedSlice
- StridedSlice
- string
- StringNGrams
- StringSplit
- StringToHashBucketFast
- sub
- Sub
- sum
- Sum
- tan
- Tan
- tanh
- Tanh
- tensorScatterUpdate
- TensorScatterUpdate
- tile
- Tile
- topk
- TopK
- train
- Transform
- transpose
- Transpose
- truncatedNormal
- unique
- Unique
- Unpack
- unsortedSegmentSum
- UnsortedSegmentSum
- unstack
- UpperBound
- version_core
- where
- whereAsync
- zerosLike
- ZerosLike
Functions
- backend()
- buffer()
- copyRegisteredKernels()
- cosineWindow()
- customGrad()
- deprecationWarn()
- disableDeprecationWarnings()
- dispose()
- disposeVariables()
- enableDebugMode()
- enableProdMode()
- enclosingPowerOfTwo()
- engine()
- env()
- fill()
- findBackend()
- findBackendFactory()
- getBackend()
- getGradient()
- getKernel()
- getKernelsForBackend()
- grad()
- grads()
- keep()
- linspace()
- lowerBound()
- memory()
- meshgrid()
- nextFrame()
- ones()
- op()
- print()
- profile()
- range()
- ready()
- registerBackend()
- registerGradient()
- registerKernel()
- removeBackend()
- scalar()
- setBackend()
- setPlatform()
- sumOutType()
- tensor()
- tensor1d()
- tensor2d()
- tensor3d()
- tensor4d()
- tensor5d()
- tensor6d()
- tidy()
- time()
- unregisterGradient()
- unregisterKernel()
- upcastType()
- upperBound()
- valueAndGrad()
- valueAndGrads()
- variable()
- variableGrads()
- zeros()
Classes
Interfaces
Enums
Type Aliases
- AbsInputs
- AcoshInputs
- AcosInputs
- AddInputs
- AddNInputs
- AllInputs
- AnyInputs
- ArgMaxInputs
- ArgMinInputs
- AsinhInputs
- AsinInputs
- Atan2Inputs
- AtanhInputs
- AtanInputs
- Attribute
- AvgPool3DGradInputs
- AvgPool3DInputs
- AvgPoolGradInputs
- AvgPoolInputs
- BackendValues
- BatchMatMulInputs
- BatchToSpaceNDInputs
- BinaryInputs
- BincountInputs
- BitwiseAndInputs
- BroadcastArgsInputs
- BroadcastToInputs
- CastInputs
- CeilInputs
- ClipByValueInputs
- ComplexAbsInputs
- ComplexInputs
- ConcatInputs
- Conv2DBackpropFilterInputs
- Conv2DBackpropInputInputs
- Conv2DInputs
- Conv3DBackpropFilterV2Inputs
- Conv3DBackpropInputV2Inputs
- Conv3DInputs
- CoshInputs
- CosInputs
- CropAndResizeInputs
- CumprodInputs
- CumsumInputs
- DataId
- DataToGPUOptions
- DataType
- DataTypeFor
- DataValues
- DenseBincountInputs
- DepthToSpaceInputs
- DepthwiseConv2dNativeBackpropFilterInputs
- DepthwiseConv2dNativeBackpropInputInputs
- DepthwiseConv2dNativeInputs
- DiagInputs
- Dilation2DBackpropFilterInputs
- Dilation2DBackpropInputInputs
- Dilation2DInputs
- DrawInputs
- EinsumInputs
- EluGradInputs
- EluInputs
- EqualInputs
- ErfInputs
- ExpandDimsInputs
- ExpInputs
- Expm1Inputs
- FFTInputs
- FlipLeftRightInputs
- FloorDivInputs
- FloorInputs
- ForwardFunc
- FusedBatchNormInputs
- GatherNdInputs
- GatherV2Inputs
- GradFunc
- GradSaveFunc
- GreaterEqualInputs
- GreaterInputs
- IdentityInputs
- IFFTInputs
- ImagInputs
- IsFiniteInputs
- IsInfInputs
- IsNanInputs
- KernelDisposeFunc
- KernelFunc
- KernelSetupFunc
- LeakyReluInputs
- LessEqualInputs
- LessInputs
- Log1pInputs
- LogicalAndInputs
- LogicalNotInputs
- LogicalOrInputs
- LogicalXorInputs
- LogInputs
- LogSoftmaxInputs
- LowerBoundInputs
- LRNGradInputs
- LRNInputs
- LSTMCellFunc
- MatrixBandPartInputs
- MaximumInputs
- MaxInputs
- MaxPool3DGradInputs
- MaxPool3DInputs
- MaxPoolGradInputs
- MaxPoolInputs
- MaxPoolWithArgmaxInputs
- MeanInputs
- MemoryInfo
- MinimumInputs
- MinInputs
- MirrorPadInputs
- ModInputs
- MultinomialInputs
- MultiplyInputs
- NamedTensorMap
- NegInputs
- NonMaxSuppressionV3Inputs
- NonMaxSuppressionV4Inputs
- NonMaxSuppressionV5Inputs
- NotEqualInputs
- NumericDataType
- OneHotInputs
- OnesLikeInputs
- PackInputs
- PadV2Inputs
- PoolInputs
- PowInputs
- PreluInputs
- ProdInputs
- RaggedGatherInputs
- RaggedRangeInputs
- RaggedTensorToTensorInputs
- RealDivInputs
- RealInputs
- ReciprocalInputs
- Relu6Inputs
- ReluInputs
- ReshapeInputs
- ResizeBilinearGradAttrs
- ResizeBilinearGradInputs
- ResizeBilinearInputs
- ResizeNearestNeighborGradAttrs
- ResizeNearestNeighborGradInputs
- ResizeNearestNeighborInputs
- ReverseInputs
- RotateWithOffsetInputs
- RoundInputs
- RsqrtInputs
- Scalar
- ScalarLike
- ScatterNdInputs
- SearchSortedInputs
- SelectInputs
- SeluInputs
- SigmoidInputs
- SignInputs
- SinhInputs
- SinInputs
- SliceInputs
- SoftmaxInputs
- SoftplusInputs
- SpaceToBatchNDInputs
- SparseFillEmptyRowsInputs
- SparseReshapeInputs
- SparseSegmentMeanInputs
- SparseSegmentSumInputs
- SparseToDenseInputs
- SplitVInputs
- SqrtInputs
- SquaredDifferenceInputs
- SquareInputs
- StaticRegexReplaceInputs
- StepInputs
- StridedSliceInputs
- StringNGramsInputs
- StringSplitInputs
- StringToHashBucketFastInputs
- SubInputs
- SumInputs
- TanhInputs
- TanInputs
- Tensor1D
- Tensor2D
- Tensor3D
- Tensor4D
- Tensor5D
- TensorContainer
- TensorLike
- TensorScatterUpdateInputs
- TileInputs
- TopKInputs
- TransformInputs
- TransposeInputs
- TypedArray
- UnaryInputs
- UniqueInputs
- UnpackInputs
- UnsortedSegmentSumInputs
- UpperBoundInputs
- ZerosLikeInputs
Namespaces
backend_util
- Activation
- applyActivation()
- assertAndGetBroadcastShape()
- assertAxesAreInnerMostDims()
- assertParamsConsistent()
- assignToTypedArray()
- axesAreInnerMostDims()
- BackendValues
- calculateShapes()
- checkEinsumDimSizes()
- checkPadOnDimRoundingMode()
- combineLocations()
- combineRaggedTensorToTensorShapes()
- complexWithEvenIndex()
- complexWithOddIndex()
- computeConv2DInfo()
- computeConv3DInfo()
- computeDefaultPad()
- computeDilation2DInfo()
- computeOptimalWindowSize()
- computeOutAndReduceShapes()
- computeOutShape()
- computePool2DInfo()
- computePool3DInfo()
- Conv2DInfo
- Conv3DInfo
- convertConv2DDataFormat()
- decodeEinsumEquation()
- eitherStridesOrDilationsAreOne()
- ERF_A1
- ERF_A2
- ERF_A3
- ERF_A4
- ERF_A5
- ERF_P
- expandShapeToKeepDim()
- ExplicitPadding
- exponent()
- exponents()
- fromStringArrayToUint8()
- fromUint8ToStringArray()
- FusedBatchMatMulConfig
- FusedConv2DConfig
- getAxesPermutation()
- getBroadcastDims()
- getComplexWithIndex()
- getEinsumComputePath()
- getEinsumPermutation()
- getFusedBiasGradient()
- getFusedDyActivation()
- getImageCenter()
- getInnerMostAxes()
- getPermuted()
- getRaggedRank()
- getReductionAxes()
- getReshaped()
- getReshapedPermuted()
- getRowPartitionTypesHelper()
- getSliceBeginCoords()
- getSliceSize()
- getSparseFillEmptyRowsIndicesDenseShapeMismatch()
- getSparseFillEmptyRowsNegativeIndexErrorMessage()
- getSparseFillEmptyRowsOutOfRangeIndexErrorMessage()
- getSparseReshapeEmptyTensorZeroOutputDimErrorMessage()
- getSparseReshapeInputOutputMismatchErrorMessage()
- getSparseReshapeInputOutputMultipleErrorMessage()
- getSparseReshapeMultipleNegativeOneOutputDimErrorMessage()
- getSparseReshapeNegativeOutputDimErrorMessage()
- getSparseSegmentReductionIndicesOutOfRangeErrorMessage()
- getSparseSegmentReductionNegativeSegmentIdsErrorMessage()
- getSparseSegmentReductionNonIncreasingSegmentIdsErrorMessage()
- getSparseSegmentReductionSegmentIdOutOfRangeErrorMessage()
- getUndoAxesPermutation()
- isIdentityPermutation()
- log()
- MemoryInfo
- mergeRealAndImagArrays()
- PadInfo
- PadInfo3D
- PARALLELIZE_THRESHOLD
- PixelData
- prepareAndValidate()
- prepareSplitSize()
- ReduceInfo
- RowPartitionType
- ScatterShapeInfo
- SELU_SCALE
- SELU_SCALEALPHA
- shouldFuse()
- splitRealAndImagArrays()
- stridesOrDilationsArePositive()
- TimingInfo
- tupleValuesAreOne()
- TypedArray
- upcastType()
- validateDefaultValueShape()
- validateInput()
- validateUpdateShape()
- warn()
io
- browserFiles()
- browserHTTPRequest()
- CompositeArrayBuffer
- concatenateArrayBuffers()
- copyModel()
- decodeWeights()
- decodeWeightsStream()
- encodeWeights()
- fromMemory()
- fromMemorySync()
- getLoadHandlers()
- getModelArtifactsForJSON()
- getModelArtifactsForJSONSync()
- getModelArtifactsInfoForJSON()
- getSaveHandlers()
- getWeightSpecs()
- http()
- IOHandler
- IOHandlerSync
- isHTTPScheme()
- listModels()
- LoadHandler
- LoadOptions
- loadWeights()
- ModelArtifacts
- ModelArtifactsInfo
- ModelJSON
- ModelStoreManager
- moveModel()
- OnProgressCallback
- registerLoadRouter()
- registerSaveRouter()
- removeModel()
- RequestDetails
- SaveConfig
- SaveHandler
- SaveResult
- TrainingConfig
- WeightData
- WeightGroup
- weightsLoaderFactory()
- WeightsManifestConfig
- WeightsManifestEntry
- withSaveHandler()
- withSaveHandlerSync()
util
- arraysEqual()
- arraysEqualWithNull()
- assert()
- assertNonNegativeIntegerDimensions()
- assertNonNull()
- assertShapesMatch()
- bytesFromStringArray()
- bytesPerElement()
- checkConversionForErrors()
- clamp()
- computeStrides()
- convertBackendValuesAndArrayBuffer()
- createScalarValue()
- createShuffledIndices()
- decodeString()
- distSquared()
- encodeString()
- fetch()
- fingerPrint64()
- flatten()
- getArrayFromDType()
- getTypedArrayFromDType()
- hasEncodingLoss()
- hexToLong()
- indexToLoc()
- inferDtype()
- inferFromImplicitShape()
- isBoolean()
- isFunction()
- isInt()
- isNumber()
- isPromise()
- isScalarShape()
- isString()
- isTypedArray()
- isValidDtype()
- locToIndex()
- makeOnesTypedArray()
- makeZerosNestedTypedArray()
- makeZerosTypedArray()
- nearestDivisor()
- nearestLargerEven()
- now()
- parseAxisParam()
- randUniform()
- repeatedTry()
- rightPad()
- shuffle()
- shuffleCombo()
- sizeFromShape()
- sizeToSquarishShape()
- squeezeShape()
- sum()
- swap()
- tanh()
- toNestedArray()
- toTypedArray()
Variables
variable abs
const abs: <T extends Tensor<Rank>>(x: TensorLike | T) => T;
variable Abs
const Abs: string;
variable acos
const acos: <T extends Tensor<Rank>>(x: TensorLike | T) => T;
variable Acos
const Acos: string;
variable acosh
const acosh: <T extends Tensor<Rank>>(x: TensorLike | T) => T;
variable Acosh
const Acosh: string;
variable add
const add: <T extends Tensor<Rank>>( a: Tensor<Rank> | TensorLike, b: Tensor<Rank> | TensorLike) => T;
variable Add
const Add: string;
variable addN
const addN: <T extends Tensor<Rank>>(tensors: (TensorLike | T)[]) => T;
variable AddN
const AddN: string;
variable all
const all: <T extends Tensor<Rank>>( x: Tensor<Rank> | TensorLike, axis?: number | number[], keepDims?: boolean) => T;
variable All
const All: string;
variable any
const any: <T extends Tensor<Rank>>( x: Tensor<Rank> | TensorLike, axis?: number | number[], keepDims?: boolean) => T;
variable Any
const Any: string;
variable argMax
const argMax: <T extends Tensor<Rank>>( x: Tensor<Rank> | TensorLike, axis?: number) => T;
variable ArgMax
const ArgMax: string;
variable argMin
const argMin: <T extends Tensor<Rank>>( x: Tensor<Rank> | TensorLike, axis?: number) => T;
variable ArgMin
const ArgMin: string;
variable asin
const asin: <T extends Tensor<Rank>>(x: TensorLike | T) => T;
variable Asin
const Asin: string;
variable asinh
const asinh: <T extends Tensor<Rank>>(x: TensorLike | T) => T;
variable Asinh
const Asinh: string;
variable atan
const atan: <T extends Tensor<Rank>>(x: TensorLike | T) => T;
variable Atan
const Atan: string;
variable atan2
const atan2: <T extends Tensor<Rank>>( a: Tensor<Rank> | TensorLike, b: Tensor<Rank> | TensorLike) => T;
variable Atan2
const Atan2: string;
variable atanh
const atanh: <T extends Tensor<Rank>>(x: TensorLike | T) => T;
variable Atanh
const Atanh: string;
variable avgPool
const avgPool: <T extends Tensor3D | Tensor4D>( x: TensorLike | T, filterSize: number | [number, number], strides: number | [number, number], pad: number | 'valid' | 'same' | conv_util.ExplicitPadding, dimRoundingMode?: 'floor' | 'round' | 'ceil') => T;
variable AvgPool
const AvgPool: string;
variable avgPool3d
const avgPool3d: <T extends Tensor4D | Tensor5D>( x: TensorLike | T, filterSize: number | [number, number, number], strides: number | [number, number, number], pad: number | 'valid' | 'same', dimRoundingMode?: 'floor' | 'round' | 'ceil', dataFormat?: 'NDHWC' | 'NCDHW') => T;
variable AvgPool3D
const AvgPool3D: string;
variable AvgPool3DGrad
const AvgPool3DGrad: string;
variable AvgPoolGrad
const AvgPoolGrad: string;
variable basicLSTMCell
const basicLSTMCell: ( forgetBias: Scalar | TensorLike, lstmKernel: TensorLike | Tensor2D, lstmBias: TensorLike | Tensor1D, data: TensorLike | Tensor2D, c: TensorLike | Tensor2D, h: TensorLike | Tensor2D) => [Tensor2D, Tensor2D];
variable BatchMatMul
const BatchMatMul: string;
variable batchNorm
const batchNorm: <R extends Rank>( x: TensorLike | Tensor<R>, mean: TensorLike | Tensor1D | Tensor<R>, variance: TensorLike | Tensor1D | Tensor<R>, offset?: TensorLike | Tensor1D | Tensor<R>, scale?: TensorLike | Tensor1D | Tensor<R>, varianceEpsilon?: number) => Tensor<R>;
variable batchNorm2d
const batchNorm2d: ( x: TensorLike | Tensor2D, mean: TensorLike | Tensor1D | Tensor2D, variance: TensorLike | Tensor1D | Tensor2D, offset?: TensorLike | Tensor1D | Tensor2D, scale?: TensorLike | Tensor1D | Tensor2D, varianceEpsilon?: number) => Tensor2D;
variable batchNorm3d
const batchNorm3d: ( x: TensorLike | Tensor3D, mean: TensorLike | Tensor1D | Tensor3D, variance: TensorLike | Tensor1D | Tensor3D, offset?: TensorLike | Tensor1D | Tensor3D, scale?: TensorLike | Tensor1D | Tensor3D, varianceEpsilon?: number) => Tensor3D;
variable batchNorm4d
const batchNorm4d: ( x: TensorLike | Tensor4D, mean: TensorLike | Tensor1D | Tensor4D, variance: TensorLike | Tensor1D | Tensor4D, offset?: TensorLike | Tensor1D | Tensor4D, scale?: TensorLike | Tensor1D | Tensor4D, varianceEpsilon?: number) => Tensor4D;
variable batchToSpaceND
const batchToSpaceND: <T extends Tensor<Rank>>( x: TensorLike | T, blockShape: number[], crops: number[][]) => T;
variable BatchToSpaceND
const BatchToSpaceND: string;
variable bincount
const bincount: <T extends Tensor1D>( x: TensorLike | T, weights: TensorLike | T, size: number) => T;
variable Bincount
const Bincount: string;
variable bitwiseAnd
const bitwiseAnd: <R extends Rank>(x: Tensor<Rank>, y: Tensor<Rank>) => Tensor<R>;
variable BitwiseAnd
const BitwiseAnd: string;
variable booleanMaskAsync
const booleanMaskAsync: ( tensor: Tensor<Rank> | TensorLike, mask: Tensor<Rank> | TensorLike, axis?: number) => Promise<Tensor<Rank>>;
variable broadcastArgs
const broadcastArgs: <R extends Rank>( s0: Tensor<Rank> | TensorLike, s1: Tensor<Rank> | TensorLike) => Tensor<R>;
variable BroadcastArgs
const BroadcastArgs: string;
variable broadcastTo
const broadcastTo: <R extends Rank>( x: Tensor<Rank> | TensorLike, shape: ShapeMap[R]) => Tensor<R>;
variable BroadcastTo
const BroadcastTo: string;
variable cast
const cast: <T extends Tensor<Rank>>( x: TensorLike | T, dtype: keyof DataTypeMap) => T;
variable Cast
const Cast: string;
variable ceil
const ceil: <T extends Tensor<Rank>>(x: TensorLike | T) => T;
variable Ceil
const Ceil: string;
variable clipByValue
const clipByValue: <T extends Tensor<Rank>>( x: TensorLike | T, clipValueMin: number, clipValueMax: number) => T;
variable ClipByValue
const ClipByValue: string;
variable clone
const clone: <T extends Tensor<Rank>>(x: TensorLike | T) => T;
variable complex
const complex: <T extends Tensor<Rank>>( real: TensorLike | T, imag: TensorLike | T) => T;
variable Complex
const Complex: string;
variable ComplexAbs
const ComplexAbs: string;
variable concat
const concat: <T extends Tensor<Rank>>( tensors: (TensorLike | T)[], axis?: number) => T;
variable Concat
const Concat: string;
variable concat1d
const concat1d: (tensors: (TensorLike | Tensor1D)[]) => Tensor1D;
variable concat2d
const concat2d: (tensors: (TensorLike | Tensor2D)[], axis: number) => Tensor2D;
variable concat3d
const concat3d: (tensors: (TensorLike | Tensor3D)[], axis: number) => Tensor3D;
variable concat4d
const concat4d: (tensors: (TensorLike | Tensor4D)[], axis: number) => Tensor4D;
variable conv1d
const conv1d: <T extends Tensor2D | Tensor3D>( x: TensorLike | T, filter: TensorLike | Tensor3D, stride: number, pad: number | 'valid' | 'same' | conv_util.ExplicitPadding, dataFormat?: 'NWC' | 'NCW', dilation?: number, dimRoundingMode?: 'floor' | 'round' | 'ceil') => T;
variable conv2d
const conv2d: <T extends Tensor3D | Tensor4D>( x: TensorLike | T, filter: TensorLike | Tensor4D, strides: number | [number, number], pad: number | 'valid' | 'same' | conv_util.ExplicitPadding, dataFormat?: 'NHWC' | 'NCHW', dilations?: number | [number, number], dimRoundingMode?: 'floor' | 'round' | 'ceil') => T;
variable Conv2D
const Conv2D: string;
variable Conv2DBackpropFilter
const Conv2DBackpropFilter: string;
variable Conv2DBackpropInput
const Conv2DBackpropInput: string;
variable conv2dTranspose
const conv2dTranspose: <T extends Tensor3D | Tensor4D>( x: TensorLike | T, filter: TensorLike | Tensor4D, outputShape: [number, number, number] | [number, number, number, number], strides: number | [number, number], pad: number | 'valid' | 'same' | ExplicitPadding, dimRoundingMode?: 'floor' | 'round' | 'ceil') => T;
variable conv3d
const conv3d: <T extends Tensor4D | Tensor5D>( x: TensorLike | T, filter: TensorLike | Tensor5D, strides: number | [number, number, number], pad: 'valid' | 'same', dataFormat?: 'NDHWC' | 'NCDHW', dilations?: number | [number, number, number]) => T;
variable Conv3D
const Conv3D: string;
variable Conv3DBackpropFilterV2
const Conv3DBackpropFilterV2: string;
variable Conv3DBackpropInputV2
const Conv3DBackpropInputV2: string;
variable conv3dTranspose
const conv3dTranspose: <T extends Tensor4D | Tensor5D>( x: TensorLike | T, filter: TensorLike | Tensor5D, outputShape: | [number, number, number, number] | [number, number, number, number, number], strides: number | [number, number, number], pad: 'valid' | 'same') => T;
variable cos
const cos: <T extends Tensor<Rank>>(x: TensorLike | T) => T;
variable Cos
const Cos: string;
variable cosh
const cosh: <T extends Tensor<Rank>>(x: TensorLike | T) => T;
variable Cosh
const Cosh: string;
variable CropAndResize
const CropAndResize: string;
variable cumprod
const cumprod: <T extends Tensor<Rank>>( x: Tensor<Rank> | TensorLike, axis?: number, exclusive?: boolean, reverse?: boolean) => T;
variable Cumprod
const Cumprod: string;
variable cumsum
const cumsum: <T extends Tensor<Rank>>( x: Tensor<Rank> | TensorLike, axis?: number, exclusive?: boolean, reverse?: boolean) => T;
variable Cumsum
const Cumsum: string;
variable denseBincount
const denseBincount: <T extends Tensor1D | Tensor2D>( x: TensorLike | T, weights: TensorLike | T, size: number, binaryOutput?: boolean) => T;
variable DenseBincount
const DenseBincount: string;
variable depthToSpace
const depthToSpace: ( x: Tensor4D | TensorLike4D, blockSize: number, dataFormat?: 'NHWC' | 'NCHW') => Tensor4D;
variable DepthToSpace
const DepthToSpace: string;
variable depthwiseConv2d
const depthwiseConv2d: <T extends Tensor3D | Tensor4D>( x: TensorLike | T, filter: TensorLike | Tensor4D, strides: number | [number, number], pad: number | 'valid' | 'same' | conv_util.ExplicitPadding, dataFormat?: 'NHWC' | 'NCHW', dilations?: number | [number, number], dimRoundingMode?: 'floor' | 'round' | 'ceil') => T;
variable DepthwiseConv2dNative
const DepthwiseConv2dNative: string;
variable DepthwiseConv2dNativeBackpropFilter
const DepthwiseConv2dNativeBackpropFilter: string;
variable DepthwiseConv2dNativeBackpropInput
const DepthwiseConv2dNativeBackpropInput: string;
variable diag
const diag: (x: Tensor<Rank>) => Tensor<Rank>;
variable Diag
const Diag: string;
variable dilation2d
const dilation2d: <T extends Tensor3D | Tensor4D>( x: TensorLike | T, filter: TensorLike | Tensor3D, strides: number | [number, number], pad: 'valid' | 'same', dilations?: number | [number, number], dataFormat?: 'NHWC') => T;
variable Dilation2D
const Dilation2D: string;
variable Dilation2DBackpropFilter
const Dilation2DBackpropFilter: string;
variable Dilation2DBackpropInput
const Dilation2DBackpropInput: string;
variable div
const div: <T extends Tensor<Rank>>( a: Tensor<Rank> | TensorLike, b: Tensor<Rank> | TensorLike) => T;
variable divNoNan
const divNoNan: <T extends Tensor<Rank>>( a: Tensor<Rank> | TensorLike, b: Tensor<Rank> | TensorLike) => T;
variable dot
const dot: ( t1: Tensor<Rank> | TensorLike, t2: Tensor<Rank> | TensorLike) => Tensor<Rank>;
variable Draw
const Draw: string;
variable dropout
const dropout: ( x: Tensor<Rank> | TensorLike, rate: number, noiseShape?: number[], seed?: string | number) => Tensor<Rank>;
variable einsum
const einsum: (equation: string, ...tensors: Tensor<Rank>[]) => Tensor<Rank>;
variable Einsum
const Einsum: string;
variable elu
const elu: <T extends Tensor<Rank>>(x: TensorLike | T) => T;
variable Elu
const Elu: string;
variable EluGrad
const EluGrad: string;
variable ensureShape
const ensureShape: <R extends Rank>( x: Tensor<Rank>, shape: ShapeMap[R]) => Tensor<Rank>;
variable ENV
let ENV: Environment;
variable equal
const equal: <T extends Tensor<Rank>>( a: Tensor<Rank> | TensorLike, b: Tensor<Rank> | TensorLike) => T;
variable Equal
const Equal: string;
variable erf
const erf: <T extends Tensor<Rank>>(x: TensorLike | T) => T;
variable Erf
const Erf: string;
variable euclideanNorm
const euclideanNorm: ( x: Tensor<Rank> | TensorLike, axis?: number | number[], keepDims?: boolean) => Tensor<Rank>;
variable exp
const exp: <T extends Tensor<Rank>>(x: TensorLike | T) => T;
variable Exp
const Exp: string;
variable expandDims
const expandDims: <T extends Tensor<Rank>>( x: Tensor<Rank> | TensorLike, axis?: number) => T;
variable ExpandDims
const ExpandDims: string;
variable expm1
const expm1: <T extends Tensor<Rank>>(x: TensorLike | T) => T;
variable Expm1
const Expm1: string;
variable eye
const eye: ( numRows: number, numColumns?: number, batchShape?: | [number, number] | [number] | [number, number, number] | [number, number, number, number], dtype?: keyof DataTypeMap) => Tensor2D;
variable fft
const fft: (input: Tensor<Rank>) => Tensor<Rank>;
variable FFT
const FFT: string;
variable Fill
const Fill: string;
variable FlipLeftRight
const FlipLeftRight: string;
variable floor
const floor: <T extends Tensor<Rank>>(x: TensorLike | T) => T;
variable Floor
const Floor: string;
variable floorDiv
const floorDiv: <T extends Tensor<Rank>>( a: Tensor<Rank> | TensorLike, b: Tensor<Rank> | TensorLike) => T;
variable FloorDiv
const FloorDiv: string;
variable FromPixels
const FromPixels: string;
variable FusedBatchNorm
const FusedBatchNorm: string;
variable FusedConv2D
const FusedConv2D: string;
variable FusedDepthwiseConv2D
const FusedDepthwiseConv2D: string;
variable gather
const gather: <T extends Tensor<Rank>>( x: TensorLike | T, indices: Tensor<Rank> | TensorLike, axis?: number, batchDims?: number) => T;
variable gatherND
const gatherND: ( x: Tensor<Rank> | TensorLike, indices: Tensor<Rank> | TensorLike) => Tensor<Rank>;
variable GatherNd
const GatherNd: string;
variable GatherV2
const GatherV2: string;
variable greater
const greater: <T extends Tensor<Rank>>( a: Tensor<Rank> | TensorLike, b: Tensor<Rank> | TensorLike) => T;
variable Greater
const Greater: string;
variable greaterEqual
const greaterEqual: <T extends Tensor<Rank>>( a: Tensor<Rank> | TensorLike, b: Tensor<Rank> | TensorLike) => T;
variable GreaterEqual
const GreaterEqual: string;
variable Identity
const Identity: string;
variable ifft
const ifft: (input: Tensor<Rank>) => Tensor<Rank>;
variable IFFT
const IFFT: string;
variable imag
const imag: <T extends Tensor<Rank>>(input: TensorLike | T) => T;
variable Imag
const Imag: string;
variable image
const image: {
  flipLeftRight: ( image: | import('@tensorflow/tfjs-core/dist/types').TensorLike | import('@tensorflow/tfjs-core/dist/tensor').Tensor4D ) => any;
  grayscaleToRGB: <T extends unknown>(image: any) => T;
  resizeNearestNeighbor: <T_1 extends unknown>( images: any, size: [number, number], alignCorners?: boolean, halfPixelCenters?: boolean ) => T_1;
  resizeBilinear: <T_2 extends unknown>( images: any, size: [number, number], alignCorners?: boolean, halfPixelCenters?: boolean ) => T_2;
  rgbToGrayscale: <T_3 extends unknown>(image: any) => T_3;
  rotateWithOffset: ( image: | import('@tensorflow/tfjs-core/dist/types').TensorLike | import('@tensorflow/tfjs-core/dist/tensor').Tensor4D, radians: number, fillValue?: number | [number, number, number], center?: number | [number, number] ) => any;
  cropAndResize: ( image: | import('@tensorflow/tfjs-core/dist/types').TensorLike | import('@tensorflow/tfjs-core/dist/tensor').Tensor4D, boxes: | import('@tensorflow/tfjs-core/dist/types').TensorLike | import('@tensorflow/tfjs-core/dist/tensor').Tensor2D, boxInd: | import('@tensorflow/tfjs-core/dist/types').TensorLike | import('@tensorflow/tfjs-core/dist/tensor').Tensor1D, cropSize: [number, number], method?: 'bilinear' | 'nearest', extrapolationValue?: number ) => any;
  nonMaxSuppression: ( boxes: | import('@tensorflow/tfjs-core/dist/types').TensorLike | import('@tensorflow/tfjs-core/dist/tensor').Tensor2D, scores: | import('@tensorflow/tfjs-core/dist/types').TensorLike | import('@tensorflow/tfjs-core/dist/tensor').Tensor1D, maxOutputSize: number, iouThreshold?: number, scoreThreshold?: number ) => any;
  nonMaxSuppressionAsync: ( boxes: | import('@tensorflow/tfjs-core/dist/types').TensorLike | import('@tensorflow/tfjs-core/dist/tensor').Tensor2D, scores: | import('@tensorflow/tfjs-core/dist/types').TensorLike | import('@tensorflow/tfjs-core/dist/tensor').Tensor1D, maxOutputSize: number, iouThreshold?: number, scoreThreshold?: number ) => Promise<import('@tensorflow/tfjs-core/dist/tensor').Tensor1D>;
  nonMaxSuppressionWithScore: ( boxes: | import('@tensorflow/tfjs-core/dist/types').TensorLike | import('@tensorflow/tfjs-core/dist/tensor').Tensor2D, scores: | import('@tensorflow/tfjs-core/dist/types').TensorLike | import('@tensorflow/tfjs-core/dist/tensor').Tensor1D, maxOutputSize: number, iouThreshold?: number, scoreThreshold?: number, softNmsSigma?: number ) => any;
  nonMaxSuppressionWithScoreAsync: ( boxes: | import('@tensorflow/tfjs-core/dist/types').TensorLike | import('@tensorflow/tfjs-core/dist/tensor').Tensor2D, scores: | import('@tensorflow/tfjs-core/dist/types').TensorLike | import('@tensorflow/tfjs-core/dist/tensor').Tensor1D, maxOutputSize: number, iouThreshold?: number, scoreThreshold?: number, softNmsSigma?: number ) => Promise<import('@tensorflow/tfjs-core/dist/tensor_types').NamedTensorMap>;
  nonMaxSuppressionPadded: ( boxes: | import('@tensorflow/tfjs-core/dist/types').TensorLike | import('@tensorflow/tfjs-core/dist/tensor').Tensor2D, scores: | import('@tensorflow/tfjs-core/dist/types').TensorLike | import('@tensorflow/tfjs-core/dist/tensor').Tensor1D, maxOutputSize: number, iouThreshold?: number, scoreThreshold?: number, padToMaxOutputSize?: boolean ) => any;
  nonMaxSuppressionPaddedAsync: ( boxes: | import('@tensorflow/tfjs-core/dist/types').TensorLike | import('@tensorflow/tfjs-core/dist/tensor').Tensor2D, scores: | import('@tensorflow/tfjs-core/dist/types').TensorLike | import('@tensorflow/tfjs-core/dist/tensor').Tensor1D, maxOutputSize: number, iouThreshold?: number, scoreThreshold?: number, padToMaxOutputSize?: boolean ) => Promise<import('@tensorflow/tfjs-core/dist/tensor_types').NamedTensorMap>;
  threshold: ( image: | import('@tensorflow/tfjs-core/dist/types').TensorLike | import('@tensorflow/tfjs-core/dist/tensor').Tensor3D, method?: string, inverted?: boolean, threshValue?: number ) => any;
  transform: ( image: | import('@tensorflow/tfjs-core/dist/types').TensorLike | import('@tensorflow/tfjs-core/dist/tensor').Tensor4D, transforms: | import('@tensorflow/tfjs-core/dist/types').TensorLike | import('@tensorflow/tfjs-core/dist/tensor').Tensor2D, interpolation?: 'bilinear' | 'nearest', fillMode?: 'reflect' | 'nearest' | 'constant' | 'wrap', fillValue?: number, outputShape?: [number, number] ) => any;
};
variable inTopKAsync
const inTopKAsync: <T extends Tensor<Rank>, U extends Tensor<Rank>>( predictions: TensorLike | T, targets: TensorLike | U, k?: number) => Promise<U>;
variable irfft
const irfft: (input: Tensor<Rank>) => Tensor<Rank>;
variable isFinite
const isFinite: <T extends Tensor<Rank>>(x: TensorLike | T) => T;
variable IsFinite
const IsFinite: string;
variable isInf
const isInf: <T extends Tensor<Rank>>(x: TensorLike | T) => T;
variable IsInf
const IsInf: string;
variable isNaN
const isNaN: <T extends Tensor<Rank>>(x: TensorLike | T) => T;
variable IsNan
const IsNan: string;
variable leakyRelu
const leakyRelu: <T extends Tensor<Rank>>(x: TensorLike | T, alpha?: number) => T;
variable LeakyRelu
const LeakyRelu: string;
variable less
const less: <T extends Tensor<Rank>>( a: Tensor<Rank> | TensorLike, b: Tensor<Rank> | TensorLike) => T;
variable Less
const Less: string;
variable lessEqual
const lessEqual: <T extends Tensor<Rank>>( a: Tensor<Rank> | TensorLike, b: Tensor<Rank> | TensorLike) => T;
variable LessEqual
const LessEqual: string;
variable linalg
const linalg: { bandPart: <T extends any>( a: any, numLower: number | import('@tensorflow/tfjs-core/dist/tensor').Scalar, numUpper: number | import('@tensorflow/tfjs-core/dist/tensor').Scalar ) => T; gramSchmidt: ( xs: | import('@tensorflow/tfjs-core/dist/tensor').Tensor2D | import('@tensorflow/tfjs-core/dist/tensor').Tensor1D[] ) => | import('@tensorflow/tfjs-core/dist/tensor').Tensor2D | import('@tensorflow/tfjs-core/dist/tensor').Tensor1D[]; qr: ( x: any, fullMatrices?: boolean ) => [ import('@tensorflow/tfjs-core/dist/tensor').Tensor< import('@tensorflow/tfjs-core/dist/types').Rank >, import('@tensorflow/tfjs-core/dist/tensor').Tensor< import('@tensorflow/tfjs-core/dist/types').Rank > ];};
variable LinSpace
const LinSpace: string;
variable localResponseNormalization
const localResponseNormalization: <T extends Tensor3D | Tensor4D>( x: TensorLike | T, depthRadius?: number, bias?: number, alpha?: number, beta?: number) => T;
variable log
const log: <T extends Tensor<Rank>>(x: TensorLike | T) => T;
variable Log
const Log: string;
variable log1p
const log1p: <T extends Tensor<Rank>>(x: TensorLike | T) => T;
variable Log1p
const Log1p: string;
variable logicalAnd
const logicalAnd: <T extends Tensor<Rank>>( a: Tensor<Rank> | TensorLike, b: Tensor<Rank> | TensorLike) => T;
variable LogicalAnd
const LogicalAnd: string;
variable logicalNot
const logicalNot: <T extends Tensor<Rank>>(x: TensorLike | T) => T;
variable LogicalNot
const LogicalNot: string;
variable logicalOr
const logicalOr: <T extends Tensor<Rank>>( a: Tensor<Rank> | TensorLike, b: Tensor<Rank> | TensorLike) => T;
variable LogicalOr
const LogicalOr: string;
variable logicalXor
const logicalXor: <T extends Tensor<Rank>>( a: Tensor<Rank> | TensorLike, b: Tensor<Rank> | TensorLike) => T;
variable LogicalXor
const LogicalXor: string;
variable logSigmoid
const logSigmoid: <T extends Tensor<Rank>>(x: TensorLike | T) => T;
variable logSoftmax
const logSoftmax: <T extends Tensor<Rank>>( logits: TensorLike | T, axis?: number) => T;
variable LogSoftmax
const LogSoftmax: string;
variable logSumExp
const logSumExp: <T extends Tensor<Rank>>( x: Tensor<Rank> | TensorLike, axis?: number | number[], keepDims?: boolean) => T;
variable losses
const losses: { absoluteDifference: <T extends any, O extends any>( labels: any, predictions: any, weights?: | import('@tensorflow/tfjs-core/dist/types').TensorLike | import('@tensorflow/tfjs-core/dist/tensor').Tensor< import('@tensorflow/tfjs-core/dist/types').Rank >, reduction?: any ) => O; computeWeightedLoss: <T_1 extends any, O_1 extends any>( losses: any, weights?: | import('@tensorflow/tfjs-core/dist/types').TensorLike | import('@tensorflow/tfjs-core/dist/tensor').Tensor< import('@tensorflow/tfjs-core/dist/types').Rank >, reduction?: any ) => O_1; cosineDistance: <T_2 extends any, O_2 extends any>( labels: any, predictions: any, axis: number, weights?: | import('@tensorflow/tfjs-core/dist/types').TensorLike | import('@tensorflow/tfjs-core/dist/tensor').Tensor< import('@tensorflow/tfjs-core/dist/types').Rank >, reduction?: any ) => O_2; hingeLoss: <T_3 extends any, O_3 extends any>( labels: any, predictions: any, weights?: | import('@tensorflow/tfjs-core/dist/types').TensorLike | import('@tensorflow/tfjs-core/dist/tensor').Tensor< import('@tensorflow/tfjs-core/dist/types').Rank >, reduction?: any ) => O_3; huberLoss: <T_4 extends any, O_4 extends any>( labels: any, predictions: any, weights?: | import('@tensorflow/tfjs-core/dist/types').TensorLike | import('@tensorflow/tfjs-core/dist/tensor').Tensor< import('@tensorflow/tfjs-core/dist/types').Rank >, delta?: number, reduction?: any ) => O_4; logLoss: <T_5 extends any, O_5 extends any>( labels: any, predictions: any, weights?: | import('@tensorflow/tfjs-core/dist/types').TensorLike | import('@tensorflow/tfjs-core/dist/tensor').Tensor< import('@tensorflow/tfjs-core/dist/types').Rank >, epsilon?: number, reduction?: any ) => O_5; meanSquaredError: <T_6 extends any, O_6 extends any>( labels: any, predictions: any, weights?: | import('@tensorflow/tfjs-core/dist/types').TensorLike | import('@tensorflow/tfjs-core/dist/tensor').Tensor< import('@tensorflow/tfjs-core/dist/types').Rank >, reduction?: any ) => O_6; 
sigmoidCrossEntropy: <T_7 extends any, O_7 extends any>( multiClassLabels: any, logits: any, weights?: | import('@tensorflow/tfjs-core/dist/types').TensorLike | import('@tensorflow/tfjs-core/dist/tensor').Tensor< import('@tensorflow/tfjs-core/dist/types').Rank >, labelSmoothing?: number, reduction?: any ) => O_7; softmaxCrossEntropy: <T_8 extends any, O_8 extends any>( onehotLabels: any, logits: any, weights?: | import('@tensorflow/tfjs-core/dist/types').TensorLike | import('@tensorflow/tfjs-core/dist/tensor').Tensor< import('@tensorflow/tfjs-core/dist/types').Rank >, labelSmoothing?: number, reduction?: any ) => O_8;};
variable LowerBound
const LowerBound: string;
variable LRN
const LRN: string;
variable LRNGrad
const LRNGrad: string;
variable matMul
const matMul: <T extends Tensor<Rank>>( a: Tensor<Rank> | TensorLike, b: Tensor<Rank> | TensorLike, transposeA?: boolean, transposeB?: boolean) => T;
variable MatrixBandPart
const MatrixBandPart: string;
variable max
const max: <T extends Tensor<Rank>>( x: Tensor<Rank> | TensorLike, axis?: number | number[], keepDims?: boolean) => T;
variable Max
const Max: string;
variable maximum
const maximum: <T extends Tensor<Rank>>( a: Tensor<Rank> | TensorLike, b: Tensor<Rank> | TensorLike) => T;
variable Maximum
const Maximum: string;
variable maxPool
const maxPool: <T extends Tensor3D | Tensor4D>( x: TensorLike | T, filterSize: number | [number, number], strides: number | [number, number], pad: number | 'valid' | 'same' | conv_util.ExplicitPadding, dimRoundingMode?: 'floor' | 'round' | 'ceil') => T;
variable MaxPool
const MaxPool: string;
variable maxPool3d
const maxPool3d: <T extends Tensor4D | Tensor5D>( x: TensorLike | T, filterSize: number | [number, number, number], strides: number | [number, number, number], pad: number | 'valid' | 'same', dimRoundingMode?: 'floor' | 'round' | 'ceil', dataFormat?: 'NDHWC' | 'NCDHW') => T;
variable MaxPool3D
const MaxPool3D: string;
variable MaxPool3DGrad
const MaxPool3DGrad: string;
variable MaxPoolGrad
const MaxPoolGrad: string;
variable maxPoolWithArgmax
const maxPoolWithArgmax: <T extends Tensor4D>( x: TensorLike | T, filterSize: number | [number, number], strides: number | [number, number], pad: number | 'valid' | 'same', includeBatchInIndex?: boolean) => NamedTensorMap;
variable MaxPoolWithArgmax
const MaxPoolWithArgmax: string;
variable mean
const mean: <T extends Tensor<Rank>>( x: Tensor<Rank> | TensorLike, axis?: number | number[], keepDims?: boolean) => T;
variable Mean
const Mean: string;
variable min
const min: <T extends Tensor<Rank>>( x: Tensor<Rank> | TensorLike, axis?: number | number[], keepDims?: boolean) => T;
variable Min
const Min: string;
variable minimum
const minimum: <T extends Tensor<Rank>>( a: Tensor<Rank> | TensorLike, b: Tensor<Rank> | TensorLike) => T;
variable Minimum
const Minimum: string;
variable mirrorPad
const mirrorPad: <T extends Tensor<Rank>>( x: TensorLike | T, paddings: [number, number][], mode: 'reflect' | 'symmetric') => T;
variable MirrorPad
const MirrorPad: string;
variable mod
const mod: <T extends Tensor<Rank>>( a: Tensor<Rank> | TensorLike, b: Tensor<Rank> | TensorLike) => T;
variable Mod
const Mod: string;
variable moments
const moments: ( x: Tensor<Rank> | TensorLike, axis?: number | number[], keepDims?: boolean) => { mean: Tensor<Rank>; variance: Tensor<Rank> };
variable movingAverage
const movingAverage: <T extends Tensor<Rank>>( v: TensorLike | T, x: TensorLike | T, decay: number | Scalar, step?: number | Scalar, zeroDebias?: boolean) => T;
variable mul
const mul: <T extends Tensor<Rank>>( a: Tensor<Rank> | TensorLike, b: Tensor<Rank> | TensorLike) => T;
variable multinomial
const multinomial: ( logits: TensorLike | Tensor1D | Tensor2D, numSamples: number, seed?: number, normalized?: boolean) => Tensor1D | Tensor2D;
variable Multinomial
const Multinomial: string;
variable Multiply
const Multiply: string;
variable multiRNNCell
const multiRNNCell: ( lstmCells: LSTMCellFunc[], data: TensorLike | Tensor2D, c: (TensorLike | Tensor2D)[], h: (TensorLike | Tensor2D)[]) => [Tensor2D[], Tensor2D[]];
variable neg
const neg: <T extends Tensor<Rank>>(x: TensorLike | T) => T;
variable Neg
const Neg: string;
variable NonMaxSuppressionV3
const NonMaxSuppressionV3: string;
variable NonMaxSuppressionV4
const NonMaxSuppressionV4: string;
variable NonMaxSuppressionV5
const NonMaxSuppressionV5: string;
variable norm
const norm: ( x: Tensor<Rank> | TensorLike, ord?: number | 'euclidean' | 'fro', axis?: number | number[], keepDims?: boolean) => Tensor<Rank>;
variable notEqual
const notEqual: <T extends Tensor<Rank>>( a: Tensor<Rank> | TensorLike, b: Tensor<Rank> | TensorLike) => T;
variable NotEqual
const NotEqual: string;
variable oneHot
const oneHot: ( indices: Tensor<Rank> | TensorLike, depth: number, onValue?: number, offValue?: number, dtype?: keyof DataTypeMap) => Tensor<Rank>;
variable OneHot
const OneHot: string;
variable onesLike
const onesLike: <T extends Tensor<Rank>>(x: TensorLike | T) => T;
variable OnesLike
const OnesLike: string;
variable OP_SCOPE_SUFFIX
const OP_SCOPE_SUFFIX: string;
variable outerProduct
const outerProduct: ( v1: TensorLike | Tensor1D, v2: TensorLike | Tensor1D) => Tensor2D;
variable Pack
const Pack: string;
variable pad
const pad: <T extends Tensor<Rank>>( x: TensorLike | T, paddings: [number, number][], constantValue?: number) => T;
variable pad1d
const pad1d: ( x: TensorLike | Tensor1D, paddings: [number, number], constantValue?: number) => Tensor1D;
variable pad2d
const pad2d: ( x: TensorLike | Tensor2D, paddings: [[number, number], [number, number]], constantValue?: number) => Tensor2D;
variable pad3d
const pad3d: ( x: TensorLike | Tensor3D, paddings: [[number, number], [number, number], [number, number]], constantValue?: number) => Tensor3D;
variable pad4d
const pad4d: ( x: TensorLike | Tensor4D, paddings: [ [number, number], [number, number], [number, number], [number, number] ], constantValue?: number) => Tensor4D;
variable PadV2
const PadV2: string;
variable pool
const pool: <T extends Tensor3D | Tensor4D>( input: TensorLike | T, windowShape: number | [number, number], poolingType: 'avg' | 'max', pad: number | 'valid' | 'same' | conv_util.ExplicitPadding, dilations?: number | [number, number], strides?: number | [number, number], dimRoundingMode?: 'floor' | 'round' | 'ceil') => T;
variable Pool
const Pool: string;
variable pow
const pow: <T extends Tensor<Rank>>( base: Tensor<Rank> | TensorLike, exp: Tensor<Rank> | TensorLike) => T;
variable Pow
const Pow: string;
variable prelu
const prelu: <T extends Tensor<Rank>>(x: TensorLike | T, alpha: TensorLike | T) => T;
variable Prelu
const Prelu: string;
variable prod
const prod: <T extends Tensor<Rank>>( x: Tensor<Rank> | TensorLike, axis?: number | number[], keepDims?: boolean) => T;
variable Prod
const Prod: string;
variable raggedGather
const raggedGather: ( paramsNestedSplits: Tensor<Rank>[], paramsDenseValues: Tensor<Rank> | TensorLike, indices: Tensor<Rank> | TensorLike, outputRaggedRank: number) => RaggedGatherMap;
variable RaggedGather
const RaggedGather: string;
variable raggedRange
const raggedRange: ( starts: Tensor<Rank> | TensorLike, limits: Tensor<Rank> | TensorLike, deltas: Tensor<Rank> | TensorLike) => NamedTensorMap;
variable RaggedRange
const RaggedRange: string;
variable raggedTensorToTensor
const raggedTensorToTensor: ( shape: Tensor<Rank> | TensorLike, values: Tensor<Rank> | TensorLike, defaultValue: Tensor<Rank> | TensorLike, rowPartitionTensors: Tensor<Rank>[], rowPartitionTypes: string[]) => Tensor<Rank>;
variable RaggedTensorToTensor
const RaggedTensorToTensor: string;
variable rand
const rand: <R extends Rank>( shape: ShapeMap[R], randFunction: () => number, dtype?: keyof DataTypeMap) => Tensor<R>;
variable randomGamma
const randomGamma: <R extends Rank>( shape: ShapeMap[R], alpha: number, beta?: number, dtype?: 'float32' | 'int32', seed?: number) => Tensor<R>;
variable randomNormal
const randomNormal: <R extends Rank>( shape: ShapeMap[R], mean?: number, stdDev?: number, dtype?: 'float32' | 'int32', seed?: number) => Tensor<R>;
variable randomStandardNormal
const randomStandardNormal: <R extends Rank>( shape: ShapeMap[R], dtype?: 'float32' | 'int32', seed?: number) => Tensor<R>;
variable randomUniform
const randomUniform: <R extends Rank>( shape: ShapeMap[R], minval?: number, maxval?: number, dtype?: keyof DataTypeMap, seed?: string | number) => Tensor<R>;
variable randomUniformInt
const randomUniformInt: <R extends Rank>( shape: ShapeMap[R], minval: number, maxval: number, seed?: string | number) => Tensor<R>;
variable Range
const Range: string;
variable real
const real: <T extends Tensor<Rank>>(input: TensorLike | T) => T;
variable Real
const Real: string;
variable RealDiv
const RealDiv: string;
variable reciprocal
const reciprocal: <T extends Tensor<Rank>>(x: TensorLike | T) => T;
variable Reciprocal
const Reciprocal: string;
variable relu
const relu: <T extends Tensor<Rank>>(x: TensorLike | T) => T;
variable Relu
const Relu: string;
variable relu6
const relu6: <T extends Tensor<Rank>>(x: TensorLike | T) => T;
variable Relu6
const Relu6: string;
variable reshape
const reshape: <R extends Rank>( x: Tensor<Rank> | TensorLike, shape: ShapeMap[R]) => Tensor<R>;
variable Reshape
const Reshape: string;
variable ResizeBilinear
const ResizeBilinear: string;
variable ResizeBilinearGrad
const ResizeBilinearGrad: string;
variable ResizeNearestNeighbor
const ResizeNearestNeighbor: string;
variable ResizeNearestNeighborGrad
const ResizeNearestNeighborGrad: string;
variable reverse
const reverse: <T extends Tensor<Rank>>( x: TensorLike | T, axis?: number | number[]) => T;
variable Reverse
const Reverse: string;
variable reverse1d
const reverse1d: (x: TensorLike | Tensor1D) => Tensor1D;
variable reverse2d
const reverse2d: (x: TensorLike | Tensor2D, axis?: number | number[]) => Tensor2D;
variable reverse3d
const reverse3d: (x: TensorLike | Tensor3D, axis?: number | number[]) => Tensor3D;
variable reverse4d
const reverse4d: (x: TensorLike | Tensor4D, axis?: number | number[]) => Tensor4D;
variable rfft
const rfft: (input: Tensor<Rank>, fftLength?: number) => Tensor<Rank>;
variable RotateWithOffset
const RotateWithOffset: string;
variable round
const round: <T extends Tensor<Rank>>(x: TensorLike | T) => T;
variable Round
const Round: string;
variable rsqrt
const rsqrt: <T extends Tensor<Rank>>(x: TensorLike | T) => T;
variable Rsqrt
const Rsqrt: string;
variable scatterND
const scatterND: <R extends Rank>( indices: Tensor<Rank> | TensorLike, updates: Tensor<Rank> | TensorLike, shape: ShapeMap[R]) => Tensor<R>;
variable ScatterNd
const ScatterNd: string;
variable searchSorted
const searchSorted: ( sortedSequence: Tensor<Rank> | TensorLike, values: Tensor<Rank> | TensorLike, side?: 'left' | 'right') => Tensor<Rank>;
variable SearchSorted
const SearchSorted: string;
variable Select
const Select: string;
variable selu
const selu: <T extends Tensor<Rank>>(x: TensorLike | T) => T;
variable Selu
const Selu: string;
variable separableConv2d
const separableConv2d: <T extends Tensor3D | Tensor4D>( x: TensorLike | T, depthwiseFilter: TensorLike | Tensor4D, pointwiseFilter: TensorLike | Tensor4D, strides: number | [number, number], pad: 'valid' | 'same', dilation?: number | [number, number], dataFormat?: 'NHWC' | 'NCHW') => T;
variable setdiff1dAsync
const setdiff1dAsync: ( x: Tensor<Rank> | TensorLike, y: Tensor<Rank> | TensorLike) => Promise<[Tensor<Rank>, Tensor<Rank>]>;
variable sigmoid
const sigmoid: <T extends Tensor<Rank>>(x: TensorLike | T) => T;
variable Sigmoid
const Sigmoid: string;
variable sign
const sign: <T extends Tensor<Rank>>(x: TensorLike | T) => T;
variable Sign
const Sign: string;
variable signal
const signal: { hammingWindow: (windowLength: number) => any; hannWindow: (windowLength: number) => any; frame: ( signal: any, frameLength: number, frameStep: number, padEnd?: boolean, padValue?: number ) => any; stft: ( signal: any, frameLength: number, frameStep: number, fftLength?: number, windowFn?: (length: number) => any ) => any;};
variable sin
const sin: <T extends Tensor<Rank>>(x: TensorLike | T) => T;
variable Sin
const Sin: string;
variable sinh
const sinh: <T extends Tensor<Rank>>(x: TensorLike | T) => T;
variable Sinh
const Sinh: string;
variable slice
const slice: <R extends Rank, T extends Tensor<R>>( x: TensorLike | T, begin: number | number[], size?: number | number[]) => T;
variable Slice
const Slice: string;
variable slice1d
const slice1d: (x: TensorLike | Tensor1D, begin: number, size: number) => Tensor1D;
variable slice2d
const slice2d: ( x: TensorLike | Tensor2D, begin: [number, number], size: [number, number]) => Tensor2D;
variable slice3d
const slice3d: ( x: TensorLike | Tensor3D, begin: [number, number, number], size: [number, number, number]) => Tensor3D;
variable slice4d
const slice4d: ( x: TensorLike | Tensor4D, begin: [number, number, number, number], size: [number, number, number, number]) => Tensor4D;
variable softmax
const softmax: <T extends Tensor<Rank>>(logits: TensorLike | T, dim?: number) => T;
variable Softmax
const Softmax: string;
variable softplus
const softplus: <T extends Tensor<Rank>>(x: TensorLike | T) => T;
variable Softplus
const Softplus: string;
variable spaceToBatchND
const spaceToBatchND: <T extends Tensor<Rank>>( x: TensorLike | T, blockShape: number[], paddings: number[][]) => T;
variable SpaceToBatchND
const SpaceToBatchND: string;
variable sparse
const sparse: { sparseFillEmptyRows: ( indices: | import('@tensorflow/tfjs-core/dist/types').TensorLike | import('@tensorflow/tfjs-core/dist/tensor').Tensor2D, values: | import('@tensorflow/tfjs-core/dist/types').TensorLike | import('@tensorflow/tfjs-core/dist/tensor').Tensor1D, denseShape: | import('@tensorflow/tfjs-core/dist/types').TensorLike | import('@tensorflow/tfjs-core/dist/tensor').Tensor1D, defaultValue: | import('@tensorflow/tfjs-core/dist/types').ScalarLike | import('@tensorflow/tfjs-core/dist/tensor').Scalar ) => any; sparseReshape: ( inputIndices: | import('@tensorflow/tfjs-core/dist/types').TensorLike | import('@tensorflow/tfjs-core/dist/tensor').Tensor2D, inputShape: | import('@tensorflow/tfjs-core/dist/types').TensorLike | import('@tensorflow/tfjs-core/dist/tensor').Tensor1D, newShape: | import('@tensorflow/tfjs-core/dist/types').TensorLike | import('@tensorflow/tfjs-core/dist/tensor').Tensor1D ) => any; sparseSegmentMean: ( data: | import('@tensorflow/tfjs-core/dist/types').TensorLike | import('@tensorflow/tfjs-core/dist/tensor').Tensor< import('@tensorflow/tfjs-core/dist/types').Rank >, indices: | import('@tensorflow/tfjs-core/dist/types').TensorLike | import('@tensorflow/tfjs-core/dist/tensor').Tensor1D, segmentIds: | import('@tensorflow/tfjs-core/dist/types').TensorLike | import('@tensorflow/tfjs-core/dist/tensor').Tensor1D ) => any; sparseSegmentSum: ( data: | import('@tensorflow/tfjs-core/dist/types').TensorLike | import('@tensorflow/tfjs-core/dist/tensor').Tensor< import('@tensorflow/tfjs-core/dist/types').Rank >, indices: | import('@tensorflow/tfjs-core/dist/types').TensorLike | import('@tensorflow/tfjs-core/dist/tensor').Tensor1D, segmentIds: | import('@tensorflow/tfjs-core/dist/types').TensorLike | import('@tensorflow/tfjs-core/dist/tensor').Tensor1D ) => any;};
variable SparseFillEmptyRows
const SparseFillEmptyRows: string;
variable SparseReshape
const SparseReshape: string;
variable SparseSegmentMean
const SparseSegmentMean: string;
variable SparseSegmentSum
const SparseSegmentSum: string;
variable sparseToDense
const sparseToDense: <R extends Rank>( sparseIndices: Tensor<Rank> | TensorLike, sparseValues: Tensor<Rank> | TensorLike, outputShape: ShapeMap[R], defaultValue?: Scalar | ScalarLike) => Tensor<R>;
variable SparseToDense
const SparseToDense: string;
variable spectral
const spectral: { fft: (input: any) => any; ifft: (input: any) => any; rfft: (input: any, fftLength?: number) => any; irfft: (input: any) => any;};
variable split
const split: <T extends Tensor<Rank>>( x: Tensor<Rank> | TensorLike, numOrSizeSplits: number | number[], axis?: number) => T[];
variable SplitV
const SplitV: string;
variable sqrt
const sqrt: <T extends Tensor<Rank>>(x: TensorLike | T) => T;
variable Sqrt
const Sqrt: string;
variable square
const square: <T extends Tensor<Rank>>(x: TensorLike | T) => T;
variable Square
const Square: string;
variable squaredDifference
const squaredDifference: <T extends Tensor<Rank>>( a: Tensor<Rank> | TensorLike, b: Tensor<Rank> | TensorLike) => T;
variable SquaredDifference
const SquaredDifference: string;
variable squeeze
const squeeze: <T extends Tensor<Rank>>( x: Tensor<Rank> | TensorLike, axis?: number[]) => T;
variable stack
const stack: <T extends Tensor<Rank>>( tensors: (TensorLike | T)[], axis?: number) => Tensor<Rank>;
variable StaticRegexReplace
const StaticRegexReplace: string;
variable step
const step: <T extends Tensor<Rank>>(x: TensorLike | T, alpha?: number) => T;
variable Step
const Step: string;
TensorFlow.js-only kernels
variable stridedSlice
const stridedSlice: ( x: Tensor<Rank> | TensorLike, begin: number[], end: number[], strides?: number[], beginMask?: number, endMask?: number, ellipsisMask?: number, newAxisMask?: number, shrinkAxisMask?: number) => Tensor<Rank>;
variable StridedSlice
const StridedSlice: string;
variable string
const string: { stringNGrams: ( data: | import('@tensorflow/tfjs-core/dist/types').TensorLike | import('@tensorflow/tfjs-core/dist/tensor').Tensor1D, dataSplits: | import('@tensorflow/tfjs-core/dist/types').TensorLike | import('@tensorflow/tfjs-core/dist/tensor').Tensor< import('@tensorflow/tfjs-core/dist/types').Rank >, separator: string, nGramWidths: number[], leftPad: string, rightPad: string, padWidth: number, preserveShortSequences: boolean ) => any; stringSplit: ( input: | import('@tensorflow/tfjs-core/dist/types').TensorLike | import('@tensorflow/tfjs-core/dist/tensor').Tensor1D, delimiter: | import('@tensorflow/tfjs-core/dist/types').ScalarLike | import('@tensorflow/tfjs-core/dist/tensor').Scalar, skipEmpty?: boolean ) => any; stringToHashBucketFast: ( input: | import('@tensorflow/tfjs-core/dist/types').TensorLike | import('@tensorflow/tfjs-core/dist/tensor').Tensor< import('@tensorflow/tfjs-core/dist/types').Rank >, numBuckets: number ) => any; staticRegexReplace: ( input: | import('@tensorflow/tfjs-core/dist/types').TensorLike | import('@tensorflow/tfjs-core/dist/tensor').Tensor< import('@tensorflow/tfjs-core/dist/types').Rank >, pattern: string, rewrite: string, replaceGlobal?: boolean ) => any;};
variable StringNGrams
const StringNGrams: string;
variable StringSplit
const StringSplit: string;
variable StringToHashBucketFast
const StringToHashBucketFast: string;
variable sub
const sub: <T extends Tensor<Rank>>( a: Tensor<Rank> | TensorLike, b: Tensor<Rank> | TensorLike) => T;
variable Sub
const Sub: string;
variable sum
const sum: <T extends Tensor<Rank>>( x: Tensor<Rank> | TensorLike, axis?: number | number[], keepDims?: boolean) => T;
variable Sum
const Sum: string;
variable tan
const tan: <T extends Tensor<Rank>>(x: TensorLike | T) => T;
variable Tan
const Tan: string;
variable tanh
const tanh: <T extends Tensor<Rank>>(x: TensorLike | T) => T;
variable Tanh
const Tanh: string;
variable tensorScatterUpdate
const tensorScatterUpdate: <R extends Rank>( tensor: TensorLike | Tensor<R>, indices: Tensor<Rank> | TensorLike, updates: Tensor<Rank> | TensorLike) => Tensor<R>;
variable TensorScatterUpdate
const TensorScatterUpdate: string;
variable tile
const tile: <T extends Tensor<Rank>>(x: TensorLike | T, reps: number[]) => T;
variable Tile
const Tile: string;
variable topk
const topk: <T extends Tensor<Rank>>( x: TensorLike | T, k?: number, sorted?: boolean) => { values: T; indices: T };
variable TopK
const TopK: string;
variable train
const train: typeof OptimizerConstructors;
variable Transform
const Transform: string;
variable transpose
const transpose: <T extends Tensor<Rank>>( x: TensorLike | T, perm?: number[], conjugate?: boolean) => T;
variable Transpose
const Transpose: string;
variable truncatedNormal
const truncatedNormal: <R extends Rank>( shape: ShapeMap[R], mean?: number, stdDev?: number, dtype?: 'float32' | 'int32', seed?: number) => Tensor<R>;
variable unique
const unique: <T extends Tensor<Rank>>( x: TensorLike | T, axis?: number) => { values: T; indices: Tensor1D };
variable Unique
const Unique: string;
variable Unpack
const Unpack: string;
variable unsortedSegmentSum
const unsortedSegmentSum: <T extends Tensor<Rank>>( x: TensorLike | T, segmentIds: TensorLike | Tensor1D, numSegments: number) => T;
variable UnsortedSegmentSum
const UnsortedSegmentSum: string;
variable unstack
const unstack: (x: Tensor<Rank> | TensorLike, axis?: number) => Tensor<Rank>[];
variable UpperBound
const UpperBound: string;
variable version_core
const version_core: string;
See the LICENSE file.
variable where
const where: <T extends Tensor<Rank>>( condition: Tensor<Rank> | TensorLike, a: TensorLike | T, b: TensorLike | T) => T;
variable whereAsync
const whereAsync: (condition: Tensor<Rank> | TensorLike) => Promise<Tensor2D>;
variable zerosLike
const zerosLike: <T extends Tensor<Rank>>(x: TensorLike | T) => T;
variable ZerosLike
const ZerosLike: string;
Functions
function backend
backend: () => KernelBackend;
Gets the current backend. If no backends have been initialized, this will attempt to initialize the best backend. Will throw an error if the highest priority backend has async initialization, in which case you should call 'await tf.ready()' before running other code.
{heading: 'Backends'}
function buffer
buffer: <R extends Rank, D extends keyof DataTypeMap = 'float32'>( shape: ShapeMap[R], dtype?: D, values?: DataTypeMap[D]) => TensorBuffer<R, D>;
Creates an empty `tf.TensorBuffer` with the specified `shape` and `dtype`.

The values are stored in CPU as a `TypedArray`. Fill the buffer using `buffer.set()`, or by modifying `buffer.values` directly.

When done, call `buffer.toTensor()` to get an immutable `tf.Tensor` with those values.

```js
// Create a buffer and set values at particular indices.
const buffer = tf.buffer([2, 2]);
buffer.set(3, 0, 0);
buffer.set(5, 1, 0);

// Convert the buffer back to a tensor.
buffer.toTensor().print();
```

Parameter shape
An array of integers defining the output tensor shape.
Parameter dtype
The dtype of the buffer. Defaults to 'float32'.
Parameter values
The values of the buffer as a `TypedArray`. Defaults to zeros.

{heading: 'Tensors', subheading: 'Creation'}
function copyRegisteredKernels
copyRegisteredKernels: ( registeredBackendName: string, newBackendName: string) => void;
Finds kernels that have already been registered to a backend and re-registers them for a new backend. Useful for registering custom backends.
Parameter registeredBackendName
Already registered backend.
Parameter newBackendName
New backend.
function cosineWindow
cosineWindow: (windowLength: number, a: number, b: number) => Tensor1D;
function customGrad
customGrad: <T extends Tensor<Rank>>( f: CustomGradientFunc<T>) => (...args: Tensor[]) => T;
Overrides the gradient computation of a function `f`.

Takes a function `f(...inputs, save) => {value: Tensor, gradFunc: (dy, saved) => Tensor[]}` and returns another function `g(...inputs)` which takes the same inputs as `f`. When called, `g` returns `f().value`. In backward mode, custom gradients with respect to each input of `f` are computed using `f().gradFunc`.

The `save` function passed to `f` should be used for saving tensors needed in the gradient. And the `saved` passed to the `gradFunc` is a `NamedTensorMap`, which contains those saved tensors.

```js
const customOp = tf.customGrad((x, save) => {
  // Save x to make sure it's available later for the gradient.
  save([x]);
  // Override gradient of our custom x ^ 2 op to be dy * abs(x);
  return {
    value: x.square(),
    // Note `saved.x` which points to the `x` we saved earlier.
    gradFunc: (dy, saved) => [dy.mul(saved[0].abs())]
  };
});

const x = tf.tensor1d([-1, -2, 3]);
const dx = tf.grad(x => customOp(x));
console.log(`f(x):`);
customOp(x).print();
console.log(`f'(x):`);
dx(x).print();
```

Parameter f

The function to evaluate in forward mode, which should return `{value: Tensor, gradFunc: (dy, saved) => Tensor[]}`, where `gradFunc` returns the custom gradients of `f` with respect to its inputs.

{heading: 'Training', subheading: 'Gradients'}
function deprecationWarn
deprecationWarn: (msg: string) => void;
Warn users about deprecated functionality.
function disableDeprecationWarnings
disableDeprecationWarnings: () => void;
Globally disables deprecation warnings
function dispose
dispose: (container: TensorContainer) => void;
Disposes any `tf.Tensor`s found within the provided object.

Parameter container

An object that may be a `tf.Tensor` or may directly contain `tf.Tensor`s, such as a `Tensor[]` or `{key: Tensor, ...}`. If the object is not a `tf.Tensor` or does not contain `Tensors`, nothing happens. In general it is safe to pass any object here, except that `Promise`s are not supported.

{heading: 'Performance', subheading: 'Memory'}
function disposeVariables
disposeVariables: () => void;
Dispose all variables kept in backend engine.
{heading: 'Environment'}
function enableDebugMode
enableDebugMode: () => void;
Enables debug mode which will log information about all executed kernels: the elapsed time of the kernel execution, as well as the rank, shape, and size of the output tensor.
Debug mode will significantly slow down your application as it will download the result of every operation to the CPU. This should not be used in production. Debug mode does not affect the timing information of the kernel execution as we do not measure download time in the kernel execution time.
See also: `tf.profile`, `tf.memory`.

{heading: 'Environment'}
function enableProdMode
enableProdMode: () => void;
Enables production mode which disables correctness checks in favor of performance.
{heading: 'Environment'}
function enclosingPowerOfTwo
enclosingPowerOfTwo: (value: number) => number;
function engine
engine: () => Engine;
It returns the global engine that keeps track of all tensors and backends.
{heading: 'Environment'}
function env
env: () => Environment;
Returns the current environment (a global singleton).
The environment object contains the evaluated feature values as well as the active platform.
{heading: 'Environment'}
function fill
fill: <R extends Rank>( shape: ShapeMap[R], value: number | string, dtype?: DataType) => Tensor<R>;
Creates a `tf.Tensor` filled with a scalar value.

```js
tf.fill([2, 2], 4).print();
```

Parameter shape
An array of integers defining the output tensor shape.
Parameter value
The scalar value to fill the tensor with.
Parameter dtype
The type of an element in the resulting tensor. Defaults to 'float32' if the given param value is a number, otherwise 'string'.
{heading: 'Tensors', subheading: 'Creation'}
function findBackend
findBackend: (name: string) => KernelBackend;
Finds the backend registered under the provided name. Returns null if the name is not in the registry, or the registration hasn't finished yet.
function findBackendFactory
findBackendFactory: ( name: string) => () => KernelBackend | Promise<KernelBackend>;
Finds the backend factory registered under the provided name. Returns a function that produces a new backend when called. Returns null if the name is not in the registry.
function getBackend
getBackend: () => string;
Returns the current backend name (cpu, webgl, etc). The backend is responsible for creating tensors and executing operations on those tensors.
{heading: 'Backends'}
function getGradient
getGradient: (kernelName: string) => GradConfig;
Returns the registered gradient info associated with the provided kernel.
Parameter kernelName
The official TF kernel name.
function getKernel
getKernel: (kernelName: string, backendName: string) => KernelConfig;
Returns the kernel function (code) associated with the provided names.
Parameter kernelName
The official name of the kernel.
Parameter backendName
The official name of the backend.
function getKernelsForBackend
getKernelsForBackend: (backendName: string) => KernelConfig[];
function grad
grad: ( f: (x: Tensor) => Tensor) => (x: TensorLike | Tensor, dy?: TensorLike | Tensor) => Tensor;
Provided
f(x)
, returns another functiong(x, dy?)
, which gives the gradient off(x)
with respect tox
.If
dy
is provided, the gradient off(x).mul(dy).sum()
with respect tox
is computed instead.f(x)
must take a single tensorx
and return a single tensory
. Iff()
takes multiple inputs, usetf.grads
instead.// f(x) = x ^ 2const f = x => x.square();// f'(x) = 2xconst g = tf.grad(f);const x = tf.tensor1d([2, 3]);g(x).print();// f(x) = x ^ 3const f = x => x.pow(tf.scalar(3, 'int32'));// f'(x) = 3x ^ 2const g = tf.grad(f);// f''(x) = 6xconst gg = tf.grad(g);const x = tf.tensor1d([2, 3]);gg(x).print();Parameter f
The function f(x), to compute gradient for.
{heading: 'Training', subheading: 'Gradients'}
function grads
grads: ( f: (...args: Tensor[]) => Tensor) => (args: Array<Tensor | TensorLike>, dy?: Tensor | TensorLike) => Tensor[];
Provided
f(x1, x2,...)
, returns another functiong([x1, x2,...], dy?)
, which gives an array of gradients off()
with respect to each input [x1
,x2
,...].If
dy
is passed when callingg()
, the gradient off(x1,...).mul(dy).sum()
with respect to each input is computed instead. The providedf
must take one or more tensors and return a single tensory
. Iff()
takes a single input, we recommend usingtf.grad
instead.// f(a, b) = a * bconst f = (a, b) => a.mul(b);// df / da = b, df / db = aconst g = tf.grads(f);const a = tf.tensor1d([2, 3]);const b = tf.tensor1d([-2, -3]);const [da, db] = g([a, b]);console.log('da');da.print();console.log('db');db.print();Parameter f
The function
f(x1, x2,...)
to compute gradients for.{heading: 'Training', subheading: 'Gradients'}
function keep
keep: <T extends Tensor<Rank>>(result: T) => T;
Keeps a
tf.Tensor
generated inside atf.tidy
from being disposed automatically.let b;const y = tf.tidy(() => {const one = tf.scalar(1);const a = tf.scalar(2);// b will not be cleaned up by the tidy. a and one will be cleaned up// when the tidy ends.b = tf.keep(a.square());console.log('numTensors (in tidy): ' + tf.memory().numTensors);// The value returned inside the tidy function will return// through the tidy, in this case to the variable y.return b.add(one);});console.log('numTensors (outside tidy): ' + tf.memory().numTensors);console.log('y:');y.print();console.log('b:');b.print();Parameter result
The tensor to keep from being disposed.
{heading: 'Performance', subheading: 'Memory'}
function linspace
linspace: (start: number, stop: number, num: number) => Tensor1D;
Return an evenly spaced sequence of numbers over the given interval.
tf.linspace(0, 9, 10).print();Parameter start
The start value of the sequence.
Parameter stop
The end value of the sequence.
Parameter num
The number of values to generate.
{heading: 'Tensors', subheading: 'Creation'}
function lowerBound
lowerBound: ( sortedSequence: Tensor | TensorLike, values: Tensor | TensorLike) => Tensor;
Searches for where a value would go in a sorted sequence.
This is not a method for checking containment (like javascript in).
The typical use case for this operation is "binning", "bucketing", or "discretizing". The values are assigned to bucket-indices based on the edges listed in 'sortedSequence'. This operation returns the bucket-index for each value.
The index returned corresponds to the first edge greater than or equal to the value.
The axis is not settable for this operation. It always operates on the innermost dimension (axis=-1). The operation will accept any number of outer dimensions.
Note: This operation assumes that 'sortedSequence' is sorted along the innermost axis, maybe using 'sort(..., axis=-1)'. If the sequence is not sorted no error is raised and the content of the returned tensor is not well defined.
const edges = tf.tensor1d([-1, 3.3, 9.1, 10.0]);let values = tf.tensor1d([0.0, 4.1, 12.0]);const result1 = tf.lowerBound(edges, values);result1.print(); // [1, 2, 4]const seq = tf.tensor1d([0, 3, 9, 10, 10]);values = tf.tensor1d([0, 4, 10]);const result2 = tf.lowerBound(seq, values);result2.print(); // [0, 2, 3]const sortedSequence = tf.tensor2d([[0., 3., 8., 9., 10.],[1., 2., 3., 4., 5.]]);values = tf.tensor2d([[9.8, 2.1, 4.3],[0.1, 6.6, 4.5, ]]);const result3 = tf.lowerBound(sortedSequence, values);result3.print(); // [[4, 1, 2], [0, 5, 4]]Parameter sortedSequence
: N-D. Sorted sequence.
Parameter values
: N-D. Search values. An N-D int32 tensor the size of values containing the result of applying lower bound to each value. The result is not a global index to the entire Tensor, but the index in the last dimension. {heading: 'Operations', subheading: 'Evaluation'}
function memory
memory: () => MemoryInfo;
Returns memory info at the current time in the program. The result is an object with the following properties:
-
numBytes
: Number of bytes allocated (undisposed) at this time. -numTensors
: Number of unique tensors allocated. -numDataBuffers
: Number of unique data buffers allocated (undisposed) at this time, which is ≤ the number of tensors (e.g.a.reshape(newShape)
makes a new Tensor that shares the same data buffer witha
). -unreliable
: True if the memory usage is unreliable. Seereasons
whenunreliable
is true. -reasons
:string[]
, reasons why the memory is unreliable, present ifunreliable
is true.WebGL Properties: -
numBytesInGPU
: Number of bytes allocated (undisposed) in the GPU only at this time.{heading: 'Performance', subheading: 'Memory'}
function meshgrid
meshgrid: <T extends Tensor<Rank>>( x?: T | TensorLike, y?: T | TensorLike, { indexing }?: { indexing?: string }) => T[];
Broadcasts parameters for evaluation on an N-D grid.
Given N one-dimensional coordinate arrays
*args
, returns a listoutputs
of N-D coordinate arrays for evaluating expressions on an N-D grid.Notes:
meshgrid
supports cartesian ('xy') and matrix ('ij') indexing conventions. When theindexing
argument is set to 'xy' (the default), the broadcasting instructions for the first two dimensions are swapped. Examples: Callingconst [X, Y] = meshgrid(x, y)
with the tensorsconst x = [1, 2, 3];const y = [4, 5, 6];const [X, Y] = tf.meshgrid(x, y);// X = [[1, 2, 3],// [1, 2, 3],// [1, 2, 3]]// Y = [[4, 4, 4],// [5, 5, 5],// [6, 6, 6]]Parameter x
Tensor with rank geq 1.
Parameter y
Tensor with rank geq 1.
Parameter indexing
{heading: 'Operations', subheading: 'Slicing and Joining'}
function nextFrame
nextFrame: () => Promise<void>;
Returns a promise that resolves when a requestAnimationFrame has completed.
On Node.js this uses setImmediate instead of requestAnimationFrame.
This is simply a sugar method so that users can do the following:
await tf.nextFrame();
{heading: 'Performance', subheading: 'Timing'}
function ones
ones: <R extends Rank>(shape: ShapeMap[R], dtype?: DataType) => Tensor<R>;
Creates a
tf.Tensor
with all elements set to 1.tf.ones([2, 2]).print();Parameter shape
An array of integers defining the output tensor shape.
Parameter dtype
The type of an element in the resulting tensor. Defaults to 'float32'.
{heading: 'Tensors', subheading: 'Creation'}
function op
op: <T extends Function>(f: { [name: string]: T }) => T;
Used for wrapping functions that perform math operations on Tensors. The function will be wrapped in a named scope that cleans all memory usage after the function is done.
function print
print: <T extends Tensor<Rank>>(x: T, verbose?: boolean) => void;
Prints information about the
tf.Tensor
including its data.const verbose = true;tf.tensor2d([1, 2, 3, 4], [2, 2]).print(verbose);Parameter x
The tensor to be printed.
Parameter verbose
Whether to print verbose information about the
Tensor
, including dtype and size.{heading: 'Tensors', subheading: 'Creation'}
function profile
profile: ( f: () => TensorContainer | Promise<TensorContainer>) => Promise<ProfileInfo>;
Executes the provided function
f()
and returns a promise that resolves with information about the function's memory use: -newBytes
: the number of new bytes allocated -newTensors
: the number of new tensors created -peakBytes
: the peak number of bytes allocated -kernels
: an array of objects for each kernel involved that reports their input and output shapes, number of bytes used, and number of new tensors created. -kernelNames
: an array of unique strings with just the names of the kernels in thekernels
array.const profile = await tf.profile(() => {const x = tf.tensor1d([1, 2, 3]);let x2 = x.square();x2.dispose();x2 = x.square();x2.dispose();return x;});console.log(`newBytes: ${profile.newBytes}`);console.log(`newTensors: ${profile.newTensors}`);console.log(`byte usage over all kernels: ${profile.kernels.map(k =>k.totalBytesSnapshot)}`);{heading: 'Performance', subheading: 'Profile'}
function range
range: ( start: number, stop: number, step?: number, dtype?: 'float32' | 'int32') => Tensor1D;
Creates a new
tf.Tensor1D
filled with the numbers in the range provided.The tensor is a half-open interval meaning it includes start, but excludes stop. Decrementing ranges and negative step values are also supported.
tf.range(0, 9, 2).print();Parameter start
An integer start value
Parameter stop
An integer stop value
Parameter step
An integer increment (will default to 1 or -1)
Parameter dtype
The data type of the output tensor. Defaults to 'float32'.
{heading: 'Tensors', subheading: 'Creation'}
function ready
ready: () => Promise<void>;
Returns a promise that resolves when the currently selected backend (or the highest priority one) has initialized. Await this promise when you are using a backend that has async initialization.
{heading: 'Backends'}
function registerBackend
registerBackend: ( name: string, factory: () => KernelBackend | Promise<KernelBackend>, priority?: number) => boolean;
Registers a global backend. The registration should happen when importing a module file (e.g. when importing
backend_webgl.ts
), and is used for modular builds (e.g. custom tfjs bundle with only webgl support).Parameter factory
The backend factory function. When called, it should return a backend instance, or a promise of an instance.
Parameter priority
The priority of the backend (higher = more important). In case multiple backends are registered, the priority is used to find the best backend. Defaults to 1. Returns false if there is already a registered backend under this name, true if not.
{heading: 'Backends'}
function registerGradient
registerGradient: (config: GradConfig) => void;
Registers a gradient function for a given kernel in the global registry, to be used during the back-propagation of that kernel.
Parameter config
An object with the following properties: -
kernelName
The name of the kernel that the gradient function is for. -gradFunc
The function to run during back-propagation.
function registerKernel
registerKernel: (config: KernelConfig) => void;
Registers the function (forward pass) for the kernel in a global registry.
Parameter config
A config object with the following properties: -
kernelName
The official name of the kernel. -backendName
The official name of the backend. -kernelFunc
The function to run during the forward pass of the kernel. -setupFunc
Optional. Gets called once, after the backend initializes. -disposeFunc
Optional. Gets called once, right before the backend is disposed.
function removeBackend
removeBackend: (name: string) => void;
Removes a backend and the registered factory.
{heading: 'Backends'}
function scalar
scalar: ( value: number | boolean | string | Uint8Array, dtype?: DataType) => Scalar;
Creates rank-0
tf.Tensor
(scalar) with the provided value and dtype.The same functionality can be achieved with
tf.tensor
, but in general we recommend usingtf.scalar
as it makes the code more readable.tf.scalar(3.14).print();Parameter value
The value of the scalar.
Parameter dtype
The data type.
{heading: 'Tensors', subheading: 'Creation'}
function setBackend
setBackend: (backendName: string) => Promise<boolean>;
Sets the backend (cpu, webgl, wasm, etc) responsible for creating tensors and executing operations on those tensors. Returns a promise that resolves to a boolean if the backend initialization was successful.
Note this disposes the current backend, if any, as well as any tensors associated with it. A new backend is initialized, even if it is of the same type as the previous one.
Parameter backendName
The name of the backend. Currently supports
'webgl'|'cpu'
in the browser,'tensorflow'
under node.js (requires tfjs-node), and'wasm'
(requires tfjs-backend-wasm).{heading: 'Backends'}
function setPlatform
setPlatform: (platformName: string, platform: Platform) => void;
Sets the global platform.
Parameter platformName
The name of this platform.
Parameter platform
A platform implementation.
function sumOutType
sumOutType: (type: DataType) => DataType;
Returns the output type after summation.
function tensor
tensor: <R extends Rank>( values: TensorLike | WebGLData | WebGPUData, shape?: ShapeMap[R], dtype?: DataType) => Tensor<R>;
Creates a
tf.Tensor
with the provided values, shape and dtype.// Pass an array of values to create a vector.tf.tensor([1, 2, 3, 4]).print();// Pass a nested array of values to make a matrix or a higher// dimensional tensor.tf.tensor([[1, 2], [3, 4]]).print();// Pass a flat array and specify a shape yourself.tf.tensor([1, 2, 3, 4], [2, 2]).print();// Pass a `WebGLData` object and specify a shape yourself.// This makes it possible for TF.js applications to avoid GPU / CPU sync.// For example, if your application includes a preprocessing step on the GPU,// you could upload the GPU output directly to TF.js, rather than first// downloading the values.// Example for WebGL2:if (tf.findBackend('custom-webgl') == null) {const customCanvas = document.createElement('canvas');const customBackend = new tf.MathBackendWebGL(customCanvas);tf.registerBackend('custom-webgl', () => customBackend);}const savedBackend = tf.getBackend();await tf.setBackend('custom-webgl');const gl = tf.backend().gpgpu.gl;const texture = gl.createTexture();const tex2d = gl.TEXTURE_2D;const width = 2;const height = 2;gl.bindTexture(tex2d, texture);gl.texParameteri(tex2d, gl.TEXTURE_WRAP_S, gl.CLAMP_TO_EDGE);gl.texParameteri(tex2d, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE);gl.texParameteri(tex2d, gl.TEXTURE_MIN_FILTER, gl.NEAREST);gl.texParameteri(tex2d, gl.TEXTURE_MAG_FILTER, gl.NEAREST);gl.texImage2D(tex2d, 0, gl.RGBA32F, // internalFormatwidth, height, 0,gl.RGBA, // textureFormatgl.FLOAT, // textureTypenew Float32Array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]));// Currently, the `texture` has 4 pixels:// Pixel0 is {R:0, G:1, B:2, A:3}// Pixel1 is {R:4, G:5, B:6, A:7}// Pixel2 is {R:8, G:9, B:10, A:11}// Pixel3 is {R:12, G:13, B:14, A:15}const logicalShape = [height * width * 2];const a = tf.tensor({texture, height, width, channels: 'BR'}, logicalShape);a.print();// Tensor value will be [2, 0, 6, 4, 10, 8, 14, 12], since [2, 0] is the// values of 'B' and 'R' channels of Pixel0, [6, 4] is the values of 'B' and'R'// 
channels of Pixel1...// For postprocessing on the GPU, it's possible to retrieve the texture// backing any tensor by calling the tensor's `dataToGPU` method like// so:const tex = a.dataToGPU();await tf.setBackend(savedBackend);// Pass a `WebGPUData` object and specify a shape yourself.// This makes it possible for TF.js applications to avoid GPU / CPU sync.// For example, if your application includes a preprocessing step on the GPU,// you could upload the GPU output directly to TF.js, rather than first// downloading the values. Unlike WebGL, this optionally supports zero copy// by WebGPUData.zeroCopy. When zeroCopy is false or undefined(default), this// passing GPUBuffer can be destroyed after tensor is created. When zeroCopy// is true, this GPUBuffer is bound directly by the tensor, so do not destroy// this GPUBuffer until all access is done.// Example for WebGPU:function createGPUBufferFromData(device, data, dtype) {const bytesPerElement = 4;const sizeInBytes = data.length * bytesPerElement;const gpuWriteBuffer = device.createBuffer({mappedAtCreation: true,size: sizeInBytes,usage: GPUBufferUsage.MAP_WRITE | GPUBufferUsage.COPY_SRC});const arrayBuffer = gpuWriteBuffer.getMappedRange();if (dtype === 'float32') {new Float32Array(arrayBuffer).set(data);} else if (dtype === 'int32') {new Int32Array(arrayBuffer).set(data);} else {throw new Error(`Creating tensor from GPUBuffer only supports` +`'float32'|'int32' dtype, while the dtype is ${dtype}.`);}gpuWriteBuffer.unmap();const gpuReadBuffer = device.createBuffer({mappedAtCreation: false,size: sizeInBytes,usage: GPUBufferUsage.COPY_DST | GPUBufferUsage.STORAGE |GPUBufferUsage.COPY_SRC});const copyEncoder = device.createCommandEncoder();copyEncoder.copyBufferToBuffer(gpuWriteBuffer, 0, gpuReadBuffer, 0, sizeInBytes);const copyCommands = copyEncoder.finish();device.queue.submit([copyCommands]);gpuWriteBuffer.destroy();return gpuReadBuffer;}const savedBackend = tf.getBackend();await tf.setBackend('webgpu').catch(() => 
{throw new Error('Failed to use WebGPU backend. Please use Chrome Canary to run.')});const dtype = 'float32';const device = tf.backend().device;const aData = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16];const bData = [1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4];const expected = [2, 4, 6, 8, 6, 8, 10, 12, 10, 12, 14, 16, 14, 16, 18, 20];const aBuffer = createGPUBufferFromData(device, aData, dtype);const shape = [aData.length];// To use zeroCopy, use {buffer: aBuffer, zeroCopy: true} instead and destroy// aBuffer untill all access is done.const a = tf.tensor({buffer: aBuffer}, shape, dtype);const b = tf.tensor(bData, shape, dtype);const result = tf.add(a, b);result.print();a.dispose();b.dispose();result.dispose();aBuffer.destroy();await tf.setBackend(savedBackend);Parameter values
The values of the tensor. Can be nested array of numbers, or a flat array, or a
TypedArray
(At the moment it supports Uint8Array, Uint8ClampedArray, Int32Array, Float32Array) data types, or aWebGLData
object, or aWebGPUData
object. If the values are strings, they will be encoded as utf-8 and kept asUint8Array[]
. If the values is aWebGLData
object, the dtype could only be 'float32' or 'int32' and the object has to have: 1. texture, aWebGLTexture
, the texture must share the sameWebGLRenderingContext
with TFJS's WebGL backend (you could create a custom WebGL backend from your texture's canvas) and the internal texture format for the input texture must be floating point or normalized integer; 2. height, the height of the texture; 3. width, the width of the texture; 4. channels, a non-empty subset of 'RGBA', indicating the values of which channels will be passed to the tensor, such as 'R' or 'BR' (The order of the channels affect the order of tensor values. ). (If the values passed from texture is less than the tensor size, zeros will be padded at the rear.). If the values is aWebGPUData
object, the dtype could only be 'float32' or 'int32' and the object has to have: buffer, a GPUBuffer
. The buffer must: 1. share the sameGPUDevice
with TFJS's WebGPU backend; 2. buffer.usage should at least support GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_SRC; 3. buffer.size should not be smaller than the byte size of tensor shape. WebGPUData optionally supports zero copy by flag zeroCopy. When zeroCopy is false or undefined(default),this passing GPUBuffer can be destroyed after tensor is created. When zeroCopy is true, this GPUBuffer is bound directly by the tensor, so do not destroy this GPUBuffer until all access is done.Parameter shape
The shape of the tensor. Optional. If not provided, it is inferred from
values
.Parameter dtype
The data type.
{heading: 'Tensors', subheading: 'Creation'}
function tensor1d
tensor1d: (values: TensorLike1D, dtype?: DataType) => Tensor1D;
Creates rank-1
tf.Tensor
with the provided values, shape and dtype.The same functionality can be achieved with
tf.tensor
, but in general we recommend usingtf.tensor1d
as it makes the code more readable.tf.tensor1d([1, 2, 3]).print();Parameter values
The values of the tensor. Can be array of numbers, or a
TypedArray
.Parameter dtype
The data type.
{heading: 'Tensors', subheading: 'Creation'}
function tensor2d
tensor2d: ( values: TensorLike2D, shape?: [number, number], dtype?: DataType) => Tensor2D;
Creates rank-2
tf.Tensor
with the provided values, shape and dtype.The same functionality can be achieved with
tf.tensor
, but in general we recommend usingtf.tensor2d
as it makes the code more readable.```js // Pass a nested array. tf.tensor2d([[1, 2], [3, 4]]).print();
```

```js
// Pass a flat array and specify a shape.
tf.tensor2d([1, 2, 3, 4], [2, 2]).print();
```
Parameter values
The values of the tensor. Can be nested array of numbers, or a flat array, or a
TypedArray
.Parameter shape
The shape of the tensor. If not provided, it is inferred from
values
.Parameter dtype
The data type.
{heading: 'Tensors', subheading: 'Creation'}
function tensor3d
tensor3d: ( values: TensorLike3D, shape?: [number, number, number], dtype?: DataType) => Tensor3D;
Creates rank-3
tf.Tensor
with the provided values, shape and dtype.The same functionality can be achieved with
tf.tensor
, but in general we recommend usingtf.tensor3d
as it makes the code more readable.```js // Pass a nested array. tf.tensor3d([[[1], [2]], [[3], [4]]]).print();
```

```js
// Pass a flat array and specify a shape.
tf.tensor3d([1, 2, 3, 4], [2, 2, 1]).print();
```
Parameter values
The values of the tensor. Can be nested array of numbers, or a flat array, or a
TypedArray
.Parameter shape
The shape of the tensor. If not provided, it is inferred from
values
.Parameter dtype
The data type.
{heading: 'Tensors', subheading: 'Creation'}
function tensor4d
tensor4d: ( values: TensorLike4D, shape?: [number, number, number, number], dtype?: DataType) => Tensor4D;
Creates rank-4
tf.Tensor
with the provided values, shape and dtype.The same functionality can be achieved with
tf.tensor
, but in general we recommend usingtf.tensor4d
as it makes the code more readable.```js // Pass a nested array. tf.tensor4d([[[[1], [2]], [[3], [4]]]]).print();
```

```js
// Pass a flat array and specify a shape.
tf.tensor4d([1, 2, 3, 4], [1, 2, 2, 1]).print();
```
Parameter values
The values of the tensor. Can be nested array of numbers, or a flat array, or a
TypedArray
.Parameter shape
The shape of the tensor. Optional. If not provided, it is inferred from
values
.Parameter dtype
The data type.
{heading: 'Tensors', subheading: 'Creation'}
function tensor5d
tensor5d: ( values: TensorLike5D, shape?: [number, number, number, number, number], dtype?: DataType) => Tensor5D;
Creates rank-5
tf.Tensor
with the provided values, shape and dtype.The same functionality can be achieved with
tf.tensor
, but in general we recommend usingtf.tensor5d
as it makes the code more readable.```js // Pass a nested array. tf.tensor5d([[[[[1],[2]],[[3],[4]]],[[[5],[6]],[[7],[8]]]]]).print();
```

```js
// Pass a flat array and specify a shape.
tf.tensor5d([1, 2, 3, 4, 5, 6, 7, 8], [1, 2, 2, 2, 1]).print();
```
Parameter values
The values of the tensor. Can be nested array of numbers, or a flat array, or a
TypedArray
.Parameter shape
The shape of the tensor. Optional. If not provided, it is inferred from
values
.Parameter dtype
The data type.
{heading: 'Tensors', subheading: 'Creation'}
function tensor6d
tensor6d: ( values: TensorLike6D, shape?: [number, number, number, number, number, number], dtype?: DataType) => Tensor6D;
Creates rank-6
tf.Tensor
with the provided values, shape and dtype.The same functionality can be achieved with
tf.tensor
, but in general we recommend usingtf.tensor6d
as it makes the code more readable.```js // Pass a nested array. tf.tensor6d([[[[[[1],[2]],[[3],[4]]],[[[5],[6]],[[7],[8]]]]]]).print();
```

```js
// Pass a flat array and specify a shape.
tf.tensor6d([1, 2, 3, 4, 5, 6, 7, 8], [1, 1, 2, 2, 2, 1]).print();
```
Parameter values
The values of the tensor. Can be nested array of numbers, or a flat array, or a
TypedArray
.Parameter shape
The shape of the tensor. Optional. If not provided, it is inferred from
values
.Parameter dtype
The data type.
{heading: 'Tensors', subheading: 'Creation'}
function tidy
tidy: <T extends TensorContainer>( nameOrFn: string | ScopeFn<T>, fn?: ScopeFn<T>) => T;
Executes the provided function
fn
and after it is executed, cleans up all intermediate tensors allocated byfn
except those returned byfn
.fn
must not return a Promise (async functions not allowed). The returned result can be a complex object.Using this method helps avoid memory leaks. In general, wrap calls to operations in
tf.tidy
for automatic memory cleanup.NOTE: Variables do *not* get cleaned up when inside a tidy(). If you want to dispose variables, please use
tf.disposeVariables
or call dispose() directly on variables.// y = 2 ^ 2 + 1const y = tf.tidy(() => {// a, b, and one will be cleaned up when the tidy ends.const one = tf.scalar(1);const a = tf.scalar(2);const b = a.square();console.log('numTensors (in tidy): ' + tf.memory().numTensors);// The value returned inside the tidy function will return// through the tidy, in this case to the variable y.return b.add(one);});console.log('numTensors (outside tidy): ' + tf.memory().numTensors);y.print();Parameter nameOrFn
The name of the closure, or the function to execute. If a name is provided, the 2nd argument should be the function. If debug mode is on, the timing and the memory usage of the function will be tracked and displayed on the console using the provided name.
Parameter fn
The function to execute.
{heading: 'Performance', subheading: 'Memory'}
function time
time: (f: () => void) => Promise<TimingInfo>;
Executes
f()
and returns a promise that resolves with timing information.The result is an object with the following properties:
-
wallMs
: Wall execution time. -kernelMs
: Kernel execution time, ignoring data transfer. If using the WebGL backend and the query timer extension is not available, this will return an error object. - OnWebGL
The following additional properties exist: -uploadWaitMs
: CPU blocking time on texture uploads. -downloadWaitMs
: CPU blocking time on texture downloads (readPixels).const x = tf.randomNormal([20, 20]);const time = await tf.time(() => x.matMul(x));console.log(`kernelMs: ${time.kernelMs}, wallTimeMs: ${time.wallMs}`);Parameter f
The function to execute and time.
{heading: 'Performance', subheading: 'Timing'}
function unregisterGradient
unregisterGradient: (kernelName: string) => void;
Removes the registered gradient from the global registry.
function unregisterKernel
unregisterKernel: (kernelName: string, backendName: string) => void;
Removes the kernel function from the registry.
Parameter kernelName
The official name of the kernel.
Parameter backendName
The official name of the backend.
function upcastType
upcastType: (typeA: DataType, typeB: DataType) => DataType;
function upperBound
upperBound: ( sortedSequence: Tensor | TensorLike, values: Tensor | TensorLike) => Tensor;
Searches for where a value would go in a sorted sequence.
This is not a method for checking containment (like javascript in).
The typical use case for this operation is "binning", "bucketing", or "discretizing". The values are assigned to bucket-indices based on the edges listed in 'sortedSequence'. This operation returns the bucket-index for each value.
The index returned corresponds to the first edge greater than the value.
The axis is not settable for this operation. It always operates on the innermost dimension (axis=-1). The operation will accept any number of outer dimensions.
Note: This operation assumes that 'sortedSequence' is sorted along the innermost axis, maybe using 'sort(..., axis=-1)'. If the sequence is not sorted no error is raised and the content of the returned tensor is not well defined.
const seq = tf.tensor1d([0, 3, 9, 10, 10]);const values = tf.tensor1d([0, 4, 10]);const result = tf.upperBound(seq, values);result.print(); // [1, 2, 5]Parameter sortedSequence
: N-D. Sorted sequence.
Parameter values
: N-D. Search values. An N-D int32 tensor the size of values containing the result of applying upper bound to each value. The result is not a global index to the entire Tensor, but the index in the last dimension. {heading: 'Operations', subheading: 'Evaluation'}
function valueAndGrad
valueAndGrad: <I extends Tensor<Rank>, O extends Tensor<Rank>>( f: (x: I) => O) => (x: I, dy?: O) => { value: O; grad: I };
Like
tf.grad
, but also returns the value of f()
. Useful whenf()
returns a metric you want to show.The result is a rich object with the following properties: - grad: The gradient of
f(x)
w.r.t.x
(result oftf.grad
). - value: The value returned byf(x)
.// f(x) = x ^ 2const f = x => x.square();// f'(x) = 2xconst g = tf.valueAndGrad(f);const x = tf.tensor1d([2, 3]);const {value, grad} = g(x);console.log('value');value.print();console.log('grad');grad.print();{heading: 'Training', subheading: 'Gradients'}
function valueAndGrads
valueAndGrads: <O extends Tensor<Rank>>( f: (...args: Tensor[]) => O) => (args: Tensor[], dy?: O) => { grads: Tensor[]; value: O };
Like
tf.grads
, but also returns the value of f()
. Useful whenf()
returns a metric you want to show.The result is a rich object with the following properties: - grads: The gradients of
f()
w.r.t. each input (result oftf.grads
). - value: The value returned byf(x)
.// f(a, b) = a * bconst f = (a, b) => a.mul(b);// df/da = b, df/db = aconst g = tf.valueAndGrads(f);const a = tf.tensor1d([2, 3]);const b = tf.tensor1d([-2, -3]);const {value, grads} = g([a, b]);const [da, db] = grads;console.log('value');value.print();console.log('da');da.print();console.log('db');db.print();{heading: 'Training', subheading: 'Gradients'}
function variable
variable: <R extends Rank>( initialValue: Tensor<R>, trainable?: boolean, name?: string, dtype?: DataType) => Variable<R>;
Creates a new variable with the provided initial value.
const x = tf.variable(tf.tensor([1, 2, 3]));x.assign(tf.tensor([4, 5, 6]));x.print();Parameter initialValue
Initial value for the tensor.
Parameter trainable
If true, optimizers are allowed to update it.
Parameter name
Name of the variable. Defaults to a unique id.
Parameter dtype
If set, initialValue will be converted to the given type.
{heading: 'Tensors', subheading: 'Creation'}
function variableGrads
variableGrads: ( f: () => Scalar, varList?: Variable[]) => { value: Scalar; grads: NamedTensorMap };
Computes and returns the gradient of f(x) with respect to the list of trainable variables provided by
varList
. If no list is provided, it defaults to all trainable variables.const a = tf.variable(tf.tensor1d([3, 4]));const b = tf.variable(tf.tensor1d([5, 6]));const x = tf.tensor1d([1, 2]);// f(a, b) = a * x ^ 2 + b * xconst f = () => a.mul(x.square()).add(b.mul(x)).sum();// df/da = x ^ 2, df/db = xconst {value, grads} = tf.variableGrads(f);Object.keys(grads).forEach(varName => grads[varName].print());Parameter f
The function to execute. f() should return a scalar.
Parameter varList
The list of variables to compute the gradients with respect to. Defaults to all trainable variables.
Returns
An object with the following keys and values: -
value
: The value of the functionf
. -grads
: A map from the names of the variables to the gradients. If thevarList
argument is provided explicitly and contains a subset of non-trainable variables, this map in the return value will contain keys that map the names of the non-trainable variables tonull
.{heading: 'Training', subheading: 'Gradients'}
function zeros
zeros: <R extends Rank>(shape: ShapeMap[R], dtype?: DataType) => Tensor<R>;
Creates a
tf.Tensor
with all elements set to 0.tf.zeros([2, 2]).print();Parameter shape
An array of integers defining the output tensor shape.
Parameter dtype
The type of an element in the resulting tensor. Can be 'float32', 'int32' or 'bool'. Defaults to 'float32'.
{heading: 'Tensors', subheading: 'Creation'}
Classes
class AdadeltaOptimizer
class AdadeltaOptimizer extends Optimizer {}
Optimizer
constructor
constructor(learningRate: number, rho: number, epsilon?: number);
property className
static readonly className: string;
property epsilon
protected epsilon: number;
property learningRate
protected learningRate: number;
property rho
protected rho: number;
method applyGradients
applyGradients: (variableGradients: NamedVariableMap | NamedTensor[]) => void;
method dispose
dispose: () => void;
method fromConfig
static fromConfig: <T extends Serializable>( cls: SerializableConstructor<T>, config: ConfigDict) => T;
method getConfig
getConfig: () => ConfigDict;
method getWeights
getWeights: () => Promise<NamedTensor[]>;
method setWeights
setWeights: (weightValues: NamedTensor[]) => Promise<void>;
class AdagradOptimizer
class AdagradOptimizer extends Optimizer {}
Optimizer
constructor
constructor(learningRate: number, initialAccumulatorValue?: number);
property className
static readonly className: string;
property learningRate
protected learningRate: number;
method applyGradients
applyGradients: (variableGradients: NamedVariableMap | NamedTensor[]) => void;
method dispose
dispose: () => void;
method fromConfig
static fromConfig: <T extends Serializable>( cls: SerializableConstructor<T>, config: ConfigDict) => T;
method getConfig
getConfig: () => ConfigDict;
method getWeights
getWeights: () => Promise<NamedTensor[]>;
method setWeights
setWeights: (weightValues: NamedTensor[]) => Promise<void>;
class AdamaxOptimizer
class AdamaxOptimizer extends Optimizer {}
constructor
constructor( learningRate: number, beta1: number, beta2: number, epsilon?: number, decay?: number);
property beta1
protected beta1: number;
property beta2
protected beta2: number;
property className
static readonly className: string;
property decay
protected decay: number;
property epsilon
protected epsilon: number;
property learningRate
protected learningRate: number;
method applyGradients
applyGradients: (variableGradients: NamedVariableMap | NamedTensor[]) => void;
method dispose
dispose: () => void;
method fromConfig
static fromConfig: <T extends Serializable>( cls: SerializableConstructor<T>, config: ConfigDict) => T;
method getConfig
getConfig: () => ConfigDict;
method getWeights
getWeights: () => Promise<NamedTensor[]>;
method setWeights
setWeights: (weightValues: NamedTensor[]) => Promise<void>;
class AdamOptimizer
class AdamOptimizer extends Optimizer {}
constructor
constructor( learningRate: number, beta1: number, beta2: number, epsilon?: number);
property beta1
protected beta1: number;
property beta2
protected beta2: number;
property className
static readonly className: string;
property epsilon
protected epsilon: number;
property learningRate
protected learningRate: number;
method applyGradients
applyGradients: (variableGradients: NamedVariableMap | NamedTensor[]) => void;
method dispose
dispose: () => void;
method fromConfig
static fromConfig: <T extends Serializable>( cls: SerializableConstructor<T>, config: ConfigDict) => T;
method getConfig
getConfig: () => ConfigDict;
method getWeights
getWeights: () => Promise<NamedTensor[]>;
method setWeights
setWeights: (weightValues: NamedTensor[]) => Promise<void>;
class DataStorage
class DataStorage<T> {}
Convenient class for storing tensor-related data.
constructor
constructor(backend: KernelBackend, dataMover: DataMover);
method delete
delete: (dataId: DataId) => boolean;
method get
get: (dataId: DataId) => T;
method has
has: (dataId: DataId) => boolean;
method numDataIds
numDataIds: () => number;
method set
set: (dataId: DataId, value: T) => void;
class Environment
class Environment {}
The environment contains evaluated flags as well as the registered platform. This is always used as a global singleton and can be retrieved with
tf.env()
.{heading: 'Environment'}
constructor
constructor(global: any);
property features
readonly features: Flags;
property getQueryParams
getQueryParams: (queryString: string) => { [key: string]: string };
property global
global: any;
property platform
platform: Platform;
property platformName
platformName: string;
method get
get: (flagName: string) => FlagValue;
method getAsync
getAsync: (flagName: string) => Promise<FlagValue>;
method getBool
getBool: (flagName: string) => boolean;
method getFlags
getFlags: () => Flags;
method getNumber
getNumber: (flagName: string) => number;
method getString
getString: (flagName: string) => string;
method registerFlag
registerFlag: ( flagName: string, evaluationFn: FlagEvaluationFn, setHook?: (value: FlagValue) => void) => void;
method reset
reset: () => void;
method set
set: (flagName: string, value: FlagValue) => void;
method setFlags
setFlags: (flags: Flags) => void;
method setPlatform
setPlatform: (platformName: string, platform: Platform) => void;
class KernelBackend
class KernelBackend implements TensorStorage, Backend, BackendTimer {}
The interface that defines the kernels that should be implemented when adding a new backend. New backends don't need to implement every one of the methods, this can be done gradually (throw an error for unimplemented methods).
method createTensorFromGPUData
createTensorFromGPUData: ( values: WebGLData | WebGPUData, shape: number[], dtype: DataType) => Tensor;
method dispose
dispose: () => void;
method disposeData
disposeData: (dataId: object, force?: boolean) => boolean;
method epsilon
epsilon: () => number;
Returns the smallest representable number.
method floatPrecision
floatPrecision: () => 16 | 32;
Returns the highest precision for floats in bits (e.g. 16 or 32)
method incRef
incRef: (dataId: DataId) => void;
method memory
memory: () => { unreliable: boolean; reasons?: string[] };
method move
move: ( dataId: DataId, values: BackendValues, shape: number[], dtype: DataType, refCount: number) => void;
method numDataIds
numDataIds: () => number;
method read
read: (dataId: object) => Promise<BackendValues>;
method readSync
readSync: (dataId: object) => BackendValues;
method readToGPU
readToGPU: (dataId: object, options?: DataToGPUOptions) => GPUData;
method refCount
refCount: (dataId: DataId) => number;
method time
time: (f: () => void) => Promise<BackendTimingInfo>;
method timerAvailable
timerAvailable: () => boolean;
method write
write: (values: BackendValues, shape: number[], dtype: DataType) => DataId;
class MomentumOptimizer
class MomentumOptimizer extends SGDOptimizer {}
Optimizer
constructor
constructor(learningRate: number, momentum: number, useNesterov?: boolean);
property className
static readonly className: string;
property learningRate
protected learningRate: number;
method applyGradients
applyGradients: (variableGradients: NamedVariableMap | NamedTensor[]) => void;
method dispose
dispose: () => void;
method fromConfig
static fromConfig: <T extends Serializable>( cls: SerializableConstructor<T>, config: ConfigDict) => T;
method getConfig
getConfig: () => ConfigDict;
method getWeights
getWeights: () => Promise<NamedTensor[]>;
method setMomentum
setMomentum: (momentum: number) => void;
Sets the momentum of the optimizer.
Parameter momentum
method setWeights
setWeights: (weightValues: NamedTensor[]) => Promise<void>;
class Optimizer
abstract class Optimizer extends Serializable {}
{heading: 'Training', subheading: 'Classes', namespace: 'train'}
property iterations
readonly iterations: number;
The number of iterations that this optimizer instance has been invoked for.
property iterations_
protected iterations_: number;
method applyGradients
abstract applyGradients: ( variableGradients: NamedTensorMap | NamedTensor[]) => void;
Updates variables by using the computed gradients.
Parameter variableGradients
A mapping of variable name to its gradient value.
{heading: 'Training', subheading: 'Optimizers'}
method computeGradients
computeGradients: ( f: () => Scalar, varList?: Variable[]) => { value: Scalar; grads: NamedTensorMap };
Executes f() and computes the gradient of the scalar output of f() with respect to the list of trainable variables provided by
varList
. If no list is provided, it defaults to all trainable variables.Parameter f
The function to execute and whose output to use for computing gradients with respect to variables.
Parameter varList
An optional list of variables to compute gradients with respect to. If specified, gradients will be computed only with respect to the trainable variables in varList. Defaults to all trainable variables.
{heading: 'Training', subheading: 'Optimizers'}
method dispose
dispose: () => void;
Dispose the variables (if any) owned by this optimizer instance.
method extractIterations
protected extractIterations: ( weightValues: NamedTensor[]) => Promise<NamedTensor[]>;
Extract the first element of the weight values and set it as the iterations counter variable of this instance of optimizer.
Parameter weightValues
Returns
Weight values with the first element consumed and excluded.
method getWeights
getWeights: () => Promise<NamedTensor[]>;
method incrementIterations
protected incrementIterations: () => void;
method minimize
minimize: ( f: () => Scalar, returnCost?: boolean, varList?: Variable[]) => Scalar | null;
Executes
f()
and minimizes the scalar output off()
by computing gradients of y with respect to the list of trainable variables provided byvarList
. If no list is provided, it defaults to all trainable variables.Parameter f
The function to execute and whose output to minimize.
Parameter returnCost
Whether to return the scalar cost value produced by executing
f()
.Parameter varList
An optional list of variables to update. If specified, only the trainable variables in varList will be updated by minimize. Defaults to all trainable variables.
{heading: 'Training', subheading: 'Optimizers'}
method saveIterations
saveIterations: () => Promise<NamedTensor>;
method setWeights
setWeights: (weightValues: NamedTensor[]) => Promise<void>;
class OptimizerConstructors
class OptimizerConstructors {}
method adadelta
static adadelta: ( learningRate?: number, rho?: number, epsilon?: number) => AdadeltaOptimizer;
Constructs a
tf.AdadeltaOptimizer
that uses the Adadelta algorithm. See [https://arxiv.org/abs/1212.5701](https://arxiv.org/abs/1212.5701)Parameter learningRate
The learning rate to use for the Adadelta gradient descent algorithm.
Parameter rho
The learning rate decay over each update.
Parameter epsilon
A constant epsilon used to better condition the grad update.
{heading: 'Training', subheading: 'Optimizers', namespace: 'train'}
method adagrad
static adagrad: ( learningRate: number, initialAccumulatorValue?: number) => AdagradOptimizer;
Constructs a
tf.AdagradOptimizer
that uses the Adagrad algorithm. See [http://www.jmlr.org/papers/volume12/duchi11a/duchi11a.pdf]( http://www.jmlr.org/papers/volume12/duchi11a/duchi11a.pdf) or [http://ruder.io/optimizing-gradient-descent/index.html#adagrad]( http://ruder.io/optimizing-gradient-descent/index.html#adagrad)Parameter learningRate
The learning rate to use for the Adagrad gradient descent algorithm.
Parameter initialAccumulatorValue
Starting value for the accumulators, must be positive.
{heading: 'Training', subheading: 'Optimizers', namespace: 'train'}
method adam
static adam: ( learningRate?: number, beta1?: number, beta2?: number, epsilon?: number) => AdamOptimizer;
Constructs a
tf.AdamOptimizer
that uses the Adam algorithm. See [https://arxiv.org/abs/1412.6980](https://arxiv.org/abs/1412.6980)Parameter learningRate
The learning rate to use for the Adam gradient descent algorithm.
Parameter beta1
The exponential decay rate for the 1st moment estimates.
Parameter beta2
The exponential decay rate for the 2nd moment estimates.
Parameter epsilon
A small constant for numerical stability.
{heading: 'Training', subheading: 'Optimizers', namespace: 'train'}
method adamax
static adamax: ( learningRate?: number, beta1?: number, beta2?: number, epsilon?: number, decay?: number) => AdamaxOptimizer;
Constructs a
tf.AdamaxOptimizer
that uses the Adamax algorithm. See [https://arxiv.org/abs/1412.6980](https://arxiv.org/abs/1412.6980)Parameter learningRate
The learning rate to use for the Adamax gradient descent algorithm.
Parameter beta1
The exponential decay rate for the 1st moment estimates.
Parameter beta2
The exponential decay rate for the 2nd moment estimates.
Parameter epsilon
A small constant for numerical stability.
Parameter decay
The learning rate decay over each update.
{heading: 'Training', subheading: 'Optimizers', namespace: 'train'}
method momentum
static momentum: ( learningRate: number, momentum: number, useNesterov?: boolean) => MomentumOptimizer;
Constructs a
tf.MomentumOptimizer
that uses momentum gradient descent.See [http://proceedings.mlr.press/v28/sutskever13.pdf]( http://proceedings.mlr.press/v28/sutskever13.pdf)
Parameter learningRate
The learning rate to use for the Momentum gradient descent algorithm.
Parameter momentum
The momentum to use for the momentum gradient descent algorithm.
{heading: 'Training', subheading: 'Optimizers', namespace: 'train'}
method rmsprop
static rmsprop: ( learningRate: number, decay?: number, momentum?: number, epsilon?: number, centered?: boolean) => RMSPropOptimizer;
Constructs a
tf.RMSPropOptimizer
that uses RMSProp gradient descent. This implementation uses plain momentum and is not the centered version of RMSProp.

See [http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf](http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf)
Parameter learningRate
The learning rate to use for the RMSProp gradient descent algorithm.
Parameter decay
The discounting factor for the history/coming gradient.
Parameter momentum
The momentum to use for the RMSProp gradient descent algorithm.
Parameter epsilon
Small value to avoid zero denominator.
Parameter centered
If true, gradients are normalized by the estimated variance of the gradient.
{heading: 'Training', subheading: 'Optimizers', namespace: 'train'}
method sgd
static sgd: (learningRate: number) => SGDOptimizer;
Constructs a
tf.SGDOptimizer
that uses stochastic gradient descent.

// Fit a quadratic function by learning the coefficients a, b, c.
const xs = tf.tensor1d([0, 1, 2, 3]);
const ys = tf.tensor1d([1.1, 5.9, 16.8, 33.9]);

const a = tf.scalar(Math.random()).variable();
const b = tf.scalar(Math.random()).variable();
const c = tf.scalar(Math.random()).variable();

// y = a * x^2 + b * x + c.
const f = x => a.mul(x.square()).add(b.mul(x)).add(c);
const loss = (pred, label) => pred.sub(label).square().mean();

const learningRate = 0.01;
const optimizer = tf.train.sgd(learningRate);

// Train the model.
for (let i = 0; i < 10; i++) {
  optimizer.minimize(() => loss(f(xs), ys));
}

// Make predictions.
console.log(`a: ${a.dataSync()}, b: ${b.dataSync()}, c: ${c.dataSync()}`);
const preds = f(xs).dataSync();
preds.forEach((pred, i) => {
  console.log(`x: ${i}, pred: ${pred}`);
});

Parameter learningRate
The learning rate to use for the SGD algorithm.
{heading: 'Training', subheading: 'Optimizers', namespace: 'train'}
class RMSPropOptimizer
class RMSPropOptimizer extends Optimizer {}
Optimizer
constructor
constructor( learningRate: number, decay?: number, momentum?: number, epsilon?: number, centered?: boolean);
property className
static readonly className: string;
property decay
protected decay: number;
property epsilon
protected epsilon: number;
property learningRate
protected learningRate: number;
property momentum
protected momentum: number;
method applyGradients
applyGradients: (variableGradients: NamedTensorMap | NamedTensor[]) => void;
method dispose
dispose: () => void;
method fromConfig
static fromConfig: <T extends Serializable>( cls: SerializableConstructor<T>, config: ConfigDict) => T;
method getConfig
getConfig: () => ConfigDict;
method getWeights
getWeights: () => Promise<NamedTensor[]>;
method setWeights
setWeights: (weightValues: NamedTensor[]) => Promise<void>;
class SGDOptimizer
class SGDOptimizer extends Optimizer {}
Optimizer
constructor
constructor(learningRate: number);
property c
protected c: Scalar;
property className
static readonly className: string;
property learningRate
protected learningRate: number;
method applyGradients
applyGradients: (variableGradients: NamedTensorMap | NamedTensor[]) => void;
method dispose
dispose: () => void;
method fromConfig
static fromConfig: <T extends Serializable>( cls: SerializableConstructor<T>, config: ConfigDict) => T;
method getConfig
getConfig: () => ConfigDict;
method getWeights
getWeights: () => Promise<NamedTensor[]>;
method setLearningRate
setLearningRate: (learningRate: number) => void;
Sets the learning rate of the optimizer.
method setWeights
setWeights: (weightValues: NamedTensor[]) => Promise<void>;
class Tensor
class Tensor<R extends Rank = Rank> implements TensorInfo {}
A
tf.Tensor
object represents an immutable, multidimensional array of numbers that has a shape and a data type.For performance reasons, functions that create tensors do not necessarily perform a copy of the data passed to them (e.g. if the data is passed as a
Float32Array
), and changes to the data will change the tensor. This is not a feature and is not supported. To avoid this behavior, use the tensor before changing the input data or create a copy withcopy = tf.add(yourTensor, 0)
.See
tf.tensor
for details on how to create atf.Tensor
.{heading: 'Tensors', subheading: 'Classes'}
constructor
constructor( shape: | number[] | [number, number] | [number] | [number, number, number] | [number, number, number, number] | [number, number, number, number, number] | [number, number, number, number, number, number], dtype: keyof DataTypeMap, dataId: {}, id: number);
property dataId
dataId: {};
Id of the bucket holding the data for this tensor. Multiple arrays can point to the same bucket (e.g. when calling array.reshape()).
property dtype
readonly dtype: keyof DataTypeMap;
The data type for the array.
property id
readonly id: number;
Unique id of this tensor.
property isDisposed
readonly isDisposed: boolean;
property isDisposedInternal
protected isDisposedInternal: boolean;
property kept
kept: boolean;
Whether this tensor has been globally kept.
property kerasMask
kerasMask?: Tensor<Rank>;
The keras mask that some keras layers attach to the tensor
property rank
readonly rank: number;
property rankType
readonly rankType: Rank;
The rank type for the array (see
Rank
enum).
property scopeId
scopeId: number;
The id of the scope this tensor is being tracked in.
property shape
readonly shape: | number[] | [number, number] | [number] | [number, number, number] | [number, number, number, number] | [number, number, number, number, number] | [number, number, number, number, number, number];
The shape of the tensor.
property size
readonly size: number;
Number of elements in the tensor.
property strides
readonly strides: number[];
Number of elements to skip in each dimension when indexing. See https://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.strides.html
method array
array: () => Promise<ArrayMap[R]>;
Returns the tensor data as a nested array. The transfer of data is done asynchronously.
{heading: 'Tensors', subheading: 'Classes'}
method arraySync
arraySync: () => ArrayMap[R];
Returns the tensor data as a nested array. The transfer of data is done synchronously.
{heading: 'Tensors', subheading: 'Classes'}
method buffer
buffer: <D extends keyof DataTypeMap = 'float32'>() => Promise< TensorBuffer<R, D>>;
Returns a promise of
tf.TensorBuffer
that holds the underlying data.{heading: 'Tensors', subheading: 'Classes'}
method bufferSync
bufferSync: <D extends keyof DataTypeMap = 'float32'>() => TensorBuffer<R, D>;
Returns a
tf.TensorBuffer
that holds the underlying data. {heading: 'Tensors', subheading: 'Classes'}
method bytes
bytes: () => Promise<Uint8Array[] | Uint8Array>;
Returns the underlying bytes of the tensor's data.
method clone
clone: <T extends Tensor<Rank>>(this: T) => T;
Returns a copy of the tensor. See
tf.clone
for details. {heading: 'Tensors', subheading: 'Classes'}
method data
data: <D extends keyof DataTypeMap = NumericDataType>() => Promise< DataTypeMap[D]>;
Asynchronously downloads the values from the
tf.Tensor
. Returns a promise ofTypedArray
that resolves when the computation has finished.{heading: 'Tensors', subheading: 'Classes'}
method dataSync
dataSync: <D extends keyof DataTypeMap = NumericDataType>() => DataTypeMap[D];
Synchronously downloads the values from the
tf.Tensor
. This blocks the UI thread until the values are ready, which can cause performance issues.{heading: 'Tensors', subheading: 'Classes'}
method dataToGPU
dataToGPU: (options?: DataToGPUOptions) => GPUData;
Copy the tensor's data to a new GPU resource. Comparing to the
dataSync()
anddata()
, this method prevents data from being downloaded to the CPU.

For the WebGL backend, the data will be stored on a densely packed texture. This means that the texture will use the RGBA channels to store values.
For the WebGPU backend, the data will be stored on a buffer. This method takes no sizing options, so a user-defined size cannot be used to create the buffer.
Parameter options
: For WebGL, - customTexShape: Optional. If set, will use the user defined texture shape to create the texture.
Returns
For WebGL backend, a GPUData contains the new texture and its information. { tensorRef: The tensor that is associated with this texture, texture: WebGLTexture, texShape: [number, number] // [height, width] }
For WebGPU backend, a GPUData contains the new buffer. { tensorRef: The tensor that is associated with this buffer, buffer: GPUBuffer, }
Remember to dispose the GPUData after it is used by
res.tensorRef.dispose()
.{heading: 'Tensors', subheading: 'Classes'}
method dispose
dispose: () => void;
Disposes
tf.Tensor
from memory.{heading: 'Tensors', subheading: 'Classes'}
method print
print: (verbose?: boolean) => void;
Prints the
tf.Tensor
. Seetf.print
for details.Parameter verbose
Whether to print verbose information about the tensor, including dtype and size.
{heading: 'Tensors', subheading: 'Classes'}
method throwIfDisposed
throwIfDisposed: () => void;
method toString
toString: (verbose?: boolean) => string;
Returns a human-readable description of the tensor. Useful for logging.
{heading: 'Tensors', subheading: 'Classes'}
method variable
variable: (trainable?: boolean, name?: string, dtype?: DataType) => Variable<R>;
class TensorBuffer
class TensorBuffer<R extends Rank, D extends DataType = 'float32'> {}
A mutable object, similar to
tf.Tensor
, that allows users to set values at locations before converting to an immutabletf.Tensor
.See
tf.buffer
for creating a tensor buffer.{heading: 'Tensors', subheading: 'Classes'}
constructor
constructor( shape: | number[] | [number, number] | [number] | [number, number, number] | [number, number, number, number] | [number, number, number, number, number] | [number, number, number, number, number, number], dtype: keyof DataTypeMap, values?: string[] | Uint8Array | Float32Array | Int32Array);
property dtype
dtype: keyof DataTypeMap;
property rank
readonly rank: number;
property shape
shape: | number[] | [number, number] | [number] | [number, number, number] | [number, number, number, number] | [number, number, number, number, number] | [number, number, number, number, number, number];
property size
size: number;
property strides
strides: number[];
property values
values: string[] | Uint8Array | Float32Array | Int32Array;
method get
get: (...locs: number[]) => SingleValueMap[D];
Returns the value in the buffer at the provided location.
Parameter locs
The location indices.
{heading: 'Tensors', subheading: 'Creation'}
method indexToLoc
indexToLoc: (index: number) => number[];
method locToIndex
locToIndex: (locs: number[]) => number;
method set
set: (value: SingleValueMap[D], ...locs: number[]) => void;
Sets a value in the buffer at a given location.
Parameter value
The value to set.
Parameter locs
The location indices.
{heading: 'Tensors', subheading: 'Creation'}
method toTensor
toTensor: () => Tensor<R>;
Creates an immutable
tf.Tensor
object from the buffer.{heading: 'Tensors', subheading: 'Creation'}
class Variable
class Variable<R extends Rank = Rank> extends Tensor<R> {}
A mutable
tf.Tensor
, useful for persisting state, e.g. for training.{heading: 'Tensors', subheading: 'Classes'}
constructor
constructor( initialValue: Tensor<R>, trainable: boolean, name: string, tensorId: number);
property name
name: string;
property trainable
trainable: boolean;
method assign
assign: (newValue: Tensor<R>) => void;
Assign a new
tf.Tensor
to this variable. The newtf.Tensor
must have the same shape and dtype as the oldtf.Tensor
.Parameter newValue
New tensor to be assigned to this variable.
{heading: 'Tensors', subheading: 'Classes'}
method dispose
dispose: () => void;
Interfaces
interface AllAttrs
interface AllAttrs {}
interface AnyAttrs
interface AnyAttrs {}
interface ArgMaxAttrs
interface ArgMaxAttrs {}
property axis
axis: number;
interface ArgMinAttrs
interface ArgMinAttrs {}
property axis
axis: number;
interface AvgPool3DAttrs
interface AvgPool3DAttrs {}
property dataFormat
dataFormat: 'NDHWC' | 'NCDHW';
property dimRoundingMode
dimRoundingMode?: 'floor' | 'round' | 'ceil';
property filterSize
filterSize: [number, number, number] | number;
property pad
pad: 'valid' | 'same' | number;
property strides
strides: [number, number, number] | number;
interface AvgPool3DGradAttrs
interface AvgPool3DGradAttrs {}
property dimRoundingMode
dimRoundingMode?: 'floor' | 'round' | 'ceil';
property filterSize
filterSize: [number, number, number] | number;
property pad
pad: 'valid' | 'same' | number;
property strides
strides: [number, number, number] | number;
interface AvgPoolAttrs
interface AvgPoolAttrs {}
property dimRoundingMode
dimRoundingMode?: 'floor' | 'round' | 'ceil';
property filterSize
filterSize: [number, number] | number;
property pad
pad: 'valid' | 'same' | number | ExplicitPadding;
property strides
strides: [number, number] | number;
interface AvgPoolGradAttrs
interface AvgPoolGradAttrs {}
property filterSize
filterSize: [number, number] | number;
property pad
pad: 'valid' | 'same' | number | ExplicitPadding;
property strides
strides: [number, number] | number;
interface BackendTimingInfo
interface BackendTimingInfo {}
property kernelMs
kernelMs: | number | { error: string; };
method getExtraProfileInfo
getExtraProfileInfo: () => string;
interface BatchMatMulAttrs
interface BatchMatMulAttrs {}
property transposeA
transposeA: boolean;
property transposeB
transposeB: boolean;
interface BatchToSpaceNDAttrs
interface BatchToSpaceNDAttrs {}
property blockShape
blockShape: number[];
property crops
crops: number[][];
interface BincountAttrs
interface BincountAttrs {}
property size
size: number;
interface BroadCastToAttrs
interface BroadCastToAttrs {}
property inputShape
inputShape: number[];
property shape
shape: number[];
interface ClipByValueAttrs
interface ClipByValueAttrs {}
property clipValueMax
clipValueMax: number;
property clipValueMin
clipValueMin: number;
interface ConcatAttrs
interface ConcatAttrs {}
property axis
axis: number;
interface Conv2DAttrs
interface Conv2DAttrs {}
property dataFormat
dataFormat: 'NHWC' | 'NCHW';
property dilations
dilations: [number, number] | number;
property dimRoundingMode
dimRoundingMode?: 'floor' | 'round' | 'ceil';
property pad
pad: 'valid' | 'same' | number | ExplicitPadding;
property strides
strides: [number, number] | number;
interface Conv2DBackpropFilterAttrs
interface Conv2DBackpropFilterAttrs {}
property dataFormat
dataFormat: 'NHWC' | 'NCHW';
property dimRoundingMode
dimRoundingMode?: 'floor' | 'round' | 'ceil';
property filterShape
filterShape: [number, number, number, number];
property pad
pad: 'valid' | 'same' | number | ExplicitPadding;
property strides
strides: [number, number] | number;
interface Conv2DBackpropInputAttrs
interface Conv2DBackpropInputAttrs {}
property dataFormat
dataFormat: 'NHWC' | 'NCHW';
property dimRoundingMode
dimRoundingMode?: 'floor' | 'round' | 'ceil';
property inputShape
inputShape: [number, number, number, number];
property pad
pad: 'valid' | 'same' | number | ExplicitPadding;
property strides
strides: [number, number] | number;
interface Conv3DAttrs
interface Conv3DAttrs {}
property dataFormat
dataFormat: 'NDHWC' | 'NCDHW';
property dilations
dilations: [number, number, number] | number;
property pad
pad: 'valid' | 'same';
property strides
strides: [number, number, number] | number;
interface Conv3DBackpropFilterV2Attrs
interface Conv3DBackpropFilterV2Attrs {}
property filterShape
filterShape: [number, number, number, number, number];
property pad
pad: 'valid' | 'same';
property strides
strides: [number, number, number] | number;
interface Conv3DBackpropInputV2Attrs
interface Conv3DBackpropInputV2Attrs {}
property inputShape
inputShape: [number, number, number, number, number];
property pad
pad: 'valid' | 'same';
property strides
strides: [number, number, number] | number;
interface CropAndResizeAttrs
interface CropAndResizeAttrs {}
property cropSize
cropSize: [number, number];
property extrapolationValue
extrapolationValue: number;
property method
method: 'bilinear' | 'nearest';
interface CumprodAttrs
interface CumprodAttrs {}
interface CumsumAttrs
interface CumsumAttrs {}
interface DataMover
interface DataMover {}
method moveData
moveData: (backend: KernelBackend, dataId: DataId) => void;
To be called by backends whenever they see a dataId that they don't own. Upon calling this method, the mover will fetch the tensor from another backend and register it with the current active backend.
interface DataToGPUWebGLOption
interface DataToGPUWebGLOption {}
property customTexShape
customTexShape?: [number, number];
interface DataTypeMap
interface DataTypeMap {}
interface DenseBincountAttrs
interface DenseBincountAttrs {}
property binaryOutput
binaryOutput?: boolean;
property size
size: number;
interface DepthToSpaceAttrs
interface DepthToSpaceAttrs {}
property blockSize
blockSize: number;
property dataFormat
dataFormat: 'NHWC' | 'NCHW';
interface DepthwiseConv2dNativeAttrs
interface DepthwiseConv2dNativeAttrs {}
property dataFormat
dataFormat: 'NHWC' | 'NCHW';
property dilations
dilations: [number, number] | number;
property dimRoundingMode
dimRoundingMode?: 'floor' | 'round' | 'ceil';
property pad
pad: 'valid' | 'same' | number | ExplicitPadding;
property strides
strides: [number, number] | number;
interface DepthwiseConv2dNativeBackpropFilterAttrs
interface DepthwiseConv2dNativeBackpropFilterAttrs {}
property dilations
dilations: [number, number] | number;
property dimRoundingMode
dimRoundingMode?: 'floor' | 'round' | 'ceil';
property filterShape
filterShape: [number, number, number, number];
property pad
pad: 'valid' | 'same' | number | ExplicitPadding;
property strides
strides: [number, number] | number;
interface DepthwiseConv2dNativeBackpropInputAttrs
interface DepthwiseConv2dNativeBackpropInputAttrs {}
property dilations
dilations: [number, number] | number;
property dimRoundingMode
dimRoundingMode?: 'floor' | 'round' | 'ceil';
property inputShape
inputShape: [number, number, number, number];
property pad
pad: 'valid' | 'same' | number | ExplicitPadding;
property strides
strides: [number, number] | number;
interface Dilation2DAttrs
interface Dilation2DAttrs {}
interface DrawAttrs
interface DrawAttrs {}
interface EinsumAttrs
interface EinsumAttrs {}
property equation
equation: string;
interface ExpandDimsAttrs
interface ExpandDimsAttrs {}
property dim
dim: number;
interface FillAttrs
interface FillAttrs {}
interface FromPixelsAttrs
interface FromPixelsAttrs {}
property numChannels
numChannels: number;
interface FromPixelsInputs
interface FromPixelsInputs {}
property pixels
pixels: | PixelData | ImageData | HTMLImageElement | HTMLCanvasElement | HTMLVideoElement | ImageBitmap;
interface FusedBatchNormAttrs
interface FusedBatchNormAttrs {}
property varianceEpsilon
varianceEpsilon: number;
interface FusedConv2DAttrs
interface FusedConv2DAttrs {}
property activation
activation: Activation;
property dataFormat
dataFormat: 'NHWC' | 'NCHW';
property dilations
dilations: [number, number] | number;
property dimRoundingMode
dimRoundingMode: 'floor' | 'round' | 'ceil';
property leakyreluAlpha
leakyreluAlpha?: number;
property pad
pad: 'valid' | 'same' | number | ExplicitPadding;
property strides
strides: [number, number] | number;
interface FusedConv2DInputs
interface FusedConv2DInputs extends NamedTensorInfoMap {}
property bias
bias?: TensorInfo;
property filter
filter: TensorInfo;
property preluActivationWeights
preluActivationWeights?: TensorInfo;
property x
x: TensorInfo;
interface FusedDepthwiseConv2DAttrs
interface FusedDepthwiseConv2DAttrs {}
property activation
activation: Activation;
property dataFormat
dataFormat: 'NHWC' | 'NCHW';
property dilations
dilations: [number, number] | number;
property dimRoundingMode
dimRoundingMode: 'floor' | 'round' | 'ceil';
property leakyreluAlpha
leakyreluAlpha?: number;
property pad
pad: 'valid' | 'same' | number | ExplicitPadding;
property strides
strides: [number, number] | number;
interface FusedDepthwiseConv2DInputs
interface FusedDepthwiseConv2DInputs extends NamedTensorInfoMap {}
property bias
bias?: TensorInfo;
property filter
filter: TensorInfo;
property preluActivationWeights
preluActivationWeights?: TensorInfo;
property x
x: TensorInfo;
interface GatherV2Attrs
interface GatherV2Attrs {}
interface GPUData
interface GPUData {}
interface GradConfig
interface GradConfig {}
Config object for registering a gradient in the global registry.
property gradFunc
gradFunc: GradFunc;
property inputsToSave
inputsToSave?: string[];
property kernelName
kernelName: string;
property outputsToSave
outputsToSave?: boolean[];
property saveAllInputs
saveAllInputs?: boolean;
interface InferenceModel
interface InferenceModel {}
Common interface for a machine learning model that can do inference.
property inputs
readonly inputs: ModelTensorInfo[];
Return the array of input tensor info.
property outputs
readonly outputs: ModelTensorInfo[];
Return the array of output tensor info.
method execute
execute: ( inputs: Tensor | Tensor[] | NamedTensorMap, outputs: string | string[]) => Tensor | Tensor[];
Executes inference for the input tensors and returns activation values for the specified output node names, without batching.
Parameter input
The input tensors. When the model has a single input, this should be a Tensor. For models with multiple inputs, pass a Tensor[] if the input order is fixed, or a NamedTensorMap otherwise.
Parameter outputs
string|string[]. List of output node names to retrieve activation from.
Returns
Activation values for the output nodes' result tensors. The return type matches the type of the outputs parameter: a single Tensor if a single output is specified, otherwise a Tensor[] for multiple outputs.
method predict
predict: ( inputs: Tensor | Tensor[] | NamedTensorMap, config: ModelPredictConfig) => Tensor | Tensor[] | NamedTensorMap;
Execute the inference for the input tensors.
Parameter input
The input tensors. When the model has a single input, this should be a Tensor. For models with multiple inputs, pass a Tensor[] if the input order is fixed, or a NamedTensorMap otherwise. For batch inference execution, the tensors for each input need to be concatenated together. For example, with MobileNet the required input shape is [1, 244, 244, 3], which represents [batch, height, width, channel]. If we provide batched data of 100 images, the input tensor should have the shape [100, 244, 244, 3].
Parameter config
Prediction configuration for specifying the batch size.
Returns
Inference result tensors. The output is a single Tensor if the model has a single output node; otherwise a Tensor[] or NamedTensorMap is returned for a model with multiple outputs.
interface KernelConfig
interface KernelConfig {}
Config object for registering a kernel in the global registry.
property backendName
backendName: string;
property disposeFunc
disposeFunc?: KernelDisposeFunc;
property kernelFunc
kernelFunc: KernelFunc;
property kernelName
kernelName: string;
property setupFunc
setupFunc?: KernelSetupFunc;
interface LeakyReluAttrs
interface LeakyReluAttrs {}
property alpha
alpha: number;
interface LinSpaceAttrs
interface LinSpaceAttrs {}
interface LogSoftmaxAttrs
interface LogSoftmaxAttrs {}
property axis
axis: number;
interface LRNAttrs
interface LRNAttrs {}
property alpha
alpha: number;
property beta
beta: number;
property bias
bias: number;
property depthRadius
depthRadius: number;
interface LRNGradAttrs
interface LRNGradAttrs {}
property alpha
alpha: number;
property beta
beta: number;
property bias
bias: number;
property depthRadius
depthRadius: number;
interface MatrixBandPartAttrs
interface MatrixBandPartAttrs {}
interface MaxAttrs
interface MaxAttrs {}
property keepDims
keepDims: boolean;
property reductionIndices
reductionIndices: number | number[];
interface MaxPool3DAttrs
interface MaxPool3DAttrs {}
property dataFormat
dataFormat: 'NDHWC' | 'NCDHW';
property dimRoundingMode
dimRoundingMode?: 'floor' | 'round' | 'ceil';
property filterSize
filterSize: [number, number, number] | number;
property pad
pad: 'valid' | 'same' | number;
property strides
strides: [number, number, number] | number;
interface MaxPool3DGradAttrs
interface MaxPool3DGradAttrs {}
property dimRoundingMode
dimRoundingMode?: 'floor' | 'round' | 'ceil';
property filterSize
filterSize: [number, number, number] | number;
property pad
pad: 'valid' | 'same' | number;
property strides
strides: [number, number, number] | number;
interface MaxPoolAttrs
interface MaxPoolAttrs {}
property dimRoundingMode
dimRoundingMode?: 'floor' | 'round' | 'ceil';
property filterSize
filterSize: [number, number] | number;
property pad
pad: 'valid' | 'same' | number | ExplicitPadding;
property strides
strides: [number, number] | number;
interface MaxPoolGradAttrs
interface MaxPoolGradAttrs {}
property dimRoundingMode
dimRoundingMode?: 'floor' | 'round' | 'ceil';
property filterSize
filterSize: [number, number] | number;
property pad
pad: 'valid' | 'same' | number | ExplicitPadding;
property strides
strides: [number, number] | number;
interface MaxPoolWithArgmaxAttrs
interface MaxPoolWithArgmaxAttrs {}
property filterSize
filterSize: [number, number] | number;
property includeBatchInIndex
includeBatchInIndex: boolean;
property pad
pad: 'valid' | 'same' | number;
property strides
strides: [number, number] | number;
interface MeanAttrs
interface MeanAttrs {}
interface MetaGraph
interface MetaGraph {}
Interface for SavedModel/GraphModel MetaGraph info.
property signatureDefs
signatureDefs: SignatureDef;
property tags
tags: string[];
interface MetaGraphInfo
interface MetaGraphInfo {}
Deprecated
Deprecated interface for SavedModel/GraphModel MetaGraph info. Use MetaGraph instead.
property signatureDefs
signatureDefs: SignatureDefInfo;
property tags
tags: string[];
interface MinAttrs
interface MinAttrs {}
interface MirrorPadAttrs
interface MirrorPadAttrs {}
interface ModelPredictConfig
interface ModelPredictConfig {}
interface ModelTensorInfo
interface ModelTensorInfo {}
Interface for model input/output tensor info.
interface MultinomialAttrs
interface MultinomialAttrs {}
property normalized
normalized: boolean;
property numSamples
numSamples: number;
property seed
seed: number;
interface NamedAttrMap
interface NamedAttrMap {}
index signature
[name: string]: Attribute;
interface NamedTensorInfoMap
interface NamedTensorInfoMap {}
index signature
[name: string]: TensorInfo | undefined;
interface NonMaxSuppressionV3Attrs
interface NonMaxSuppressionV3Attrs {}
property iouThreshold
iouThreshold: number;
property maxOutputSize
maxOutputSize: number;
property scoreThreshold
scoreThreshold: number;
interface NonMaxSuppressionV4Attrs
interface NonMaxSuppressionV4Attrs {}
property iouThreshold
iouThreshold: number;
property maxOutputSize
maxOutputSize: number;
property padToMaxOutputSize
padToMaxOutputSize: boolean;
property scoreThreshold
scoreThreshold: number;
interface NonMaxSuppressionV5Attrs
interface NonMaxSuppressionV5Attrs {}
property iouThreshold
iouThreshold: number;
property maxOutputSize
maxOutputSize: number;
property scoreThreshold
scoreThreshold: number;
property softNmsSigma
softNmsSigma: number;
interface OneHotAttrs
interface OneHotAttrs {}
interface PadV2Attrs
interface PadV2Attrs {}
property constantValue
constantValue: number;
property paddings
paddings: Array<[number, number]>;
interface PixelData
interface PixelData {}
Type for representing image data in Uint8Array type.
interface Platform
interface Platform {}
At any given time a single platform is active and represents an implementation of this interface. In practice, a platform is an environment where TensorFlow.js can be executed, e.g. the browser or Node.js.
method decode
decode: (bytes: Uint8Array, encoding: string) => string;
Decode the provided bytes into a string using the provided encoding.
method encode
encode: (text: string, encoding: string) => Uint8Array;
Encode the provided string into an array of bytes using the provided encoding.
method fetch
fetch: ( path: string, requestInits?: RequestInit, options?: RequestDetails) => Promise<Response>;
Makes an HTTP request.
Parameter path
The URL path to make a request to
Parameter init
The request init. See init here: https://developer.mozilla.org/en-US/docs/Web/API/Request/Request
method isTypedArray
isTypedArray: ( a: unknown) => a is Uint8Array | Float32Array | Int32Array | Uint8ClampedArray;
method now
now: () => number;
Returns the current high-resolution time in milliseconds relative to an arbitrary time in the past. It works across different platforms (node.js, browsers).
method setTimeoutCustom
setTimeoutCustom: (functionRef: Function, delay: number) => void;
interface ProdAttrs
interface ProdAttrs {}
interface RaggedGatherAttrs
interface RaggedGatherAttrs {}
property outputRaggedRank
outputRaggedRank: number;
interface RaggedTensorToTensorAttrs
interface RaggedTensorToTensorAttrs {}
property rowPartitionTypes
rowPartitionTypes: string[];
interface RangeAttrs
interface RangeAttrs {}
interface RecursiveArray
interface RecursiveArray<T extends any> {}
index signature
[index: number]: T | RecursiveArray<T>;
interface ReshapeAttrs
interface ReshapeAttrs {}
property shape
shape: number[];
interface ResizeBilinearAttrs
interface ResizeBilinearAttrs {}
property alignCorners
alignCorners: boolean;
property halfPixelCenters
halfPixelCenters: boolean;
property size
size: [number, number];
interface ResizeNearestNeighborAttrs
interface ResizeNearestNeighborAttrs {}
property alignCorners
alignCorners: boolean;
property halfPixelCenters
halfPixelCenters: boolean;
property size
size: [number, number];
interface ReverseAttrs
interface ReverseAttrs {}
property dims
dims: number | number[];
interface RotateWithOffsetAttrs
interface RotateWithOffsetAttrs {}
interface SavedModelTensorInfo
interface SavedModelTensorInfo {}
Deprecated
Deprecated interface for SavedModel/GraphModel signature input/output Tensor info. Use ModelTensorInfo instead.
interface ScatterNdAttrs
interface ScatterNdAttrs {}
property shape
shape: number[];
interface SearchSortedAttrs
interface SearchSortedAttrs {}
property side
side: 'left' | 'right';
interface ShapeMap
interface ShapeMap {}
number[]
interface SignatureDef
interface SignatureDef {}
Interface for SavedModel/GraphModel SignatureDef info.
index signature
[key: string]: SignatureDefEntry;
interface SignatureDefEntry
interface SignatureDefEntry {}
Interface for SavedModel/GraphModel SignatureDef entry.
interface SignatureDefInfo
interface SignatureDefInfo {}
Deprecated
Deprecated interface for SavedModel/GraphModel SignatureDef info. Use SignatureDef instead.
index signature
[key: string]: { inputs: { [key: string]: SavedModelTensorInfo; }; outputs: { [key: string]: SavedModelTensorInfo; };};
interface SliceAttrs
interface SliceAttrs {}
interface SoftmaxAttrs
interface SoftmaxAttrs {}
property dim
dim: number;
interface SpaceToBatchNDAttrs
interface SpaceToBatchNDAttrs {}
property blockShape
blockShape: number[];
property paddings
paddings: number[][];
interface SparseToDenseAttrs
interface SparseToDenseAttrs {}
property outputShape
outputShape: number[];
interface SplitVAttrs
interface SplitVAttrs {}
property axis
axis: number;
property numOrSizeSplits
numOrSizeSplits: number[] | number;
interface StaticRegexReplaceAttrs
interface StaticRegexReplaceAttrs {}
property pattern
pattern: string;
property replaceGlobal
replaceGlobal: boolean;
property rewrite
rewrite: string;
interface StridedSliceAttrs
interface StridedSliceAttrs {}
property begin
begin: number[];
property beginMask
beginMask: number;
property ellipsisMask
ellipsisMask: number;
property end
end: number[];
property endMask
endMask: number;
property newAxisMask
newAxisMask: number;
property shrinkAxisMask
shrinkAxisMask: number;
property strides
strides: number[];
interface StringNGramsAttrs
interface StringNGramsAttrs {}
property leftPad
leftPad: string;
property nGramWidths
nGramWidths: number[];
property padWidth
padWidth: number;
property preserveShortSequences
preserveShortSequences: boolean;
property rightPad
rightPad: string;
property separator
separator: string;
interface StringSplitAttrs
interface StringSplitAttrs {}
property skipEmpty
skipEmpty: boolean;
interface StringToHashBucketFastAttrs
interface StringToHashBucketFastAttrs {}
property numBuckets
numBuckets: number;
interface SumAttrs
interface SumAttrs {}
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method abs
abs: <T extends Tensor<Rank>>(this: T) => T;
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method acos
acos: <T extends Tensor<Rank>>(this: T) => T;
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method acosh
acosh: <T extends Tensor<Rank>>(this: T) => T;
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method add
add: <T extends Tensor<Rank>>(b: Tensor | TensorLike) => T;
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method all
all: <T extends Tensor<Rank>>( this: T, axis?: number | number[], keepDims?: boolean) => T;
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method any
any: <T extends Tensor<Rank>>( this: T, axis?: number | number[], keepDims?: boolean) => T;
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method argMax
argMax: <T extends Tensor<Rank>>(axis?: number) => T;
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method argMin
argMin: <T extends Tensor<Rank>>(axis?: number) => T;
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method asScalar
asScalar: <T extends Tensor<Rank>>() => Scalar;
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method asType
asType: <T extends Tensor<Rank>>(this: T, dtype: DataType) => T;
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method as1D
as1D: <T extends Tensor<Rank>>() => Tensor1D;
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method as2D
as2D: <T extends Tensor<Rank>>(rows: number, columns: number) => Tensor2D;
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method as3D
as3D: <T extends Tensor<Rank>>( rows: number, columns: number, depth: number) => Tensor3D;
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method as4D
as4D: <T extends Tensor<Rank>>( rows: number, columns: number, depth: number, depth2: number) => Tensor4D;
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method as5D
as5D: <T extends Tensor<Rank>>( rows: number, columns: number, depth: number, depth2: number, depth3: number) => Tensor5D;
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method asin
asin: <T extends Tensor<Rank>>(this: T) => T;
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method asinh
asinh: <T extends Tensor<Rank>>(this: T) => T;
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method atan
atan: <T extends Tensor<Rank>>(this: T) => T;
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method atan2
atan2: <T extends Tensor<Rank>>(b: Tensor | TensorLike) => T;
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method atanh
atanh: <T extends Tensor<Rank>>(this: T) => T;
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method avgPool
avgPool: <T extends Tensor3D | Tensor4D>( filterSize: [number, number] | number, strides: [number, number] | number, pad: 'valid' | 'same' | number | ExplicitPadding, dimRoundingMode?: 'floor' | 'round' | 'ceil') => T;
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method batchToSpaceND
batchToSpaceND: <R extends Rank>( blockShape: number[], crops: number[][]) => Tensor<R>;
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method batchNorm
batchNorm: <T extends Tensor<Rank>>( mean: Tensor<R> | Tensor1D | TensorLike, variance: Tensor<R> | Tensor1D | TensorLike, offset?: Tensor<R> | Tensor1D | TensorLike, scale?: Tensor<R> | Tensor1D | TensorLike, varianceEpsilon?: number) => Tensor<R>;
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method broadcastTo
broadcastTo: <R extends Rank>(shape: ShapeMap[R]) => Tensor<R>;
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method cast
cast: <T extends Tensor<Rank>>(dtype: DataType) => T;
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method ceil
ceil: <T extends Tensor<Rank>>(this: T) => T;
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method clipByValue
clipByValue: <T extends Tensor<Rank>>(min: number, max: number) => Tensor;
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method concat
concat: <T extends Tensor<Rank>>( tensors: T | Array<T | TensorLike>, axis?: number) => T;
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method conv1d
conv1d: <T extends Tensor2D | Tensor3D>( filter: Tensor3D | TensorLike3D, stride: number, pad: 'valid' | 'same' | number | ExplicitPadding, dataFormat?: 'NWC' | 'NCW', dilation?: number, dimRoundingMode?: 'floor' | 'round' | 'ceil') => T;
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method conv2dTranspose
conv2dTranspose: <T extends Tensor3D | Tensor4D>( filter: Tensor4D | TensorLike4D, outputShape: [number, number, number, number] | [number, number, number], strides: [number, number] | number, pad: 'valid' | 'same' | number, dimRoundingMode?: 'floor' | 'round' | 'ceil') => T;
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method conv2d
conv2d: <T extends Tensor3D | Tensor4D>( filter: Tensor4D | TensorLike4D, strides: [number, number] | number, pad: 'valid' | 'same' | number, dataFormat?: 'NHWC' | 'NCHW', dilations?: [number, number] | number, dimRoundingMode?: 'floor' | 'round' | 'ceil') => T;
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method cos
cos: <T extends Tensor<Rank>>(this: T) => T;
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method cosh
cosh: <T extends Tensor<Rank>>(this: T) => T;
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method cumprod
cumprod: <R extends Rank>( axis?: number, exclusive?: boolean, reverse?: boolean) => Tensor<R>;
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method cumsum
cumsum: <R extends Rank>( axis?: number, exclusive?: boolean, reverse?: boolean) => Tensor<R>;
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method depthToSpace
depthToSpace: <T extends Tensor4D>( blockSize: number, dataFormat: 'NHWC' | 'NCHW') => T;
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method depthwiseConv2d
depthwiseConv2d: <T extends Tensor3D | Tensor4D>( filter: Tensor4D | TensorLike4D, strides: [number, number] | number, pad: 'valid' | 'same' | number, dataFormat?: 'NHWC' | 'NCHW', dilations?: [number, number] | number, dimRoundingMode?: 'floor' | 'round' | 'ceil') => T;
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method dilation2d
dilation2d: <T extends Tensor3D | Tensor4D>( filter: Tensor3D | TensorLike3D, strides: [number, number] | number, pad: 'valid' | 'same', dilations?: [number, number] | number, dataFormat?: 'NHWC') => T;
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method divNoNan
divNoNan: <T extends Tensor<Rank>>(b: Tensor | TensorLike) => T;
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method div
div: <T extends Tensor<Rank>>(b: Tensor | TensorLike) => T;
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method dot
dot: <T extends Tensor<Rank>>(b: Tensor | TensorLike) => Tensor;
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method elu
elu: <T extends Tensor<Rank>>() => T;
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method equal
equal: <T extends Tensor<Rank>>(b: Tensor | TensorLike) => T;
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method erf
erf: <T extends Tensor<Rank>>(this: T) => T;
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method euclideanNorm
euclideanNorm: <T extends Tensor<Rank>>( this: T, axis?: number | number[], keepDims?: boolean) => T;
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method exp
exp: <T extends Tensor<Rank>>(this: T) => T;
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method expandDims
expandDims: <T extends Tensor<Rank>>(axis?: number) => T;
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method expm1
expm1: <T extends Tensor<Rank>>(this: T) => T;
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method fft
fft: <T extends Tensor<Rank>>(this: Tensor) => Tensor;
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method flatten
flatten: <T extends Tensor<Rank>>() => Tensor1D;
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method floor
floor: <T extends Tensor<Rank>>(this: T) => T;
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method floorDiv
floorDiv: <T extends Tensor<Rank>>(b: Tensor | TensorLike) => T;
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method gather
gather: <T extends Tensor<Rank>>( this: T, indices: Tensor | TensorLike, axis?: number, batchDims?: number) => T;
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method greaterEqual
greaterEqual: <T extends Tensor<Rank>>(b: Tensor | TensorLike) => T;
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method greater
greater: <T extends Tensor<Rank>>(b: Tensor | TensorLike) => T;
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method ifft
ifft: <T extends Tensor<Rank>>(this: Tensor) => Tensor;
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method irfft
irfft: <T extends Tensor<Rank>>(this: Tensor) => Tensor;
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method isFinite
isFinite: <T extends Tensor<Rank>>(this: T) => T;
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method isInf
isInf: <T extends Tensor<Rank>>(this: T) => T;
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method isNaN
isNaN: <T extends Tensor<Rank>>(this: T) => T;
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method leakyRelu
leakyRelu: <T extends Tensor<Rank>>(alpha: number) => T;
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method lessEqual
lessEqual: <T extends Tensor<Rank>>(b: Tensor | TensorLike) => T;
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method less
less: <T extends Tensor<Rank>>(b: Tensor | TensorLike) => T;
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method localResponseNormalization
localResponseNormalization: <T extends Tensor<Rank>>( depthRadius?: number, bias?: number, alpha?: number, beta?: number) => T;
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method logSigmoid
logSigmoid: <T extends Tensor<Rank>>(this: T) => T;
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method logSoftmax
logSoftmax: <T extends Tensor<Rank>>(this: T, axis?: number) => T;
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method logSumExp
logSumExp: <T extends Tensor<Rank>>( this: T, axis?: number | number[], keepDims?: boolean) => T;
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method log
log: <T extends Tensor<Rank>>(this: T) => T;
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method log1p
log1p: <T extends Tensor<Rank>>(this: T) => T;
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method logicalAnd
logicalAnd: <T extends Tensor<Rank>>(b: Tensor | TensorLike) => T;
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method logicalNot
logicalNot: <T extends Tensor<Rank>>() => T;
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method logicalOr
logicalOr: <T extends Tensor<Rank>>(b: Tensor | TensorLike) => T;
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method logicalXor
logicalXor: <T extends Tensor<Rank>>(b: Tensor | TensorLike) => T;
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method matMul
matMul: <T extends Tensor<Rank>>( b: Tensor | TensorLike, transposeA?: boolean, transposeB?: boolean) => Tensor;
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method maxPool
maxPool: <T extends Tensor3D | Tensor4D>( filterSize: [number, number] | number, strides: [number, number] | number, pad: 'valid' | 'same' | number | ExplicitPadding, dimRoundingMode?: 'floor' | 'round' | 'ceil') => T;
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method max
max: <T extends Tensor<Rank>>(axis?: number | number[], keepDims?: boolean) => T;
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method maximum
maximum: <T extends Tensor<Rank>>(b: Tensor | TensorLike) => T;
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method mean
mean: <T extends Tensor<Rank>>( axis?: number | number[], keepDims?: boolean) => T;
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method min
min: <T extends Tensor<Rank>>(axis?: number | number[], keepDims?: boolean) => T;
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method minimum
minimum: <T extends Tensor<Rank>>(b: Tensor | TensorLike) => T;
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method mirrorPad
mirrorPad: <T extends Tensor<Rank>>( paddings: Array<[number, number]>, mode: 'reflect' | 'symmetric') => T;
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method mod
mod: <T extends Tensor<Rank>>(b: Tensor | TensorLike) => T;
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method mul
mul: <T extends Tensor<Rank>>(b: Tensor | TensorLike) => T;
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method neg
neg: <T extends Tensor<Rank>>(this: T) => T;
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method norm
norm: <T extends Tensor<Rank>>( ord?: number | 'euclidean' | 'fro', axis?: number | number[], keepDims?: boolean) => Tensor;
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method notEqual
notEqual: <T extends Tensor<Rank>>(b: Tensor | TensorLike) => T;
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method oneHot
oneHot: (depth: number, onValue: number, offValue: number) => Tensor;
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method onesLike
onesLike: <T extends Tensor<Rank>>(this: T) => T;
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method pad
pad: <T extends Tensor<Rank>>( paddings: Array<[number, number]>, constantValue?: number) => T;
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method pool
pool: <T extends Tensor3D | Tensor4D>( windowShape: [number, number] | number, poolingType: 'avg' | 'max', padding: 'valid' | 'same' | number | ExplicitPadding, diationRate?: [number, number] | number, strides?: [number, number] | number, dimRoundingMode?: 'floor' | 'round' | 'ceil') => T;
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method pow
pow: <T extends Tensor<Rank>>(exp: Tensor | TensorLike) => T;
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method prelu
prelu: <T extends Tensor<Rank>>(alpha: T | TensorLike) => T;
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method prod
prod: <T extends Tensor<Rank>>( this: T, axis?: number | number[], keepDims?: boolean) => T;
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method reciprocal
reciprocal: <T extends Tensor<Rank>>(this: T) => T;
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method relu
relu: <T extends Tensor<Rank>>() => T;
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method relu6
relu6: <T extends Tensor<Rank>>() => T;
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method reshapeAs
reshapeAs: <T extends Tensor<Rank>>(x: T) => T;
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method reshape
reshape: <T extends Tensor<Rank>>(shape: number[]) => T;
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method resizeBilinear
resizeBilinear: <T extends Tensor3D | Tensor4D>( newShape2D: [number, number], alignCorners?: boolean, halfPixelCenters?: boolean) => T;
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method resizeNearestNeighbor
resizeNearestNeighbor: <T extends Tensor3D | Tensor4D>( newShape2D: [number, number], alignCorners?: boolean, halfFloatCenters?: boolean) => T;
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method reverse
reverse: <T extends Tensor<Rank>>(this: T, axis?: number | number[]) => T;
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method rfft
rfft: <T extends Tensor<Rank>>(this: Tensor) => Tensor;
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method round
round: <T extends Tensor<Rank>>(this: T) => T;
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method rsqrt
rsqrt: <T extends Tensor<Rank>>(this: T) => T;
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method selu
selu: <T extends Tensor<Rank>>() => T;
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method separableConv2d
separableConv2d: <T extends Tensor3D | Tensor4D>( depthwiseFilter: Tensor4D | TensorLike4D, pointwiseFilter: Tensor4D | TensorLike, strides: [number, number] | number, pad: 'valid' | 'same', dilation?: [number, number] | number, dataFormat?: 'NHWC' | 'NCHW') => T;
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method sigmoid
sigmoid: <T extends Tensor<Rank>>(this: T) => T;
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method sign
sign: <T extends Tensor<Rank>>(this: T) => T;
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method sin
sin: <T extends Tensor<Rank>>(this: T) => T;
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method sinh
sinh: <T extends Tensor<Rank>>(this: T) => T;
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method slice
slice: <T extends Tensor<Rank>>( this: T, begin: number | number[], size?: number | number[]) => T;
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method softmax
softmax: <T extends Tensor<Rank>>(this: T, dim?: number) => T;
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method softplus
softplus: <T extends Tensor<Rank>>(this: T) => T;
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method spaceToBatchND
spaceToBatchND: <R extends Rank>( blockShape: number[], paddings: number[][]) => Tensor<R>;
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method split
split: <T extends Tensor<Rank>>( numOrSizeSplits: number[] | number, axis?: number) => T[];
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method sqrt
sqrt: <T extends Tensor<Rank>>(this: T) => T;
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method square
square: <T extends Tensor<Rank>>(this: T) => T;
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method squaredDifference
squaredDifference: <T extends Tensor<Rank>>(b: Tensor | TensorLike) => T;
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method squeeze
squeeze: <T extends Tensor<Rank>>(axis?: number[]) => T;
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method stack
stack: <T extends Tensor<Rank>>(x: Tensor | Tensor[], axis?: number) => T;
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method step
step: <T extends Tensor<Rank>>(this: T, alpha?: number) => T;
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method stridedSlice
stridedSlice: <T extends Tensor<Rank>>( this: Tensor, begin: number[], end: number[], strides: number[], beginMask?: number, endMask?: number, ellipsisMask?: number, newAxisMask?: number, shrinkAxisMask?: number) => Tensor;
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method sub
sub: <T extends Tensor<Rank>>(b: Tensor | TensorLike) => T;
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method sum
sum: <T extends Tensor<Rank>>(axis?: number | number[], keepDims?: boolean) => T;
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method tan
tan: <T extends Tensor<Rank>>(this: T) => T;
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method tanh
tanh: <T extends Tensor<Rank>>(this: T) => T;
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method tile
tile: <T extends Tensor<Rank>>(b: number[]) => T;
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method toBool
toBool: <T extends Tensor<Rank>>(this: T) => T;
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method toFloat
toFloat: <T extends Tensor<Rank>>(this: T) => T;
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method toInt
toInt: <T extends Tensor<Rank>>(this: T) => T;
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method topk
topk: <T extends Tensor<Rank>>( this: T, k?: number, sorted?: boolean) => { values: T; indices: T };
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method transpose
transpose: <T extends Tensor<Rank>>(perm?: number[]) => T;
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method unique
unique: <T extends Tensor<Rank>>( this: T, axis?: number) => { values: T; indices: T };
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method unsortedSegmentSum
unsortedSegmentSum: <T extends Tensor<Rank>>( this: T, segmentIds: Tensor1D | TensorLike1D, numSegments: number) => T;
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method unstack
unstack: <T extends Tensor<Rank>>(axis?: number) => T[];
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method where
where: <T extends Tensor<Rank>>( condition: Tensor | TensorLike, x: Tensor | TensorLike) => T;
interface Tensor
interface Tensor<R extends Rank = Rank> {}
method zerosLike
zerosLike: <T extends Tensor<Rank>>(this: T) => T;
interface TensorContainerArray
interface TensorContainerArray extends Array<TensorContainer> {}
interface TensorContainerObject
interface TensorContainerObject {}
index signature
[x: string]: TensorContainer;
interface TensorInfo
interface TensorInfo {}
Holds metadata for a given tensor.
interface TensorScatterUpdateAttrs
interface TensorScatterUpdateAttrs {}
interface TimingInfo
interface TimingInfo extends BackendTimingInfo {}
property wallMs
wallMs: number;
interface TransformAttrs
interface TransformAttrs {}
property fillMode
fillMode: 'constant' | 'reflect' | 'wrap' | 'nearest';
property fillValue
fillValue: number;
property interpolation
interpolation: 'nearest' | 'bilinear';
property outputShape
outputShape?: [number, number];
interface TransposeAttrs
interface TransposeAttrs {}
property perm
perm: number[];
interface UniqueAttrs
interface UniqueAttrs {}
property axis
axis: number;
interface UnpackAttrs
interface UnpackAttrs {}
property axis
axis: number;
interface UnsortedSegmentSumAttrs
interface UnsortedSegmentSumAttrs {}
property numSegments
numSegments: number;
interface WebGLData
interface WebGLData {}
Type for representing a texture data to create a tensor.
interface WebGPUData
interface WebGPUData {}
Type for representing a buffer data to create a tensor. Buffer usage should at least support GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_SRC. When zeroCopy is false or undefined (default), this GPUBuffer will be copied to the tensor's resource buffer. When zeroCopy is true, tensor will use this GPUBuffer as tensor's resource buffer, user should not destroy this GPUBuffer until all access is done. If not specified at creating a tensor, tensor type is float32.
Enums
enum Rank
enum Rank { R0 = 'R0', R1 = 'R1', R2 = 'R2', R3 = 'R3', R4 = 'R4', R5 = 'R5', R6 = 'R6',}
enum Reduction
enum Reduction { NONE = 0, MEAN = 1, SUM = 2, SUM_BY_NONZERO_WEIGHTS = 3,}
Copyright 2020 Google LLC. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. =============================================================================
member MEAN
MEAN = 1
member NONE
NONE = 0
member SUM
SUM = 2
member SUM_BY_NONZERO_WEIGHTS
SUM_BY_NONZERO_WEIGHTS = 3
Type Aliases
type AbsInputs
type AbsInputs = UnaryInputs;
type AcoshInputs
type AcoshInputs = UnaryInputs;
type AcosInputs
type AcosInputs = UnaryInputs;
type AddInputs
type AddInputs = BinaryInputs;
type AddNInputs
type AddNInputs = TensorInfo[];
type AllInputs
type AllInputs = Pick<NamedTensorInfoMap, 'x'>;
type AnyInputs
type AnyInputs = Pick<NamedTensorInfoMap, 'x'>;
type ArgMaxInputs
type ArgMaxInputs = Pick<NamedTensorInfoMap, 'x'>;
type ArgMinInputs
type ArgMinInputs = Pick<NamedTensorInfoMap, 'x'>;
type AsinhInputs
type AsinhInputs = UnaryInputs;
type AsinInputs
type AsinInputs = UnaryInputs;
type Atan2Inputs
type Atan2Inputs = BinaryInputs;
type AtanhInputs
type AtanhInputs = UnaryInputs;
type AtanInputs
type AtanInputs = UnaryInputs;
type Attribute
type Attribute = AttributeValue | RecursiveArray<AttributeValue>;
These are extra non-tensor/primitive params passed to kernel functions.
type AvgPool3DGradInputs
type AvgPool3DGradInputs = Pick<NamedTensorInfoMap, 'dy' | 'input'>;
type AvgPool3DInputs
type AvgPool3DInputs = Pick<NamedTensorInfoMap, 'x'>;
type AvgPoolGradInputs
type AvgPoolGradInputs = Pick<NamedTensorInfoMap, 'dy' | 'input'>;
type AvgPoolInputs
type AvgPoolInputs = Pick<NamedTensorInfoMap, 'x'>;
type BackendValues
type BackendValues = Float32Array | Int32Array | Uint8Array | Uint8Array[];
The underlying tensor data that gets stored in a backend.
type BatchMatMulInputs
type BatchMatMulInputs = Pick<NamedTensorInfoMap, 'a' | 'b'>;
type BatchToSpaceNDInputs
type BatchToSpaceNDInputs = Pick<NamedTensorInfoMap, 'x'>;
type BinaryInputs
type BinaryInputs = Pick<NamedTensorInfoMap, 'a' | 'b'>;
type BincountInputs
type BincountInputs = Pick<NamedTensorInfoMap, 'x' | 'weights'>;
type BitwiseAndInputs
type BitwiseAndInputs = BinaryInputs;
type BroadcastArgsInputs
type BroadcastArgsInputs = Pick<NamedTensorInfoMap, 's0' | 's1'>;
type BroadcastToInputs
type BroadcastToInputs = Pick<NamedTensorInfoMap, 'x'>;
type CastInputs
type CastInputs = UnaryInputs;
type CeilInputs
type CeilInputs = UnaryInputs;
type ClipByValueInputs
type ClipByValueInputs = UnaryInputs;
type ComplexAbsInputs
type ComplexAbsInputs = UnaryInputs;
type ComplexInputs
type ComplexInputs = Pick<NamedTensorInfoMap, 'real' | 'imag'>;
type ConcatInputs
type ConcatInputs = TensorInfo[];
type Conv2DBackpropFilterInputs
type Conv2DBackpropFilterInputs = Pick<NamedTensorInfoMap, 'x' | 'dy'>;
type Conv2DBackpropInputInputs
type Conv2DBackpropInputInputs = Pick<NamedTensorInfoMap, 'dy' | 'filter'>;
type Conv2DInputs
type Conv2DInputs = Pick<NamedTensorInfoMap, 'x' | 'filter'>;
type Conv3DBackpropFilterV2Inputs
type Conv3DBackpropFilterV2Inputs = Pick<NamedTensorInfoMap, 'x' | 'dy'>;
type Conv3DBackpropInputV2Inputs
type Conv3DBackpropInputV2Inputs = Pick<NamedTensorInfoMap, 'dy' | 'filter'>;
type Conv3DInputs
type Conv3DInputs = Pick<NamedTensorInfoMap, 'x' | 'filter'>;
type CoshInputs
type CoshInputs = UnaryInputs;
type CosInputs
type CosInputs = UnaryInputs;
type CropAndResizeInputs
type CropAndResizeInputs = Pick<NamedTensorInfoMap, 'image' | 'boxes' | 'boxInd'>;
type CumprodInputs
type CumprodInputs = Pick<NamedTensorInfoMap, 'x'>;
type CumsumInputs
type CumsumInputs = Pick<NamedTensorInfoMap, 'x'>;
type DataId
type DataId = object;
We wrap data id since we use weak map to avoid memory leaks. Since we have our own memory management, we have a reference counter mapping a tensor to its data, so there is always a pointer (even if that data is otherwise garbage collectable). See https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/WeakMap
type DataToGPUOptions
type DataToGPUOptions = DataToGPUWebGLOption;
type DataType
type DataType = keyof DataTypeMap;
'float32'|'int32'|'bool'|'complex64'|'string'
type DataTypeFor
type DataTypeFor<T extends number | string | boolean> = T extends number | boolean ? NumericDataType : T extends string ? 'string' : never;
type DataValues
type DataValues = DataTypeMap[DataType];
Tensor data used in tensor creation and user-facing API.
type DenseBincountInputs
type DenseBincountInputs = Pick<NamedTensorInfoMap, 'x' | 'weights'>;
type DepthToSpaceInputs
type DepthToSpaceInputs = Pick<NamedTensorInfoMap, 'x'>;
type DepthwiseConv2dNativeBackpropFilterInputs
type DepthwiseConv2dNativeBackpropFilterInputs = Pick< NamedTensorInfoMap, 'x' | 'dy'>;
type DepthwiseConv2dNativeBackpropInputInputs
type DepthwiseConv2dNativeBackpropInputInputs = Pick< NamedTensorInfoMap, 'dy' | 'filter'>;
type DepthwiseConv2dNativeInputs
type DepthwiseConv2dNativeInputs = Pick<NamedTensorInfoMap, 'x' | 'filter'>;
type DiagInputs
type DiagInputs = Pick<NamedTensorInfoMap, 'x'>;
type Dilation2DBackpropFilterInputs
type Dilation2DBackpropFilterInputs = Pick< NamedTensorInfoMap, 'x' | 'filter' | 'dy'>;
type Dilation2DBackpropInputInputs
type Dilation2DBackpropInputInputs = Pick<NamedTensorInfoMap, 'x' | 'filter' | 'dy'>;
type Dilation2DInputs
type Dilation2DInputs = Pick<NamedTensorInfoMap, 'x' | 'filter'>;
type DrawInputs
type DrawInputs = Pick<NamedTensorInfoMap, 'image'>;
type EinsumInputs
type EinsumInputs = TensorInfo[];
type EluGradInputs
type EluGradInputs = Pick<NamedTensorInfoMap, 'dy' | 'y'>;
type EluInputs
type EluInputs = Pick<NamedTensorInfoMap, 'x'>;
type EqualInputs
type EqualInputs = BinaryInputs;
type ErfInputs
type ErfInputs = UnaryInputs;
type ExpandDimsInputs
type ExpandDimsInputs = Pick<NamedTensorInfoMap, 'input'>;
type ExpInputs
type ExpInputs = UnaryInputs;
type Expm1Inputs
type Expm1Inputs = UnaryInputs;
type FFTInputs
type FFTInputs = Pick<NamedTensorInfoMap, 'input'>;
type FlipLeftRightInputs
type FlipLeftRightInputs = Pick<NamedTensorInfoMap, 'image'>;
type FloorDivInputs
type FloorDivInputs = BinaryInputs;
type FloorInputs
type FloorInputs = UnaryInputs;
type ForwardFunc
type ForwardFunc<T> = (backend: KernelBackend, save?: GradSaveFunc) => T;
A function that computes an output. The save function is for saving tensors computed in the forward pass, that we need in the backward pass.
type FusedBatchNormInputs
type FusedBatchNormInputs = Pick< NamedTensorInfoMap, 'x' | 'scale' | 'offset' | 'mean' | 'variance'>;
type GatherNdInputs
type GatherNdInputs = Pick<NamedTensorInfoMap, 'params' | 'indices'>;
type GatherV2Inputs
type GatherV2Inputs = Pick<NamedTensorInfoMap, 'x' | 'indices'>;
type GradFunc
type GradFunc = ( dy: Tensor | Tensor[], saved: Tensor[], attrs: NamedAttrMap) => NamedGradientMap;
The function to run when computing a gradient during backprop.
type GradSaveFunc
type GradSaveFunc = (save: Tensor[]) => void;
type GreaterEqualInputs
type GreaterEqualInputs = BinaryInputs;
type GreaterInputs
type GreaterInputs = BinaryInputs;
type IdentityInputs
type IdentityInputs = Pick<NamedTensorInfoMap, 'x'>;
type IFFTInputs
type IFFTInputs = Pick<NamedTensorInfoMap, 'input'>;
type ImagInputs
type ImagInputs = Pick<NamedTensorInfoMap, 'input'>;
type IsFiniteInputs
type IsFiniteInputs = UnaryInputs;
type IsInfInputs
type IsInfInputs = UnaryInputs;
type IsNanInputs
type IsNanInputs = UnaryInputs;
type KernelDisposeFunc
type KernelDisposeFunc = KernelSetupFunc;
Function that gets called right before the backend is disposed.
type KernelFunc
type KernelFunc = (params: { inputs: NamedTensorInfoMap; backend: {}; attrs?: NamedAttrMap;}) => TensorInfo | TensorInfo[];
Specifies the code to run when executing a kernel.
type KernelSetupFunc
type KernelSetupFunc = (backend: {}) => void;
Function that gets called after the backend initializes.
type LeakyReluInputs
type LeakyReluInputs = Pick<NamedTensorInfoMap, 'x'>;
type LessEqualInputs
type LessEqualInputs = BinaryInputs;
type LessInputs
type LessInputs = BinaryInputs;
type Log1pInputs
type Log1pInputs = UnaryInputs;
type LogicalAndInputs
type LogicalAndInputs = BinaryInputs;
type LogicalNotInputs
type LogicalNotInputs = Pick<NamedTensorInfoMap, 'x'>;
type LogicalOrInputs
type LogicalOrInputs = BinaryInputs;
type LogicalXorInputs
type LogicalXorInputs = BinaryInputs;
type LogInputs
type LogInputs = UnaryInputs;
type LogSoftmaxInputs
type LogSoftmaxInputs = Pick<NamedTensorInfoMap, 'logits'>;
type LowerBoundInputs
type LowerBoundInputs = Pick<NamedTensorInfoMap, 'sortedSequence' | 'values'>;
type LRNGradInputs
type LRNGradInputs = Pick<NamedTensorInfoMap, 'x' | 'y' | 'dy'>;
type LRNInputs
type LRNInputs = Pick<NamedTensorInfoMap, 'x'>;
type LSTMCellFunc
type LSTMCellFunc = { (data: Tensor2D, c: Tensor2D, h: Tensor2D): [Tensor2D, Tensor2D];};
(data: Tensor2D, c: Tensor2D, h: Tensor2D): [Tensor2D, Tensor2D]
type MatrixBandPartInputs
type MatrixBandPartInputs = Pick< NamedTensorInfoMap, 'input' | 'numLower' | 'numUpper'>;
type MaximumInputs
type MaximumInputs = BinaryInputs;
type MaxInputs
type MaxInputs = Pick<NamedTensorInfoMap, 'x'>;
type MaxPool3DGradInputs
type MaxPool3DGradInputs = Pick<NamedTensorInfoMap, 'dy' | 'input' | 'output'>;
type MaxPool3DInputs
type MaxPool3DInputs = Pick<NamedTensorInfoMap, 'x'>;
type MaxPoolGradInputs
type MaxPoolGradInputs = Pick<NamedTensorInfoMap, 'dy' | 'input' | 'output'>;
type MaxPoolInputs
type MaxPoolInputs = Pick<NamedTensorInfoMap, 'x'>;
type MaxPoolWithArgmaxInputs
type MaxPoolWithArgmaxInputs = Pick<NamedTensorInfoMap, 'x'>;
type MeanInputs
type MeanInputs = Pick<NamedTensorInfoMap, 'x'>;
type MemoryInfo
type MemoryInfo = { numTensors: number; numDataBuffers: number; numBytes: number; unreliable?: boolean; reasons: string[];};
type MinimumInputs
type MinimumInputs = BinaryInputs;
type MinInputs
type MinInputs = Pick<NamedTensorInfoMap, 'x'>;
type MirrorPadInputs
type MirrorPadInputs = Pick<NamedTensorInfoMap, 'x'>;
type ModInputs
type ModInputs = BinaryInputs;
type MultinomialInputs
type MultinomialInputs = Pick<NamedTensorInfoMap, 'logits'>;
type MultiplyInputs
type MultiplyInputs = BinaryInputs;
type NamedTensorMap
type NamedTensorMap = { [name: string]: Tensor;};
{[name: string]: Tensor}
type NegInputs
type NegInputs = UnaryInputs;
type NonMaxSuppressionV3Inputs
type NonMaxSuppressionV3Inputs = Pick<NamedTensorInfoMap, 'boxes' | 'scores'>;
type NonMaxSuppressionV4Inputs
type NonMaxSuppressionV4Inputs = Pick<NamedTensorInfoMap, 'boxes' | 'scores'>;
type NonMaxSuppressionV5Inputs
type NonMaxSuppressionV5Inputs = Pick<NamedTensorInfoMap, 'boxes' | 'scores'>;
type NotEqualInputs
type NotEqualInputs = BinaryInputs;
type NumericDataType
type NumericDataType = 'float32' | 'int32' | 'bool' | 'complex64';
type OneHotInputs
type OneHotInputs = Pick<NamedTensorInfoMap, 'indices'>;
type OnesLikeInputs
type OnesLikeInputs = UnaryInputs;
type PackInputs
type PackInputs = TensorInfo[];
type PadV2Inputs
type PadV2Inputs = Pick<NamedTensorInfoMap, 'x'>;
type PoolInputs
type PoolInputs = Pick<NamedTensorInfoMap, 'input'>;
type PowInputs
type PowInputs = BinaryInputs;
type PreluInputs
type PreluInputs = Pick<NamedTensorInfoMap, 'x' | 'alpha'>;
type ProdInputs
type ProdInputs = Pick<NamedTensorInfoMap, 'x'>;
type RaggedGatherInputs
type RaggedGatherInputs = { paramsNestedSplits: TensorInfo[];} & Pick<NamedTensorInfoMap, 'paramsDenseValues' | 'indices'>;
type RaggedRangeInputs
type RaggedRangeInputs = Pick<NamedTensorInfoMap, 'starts' | 'limits' | 'deltas'>;
type RaggedTensorToTensorInputs
type RaggedTensorToTensorInputs = Pick< NamedTensorInfoMap, 'shape' | 'values' | 'defaultValue'> & { rowPartitionTensors: TensorInfo[];};
type RealDivInputs
type RealDivInputs = BinaryInputs;
type RealInputs
type RealInputs = Pick<NamedTensorInfoMap, 'input'>;
type ReciprocalInputs
type ReciprocalInputs = UnaryInputs;
type Relu6Inputs
type Relu6Inputs = Pick<NamedTensorInfoMap, 'x'>;
type ReluInputs
type ReluInputs = Pick<NamedTensorInfoMap, 'x'>;
type ReshapeInputs
type ReshapeInputs = Pick<NamedTensorInfoMap, 'x'>;
type ResizeBilinearGradAttrs
type ResizeBilinearGradAttrs = ResizeBilinearAttrs;
type ResizeBilinearGradInputs
type ResizeBilinearGradInputs = Pick<NamedTensorInfoMap, 'images' | 'dy'>;
type ResizeBilinearInputs
type ResizeBilinearInputs = Pick<NamedTensorInfoMap, 'images'>;
type ResizeNearestNeighborGradAttrs
type ResizeNearestNeighborGradAttrs = ResizeNearestNeighborAttrs;
type ResizeNearestNeighborGradInputs
type ResizeNearestNeighborGradInputs = Pick<NamedTensorInfoMap, 'images' | 'dy'>;
type ResizeNearestNeighborInputs
type ResizeNearestNeighborInputs = Pick<NamedTensorInfoMap, 'images'>;
type ReverseInputs
type ReverseInputs = Pick<NamedTensorInfoMap, 'x'>;
type RotateWithOffsetInputs
type RotateWithOffsetInputs = Pick<NamedTensorInfoMap, 'image'>;
type RoundInputs
type RoundInputs = UnaryInputs;
type RsqrtInputs
type RsqrtInputs = UnaryInputs;
type Scalar
type Scalar = Tensor<Rank.R0>;
Tensor
type ScalarLike
type ScalarLike = number | boolean | string | Uint8Array;
type ScatterNdInputs
type ScatterNdInputs = Pick<NamedTensorInfoMap, 'indices' | 'updates'>;
type SearchSortedInputs
type SearchSortedInputs = Pick<NamedTensorInfoMap, 'sortedSequence' | 'values'>;
type SelectInputs
type SelectInputs = Pick<NamedTensorInfoMap, 'condition' | 't' | 'e'>;
type SeluInputs
type SeluInputs = Pick<NamedTensorInfoMap, 'x'>;
type SigmoidInputs
type SigmoidInputs = UnaryInputs;
type SignInputs
type SignInputs = UnaryInputs;
type SinhInputs
type SinhInputs = UnaryInputs;
type SinInputs
type SinInputs = UnaryInputs;
type SliceInputs
type SliceInputs = Pick<NamedTensorInfoMap, 'x'>;
type SoftmaxInputs
type SoftmaxInputs = Pick<NamedTensorInfoMap, 'logits'>;
type SoftplusInputs
type SoftplusInputs = UnaryInputs;
type SpaceToBatchNDInputs
type SpaceToBatchNDInputs = Pick<NamedTensorInfoMap, 'x'>;
type SparseFillEmptyRowsInputs
type SparseFillEmptyRowsInputs = Pick< NamedTensorInfoMap, 'indices' | 'values' | 'denseShape' | 'defaultValue'>;
type SparseReshapeInputs
type SparseReshapeInputs = Pick< NamedTensorInfoMap, 'inputIndices' | 'inputShape' | 'newShape'>;
type SparseSegmentMeanInputs
type SparseSegmentMeanInputs = Pick< NamedTensorInfoMap, 'data' | 'indices' | 'segmentIds'>;
type SparseSegmentSumInputs
type SparseSegmentSumInputs = Pick< NamedTensorInfoMap, 'data' | 'indices' | 'segmentIds'>;
type SparseToDenseInputs
type SparseToDenseInputs = Pick< NamedTensorInfoMap, 'sparseIndices' | 'sparseValues' | 'defaultValue'>;
type SplitVInputs
type SplitVInputs = Pick<NamedTensorInfoMap, 'x'>;
type SqrtInputs
type SqrtInputs = UnaryInputs;
type SquaredDifferenceInputs
type SquaredDifferenceInputs = BinaryInputs;
type SquareInputs
type SquareInputs = Pick<NamedTensorInfoMap, 'x'>;
type StaticRegexReplaceInputs
type StaticRegexReplaceInputs = UnaryInputs;
type StepInputs
type StepInputs = UnaryInputs;
type StridedSliceInputs
type StridedSliceInputs = Pick<NamedTensorInfoMap, 'x'>;
type StringNGramsInputs
type StringNGramsInputs = Pick<NamedTensorInfoMap, 'data' | 'dataSplits'>;
type StringSplitInputs
type StringSplitInputs = Pick<NamedTensorInfoMap, 'input' | 'delimiter'>;
type StringToHashBucketFastInputs
type StringToHashBucketFastInputs = Pick<NamedTensorInfoMap, 'input'>;
type SubInputs
type SubInputs = BinaryInputs;
type SumInputs
type SumInputs = Pick<NamedTensorInfoMap, 'x'>;
type TanhInputs
type TanhInputs = UnaryInputs;
type TanInputs
type TanInputs = UnaryInputs;
type Tensor1D
type Tensor1D = Tensor<Rank.R1>;
Tensor
type Tensor2D
type Tensor2D = Tensor<Rank.R2>;
Tensor
type Tensor3D
type Tensor3D = Tensor<Rank.R3>;
Tensor
type Tensor4D
type Tensor4D = Tensor<Rank.R4>;
Tensor
type Tensor5D
type Tensor5D = Tensor<Rank.R5>;
Tensor
type TensorContainer
type TensorContainer = | void | Tensor | string | number | boolean | TensorContainerObject | TensorContainerArray | Float32Array | Int32Array | Uint8Array;
void|number|string|TypedArray|Tensor|Tensor[]|{[key: string]:Tensor|number|string}
type TensorLike
type TensorLike = | TypedArray | number | boolean | string | RecursiveArray<number | number[] | TypedArray> | RecursiveArray<boolean> | RecursiveArray<string> | Uint8Array[];
TypedArray|Array
type TensorScatterUpdateInputs
type TensorScatterUpdateInputs = Pick< NamedTensorInfoMap, 'tensor' | 'indices' | 'updates'>;
type TileInputs
type TileInputs = Pick<NamedTensorInfoMap, 'x'>;
type TopKInputs
type TopKInputs = Pick<NamedTensorInfoMap, 'x'>;
type TransformInputs
type TransformInputs = Pick<NamedTensorInfoMap, 'image' | 'transforms'>;
type TransposeInputs
type TransposeInputs = Pick<NamedTensorInfoMap, 'x'>;
type TypedArray
type TypedArray = Float32Array | Int32Array | Uint8Array;
type UnaryInputs
type UnaryInputs = Pick<NamedTensorInfoMap, 'x'>;
type UniqueInputs
type UniqueInputs = Pick<NamedTensorInfoMap, 'x'>;
type UnpackInputs
type UnpackInputs = Pick<NamedTensorInfoMap, 'value'>;
type UnsortedSegmentSumInputs
type UnsortedSegmentSumInputs = Pick<NamedTensorInfoMap, 'x' | 'segmentIds'>;
type UpperBoundInputs
type UpperBoundInputs = Pick<NamedTensorInfoMap, 'sortedSequence' | 'values'>;
type ZerosLikeInputs
type ZerosLikeInputs = UnaryInputs;
Namespaces
namespace backend_util
module 'dist/backends/backend_util.d.ts' {}
Copyright 2018 Google LLC. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. =============================================================================
variable ERF_A1
const ERF_A1: number;
variable ERF_A2
const ERF_A2: number;
variable ERF_A3
const ERF_A3: number;
variable ERF_A4
const ERF_A4: number;
variable ERF_A5
const ERF_A5: number;
variable ERF_P
const ERF_P: number;
Copyright 2018 Google LLC. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. =============================================================================
variable PARALLELIZE_THRESHOLD
const PARALLELIZE_THRESHOLD: number;
Copyright 2017 Google LLC. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. =============================================================================
variable SELU_SCALE
const SELU_SCALE: number;
variable SELU_SCALEALPHA
const SELU_SCALEALPHA: number;
Copyright 2018 Google LLC. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. =============================================================================
function applyActivation
applyActivation: ( x: Tensor, activation: Activation, preluActivationWeights?: Tensor, leakyreluAlpha?: number) => Tensor;
function assertAndGetBroadcastShape
assertAndGetBroadcastShape: (shapeA: number[], shapeB: number[]) => number[];
function assertAxesAreInnerMostDims
assertAxesAreInnerMostDims: (msg: string, axes: number[], rank: number) => void;
function assertParamsConsistent
assertParamsConsistent: (shapes: number[][], axis: number) => void;
Copyright 2017 Google LLC. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. =============================================================================
function assignToTypedArray
assignToTypedArray: ( data: TypedArray, real: number, imag: number, index: number) => void;
Insert a given complex value into the TypedArray.
Parameter data
The array in which the complex value is inserted.
Parameter real, imag
The real and imaginary components of the complex value to be inserted.
Parameter index
An index of the target complex value.
function axesAreInnerMostDims
axesAreInnerMostDims: (axes: number[], rank: number) => boolean;
Returns true if the axis specifies the inner most dimensions of the array.
function calculateShapes
calculateShapes: ( updates: TensorInfo, indices: TensorInfo, shape: number[]) => ScatterShapeInfo;
Calculate the shape information for the output.
Parameter updates
The tensor that contains the update values.
Parameter indices
The tensor that contains the indices for the update values.
Parameter shape
The shape of the output tensor.
Returns
ScatterShapeInfo
function checkEinsumDimSizes
checkEinsumDimSizes: ( nDims: number, idDims: number[][], tensors: Tensor[]) => void;
Checks that the dimension sizes from different input tensors match the equation.
function checkPadOnDimRoundingMode
checkPadOnDimRoundingMode: ( opDesc: string, pad: 'valid' | 'same' | number | ExplicitPadding, dimRoundingMode?: 'floor' | 'round' | 'ceil') => void;
Check validity of pad when using dimRoundingMode.
Parameter opDesc
A string of op description
Parameter pad
The type of padding algorithm. - `same` and stride 1: output will be of same size as input, regardless of filter size. - `valid`: output will be smaller than input if filter is larger than 1x1. - For more info, see this guide: [https://www.tensorflow.org/api_docs/python/tf/nn/convolution](https://www.tensorflow.org/api_docs/python/tf/nn/convolution)
Parameter dimRoundingMode
A string from: 'ceil', 'round', 'floor'. If none is provided, it will default to truncate.
Throws
unknown padding parameter
function combineLocations
combineLocations: ( outputLoc: number[], reduceLoc: number[], axes: number[]) => number[];
function combineRaggedTensorToTensorShapes
combineRaggedTensorToTensorShapes: ( raggedRank: number, shape: number[], valueShape: number[]) => number[];
function complexWithEvenIndex
complexWithEvenIndex: (complex: Float32Array) => { real: Float32Array; imag: Float32Array;};
Extracts even indexed complex values in the given array.
Parameter complex
The complex tensor values
function complexWithOddIndex
complexWithOddIndex: (complex: Float32Array) => { real: Float32Array; imag: Float32Array;};
Extracts odd indexed complex values in the given array.
Parameter complex
The complex tensor values
function computeConv2DInfo
computeConv2DInfo: ( inShape: [number, number, number, number], filterShape: [number, number, number, number], strides: number | [number, number], dilations: number | [number, number], pad: 'same' | 'valid' | number | ExplicitPadding, roundingMode?: 'floor' | 'round' | 'ceil', depthwise?: boolean, dataFormat?: 'channelsFirst' | 'channelsLast') => Conv2DInfo;
Computes the information for a forward pass of a convolution/pooling operation.
function computeConv3DInfo
computeConv3DInfo: ( inShape: [number, number, number, number, number], filterShape: [number, number, number, number, number], strides: number | [number, number, number], dilations: number | [number, number, number], pad: 'same' | 'valid' | number, depthwise?: boolean, dataFormat?: 'channelsFirst' | 'channelsLast', roundingMode?: 'floor' | 'round' | 'ceil') => Conv3DInfo;
Computes the information for a forward pass of a 3D convolution/pooling operation.
function computeDefaultPad
computeDefaultPad: ( inputShape: [number, number] | [number, number, number, number], fieldSize: number, stride: number, dilation?: number) => number;
function computeDilation2DInfo
computeDilation2DInfo: ( inputShape: [number, number, number, number], filterShape: [number, number, number], strides: number | [number, number], pad: 'same' | 'valid' | number, dataFormat: 'NHWC', dilations: number | [number, number]) => Conv2DInfo;
Parameter inputShape
Input tensor shape is of the following dimensions: `[batch, height, width, inChannels]`.
Parameter filterShape
The filter shape is of the following dimensions: `[filterHeight, filterWidth, depth]`.
Parameter strides
The strides of the sliding window for each dimension of the input tensor: `[strideHeight, strideWidth]`. If `strides` is a single number, then `strideHeight == strideWidth`.
Parameter pad
The type of padding algorithm. - `same` and stride 1: output will be of same size as input, regardless of filter size. - `valid`: output will be smaller than input if filter is larger than 1x1. - For more info, see this guide: [https://www.tensorflow.org/api_docs/python/tf/nn/convolution](https://www.tensorflow.org/api_docs/python/tf/nn/convolution)
Parameter dataFormat
The data format of the input and output data. Defaults to 'NHWC'.
Parameter dilations
The dilation rates: `[dilationHeight, dilationWidth]`. Defaults to `[1, 1]`. If `dilations` is a single number, then `dilationHeight == dilationWidth`.
function computeOptimalWindowSize
computeOptimalWindowSize: (inSize: number) => number;
function computeOutAndReduceShapes
computeOutAndReduceShapes: ( aShape: number[], axes: number[]) => [number[], number[]];
function computeOutShape
computeOutShape: (shapes: number[][], axis: number) => number[];
function computePool2DInfo
computePool2DInfo: ( inShape: [number, number, number, number], filterSize: [number, number] | number, strides: number | [number, number], dilations: number | [number, number], pad: 'same' | 'valid' | number | ExplicitPadding, roundingMode?: 'floor' | 'round' | 'ceil', dataFormat?: 'channelsFirst' | 'channelsLast') => Conv2DInfo;
function computePool3DInfo
computePool3DInfo: ( inShape: [number, number, number, number, number], filterSize: number | [number, number, number], strides: number | [number, number, number], dilations: number | [number, number, number], pad: 'same' | 'valid' | number, roundingMode?: 'floor' | 'round' | 'ceil', dataFormat?: 'NDHWC' | 'NCDHW') => Conv3DInfo;
Computes the information for a forward pass of a pooling3D operation.
function convertConv2DDataFormat
convertConv2DDataFormat: ( dataFormat: 'NHWC' | 'NCHW') => 'channelsLast' | 'channelsFirst';
Convert Conv2D dataFormat from 'NHWC'|'NCHW' to 'channelsLast'|'channelsFirst'
Parameter dataFormat
in 'NHWC'|'NCHW' mode dataFormat in 'channelsLast'|'channelsFirst' mode
Throws
unknown dataFormat
function decodeEinsumEquation
decodeEinsumEquation: ( equation: string, numTensors: number) => { allDims: string[]; summedDims: number[]; idDims: number[][] };
Parse an equation for einsum.
Parameter equation
The einsum equation (e.g., "ij,jk->ik").
Parameter numTensors
Number of tensors provided along with
equation
. Used to check matching number of input tensors.
Returns
An object consisting of the following fields: - allDims: all dimension names as strings. - summedDims: a list of all dimensions being summed over, as indices to the elements of
allDims
. - idDims: indices of the dimensions in each input tensor, as indices to the elements of `allDims`.
function eitherStridesOrDilationsAreOne
eitherStridesOrDilationsAreOne: ( strides: number | number[], dilations: number | number[]) => boolean;
function expandShapeToKeepDim
expandShapeToKeepDim: (shape: number[], axes: number[]) => number[];
function exponent
exponent: ( k: number, n: number, inverse: boolean) => { real: number; imag: number };
Make the exponent term used by FFT.
function exponents
exponents: ( n: number, inverse: boolean) => { real: Float32Array; imag: Float32Array };
Make the list of exponent terms used by FFT.
function fromStringArrayToUint8
fromStringArrayToUint8: (strings: string[]) => Uint8Array[];
function fromUint8ToStringArray
fromUint8ToStringArray: (vals: Uint8Array[]) => string[];
function getAxesPermutation
getAxesPermutation: (axes: number[], rank: number) => number[] | null;
Returns the axes permutation to be used with
tf.transpose
, if such permutation is necessary. Otherwise it returns null. This method is used by operations that operate only on inner-most axes.
function getBroadcastDims
getBroadcastDims: (inShape: number[], outShape: number[]) => number[];
Returns the dimensions in the input shape that are broadcasted to produce the provided output shape.
The returned dimensions are 0-indexed and sorted. An example: inShape = [4, 1, 3] outShape = [5, 4, 3, 3] result = [1]. Dimension 1 (2nd dimension of input) gets broadcasted 1 => 3.
function getComplexWithIndex
getComplexWithIndex: ( complex: Float32Array, index: number) => { real: number; imag: number };
Get the map representing a complex value in the given array.
Parameter complex
The complex tensor values.
Parameter index
An index of the target complex value.
function getEinsumComputePath
getEinsumComputePath: ( summedDims: number[], idDims: number[][]) => { path: number[]; steps: number[][] };
Gets path of computation for einsum.
Parameter summedDims
indices to the dimensions being summed over.
Parameter idDims
A look up table for the dimensions present in each input tensor. Each constituent array contains indices for the dimensions in the corresponding input tensor.
A map with two fields: - path: The path of computation, with each element indicating the dimension being summed over after the element-wise multiplication in that step. - steps: With the same length as
path
. Each element contains the indices to the input tensors being used for element-wise multiplication in the corresponding step.
function getEinsumPermutation
getEinsumPermutation: ( nDims: number, idDims: number[]) => { permutationIndices: number[]; expandDims: number[] };
Get the permutation for a given input tensor.
Parameter nDims
Total number of dimension of all tensors involved in the einsum operation.
Parameter idDims
Dimension indices involved in the tensor in question.
Returns
An object consisting of the following fields: - permutationIndices: Indices to permute the axes of the tensor with. - expandDims: Indices to the dimension that need to be expanded from the tensor after permutation.
function getFusedBiasGradient
getFusedBiasGradient: (bias: Tensor, dyActivation: Tensor) => Tensor;
function getFusedDyActivation
getFusedDyActivation: (dy: Tensor, y: Tensor, activation: Activation) => Tensor;
function getImageCenter
getImageCenter: ( center: number | [number, number], imageHeight: number, imageWidth: number) => [number, number];
Copyright 2020 Google LLC. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. =============================================================================
function getInnerMostAxes
getInnerMostAxes: (numAxes: number, rank: number) => number[];
function getPermuted
getPermuted: ( reshapedRank: number, blockShapeRank: number, batchToSpace?: boolean) => number[];
Gets the permutation that will transpose the dimensions of the reshaped tensor to shape:
[batch / prod(block_shape),inputShape[1], blockShape[0], ..., inputShape[M], blockShape[M-1],inputShape[M+1], ..., inputShape[N-1]]
see step 2: https://www.tensorflow.org/api_docs/python/tf/batch_to_space_nd
function getRaggedRank
getRaggedRank: (rowPartitionTypes: RowPartitionType[]) => number;
function getReductionAxes
getReductionAxes: (inShape: number[], outShape: number[]) => number[];
Returns the axes in the output space that should be reduced to produce the input space.
function getReshaped
getReshaped: ( inputShape: number[], blockShape: number[], prod: number, batchToSpace?: boolean) => number[];
Gets the new shape of the input Tensor after it's been reshaped to: [blockShape[0], ..., blockShape[M-1], batch / prod(blockShape), inputShape[1], ..., inputShape[N-1]]
See step 1: https://www.tensorflow.org/api_docs/python/tf/batch_to_space_nd
function getReshapedPermuted
getReshapedPermuted: ( inputShape: number[], blockShape: number[], prod: number, batchToSpace?: boolean) => number[];
Gets the shape of the reshaped and permuted input Tensor before any cropping is applied. The new shape will be:
[batch / prod(blockShape),inputShape[1] * blockShape[0], ..., inputShape[M] * blockShape[M-1],inputShape[M+1], ..., inputShape[N-1]]
See step 3: https://www.tensorflow.org/api_docs/python/tf/batch_to_space_nd
function getRowPartitionTypesHelper
getRowPartitionTypesHelper: ( rowPartitionTypeStrings: string[]) => RowPartitionType[];
function getSliceBeginCoords
getSliceBeginCoords: (crops: number[][], blockShape: number) => number[];
Converts the crops argument into the beginning coordinates of a slice operation.
function getSliceSize
getSliceSize: ( uncroppedShape: number[], crops: number[][], blockShape: number) => number[];
Converts the crops argument into the size of a slice operation. When combined with getSliceBeginCoords this function allows the reshaped and permuted Tensor to be cropped to its final output shape of:
[inputShape[1] * blockShape[0] - crops[0,0] - crops[0,1], ..., inputShape[M] * blockShape[M-1] - crops[M-1,0] - crops[M-1,1], inputShape[M+1], ..., inputShape[N-1]]
See step 4: https://www.tensorflow.org/api_docs/python/tf/batch_to_space_nd
function getSparseFillEmptyRowsIndicesDenseShapeMismatch
getSparseFillEmptyRowsIndicesDenseShapeMismatch: ( indicesLength: number) => string;
Generates sparse fill empty rows indices, dense shape mismatch error message.
Parameter indicesLength
The first dimension of indices.
function getSparseFillEmptyRowsNegativeIndexErrorMessage
getSparseFillEmptyRowsNegativeIndexErrorMessage: ( index: number, value: number) => string;
Generates sparse fill empty rows negative index error message.
Parameter index
The index with a negative value.
Parameter value
The negative value.
function getSparseFillEmptyRowsOutOfRangeIndexErrorMessage
getSparseFillEmptyRowsOutOfRangeIndexErrorMessage: ( index: number, value: number, limit: number) => string;
Generates sparse fill empty rows out of range index error message.
Parameter index
The index with an out of range value.
Parameter value
The out of range value.
Parameter limit
The upper limit for indices.
function getSparseReshapeEmptyTensorZeroOutputDimErrorMessage
getSparseReshapeEmptyTensorZeroOutputDimErrorMessage: () => string;
Generates sparse reshape empty tensor zero output dimension error message.
function getSparseReshapeInputOutputMismatchErrorMessage
getSparseReshapeInputOutputMismatchErrorMessage: ( inputShape: number[], outputShape: number[]) => string;
Generates sparse reshape input output inequality error message.
Parameter inputShape
the input shape.
Parameter outputShape
the requested output shape.
function getSparseReshapeInputOutputMultipleErrorMessage
getSparseReshapeInputOutputMultipleErrorMessage: ( inputShape: number[], outputShape: number[]) => string;
Generates sparse reshape input output multiple mismatch error message.
Parameter inputShape
the input shape.
Parameter outputShape
the requested output shape.
function getSparseReshapeMultipleNegativeOneOutputDimErrorMessage
getSparseReshapeMultipleNegativeOneOutputDimErrorMessage: ( dim1: number, dim2: number) => string;
Generates sparse reshape multiple negative 1 output dimension error message.
Parameter dim1
The first dimension with a negative 1 value.
Parameter dim2
The second dimension with a negative 1 value.
function getSparseReshapeNegativeOutputDimErrorMessage
getSparseReshapeNegativeOutputDimErrorMessage: ( dim: number, value: number) => string;
Generates sparse reshape negative output dimension error message.
Parameter dim
The dimension with a negative value.
Parameter value
The negative value.
function getSparseSegmentReductionIndicesOutOfRangeErrorMessage
getSparseSegmentReductionIndicesOutOfRangeErrorMessage: ( index: number, indexValue: number, inputRows: number) => string;
Generates sparse segment reduction input index out of range error message.
Parameter index
The index that holds the out of range value.
Parameter indexValue
The value that is out of range.
Parameter inputRows
Upper bound of valid index values.
function getSparseSegmentReductionNegativeSegmentIdsErrorMessage
getSparseSegmentReductionNegativeSegmentIdsErrorMessage: () => string;
Generates sparse segment reduction negative segment ids error message.
function getSparseSegmentReductionNonIncreasingSegmentIdsErrorMessage
getSparseSegmentReductionNonIncreasingSegmentIdsErrorMessage: () => string;
Generates sparse segment reduction non increasing segment ids error message.
function getSparseSegmentReductionSegmentIdOutOfRangeErrorMessage
getSparseSegmentReductionSegmentIdOutOfRangeErrorMessage: ( segmentId: number, outputRows: number) => string;
Generates sparse segment reduction segment id out of range error message.
Parameter segmentId
The segment id index that is out of range.
Parameter outputRows
Upper bound of valid segment id values.
function getUndoAxesPermutation
getUndoAxesPermutation: (axes: number[]) => number[];
Returns the axes permutation that undoes the original permutation.
function isIdentityPermutation
isIdentityPermutation: (perm: number[]) => boolean;
Determines if an axes permutation is the identity permutation.
function log
log: (...msg: Array<{}>) => void;
function mergeRealAndImagArrays
mergeRealAndImagArrays: (real: Float32Array, imag: Float32Array) => Float32Array;
Merges real and imaginary Float32Arrays into a single complex Float32Array.
The memory layout is interleaved as follows: real: [r0, r1, r2] imag: [i0, i1, i2] complex: [r0, i0, r1, i1, r2, i2]
This is the inverse of splitRealAndImagArrays.
Parameter real
The real values of the complex tensor values.
Parameter imag
The imag values of the complex tensor values.
Returns
A complex tensor as a Float32Array with merged values.
function prepareAndValidate
prepareAndValidate: ( tensor: TensorInfo, indices: TensorInfo) => [number[], number, number, number[]];
Validate gather nd inputs.
Parameter tensor
The tensor contains the source values.
Parameter indices
The tensor contains the indices to slice the source.
Returns
[resultShape, numUpdates, sliceSize, strides]
function prepareSplitSize
prepareSplitSize: ( x: Tensor | TensorInfo, numOrSizeSplits: number[] | number, axis?: number) => number[];
Prepare the split size array. When the input is a number, the axis is evenly divided among the split size. When the input contains a negative value, the rest of the axis is allocated toward that split.
function shouldFuse
shouldFuse: (gradientDepth: number, activation: Activation) => boolean;
function splitRealAndImagArrays
splitRealAndImagArrays: (complex: Float32Array) => { real: Float32Array; imag: Float32Array;};
Splits a complex Float32Array into real and imag parts.
The memory layout is interleaved as follows: complex: [r0, i0, r1, i1, r2, i2] real: [r0, r1, r2] imag: [i0, i1, i2]
This is the inverse of mergeRealAndImagArrays.
Parameter complex
The complex tensor values.
Returns
An object with real and imag Float32Array components of the complex tensor.
function stridesOrDilationsArePositive
stridesOrDilationsArePositive: (values: number | number[]) => boolean;
function tupleValuesAreOne
tupleValuesAreOne: (param: number | number[]) => boolean;
function upcastType
upcastType: (typeA: DataType, typeB: DataType) => DataType;
function validateDefaultValueShape
validateDefaultValueShape: ( defaultValueShape: number[], valueShape: number[]) => void;
function validateInput
validateInput: (updates: Tensor, indices: Tensor, shape: number[]) => void;
Validate scatter nd inputs.
Parameter updates
The tensor contains the update values.
Parameter indices
The tensor contains the indices for the update values.
Parameter shape
The shape of the output tensor.
function validateUpdateShape
validateUpdateShape: (shape: number[], indices: Tensor, updates: Tensor) => void;
Check whether updates.shape = indices.shape[:batchDim] + shape[sliceDim:]
Parameter x
The input tensor.
function warn
warn: (...msg: Array<{}>) => void;
Copyright 2018 Google LLC. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. =============================================================================
interface PixelData
interface PixelData {}
Type for representing image data in Uint8Array type.
interface ReduceInfo
interface ReduceInfo {}
property batchSize
batchSize: number;
property inSize
inSize: number;
property outSize
outSize: number;
property windowSize
windowSize: number;
interface ScatterShapeInfo
interface ScatterShapeInfo {}
property numUpdates
numUpdates: number;
property outputSize
outputSize: number;
property sliceRank
sliceRank: number;
property sliceSize
sliceSize: number;
property strides
strides: number[];
interface TimingInfo
interface TimingInfo extends BackendTimingInfo {}
property wallMs
wallMs: number;
enum RowPartitionType
enum RowPartitionType { FIRST_DIM_SIZE = 0, VALUE_ROWIDS = 1, ROW_LENGTHS = 2, ROW_SPLITS = 3, ROW_LIMITS = 4, ROW_STARTS = 5,}
Copyright 2022 Google LLC. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. =============================================================================
member FIRST_DIM_SIZE
FIRST_DIM_SIZE = 0
member ROW_LENGTHS
ROW_LENGTHS = 2
member ROW_LIMITS
ROW_LIMITS = 4
member ROW_SPLITS
ROW_SPLITS = 3
member ROW_STARTS
ROW_STARTS = 5
member VALUE_ROWIDS
VALUE_ROWIDS = 1
type Activation
type Activation = | 'linear' | 'relu' | 'prelu' | 'elu' | 'relu6' | 'leakyrelu' | 'sigmoid';
type BackendValues
type BackendValues = Float32Array | Int32Array | Uint8Array | Uint8Array[];
The underlying tensor data that gets stored in a backend.
type Conv2DInfo
type Conv2DInfo = { batchSize: number; inHeight: number; inWidth: number; inChannels: number; outHeight: number; outWidth: number; outChannels: number; dataFormat: 'channelsFirst' | 'channelsLast'; strideHeight: number; strideWidth: number; dilationHeight: number; dilationWidth: number; filterHeight: number; filterWidth: number; effectiveFilterHeight: number; effectiveFilterWidth: number; padInfo: PadInfo; inShape: [number, number, number, number]; outShape: [number, number, number, number]; filterShape: [number, number, number, number];};
Information about the forward pass of a convolution/pooling operation. It includes input and output shape, strides, filter size and padding information.
type Conv3DInfo
type Conv3DInfo = { batchSize: number; inDepth: number; inHeight: number; inWidth: number; inChannels: number; outDepth: number; outHeight: number; outWidth: number; outChannels: number; dataFormat: 'channelsFirst' | 'channelsLast'; strideDepth: number; strideHeight: number; strideWidth: number; dilationDepth: number; dilationHeight: number; dilationWidth: number; filterDepth: number; filterHeight: number; filterWidth: number; effectiveFilterDepth: number; effectiveFilterHeight: number; effectiveFilterWidth: number; padInfo: PadInfo3D; inShape: [number, number, number, number, number]; outShape: [number, number, number, number, number]; filterShape: [number, number, number, number, number];};
Information about the forward pass of a 3D convolution/pooling operation. It includes input and output shape, strides, filter size and padding information.
type ExplicitPadding
type ExplicitPadding = [ [number, number], [number, number], [number, number], [number, number]];
type FusedBatchMatMulConfig
type FusedBatchMatMulConfig = { a: Tensor3D; b: Tensor3D; transposeA: boolean; transposeB: boolean; bias?: Tensor; activation?: Activation; preluActivationWeights?: Tensor; leakyreluAlpha?: number;};
type FusedConv2DConfig
type FusedConv2DConfig = { input: Tensor4D; filter: Tensor4D; convInfo: Conv2DInfo; bias?: Tensor; activation?: Activation; preluActivationWeights?: Tensor; leakyreluAlpha?: number;};
type MemoryInfo
type MemoryInfo = { numTensors: number; numDataBuffers: number; numBytes: number; unreliable?: boolean; reasons: string[];};
type PadInfo
type PadInfo = { top: number; left: number; right: number; bottom: number; type: PadType;};
type PadInfo3D
type PadInfo3D = { top: number; left: number; right: number; bottom: number; front: number; back: number; type: PadType;};
type TypedArray
type TypedArray = Float32Array | Int32Array | Uint8Array;
namespace segment_util
module 'dist/ops/segment_util.d.ts' {}
Copyright 2018 Google LLC. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. =============================================================================
function collectGatherOpShapeInfo
collectGatherOpShapeInfo: ( x: TensorInfo, indices: TensorInfo, axis: number, batchDims: number) => GatherOpShapeInfo;
function computeOutShape
computeOutShape: ( aShape: number[], axis: number, numSegments: number) => number[];
function segOpComputeOptimalWindowSize
segOpComputeOptimalWindowSize: (inSize: number, numSegments: number) => number;
interface GatherOpShapeInfo
interface GatherOpShapeInfo {}
property batchSize
batchSize: number;
property dimSize
dimSize: number;
property outerSize
outerSize: number;
property outputShape
outputShape: number[];
property sliceSize
sliceSize: number;
interface SegOpInfo
interface SegOpInfo {}
property batchSize
batchSize: number;
property inSize
inSize: number;
property numSegments
numSegments: number;
property windowSize
windowSize: number;
namespace slice_util
module 'dist/ops/slice_util.d.ts' {}
Copyright 2021 Google LLC. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. =============================================================================
function assertParamsValid
assertParamsValid: (input: TensorInfo, begin: number[], size: number[]) => void;
function computeFlatOffset
computeFlatOffset: (begin: number[], strides: number[]) => number;
function computeOutShape
computeOutShape: (begin: number[], end: number[], strides: number[]) => number[];
Computes the output shape given the strided slice params.
function getNormalizedAxes
getNormalizedAxes: ( inputShape: number[], ellipsisAxes: number[], numInterpolatedAxes: number, begin: number[], end: number[], strides: number[], beginMask: number, endMask: number, ellipsisMask: number) => { begin: number[]; end: number[]; strides: number[] };
function isSliceContinous
isSliceContinous: (shape: number[], begin: number[], size: number[]) => boolean;
Returns true if the slice occupies a continuous set of elements in the 'flat' space.
function maskToAxes
maskToAxes: (mask: number) => number[];
Converts a binary mask to an array of axes. Used in stridedSlice().
function parseSliceParams
parseSliceParams: ( x: TensorInfo, begin: number | number[], size?: number | number[]) => number[][];
function sliceInfo
sliceInfo: ( xShape: number[], begin: number[], end: number[], strides: number[], beginMask: number, endMask: number, ellipsisMask: number, newAxisMask: number, shrinkAxisMask: number) => SliceInfo;
function startForAxis
startForAxis: ( beginMask: number, startIndices: number[], strides: number[], inputShape: number[], axis: number, ellipsisMask: number) => number;
function startIndicesWithElidedDims
startIndicesWithElidedDims: ( beginMask: number, ellipsisInsertionIndex: number, numElidedAxes: number, originalBegin: number[], inputShape: number[]) => number[];
function stopForAxis
stopForAxis: ( endMask: number, stopIndices: number[], strides: number[], inputShape: number[], axis: number, ellipsisMask: number) => number;
function stopIndicesWithElidedDims
stopIndicesWithElidedDims: ( endMask: number, ellipsisInsertionIndex: number, numElidedAxes: number, originalEnd: number[], inputShape: number[]) => number[];
function stridesForAxis
stridesForAxis: ( strides: number[], axis: number, ellipsisMask: number) => number;
function stridesWithElidedDims
stridesWithElidedDims: ( strides: number[], ellipsisInsertionIndex: number, numElidedAxes: number, inputShape: number[]) => number[];
type SliceInfo
type SliceInfo = { finalShapeSparse: number[]; finalShape: number[]; isIdentity: boolean; sliceDim0: boolean; isSimpleSlice: boolean; begin: number[]; end: number[]; strides: number[];};
namespace broadcast_util
module 'dist/ops/broadcast_util.d.ts' {}
Copyright 2017 Google LLC. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. =============================================================================
function assertAndGetBroadcastShape
assertAndGetBroadcastShape: (shapeA: number[], shapeB: number[]) => number[];
function getBroadcastDims
getBroadcastDims: (inShape: number[], outShape: number[]) => number[];
Returns the dimensions in the input shape that are broadcasted to produce the provided output shape.
The returned dimensions are 0-indexed and sorted. An example: inShape = [4, 1, 3] outShape = [5, 4, 3, 3] result = [1]. Dimension 1 (2nd dimension of input) gets broadcasted 1 => 3.
function getReductionAxes
getReductionAxes: (inShape: number[], outShape: number[]) => number[];
Returns the axes in the output space that should be reduced to produce the input space.
namespace browser
module 'dist/ops/browser.d.ts' {}
Copyright 2019 Google LLC. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. =============================================================================
variable fromPixels
const fromPixels: ( pixels: | PixelData | ImageData | HTMLImageElement | HTMLCanvasElement | HTMLVideoElement | ImageBitmap, numChannels?: number) => Tensor3D;
function draw
draw: ( image: Tensor2D | Tensor3D | TensorLike, canvas: HTMLCanvasElement, options?: DrawOptions) => void;
Draws a
tf.Tensor
to a canvas. When the dtype of the input is 'float32', we assume values in the range [0-1]. Otherwise, when input is 'int32', we assume values in the range [0-255].
Parameter image
The tensor to draw on the canvas. Must match one of these shapes: - Rank-2 with shape
[height, width
]: Drawn as grayscale. - Rank-3 with shape[height, width, 1]
: Drawn as grayscale. - Rank-3 with shape[height, width, 3]
: Drawn as RGB with alpha set inimageOptions
(defaults to 1, which is opaque). - Rank-3 with shape[height, width, 4]
: Drawn as RGBA.Parameter canvas
The canvas to draw to.
Parameter options
The configuration arguments for image to be drawn and the canvas to draw to.
{heading: 'Browser', namespace: 'browser'}
function fromPixels_
fromPixels_: ( pixels: | PixelData | ImageData | HTMLImageElement | HTMLCanvasElement | HTMLVideoElement | ImageBitmap, numChannels?: number) => Tensor3D;
Creates a
tf.Tensor
from an image.const image = new ImageData(1, 1);image.data[0] = 100;image.data[1] = 150;image.data[2] = 200;image.data[3] = 255;tf.browser.fromPixels(image).print();Parameter pixels
The input image to construct the tensor from. The supported image types are all 4-channel. You can also pass in an image object with following attributes:
{data: Uint8Array; width: number; height: number}
Parameter numChannels
The number of channels of the output tensor. A numChannels value less than 4 allows you to ignore channels. Defaults to 3 (ignores alpha channel of input image).
Returns
A Tensor3D with the shape
[height, width, numChannels]
.Note: fromPixels can be lossy in some cases, same image may result in slightly different tensor values, if rendered by different rendering engines. This means that results from different browsers, or even same browser with CPU and GPU rendering engines can be different. See discussion in details: https://github.com/tensorflow/tfjs/issues/5482
{heading: 'Browser', namespace: 'browser', ignoreCI: true}
function fromPixelsAsync
fromPixelsAsync: ( pixels: | PixelData | ImageData | HTMLImageElement | HTMLCanvasElement | HTMLVideoElement | ImageBitmap, numChannels?: number) => Promise<Tensor3D>;
Creates a
tf.Tensor
from an image in async way.const image = new ImageData(1, 1);image.data[0] = 100;image.data[1] = 150;image.data[2] = 200;image.data[3] = 255;(await tf.browser.fromPixelsAsync(image)).print();This API is the async version of fromPixels. The API will first check |WRAP_TO_IMAGEBITMAP| flag, and try to wrap the input to imageBitmap if the flag is set to true.
Parameter pixels
The input image to construct the tensor from. The supported image types are all 4-channel. You can also pass in an image object with following attributes:
{data: Uint8Array; width: number; height: number}
Parameter numChannels
The number of channels of the output tensor. A numChannels value less than 4 allows you to ignore channels. Defaults to 3 (ignores alpha channel of input image).
{heading: 'Browser', namespace: 'browser', ignoreCI: true}
function toPixels
toPixels: ( img: Tensor2D | Tensor3D | TensorLike, canvas?: HTMLCanvasElement) => Promise<Uint8ClampedArray>;
Draws a
tf.Tensor
of pixel values to a byte array or optionally a canvas. When the dtype of the input is 'float32', we assume values in the range [0-1]. Otherwise, when input is 'int32', we assume values in the range [0-255].
Returns a promise that resolves when the canvas has been drawn to.
Parameter img
A rank-2 tensor with shape
[height, width]
, or a rank-3 tensor of shape[height, width, numChannels]
. If rank-2, draws grayscale. If rank-3, must have depth of 1, 3 or 4. When depth of 1, draws grayscale. When depth of 3, we draw with the first three components of the depth dimension corresponding to r, g, b and alpha = 1. When depth of 4, all four components of the depth dimension correspond to r, g, b, a.Parameter canvas
The canvas to draw to.
{heading: 'Browser', namespace: 'browser'}
namespace device_util
module 'dist/device_util.d.ts' {}
Copyright 2017 Google LLC. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. =============================================================================
function isBrowser
isBrowser: () => boolean;
function isMobile
isMobile: (nav?: Navigator) => boolean;
function mockIsMobile
mockIsMobile: (value: boolean | undefined) => void;
Copyright 2017 Google LLC. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. =============================================================================
namespace fused
module 'dist/ops/fused_ops.d.ts' {}
Copyright 2019 Google LLC. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. =============================================================================
variable conv2d
const conv2d: <T extends Tensor3D | Tensor4D>({ x, filter, strides, pad, dataFormat, dilations, dimRoundingMode, bias, activation, preluActivationWeights, leakyreluAlpha,}: { x: TensorLike | T; filter: TensorLike | Tensor4D; strides: number | [number, number]; pad: number | 'valid' | 'same' | conv_util.ExplicitPadding; dataFormat?: 'NHWC' | 'NCHW'; dilations?: number | [number, number]; dimRoundingMode?: 'floor' | 'round' | 'ceil'; bias?: Tensor<Rank> | TensorLike; activation?: Activation; preluActivationWeights?: Tensor<Rank>; leakyreluAlpha?: number;}) => T;
variable depthwiseConv2d
const depthwiseConv2d: <T extends Tensor3D | Tensor4D>({ x, filter, strides, pad, dataFormat, dilations, dimRoundingMode, bias, activation, preluActivationWeights, leakyreluAlpha,}: { x: TensorLike | T; filter: TensorLike | Tensor4D; strides: number | [number, number]; pad: number | 'valid' | 'same'; dataFormat?: 'NHWC' | 'NCHW'; dilations?: number | [number, number]; dimRoundingMode?: 'floor' | 'round' | 'ceil'; bias?: Tensor<Rank> | TensorLike; activation?: Activation; preluActivationWeights?: Tensor<Rank>; leakyreluAlpha?: number;}) => T;
variable matMul
const matMul: ({ a, b, transposeA, transposeB, bias, activation, preluActivationWeights, leakyreluAlpha,}: { a: Tensor<Rank> | TensorLike; b: Tensor<Rank> | TensorLike; transposeA?: boolean; transposeB?: boolean; bias?: Tensor<Rank> | TensorLike; activation?: Activation; preluActivationWeights?: Tensor<Rank>; leakyreluAlpha?: number;}) => Tensor<Rank>;
type Activation
type Activation = | 'linear' | 'relu' | 'prelu' | 'elu' | 'relu6' | 'leakyrelu' | 'sigmoid';
namespace gather_util
module 'dist/ops/gather_nd_util.d.ts' {}
Copyright 2018 Google LLC. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. =============================================================================
function prepareAndValidate
prepareAndValidate: ( tensor: TensorInfo, indices: TensorInfo) => [number[], number, number, number[]];
Validate gather nd inputs.
Parameter tensor
The tensor contains the source values.
Parameter indices
The tensor contains the indices to slice the source.
Returns
[resultShape, numUpdates, sliceSize, strides]
namespace io
module 'dist/io/io.d.ts' {}
Copyright 2018 Google LLC. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. =============================================================================
function browserFiles
browserFiles: (files: File[]) => IOHandler;
Creates an IOHandler that loads model artifacts from user-selected files.
This method can be used for loading from files such as user-selected files in the browser. When used in conjunction with
tf.loadLayersModel
, an instance oftf.LayersModel
(Keras-style) can be constructed from the loaded artifacts.// Note: This code snippet won't run properly without the actual file input// elements in the HTML DOM.// Suppose there are two HTML file input (`<input type="file" ...>`)// elements.const uploadJSONInput = document.getElementById('upload-json');const uploadWeightsInput = document.getElementById('upload-weights');const model = await tf.loadLayersModel(tf.io.browserFiles([uploadJSONInput.files[0], uploadWeightsInput.files[0]]));Parameter files
File
s to load from. Currently, this function supports only loading from files that contain Keras-style models (i.e.,tf.Model
s), for which anArray
ofFile
s is expected (in that order): - A JSON file containing the model topology and weight manifest. - Optionally, one or more binary files containing the binary weights. These files must have names that match the paths in theweightsManifest
contained by the aforementioned JSON file, or errors will be thrown during loading. These weights files have the same format as the ones generated bytensorflowjs_converter
that comes with thetensorflowjs
Python PIP package. If no weights files are provided, only the model topology will be loaded from the JSON file above.Returns
An instance of
Files
IOHandler
.{ heading: 'Models', subheading: 'Loading', namespace: 'io', ignoreCI: true }
function browserHTTPRequest
browserHTTPRequest: (path: string, loadOptions?: LoadOptions) => IOHandler;
Deprecated. Use
tf.io.http
.Parameter path
Parameter loadOptions
function concatenateArrayBuffers
concatenateArrayBuffers: (buffers: ArrayBuffer[] | ArrayBuffer) => ArrayBuffer;
Concatenate a number of ArrayBuffers into one.
Parameter buffers
An array of ArrayBuffers to concatenate, or a single ArrayBuffer.
Returns
Result of concatenating
buffers
in order.Deprecated
Use tf.io.CompositeArrayBuffer.join() instead.
function copyModel
copyModel: (sourceURL: string, destURL: string) => Promise<ModelArtifactsInfo>;
Copy a model from one URL to another.
This function supports:
1. Copying within a storage medium, e.g.,
tf.io.copyModel('localstorage://model-1', 'localstorage://model-2')
2. Copying between two storage mediums, e.g.,tf.io.copyModel('localstorage://model-1', 'indexeddb://model-1')
// First create and save a model.const model = tf.sequential();model.add(tf.layers.dense({units: 1, inputShape: [10], activation: 'sigmoid'}));await model.save('localstorage://demo/management/model1');// Then list existing models.console.log(JSON.stringify(await tf.io.listModels()));// Copy the model, from Local Storage to IndexedDB.await tf.io.copyModel('localstorage://demo/management/model1','indexeddb://demo/management/model1');// List models again.console.log(JSON.stringify(await tf.io.listModels()));// Remove both models.await tf.io.removeModel('localstorage://demo/management/model1');await tf.io.removeModel('indexeddb://demo/management/model1');Parameter sourceURL
Source URL of copying.
Parameter destURL
Destination URL of copying.
Returns
ModelArtifactsInfo of the copied model (if and only if copying is successful).
Throws
Error if copying fails, e.g., if no model exists at
sourceURL
, or ifsourceURL
anddestURL
are identical.{ heading: 'Models', subheading: 'Management', namespace: 'io', ignoreCI: true }
function decodeWeights
decodeWeights: ( weightData: WeightData, specs: WeightsManifestEntry[]) => NamedTensorMap;
Decode flat ArrayBuffer as weights.
This function does not handle sharding.
This function is the reverse of
encodeWeights
.Parameter weightData
A flat ArrayBuffer or an array of ArrayBuffers carrying the binary values of the tensors concatenated in the order specified in
specs
.Parameter specs
Specifications of the names, dtypes and shapes of the tensors whose values are encoded by
weightData
.Returns
A map from tensor name to tensor value, with the names corresponding to names inspecs
.Throws
Error, if any of the tensors has unsupported dtype.
function decodeWeightsStream
decodeWeightsStream: ( weightStream: ReadableStream<ArrayBuffer>, specs: WeightsManifestEntry[]) => Promise<NamedTensorMap>;
function encodeWeights
encodeWeights: ( tensors: NamedTensorMap | NamedTensor[], group?: WeightGroup) => Promise<{ data: ArrayBuffer; specs: WeightsManifestEntry[] }>;
Encode a map from names to weight values as an ArrayBuffer, along with an
Array
ofWeightsManifestEntry
as specification of the encoded weights.This function does not perform sharding.
This function is the reverse of
decodeWeights
.Parameter tensors
A map ("dict") from names to tensors.
Parameter group
Group to which the weights belong (optional).
Returns
A
Promise
of - A flatArrayBuffer
with all the binary values of theTensor
s concatenated. - AnArray
ofWeightsManifestEntry
s, carrying information including tensor names,dtype
s and shapes.Throws
Error: on unsupported tensor
dtype
.
function fromMemory
fromMemory: ( modelArtifacts: {} | ModelArtifacts, weightSpecs?: WeightsManifestEntry[], weightData?: WeightData, trainingConfig?: TrainingConfig) => IOHandler;
Creates an IOHandler that loads model artifacts from memory.
When used in conjunction with
tf.loadLayersModel
, an instance oftf.LayersModel
(Keras-style) can be constructed from the loaded artifacts.const model = await tf.loadLayersModel(tf.io.fromMemory(modelTopology, weightSpecs, weightData));Parameter modelArtifacts
an object containing model topology (i.e., parsed from the JSON format).
Parameter weightSpecs
An array of
WeightsManifestEntry
objects describing the names, shapes, types, and quantization of the weight data. Optional.Parameter weightData
A single
ArrayBuffer
containing the weight data, concatenated in the order described by the weightSpecs. Optional.Parameter trainingConfig
Model training configuration. Optional.
Returns
A passthrough
IOHandler
that simply loads the provided data.
function fromMemorySync
fromMemorySync: ( modelArtifacts: {} | ModelArtifacts, weightSpecs?: WeightsManifestEntry[], weightData?: WeightData, trainingConfig?: TrainingConfig) => IOHandlerSync;
Creates an IOHandler that loads model artifacts from memory.
When used in conjunction with
tf.loadLayersModel
, an instance oftf.LayersModel
(Keras-style) can be constructed from the loaded artifacts.const model = await tf.loadLayersModel(tf.io.fromMemory(modelTopology, weightSpecs, weightData));Parameter modelArtifacts
an object containing model topology (i.e., parsed from the JSON format).
Parameter weightSpecs
An array of
WeightsManifestEntry
objects describing the names, shapes, types, and quantization of the weight data. Optional.Parameter weightData
A single
ArrayBuffer
containing the weight data, concatenated in the order described by the weightSpecs. Optional.Parameter trainingConfig
Model training configuration. Optional.
Returns
A passthrough
IOHandlerSync
that simply loads the provided data.
function getLoadHandlers
getLoadHandlers: ( url: string | string[], loadOptions?: LoadOptions) => IOHandler[];
function getModelArtifactsForJSON
getModelArtifactsForJSON: ( modelJSON: ModelJSON, loadWeights: ( weightsManifest: WeightsManifestConfig ) => Promise<[WeightsManifestEntry[], WeightData]>) => Promise<ModelArtifacts>;
Create
ModelArtifacts
from a JSON file.Parameter modelJSON
Object containing the parsed JSON of
model.json
Parameter loadWeights
Function that takes the JSON file's weights manifest, reads weights from the listed path(s), and returns a Promise of the weight manifest entries along with the weights data.
Returns
A Promise of the
ModelArtifacts
, as described by the JSON file.
function getModelArtifactsForJSONSync
getModelArtifactsForJSONSync: ( modelJSON: ModelJSON, weightSpecs?: WeightsManifestEntry[], weightData?: WeightData) => ModelArtifacts;
Create
ModelArtifacts
from a JSON file and weights.Parameter modelJSON
Object containing the parsed JSON of
model.json
Parameter weightSpecs
The list of WeightsManifestEntry for the model. Must be passed if the modelJSON has a weightsManifest.
Parameter weightData
An ArrayBuffer or array of ArrayBuffers of weight data for the model corresponding to the weights in weightSpecs. Must be passed if the modelJSON has a weightsManifest.
Returns
The
ModelArtifacts
, as described by the JSON file.
function getModelArtifactsInfoForJSON
getModelArtifactsInfoForJSON: ( modelArtifacts: ModelArtifacts) => ModelArtifactsInfo;
Populate ModelArtifactsInfo fields for a model with JSON topology.
Parameter modelArtifacts
Returns
A ModelArtifactsInfo object.
function getSaveHandlers
getSaveHandlers: (url: string | string[]) => IOHandler[];
function getWeightSpecs
getWeightSpecs: ( weightsManifest: WeightsManifestConfig) => WeightsManifestEntry[];
Concatenate the weights stored in a WeightsManifestConfig into a list of WeightsManifestEntry
Parameter weightsManifest
The WeightsManifestConfig to extract weights from.
Returns
A list of WeightsManifestEntry of the weights in the weightsManifest
function http
http: (path: string, loadOptions?: LoadOptions) => IOHandler;
Creates an IOHandler subtype that sends model artifacts to HTTP server.
An HTTP request of the
multipart/form-data
mime type will be sent to thepath
URL. The form data includes artifacts that represent the topology and/or weights of the model. In the case of Keras-styletf.Model
, two blobs (files) exist in form-data: - A JSON file consisting ofmodelTopology
andweightsManifest
. - A binary weights file consisting of the concatenated weight values. These files are in the same format as the one generated by [tfjs_converter](https://js.tensorflow.org/tutorials/import-keras.html).The following code snippet exemplifies the client-side code that uses this function:
const model = tf.sequential();model.add(tf.layers.dense({units: 1, inputShape: [100], activation: 'sigmoid'}));const saveResult = await model.save(tf.io.http('http://model-server:5000/upload', {requestInit: {method: 'PUT'}}));console.log(saveResult);If the default
POST
method is to be used, without any custom parameters such as headers, you can simply pass an HTTP or HTTPS URL tomodel.save
:const saveResult = await model.save('http://model-server:5000/upload');The following GitHub Gist https://gist.github.com/dsmilkov/1b6046fd6132d7408d5257b0976f7864 implements a server based on [flask](https://github.com/pallets/flask) that can receive the request. Upon receiving the model artifacts via the request, this particular server reconstitutes instances of [Keras Models](https://keras.io/models/model/) in memory.
Parameter path
A URL path to the model. Can be an absolute HTTP path (e.g., 'http://localhost:8000/model-upload') or a relative path (e.g., './model-upload').
Parameter requestInit
Request configurations to be used when sending HTTP request to server using
fetch
. It can contain fields such asmethod
,credentials
,headers
,mode
, etc. See https://developer.mozilla.org/en-US/docs/Web/API/Request/Request for more information.requestInit
must not have a body, because the body will be set by TensorFlow.js. File blobs representing the model topology (filename: 'model.json') and the weights of the model (filename: 'model.weights.bin') will be appended to the body. IfrequestInit
has abody
, an Error will be thrown.Parameter loadOptions
Optional configuration for the loading. It includes the following fields: - weightPathPrefix Optional, this specifies the path prefix for weight files, by default this is calculated from the path param. - fetchFunc Optional, custom
fetch
function. E.g., in Node.js, thefetch
from node-fetch can be used here. - onProgress Optional, progress callback function, fired periodically before the load is completed.Returns
An instance of
IOHandler
.{ heading: 'Models', subheading: 'Loading', namespace: 'io', ignoreCI: true }
function isHTTPScheme
isHTTPScheme: (url: string) => boolean;
function listModels
listModels: () => Promise<{ [url: string]: ModelArtifactsInfo }>;
List all models stored in registered storage mediums.
For a web browser environment, the registered mediums are Local Storage and IndexedDB.
// First create and save a model.const model = tf.sequential();model.add(tf.layers.dense({units: 1, inputShape: [10], activation: 'sigmoid'}));await model.save('localstorage://demo/management/model1');// Then list existing models.console.log(JSON.stringify(await tf.io.listModels()));// Delete the model.await tf.io.removeModel('localstorage://demo/management/model1');// List models again.console.log(JSON.stringify(await tf.io.listModels()));Returns
A
Promise
of a dictionary mapping URLs of existing models to their model artifacts info. URLs include medium-specific schemes, e.g., 'indexeddb://my/model/1'. Model artifacts info include type of the model's topology, byte sizes of the topology, weights, etc.{ heading: 'Models', subheading: 'Management', namespace: 'io', ignoreCI: true }
function loadWeights
loadWeights: ( manifest: WeightsManifestConfig, filePathPrefix?: string, weightNames?: string[], requestInit?: RequestInit) => Promise<NamedTensorMap>;
Reads a weights manifest JSON configuration, fetches the weights and returns them as
Tensor
s.Parameter manifest
The weights manifest JSON.
Parameter filePathPrefix
The path prefix for filenames given in the manifest. Defaults to the empty string.
Parameter weightNames
The names of the weights to be fetched.
function moveModel
moveModel: (sourceURL: string, destURL: string) => Promise<ModelArtifactsInfo>;
Move a model from one URL to another.
This function supports:
1. Moving within a storage medium, e.g.,
tf.io.moveModel('localstorage://model-1', 'localstorage://model-2')
2. Moving between two storage mediums, e.g.,tf.io.moveModel('localstorage://model-1', 'indexeddb://model-1')
// First create and save a model.const model = tf.sequential();model.add(tf.layers.dense({units: 1, inputShape: [10], activation: 'sigmoid'}));await model.save('localstorage://demo/management/model1');// Then list existing models.console.log(JSON.stringify(await tf.io.listModels()));// Move the model, from Local Storage to IndexedDB.await tf.io.moveModel('localstorage://demo/management/model1','indexeddb://demo/management/model1');// List models again.console.log(JSON.stringify(await tf.io.listModels()));// Remove the moved model.await tf.io.removeModel('indexeddb://demo/management/model1');Parameter sourceURL
Source URL of moving.
Parameter destURL
Destination URL of moving.
Returns
ModelArtifactsInfo of the moved model (if and only if moving is successful).
Throws
Error if moving fails, e.g., if no model exists at
sourceURL
, or ifsourceURL
anddestURL
are identical.{ heading: 'Models', subheading: 'Management', namespace: 'io', ignoreCI: true }
function registerLoadRouter
registerLoadRouter: (loudRouter: IORouter) => void;
function registerSaveRouter
registerSaveRouter: (loudRouter: IORouter) => void;
function removeModel
removeModel: (url: string) => Promise<ModelArtifactsInfo>;
Remove a model specified by URL from a registered storage medium.
// First create and save a model.const model = tf.sequential();model.add(tf.layers.dense({units: 1, inputShape: [10], activation: 'sigmoid'}));await model.save('localstorage://demo/management/model1');// Then list existing models.console.log(JSON.stringify(await tf.io.listModels()));// Delete the model.await tf.io.removeModel('localstorage://demo/management/model1');// List models again.console.log(JSON.stringify(await tf.io.listModels()));Parameter url
A URL to a stored model, with a scheme prefix, e.g., 'localstorage://my-model-1', 'indexeddb://my/model/2'.
Returns
ModelArtifactsInfo of the deleted model (if and only if deletion is successful).
Throws
Error if deletion fails, e.g., if no model exists at
path
.{ heading: 'Models', subheading: 'Management', namespace: 'io', ignoreCI: true }
function weightsLoaderFactory
weightsLoaderFactory: ( fetchWeightsFunction: (fetchUrls: string[]) => Promise<ArrayBuffer[]>) => ( manifest: WeightsManifestConfig, filePathPrefix?: string, weightNames?: string[]) => Promise<NamedTensorMap>;
Creates a function, which reads a weights manifest JSON configuration, fetches the weight files using the specified function and returns them as
Tensor
s.// example for creating a nodejs weight loader, which reads the weight files// from disk using fs.readFileSyncimport * as fs from 'fs'const fetchWeightsFromDisk = (filePaths: string[]) =>filePaths.map(filePath => fs.readFileSync(filePath).buffer)const loadWeights = tf.io.weightsLoaderFactory(fetchWeightsFromDisk)const manifest = JSON.parse(fs.readFileSync('./my_model-weights_manifest').toString())const weightMap = await loadWeights(manifest, './')Parameter fetchWeightsFunction
The function used for fetching the weight files.
Returns
Weight loading function.
function withSaveHandler
withSaveHandler: ( saveHandler: (artifacts: ModelArtifacts) => Promise<SaveResult>) => IOHandler;
Creates an IOHandler that passes saved model artifacts to a callback.
function handleSave(artifacts) {// ... do something with the artifacts ...return {modelArtifactsInfo: {...}, ...};}const saveResult = model.save(tf.io.withSaveHandler(handleSave));Parameter saveHandler
A function that accepts a
ModelArtifacts
and returns a promise that resolves to aSaveResult
.
function withSaveHandlerSync
withSaveHandlerSync: ( saveHandler: (artifacts: ModelArtifacts) => SaveResult) => IOHandlerSync;
Creates an IOHandlerSync that passes saved model artifacts to a callback.
function handleSave(artifacts) {// ... do something with the artifacts ...return {modelArtifactsInfo: {...}, ...};}const saveResult = model.save(tf.io.withSaveHandler(handleSave));Parameter saveHandler
A function that accepts a
ModelArtifacts
and returns aSaveResult
.
class CompositeArrayBuffer
class CompositeArrayBuffer {}
Wraps a list of ArrayBuffers into a
slice()
-able object without allocating a large ArrayBuffer.Allocating large ArrayBuffers (~2GB) can be unstable on Chrome. TFJS loads its weights as a list of (usually) 4MB ArrayBuffers and then slices the weight tensors out of them. For small models, it's safe to concatenate all the weight buffers into a single ArrayBuffer and then slice the weight tensors out of it, but for large models, a different approach is needed.
constructor
constructor(buffers?: TypedArray | ArrayBuffer | ArrayBuffer[] | TypedArray[]);
property byteLength
readonly byteLength: number;
method join
static join: (buffers?: ArrayBuffer[] | ArrayBuffer) => ArrayBuffer;
Concatenate a number of ArrayBuffers into one.
Parameter buffers
An array of ArrayBuffers to concatenate, or a single ArrayBuffer.
Returns
Result of concatenating
buffers
in order.
method slice
slice: (start?: number, end?: number) => ArrayBuffer;
interface IOHandler
interface IOHandler {}
Interface for a model import/export handler.
The
save
andload
handlers are both optional, in order to allow handlers that support only saving or loading.
interface LoadOptions
interface LoadOptions {}
io
property fetchFunc
fetchFunc?: typeof fetch;
A function used to override the
window.fetch
function.
property fromTFHub
fromTFHub?: boolean;
Whether the module or model is to be loaded from TF Hub.
Setting this to
true
allows passing a TF-Hub module URL, omitting the standard model file name and the query parameters.Default:
false
.
property onProgress
onProgress?: OnProgressCallback;
Progress callback.
property requestInit
requestInit?: RequestInit;
RequestInit (options) for HTTP requests.
For detailed information on the supported fields, see [https://developer.mozilla.org/en-US/docs/Web/API/Request/Request]( https://developer.mozilla.org/en-US/docs/Web/API/Request/Request)
property streamWeights
streamWeights?: boolean;
Whether to stream the model directly to the backend or cache all its weights on CPU first. Useful for large models.
property strict
strict?: boolean;
Strict loading model: whether extraneous weights or missing weights should trigger an
Error
.If
true
, require that the provided weights exactly match those required by the layers.false
means that both extra weights and missing weights will be silently ignored.Default:
true
.
property weightPathPrefix
weightPathPrefix?: string;
Path prefix for weight files, by default this is calculated from the path of the model JSON file.
For instance, if the path to the model JSON file is
http://localhost/foo/model.json
, then the default path prefix will behttp://localhost/foo/
. If a weight file has the path valuegroup1-shard1of2
in the weight manifest, then the weight file will be loaded fromhttp://localhost/foo/group1-shard1of2
by default. However, if you provide aweightPathPrefix
value ofhttp://localhost/foo/alt-weights
, then the weight file will be loaded from the pathhttp://localhost/foo/alt-weights/group1-shard1of2
instead.
property weightUrlConverter
weightUrlConverter?: (weightFileName: string) => Promise<string>;
An async function to convert weight file name to URL. The weight file names are stored in model.json's weightsManifest.paths field. By default we consider weight files are colocated with the model.json file. For example: model.json URL: https://www.google.com/models/1/model.json group1-shard1of1.bin url: https://www.google.com/models/1/group1-shard1of1.bin
With this func you can convert the weight file name to any URL.
interface ModelArtifacts
interface ModelArtifacts {}
The serialized artifacts of a model, including topology and weights.
The
modelTopology
,trainingConfig
,weightSpecs
andweightData
fields of this interface are optional, in order to support topology- or weights-only saving and loading.Note this interface is used internally in IOHandlers. For the file format written to disk as
model.json
, seeModelJSON
.
property convertedBy
convertedBy?: string | null;
What library or tool is responsible for converting the original model to this format, applicable only if the model is output by a converter.
Used for debugging purposes. E.g., 'TensorFlow.js Converter v1.0.0'.
A value of
null
means the model artifacts are generated without any conversion process (e.g., saved directly from a TensorFlow.jstf.LayersModel
instance.)
property format
format?: string;
Hard-coded format name for models saved from TensorFlow.js or converted by TensorFlow.js Converter.
property generatedBy
generatedBy?: string;
What library is responsible for originally generating this artifact.
Used for debugging purposes. E.g., 'TensorFlow.js v1.0.0'.
property getWeightStream
getWeightStream?: () => ReadableStream<ArrayBuffer>;
Returns a stream of the weights. Some models are too large to fit in V8's memory heap, and
getWeightStream
loads their weights without storing them all in memory at the same time.
property initializerSignature
initializerSignature?: {};
Inputs and outputs signature for model initializer.
property modelInitializer
modelInitializer?: {};
Initializer for the model.
property modelTopology
modelTopology?: {} | ArrayBuffer;
Model topology.
For Keras-style
tf.Model
s, this is a JSON object. For TensorFlow-style models (e.g.,SavedModel
), this is the JSON encoding of theGraphDef
protocol buffer.
property signature
signature?: {};
Inputs and outputs signature for saved model.
property trainingConfig
trainingConfig?: TrainingConfig;
Serialized configuration for the model's training.
property userDefinedMetadata
userDefinedMetadata?: { [key: string]: {};};
User-defined metadata about the model.
property weightData
weightData?: WeightData;
Binary buffer(s) for all weight values in the order specified by
weightSpecs
. This may be a single ArrayBuffer of all the weights concatenated together or an Array of ArrayBuffers containing the weights (weights may be sharded across multiple ArrayBuffers).
property weightSpecs
weightSpecs?: WeightsManifestEntry[];
Weight specifications.
This corresponds to the weightData above.
interface ModelArtifactsInfo
interface ModelArtifactsInfo {}
property dateSaved
dateSaved: Date;
Timestamp for when the model is saved.
property modelTopologyBytes
modelTopologyBytes?: number;
Size of model topology (Keras JSON or GraphDef), in bytes.
property modelTopologyType
modelTopologyType: 'JSON' | 'GraphDef';
TODO (cais,yassogba) consider removing GraphDef as GraphDefs now come in a JSON format and none of our IOHandlers support a non json format. We could consider replacing this with 'Binary' if we want to allow future handlers to save to non json formats (though they will probably want more information than 'Binary').
Type of the model topology
Possible values: - JSON: JSON config (human-readable, e.g., Keras JSON). - GraphDef: TensorFlow [GraphDef](https://www.tensorflow.org/extend/tool_developers/#graphdef) protocol buffer (binary).
property weightDataBytes
weightDataBytes?: number;
Size of weight value data, in bytes.
property weightSpecsBytes
weightSpecsBytes?: number;
Size of weight specification or manifest, in bytes.
interface ModelJSON
interface ModelJSON {}
The on-disk format of the
model.json
file.TF.js 1.0 always populates the optional fields when writing model.json. Prior versions did not provide those fields.
property convertedBy
convertedBy?: string | null;
What library or tool is responsible for converting the original model to this format, applicable only if the model is output by a converter.
Used for debugging purposes. E.g., 'TensorFlow.js Converter v1.0.0'.
A value of
null
means the model artifacts are generated without any conversion process (e.g., saved directly from a TensorFlow.jstf.LayersModel
instance.)
property format
format?: string;
Hard-coded format name for models saved from TensorFlow.js or converted by TensorFlow.js Converter.
property generatedBy
generatedBy?: string;
What library is responsible for originally generating this artifact.
Used for debugging purposes. E.g., 'TensorFlow.js v1.0.0'.
property initializerSignature
initializerSignature?: {};
Inputs and outputs signature for model initializer.
property modelInitializer
modelInitializer?: {};
Initializer for the model.
property modelTopology
modelTopology: {};
Model topology.
For Keras-style
tf.Model
s, this is a JSON object. For TensorFlow-style models (e.g.,SavedModel
), this is the JSON encoding of theGraphDef
protocol buffer.
property signature
signature?: {};
Inputs and outputs signature for saved model.
property trainingConfig
trainingConfig?: TrainingConfig;
Model training configuration.
property userDefinedMetadata
userDefinedMetadata?: { [key: string]: {};};
User-defined metadata about the model.
property weightsManifest
weightsManifest: WeightsManifestConfig;
Weights manifest.
The weights manifest consists of an ordered list of weight-manifest groups. Each weight-manifest group consists of a number of weight values stored in a number of paths. See the documentation of
WeightsManifestConfig
for more details.
interface ModelStoreManager
interface ModelStoreManager {}
An interface for the manager of a model store.
A model store is defined as a storage medium on which multiple models can be stored. Each stored model has a unique
path
as its identifier. A ModelStoreManager
for the store allows actions including: - Listing the models stored in the store. - Deleting a model from the store.
method listModels
listModels: () => Promise<{ [path: string]: ModelArtifactsInfo }>;
List all models in the model store.
Returns
A dictionary mapping paths of existing models to their model artifacts info. Model artifacts info include type of the model's topology, byte sizes of the topology, weights, etc.
method removeModel
removeModel: (path: string) => Promise<ModelArtifactsInfo>;
Remove a model specified by
path
.Parameter path
Returns
ModelArtifactsInfo of the deleted model (if and only if deletion is successful).
Throws
Error if deletion fails, e.g., if no model exists at
path
.
interface RequestDetails
interface RequestDetails {}
Additional options for Platform.fetch
property isBinary
isBinary?: boolean;
Is this request for a binary file (as opposed to a json file)
interface SaveConfig
interface SaveConfig {}
Options for saving a model. io
property includeOptimizer
includeOptimizer?: boolean;
Whether the optimizer will be saved (if exists).
Default:
false
.
property trainableOnly
trainableOnly?: boolean;
Whether to save only the trainable weights of the model, ignoring the non-trainable ones.
interface SaveResult
interface SaveResult {}
Result of a saving operation.
property errors
errors?: Array<{} | string>;
Error messages and related data (if any).
property modelArtifactsInfo
modelArtifactsInfo: ModelArtifactsInfo;
Information about the model artifacts saved.
property responses
responses?: Response[];
HTTP responses from the server that handled the model-saving request (if any). This is applicable only to server-based saving routes.
interface TrainingConfig
interface TrainingConfig {}
Model training configuration.
property loss
loss: | string | string[] | { [key: string]: string; };
Loss function(s) for the model's output(s).
property loss_weights
loss_weights?: | number[] | { [key: string]: number; };
property metrics
metrics?: | string[] | { [key: string]: string; };
Metric function(s) for the model's output(s).
property optimizer_config
optimizer_config: {};
Optimizer used for the model training.
property sample_weight_mode
sample_weight_mode?: string;
property weighted_metrics
weighted_metrics?: string[];
interface WeightsManifestEntry
interface WeightsManifestEntry {}
An entry in the weight manifest.
The entry contains specification of a weight.
property dtype
dtype: 'float32' | 'int32' | 'bool' | 'string' | 'complex64';
Data type of the weight.
property group
group?: WeightGroup;
Type of the weight.
Optional.
The value 'optimizer' indicates the weight belongs to an optimizer (i.e., used only during model training and not during inference).
property name
name: string;
Name of the weight, e.g., 'Dense_1/bias'
property quantization
quantization?: { scale?: number; min?: number; dtype: 'uint16' | 'uint8' | 'float16';};
Information for dequantization of the weight.
property shape
shape: number[];
Shape of the weight.
type IOHandlerSync
type IOHandlerSync = { save?: SaveHandlerSync; load?: LoadHandlerSync;};
Interface for a synchronous model import/export handler.
The
save
and load
handlers are both optional, in order to allow handlers that support only saving or loading.
type LoadHandler
type LoadHandler = () => Promise<ModelArtifacts>;
Type definition for handlers of loading operations.
type OnProgressCallback
type OnProgressCallback = (fraction: number) => void;
Callback for the progress of a long-running action such as an HTTP request for a large binary object.
fraction
should be a number in the [0, 1] interval, indicating how much of the action has completed.
type SaveHandler
type SaveHandler = (modelArtifact: ModelArtifacts) => Promise<SaveResult>;
Type definition for handlers of saving operations.
type WeightData
type WeightData = ArrayBuffer | ArrayBuffer[];
type WeightGroup
type WeightGroup = 'model' | 'optimizer';
Group to which the weight belongs.
- 'optimizer': Weight from a stateful optimizer.
type WeightsManifestConfig
type WeightsManifestConfig = WeightsManifestGroupConfig[];
A weight manifest.
The weight manifest consists of an ordered list of weight-manifest groups. Each weight-manifest group ("group" for short hereafter) consists of a number of weight values stored in a number of paths. See the documentation of
WeightsManifestGroupConfig
below for more details.
namespace kernel_impls
module 'dist/backends/kernel_impls.d.ts' {}
Copyright 2020 Google LLC. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. =============================================================================
function nonMaxSuppressionV3Impl
nonMaxSuppressionV3Impl: ( boxes: TypedArray, scores: TypedArray, maxOutputSize: number, iouThreshold: number, scoreThreshold: number) => NonMaxSuppressionResult;
function nonMaxSuppressionV4Impl
nonMaxSuppressionV4Impl: ( boxes: TypedArray, scores: TypedArray, maxOutputSize: number, iouThreshold: number, scoreThreshold: number, padToMaxOutputSize: boolean) => NonMaxSuppressionResult;
function nonMaxSuppressionV5Impl
nonMaxSuppressionV5Impl: ( boxes: TypedArray, scores: TypedArray, maxOutputSize: number, iouThreshold: number, scoreThreshold: number, softNmsSigma: number) => NonMaxSuppressionResult;
function whereImpl
whereImpl: (condShape: number[], condVals: TypedArray) => Tensor2D;
namespace math
module 'dist/math.d.ts' {}
Copyright 2018 Google LLC. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. =============================================================================
variable confusionMatrix
const confusionMatrix: ( labels: TensorLike | Tensor1D, predictions: TensorLike | Tensor1D, numClasses: number) => Tensor2D;
namespace scatter_util
module 'dist/ops/scatter_nd_util.d.ts' {}
Copyright 2018 Google LLC. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. =============================================================================
function calculateShapes
calculateShapes: ( updates: TensorInfo, indices: TensorInfo, shape: number[]) => ScatterShapeInfo;
Calculate the shape information for the output.
Parameter updates
The tensor contains the update values.
Parameter indices
The tensor contains the indices for the update values.
Parameter shape
The shape of the output tensor.
Returns
ScatterShapeInfo
function validateInput
validateInput: (updates: Tensor, indices: Tensor, shape: number[]) => void;
Validate scatter nd inputs.
Parameter updates
The tensor contains the update values.
Parameter indices
The tensor contains the indices for the update values.
Parameter shape
The shape of the output tensor.
function validateUpdateShape
validateUpdateShape: (shape: number[], indices: Tensor, updates: Tensor) => void;
Check whether updates.shape = indices.shape[:batchDim] + shape[sliceDim:]
Parameter x
The input tensor.
interface ScatterShapeInfo
interface ScatterShapeInfo {}
property numUpdates
numUpdates: number;
property outputSize
outputSize: number;
property sliceRank
sliceRank: number;
property sliceSize
sliceSize: number;
property strides
strides: number[];
namespace serialization
module 'dist/serialization.d.ts' {}
Copyright 2018 Google LLC. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. =============================================================================
function getRegisteredName
getRegisteredName: <T extends Serializable>( cls: SerializableConstructor<T>) => string;
Get the registered name of a class. If the class has not been registered, return the class name.
Parameter cls
The class we want to get register name for. It must have a public static member called
className
defined. Returns
registered name or class name.
function registerClass
registerClass: <T extends Serializable>( cls: SerializableConstructor<T>, pkg?: string, name?: string) => SerializableConstructor<T>;
Register a class with the serialization map of TensorFlow.js.
This is often used for registering custom Layers, so they can be serialized and deserialized.
Example 1. Register the class without package name and specified name.
class MyCustomLayer extends tf.layers.Layer {static className = 'MyCustomLayer';constructor(config) {super(config);}}tf.serialization.registerClass(MyCustomLayer);console.log(tf.serialization.GLOBALCUSTOMOBJECT.get("Custom>MyCustomLayer"));console.log(tf.serialization.GLOBALCUSTOMNAMES.get(MyCustomLayer));Example 2. Register the class with package name: "Package" and specified name: "MyLayer".
class MyCustomLayer extends tf.layers.Layer {static className = 'MyCustomLayer';constructor(config) {super(config);}}tf.serialization.registerClass(MyCustomLayer, "Package", "MyLayer");console.log(tf.serialization.GLOBALCUSTOMOBJECT.get("Package>MyLayer"));console.log(tf.serialization.GLOBALCUSTOMNAMES.get(MyCustomLayer));Example 3. Register the class with specified name: "MyLayer".
class MyCustomLayer extends tf.layers.Layer {static className = 'MyCustomLayer';constructor(config) {super(config);}}tf.serialization.registerClass(MyCustomLayer, undefined, "MyLayer");console.log(tf.serialization.GLOBALCUSTOMOBJECT.get("Custom>MyLayer"));console.log(tf.serialization.GLOBALCUSTOMNAMES.get(MyCustomLayer));Example 4. Register the class with specified package name: "Package".
class MyCustomLayer extends tf.layers.Layer {static className = 'MyCustomLayer';constructor(config) {super(config);}}tf.serialization.registerClass(MyCustomLayer, "Package");console.log(tf.serialization.GLOBALCUSTOMOBJECT.get("Package>MyCustomLayer"));console.log(tf.serialization.GLOBALCUSTOMNAMES.get(MyCustomLayer));Parameter cls
The class to be registered. It must have a public static member called
className
defined and the value must be a non-empty string.Parameter pkg
The package name that this class belongs to. This used to define the key in GlobalCustomObject. If not defined, it defaults to
Custom
.Parameter name
The name that user specified. It defaults to the actual name of the class as specified by its static
className
property. {heading: 'Models', subheading: 'Serialization', ignoreCI: true}
class Serializable
abstract class Serializable {}
Serializable defines the serialization contract.
TFJS requires serializable classes to return their className when asked to avoid issues with minification.
method fromConfig
static fromConfig: <T extends Serializable>( cls: SerializableConstructor<T>, config: ConfigDict) => T;
method getClassName
getClassName: () => string;
Return the class name for this class to use in serialization contexts.
Generally speaking this will be the same thing that constructor.name would have returned. However, the class name needs to be robust against minification for serialization/deserialization to work properly.
There's also places such as initializers.VarianceScaling, where implementation details between different languages led to different class hierarchies and a non-leaf node is used for serialization purposes.
method getConfig
abstract getConfig: () => ConfigDict;
Return all the non-weight state needed to serialize this object.
class SerializationMap
class SerializationMap {}
Maps string keys to class constructors.
Used during (de)serialization from the cross-language JSON format, which requires the class name in the serialization format matches the class names as used in Python, should it exist.
property classNameMap
classNameMap: { [className: string]: [ SerializableConstructor<Serializable>, FromConfigMethod<Serializable> ];};
method getMap
static getMap: () => SerializationMap;
Returns the singleton instance of the map.
method register
static register: <T extends Serializable>( cls: SerializableConstructor<T>) => void;
Registers the class as serializable.
interface ConfigDict
interface ConfigDict {}
index signature
[key: string]: ConfigDictValue;
interface ConfigDictArray
interface ConfigDictArray extends Array<ConfigDictValue> {}
type ConfigDictValue
type ConfigDictValue = | boolean | number | string | null | ConfigDictArray | ConfigDict;
Types to support JSON-esque data structures internally.
Internally ConfigDict's use camelCase keys and values where the values are class names to be instantiated. On the python side, these will be snake_case. Internally we allow Enums into the values for better type safety, but these need to be converted to raw primitives (usually strings) for round-tripping with python.
toConfig returns the TS-friendly representation. model.toJSON() returns the pythonic version as that's the portable format. If you need to python-ify a non-model level toConfig output, you'll need to use a convertTsToPythonic from serialization_utils in -Layers.
type FromConfigMethod
type FromConfigMethod<T extends Serializable> = ( cls: SerializableConstructor<T>, config: ConfigDict) => T;
type SerializableConstructor
type SerializableConstructor<T extends Serializable> = { new (...args: any[]): T; className: string; fromConfig: FromConfigMethod<T>;};
Type to represent the class-type of Serializable objects.
Ie the class prototype with access to the constructor and any static members/methods. Instance methods are not listed here.
Source for this idea: https://stackoverflow.com/a/43607255
namespace slice_util
module 'dist/ops/slice_util.d.ts' {}
Copyright 2021 Google LLC. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. =============================================================================
function assertParamsValid
assertParamsValid: (input: TensorInfo, begin: number[], size: number[]) => void;
function computeFlatOffset
computeFlatOffset: (begin: number[], strides: number[]) => number;
function computeOutShape
computeOutShape: (begin: number[], end: number[], strides: number[]) => number[];
Computes the output shape given the strided slice params.
function getNormalizedAxes
getNormalizedAxes: ( inputShape: number[], ellipsisAxes: number[], numInterpolatedAxes: number, begin: number[], end: number[], strides: number[], beginMask: number, endMask: number, ellipsisMask: number) => { begin: number[]; end: number[]; strides: number[] };
function isSliceContinous
isSliceContinous: (shape: number[], begin: number[], size: number[]) => boolean;
Returns true if the slice occupies a continuous set of elements in the 'flat' space.
function maskToAxes
maskToAxes: (mask: number) => number[];
Converts a binary mask to an array of axes. Used in stridedSlice().
function parseSliceParams
parseSliceParams: ( x: TensorInfo, begin: number | number[], size?: number | number[]) => number[][];
function sliceInfo
sliceInfo: ( xShape: number[], begin: number[], end: number[], strides: number[], beginMask: number, endMask: number, ellipsisMask: number, newAxisMask: number, shrinkAxisMask: number) => SliceInfo;
function startForAxis
startForAxis: ( beginMask: number, startIndices: number[], strides: number[], inputShape: number[], axis: number, ellipsisMask: number) => number;
function startIndicesWithElidedDims
startIndicesWithElidedDims: ( beginMask: number, ellipsisInsertionIndex: number, numElidedAxes: number, originalBegin: number[], inputShape: number[]) => number[];
function stopForAxis
stopForAxis: ( endMask: number, stopIndices: number[], strides: number[], inputShape: number[], axis: number, ellipsisMask: number) => number;
function stopIndicesWithElidedDims
stopIndicesWithElidedDims: ( endMask: number, ellipsisInsertionIndex: number, numElidedAxes: number, originalEnd: number[], inputShape: number[]) => number[];
function stridesForAxis
stridesForAxis: ( strides: number[], axis: number, ellipsisMask: number) => number;
function stridesWithElidedDims
stridesWithElidedDims: ( strides: number[], ellipsisInsertionIndex: number, numElidedAxes: number, inputShape: number[]) => number[];
type SliceInfo
type SliceInfo = { finalShapeSparse: number[]; finalShape: number[]; isIdentity: boolean; sliceDim0: boolean; isSimpleSlice: boolean; begin: number[]; end: number[]; strides: number[];};
namespace Tensor
namespace Tensor {}
namespace tensor_util
module 'dist/tensor_util.d.ts' {}
Copyright 2018 Google LLC. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. =============================================================================
function assertTypesMatch
assertTypesMatch: (a: Tensor, b: Tensor) => void;
function getTensorsInContainer
getTensorsInContainer: (result: TensorContainer) => Tensor[];
Extracts any
Tensor
s found within the provided object.Parameter container
an object that may be a
Tensor
or may directly contain Tensor
s, such as a Tensor[]
or {key: Tensor, ...}
. In general it is safe to pass any object here, except that Promise
s are not supported. Returns
An array of
Tensors
found within the passed object. If the argument is simply a Tensor, a list containing that
Tensor is returned. If the object is not a Tensor
or does not contain Tensors
, an empty list is returned.
function isTensorInList
isTensorInList: (tensor: Tensor, tensorList: Tensor[]) => boolean;
function makeTypesMatch
makeTypesMatch: <T extends Tensor<Rank>>(a: T, b: T) => [T, T];
namespace test_util
module 'dist/test_util.d.ts' {}
Copyright 2017 Google LLC. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. =============================================================================
variable TEST_EPSILON_FLOAT16
const TEST_EPSILON_FLOAT16: number;
function createVideoElement
createVideoElement: (source: HTMLSourceElement) => Promise<HTMLVideoElement>;
Creates an HTMLVideoElement with autoplay-friendly default settings.
function encodeStrings
encodeStrings: (a: RecursiveArray<{}>) => RecursiveArray<Uint8Array>;
Encodes strings into utf-8 bytes.
function expectArrayBuffersEqual
expectArrayBuffersEqual: (actual: ArrayBuffer, expected: ArrayBuffer) => void;
function expectArraysClose
expectArraysClose: ( actual: TypedArray | number | RecursiveArray<number>, expected: TypedArray | number | RecursiveArray<number>, epsilon?: number) => void;
function expectArraysEqual
expectArraysEqual: (actual: TensorLike, expected: TensorLike) => void;
function expectNumbersClose
expectNumbersClose: (a: number, e: number, epsilon?: number) => void;
function expectPromiseToFail
expectPromiseToFail: (fn: () => Promise<{}>, done: DoneFn) => void;
function expectValuesInRange
expectValuesInRange: ( actual: TypedArray | number[], low: number, high: number) => void;
function play
play: (video: HTMLVideoElement) => Promise<void>;
function testEpsilon
testEpsilon: () => 0.001 | 0.1;
interface DoneFn
interface DoneFn {}
property fail
fail: (message?: Error | string) => void;
call signature
(): void;
namespace util
module 'dist/util.d.ts' {}
Copyright 2017 Google LLC. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. =============================================================================
function arraysEqual
arraysEqual: (n1: FlatVector, n2: FlatVector) => boolean;
function arraysEqualWithNull
arraysEqualWithNull: (n1: number[], n2: number[]) => boolean;
function assert
assert: (expr: boolean, msg: () => string) => void;
Asserts that the expression is true. Otherwise throws an error with the provided message.
const x = 2;tf.util.assert(x === 2, 'x is not 2');Parameter expr
The expression to assert (as a boolean).
Parameter msg
A function that returns the message to report when throwing an error. We use a function for performance reasons.
{heading: 'Util', namespace: 'util'}
function assertNonNegativeIntegerDimensions
assertNonNegativeIntegerDimensions: (shape: number[]) => void;
function assertNonNull
assertNonNull: (a: TensorLike) => void;
function assertShapesMatch
assertShapesMatch: ( shapeA: number[], shapeB: number[], errorMessagePrefix?: string) => void;
function bytesFromStringArray
bytesFromStringArray: (arr: Uint8Array[]) => number;
Returns the approximate number of bytes allocated in the string array - 2 bytes per character. Computing the exact bytes for a native string in JS is not possible since it depends on the encoding of the html page that serves the website.
function bytesPerElement
bytesPerElement: (dtype: DataType) => number;
function checkConversionForErrors
checkConversionForErrors: <D extends keyof DataTypeMap>( vals: DataTypeMap[D] | number[], dtype: D) => void;
function clamp
clamp: (min: number, x: number, max: number) => number;
Clamps a value to a specified range.
function computeStrides
computeStrides: (shape: number[]) => number[];
function convertBackendValuesAndArrayBuffer
convertBackendValuesAndArrayBuffer: ( data: BackendValues | ArrayBuffer, dtype: DataType) => Float32Array | Int32Array | Uint8Array | Uint8Array[];
function createScalarValue
createScalarValue: (value: DataType, dtype: DataType) => BackendValues;
Create typed array for scalar value. Used for storing in
DataStorage
.
function createShuffledIndices
createShuffledIndices: (n: number) => Uint32Array;
Creates a new array with randomized indices to a given quantity.
const randomTen = tf.util.createShuffledIndices(10);console.log(randomTen);Parameter number
Quantity of how many shuffled indices to create.
{heading: 'Util', namespace: 'util'}
function decodeString
decodeString: (bytes: Uint8Array, encoding?: string) => string;
Decodes the provided bytes into a string using the provided encoding scheme.
Parameter bytes
The bytes to decode.
Parameter encoding
The encoding scheme. Defaults to utf-8.
{heading: 'Util'}
function distSquared
distSquared: (a: FlatVector, b: FlatVector) => number;
Returns the squared Euclidean distance between two vectors.
function encodeString
encodeString: (s: string, encoding?: string) => Uint8Array;
Encodes the provided string into bytes using the provided encoding scheme.
Parameter s
The string to encode.
Parameter encoding
The encoding scheme. Defaults to utf-8.
{heading: 'Util'}
function fetch
fetch: (path: string, requestInits?: RequestInit) => Promise<Response>;
Returns a platform-specific implementation of [
fetch
](https://developer.mozilla.org/en-US/docs/Web/API/Fetch_API).If
fetch
is defined on the global object (window
,process
, etc.),tf.util.fetch
returns that function.If not,
tf.util.fetch
returns a platform-specific solution.const resource = await tf.util.fetch('https://cdn.jsdelivr.net/npm/@tensorflow/tfjs');// handle response{heading: 'Util'}
function fingerPrint64
fingerPrint64: (s: Uint8Array, len?: number) => Long;
function flatten
flatten: <T extends string | number | boolean | TypedArray | Promise<number>>( arr: T | RecursiveArray<T>, result?: T[], skipTypedArray?: boolean) => T[];
Flattens an arbitrarily nested array.
const a = [[1, 2], [3, 4], [5, [6, [7]]]];const flat = tf.util.flatten(a);console.log(flat);Parameter arr
The nested array to flatten.
Parameter result
The destination array which holds the elements.
Parameter skipTypedArray
If true, avoids flattening the typed arrays. Defaults to false.
{heading: 'Util', namespace: 'util'}
function getArrayFromDType
getArrayFromDType: <D extends keyof DataTypeMap>( dtype: D, size: number) => DataTypeMap[D];
function getTypedArrayFromDType
getTypedArrayFromDType: <D extends NumericDataType>( dtype: D, size: number) => DataTypeMap[D];
function hasEncodingLoss
hasEncodingLoss: (oldType: DataType, newType: DataType) => boolean;
Returns true if the new type can't encode the old type without loss of precision.
function hexToLong
hexToLong: (hex: string) => Long;
function indexToLoc
indexToLoc: (index: number, rank: number, strides: number[]) => number[];
Computes the location (multidimensional index) in a tensor/multidimensional array for a given flat index.
Parameter index
Index in flat array.
Parameter rank
Rank of tensor.
Parameter strides
Strides of tensor.
function inferDtype
inferDtype: (values: TensorLike | WebGLData | WebGPUData) => DataType;
function inferFromImplicitShape
inferFromImplicitShape: (shape: number[], size: number) => number[];
Given the full size of the array and a shape that may contain -1 as the implicit dimension, returns the inferred shape where -1 is replaced. E.g. For shape=[2, -1, 3] and size=24, it will return [2, 4, 3].
Parameter shape
The shape, which may contain -1 in some dimension.
Parameter size
The full size (number of elements) of the array. The inferred shape where -1 is replaced with the inferred size.
function isBoolean
isBoolean: (value: {}) => boolean;
function isFunction
isFunction: (f: Function) => boolean;
function isInt
isInt: (a: number) => boolean;
function isNumber
isNumber: (value: {}) => boolean;
function isPromise
isPromise: (object: any) => object is Promise<unknown>;
This method asserts whether an object is a Promise instance.
Parameter object
function isScalarShape
isScalarShape: (shape: number[]) => boolean;
function isString
isString: (value: {}) => value is string;
Returns true if the value is a string.
function isTypedArray
isTypedArray: (a: {}) => a is | Uint8Array | Float32Array | Int32Array | Uint8ClampedArray;
function isValidDtype
isValidDtype: (dtype: DataType) => boolean;
Returns true if the dtype is valid.
function locToIndex
locToIndex: (locs: number[], rank: number, strides: number[]) => number;
Computes flat index for a given location (multidimensional index) in a Tensor/multidimensional array.
Parameter locs
Location in the tensor.
Parameter rank
Rank of the tensor.
Parameter strides
Tensor strides.
function makeOnesTypedArray
makeOnesTypedArray: <D extends keyof DataTypeMap>( size: number, dtype: D) => DataTypeMap[D];
function makeZerosNestedTypedArray
makeZerosNestedTypedArray: <D extends keyof DataTypeMap>( shape: number[], dtype: D) => number | any[];
Make nested
TypedArray
filled with zeros.Parameter shape
The shape information for the nested array.
Parameter dtype
dtype of the array element.
function makeZerosTypedArray
makeZerosTypedArray: <D extends keyof DataTypeMap>( size: number, dtype: D) => DataTypeMap[D];
function nearestDivisor
nearestDivisor: (size: number, start: number) => number;
function nearestLargerEven
nearestLargerEven: (val: number) => number;
function now
now: () => number;
Returns the current high-resolution time in milliseconds relative to an arbitrary time in the past. It works across different platforms (node.js, browsers).
console.log(tf.util.now());{heading: 'Util', namespace: 'util'}
function parseAxisParam
parseAxisParam: (axis: number | number[], shape: number[]) => number[];
function randUniform
randUniform: (a: number, b: number) => number;
Returns a sample from a uniform [a, b) distribution.
Parameter a
The minimum support (inclusive).
Parameter b
The maximum support (exclusive). A pseudorandom number on the half-open interval [a,b).
function repeatedTry
repeatedTry: ( checkFn: () => boolean, delayFn?: (counter: number) => number, maxCounter?: number, scheduleFn?: (functionRef: Function, delay: number) => void) => Promise<void>;
function rightPad
rightPad: (a: string, size: number) => string;
function shuffle
shuffle: (array: any[] | Uint32Array | Int32Array | Float32Array) => void;
Shuffles the array in-place using Fisher-Yates algorithm.
const a = [1, 2, 3, 4, 5];tf.util.shuffle(a);console.log(a);Parameter array
The array to shuffle in-place.
{heading: 'Util', namespace: 'util'}
function shuffleCombo
shuffleCombo: ( array: any[] | Uint32Array | Int32Array | Float32Array, array2: any[] | Uint32Array | Int32Array | Float32Array) => void;
Shuffles two arrays in-place the same way using Fisher-Yates algorithm.
const a = [1,2,3,4,5];const b = [11,22,33,44,55];tf.util.shuffleCombo(a, b);console.log(a, b);Parameter array
The first array to shuffle in-place.
Parameter array2
The second array to shuffle in-place with the same permutation as the first array.
{heading: 'Util', namespace: 'util'}
function sizeFromShape
sizeFromShape: (shape: number[]) => number;
Returns the size (number of elements) of the tensor given its shape.
const shape = [3, 4, 2];const size = tf.util.sizeFromShape(shape);console.log(size);{heading: 'Util', namespace: 'util'}
function sizeToSquarishShape
sizeToSquarishShape: (size: number) => [number, number];
function squeezeShape
squeezeShape: ( shape: number[], axis?: number[]) => { newShape: number[]; keptDims: number[] };
Reduces the shape by removing all dimensions of shape 1.
function sum
sum: (arr: number[]) => number;
function swap
swap: <T>(object: { [index: number]: T }, left: number, right: number) => void;
function tanh
tanh: (x: number) => number;
function toNestedArray
toNestedArray: ( shape: number[], a: TypedArray, isComplex?: boolean) => number | any[];
function toTypedArray
toTypedArray: (a: TensorLike, dtype: DataType) => TypedArray;
Package Files (423)
- dist/backends/backend.d.ts
- dist/backends/backend_util.d.ts
- dist/backends/complex_util.d.ts
- dist/backends/einsum_util.d.ts
- dist/backends/kernel_impls.d.ts
- dist/backends/non_max_suppression_impl.d.ts
- dist/backends/where_impl.d.ts
- dist/browser_util.d.ts
- dist/device_util.d.ts
- dist/engine.d.ts
- dist/environment.d.ts
- dist/globals.d.ts
- dist/gradients.d.ts
- dist/hash_util.d.ts
- dist/index.d.ts
- dist/io/browser_files.d.ts
- dist/io/composite_array_buffer.d.ts
- dist/io/http.d.ts
- dist/io/io.d.ts
- dist/io/io_utils.d.ts
- dist/io/model_management.d.ts
- dist/io/passthrough.d.ts
- dist/io/router_registry.d.ts
- dist/io/types.d.ts
- dist/io/weights_loader.d.ts
- dist/kernel_names.d.ts
- dist/kernel_registry.d.ts
- dist/log.d.ts
- dist/math.d.ts
- dist/model_types.d.ts
- dist/ops/abs.d.ts
- dist/ops/acos.d.ts
- dist/ops/acosh.d.ts
- dist/ops/add.d.ts
- dist/ops/add_n.d.ts
- dist/ops/all.d.ts
- dist/ops/any.d.ts
- dist/ops/arg_max.d.ts
- dist/ops/arg_min.d.ts
- dist/ops/array_ops_util.d.ts
- dist/ops/asin.d.ts
- dist/ops/asinh.d.ts
- dist/ops/atan.d.ts
- dist/ops/atan2.d.ts
- dist/ops/atanh.d.ts
- dist/ops/avg_pool.d.ts
- dist/ops/avg_pool_3d.d.ts
- dist/ops/axis_util.d.ts
- dist/ops/basic_lstm_cell.d.ts
- dist/ops/batch_to_space_nd.d.ts
- dist/ops/batchnorm.d.ts
- dist/ops/batchnorm2d.d.ts
- dist/ops/batchnorm3d.d.ts
- dist/ops/batchnorm4d.d.ts
- dist/ops/bincount.d.ts
- dist/ops/bitwise_and.d.ts
- dist/ops/boolean_mask.d.ts
- dist/ops/broadcast_args.d.ts
- dist/ops/broadcast_to.d.ts
- dist/ops/broadcast_util.d.ts
- dist/ops/browser.d.ts
- dist/ops/buffer.d.ts
- dist/ops/cast.d.ts
- dist/ops/ceil.d.ts
- dist/ops/clip_by_value.d.ts
- dist/ops/clone.d.ts
- dist/ops/complex.d.ts
- dist/ops/concat.d.ts
- dist/ops/concat_1d.d.ts
- dist/ops/concat_2d.d.ts
- dist/ops/concat_3d.d.ts
- dist/ops/concat_4d.d.ts
- dist/ops/concat_util.d.ts
- dist/ops/confusion_matrix.d.ts
- dist/ops/conv1d.d.ts
- dist/ops/conv2d.d.ts
- dist/ops/conv2d_transpose.d.ts
- dist/ops/conv3d.d.ts
- dist/ops/conv3d_transpose.d.ts
- dist/ops/conv_util.d.ts
- dist/ops/cos.d.ts
- dist/ops/cosh.d.ts
- dist/ops/cumprod.d.ts
- dist/ops/cumsum.d.ts
- dist/ops/dense_bincount.d.ts
- dist/ops/depth_to_space.d.ts
- dist/ops/depthwise_conv2d.d.ts
- dist/ops/diag.d.ts
- dist/ops/dilation2d.d.ts
- dist/ops/div.d.ts
- dist/ops/div_no_nan.d.ts
- dist/ops/dot.d.ts
- dist/ops/dropout.d.ts
- dist/ops/einsum.d.ts
- dist/ops/elu.d.ts
- dist/ops/ensure_shape.d.ts
- dist/ops/equal.d.ts
- dist/ops/erf.d.ts
- dist/ops/erf_util.d.ts
- dist/ops/euclidean_norm.d.ts
- dist/ops/exp.d.ts
- dist/ops/expand_dims.d.ts
- dist/ops/expm1.d.ts
- dist/ops/eye.d.ts
- dist/ops/fill.d.ts
- dist/ops/floor.d.ts
- dist/ops/floorDiv.d.ts
- dist/ops/fused/conv2d.d.ts
- dist/ops/fused/depthwise_conv2d.d.ts
- dist/ops/fused/mat_mul.d.ts
- dist/ops/fused_ops.d.ts
- dist/ops/fused_types.d.ts
- dist/ops/fused_util.d.ts
- dist/ops/gather.d.ts
- dist/ops/gather_nd.d.ts
- dist/ops/gather_nd_util.d.ts
- dist/ops/greater.d.ts
- dist/ops/greater_equal.d.ts
- dist/ops/imag.d.ts
- dist/ops/in_top_k.d.ts
- dist/ops/is_finite.d.ts
- dist/ops/is_inf.d.ts
- dist/ops/is_nan.d.ts
- dist/ops/leaky_relu.d.ts
- dist/ops/less.d.ts
- dist/ops/less_equal.d.ts
- dist/ops/linspace.d.ts
- dist/ops/local_response_normalization.d.ts
- dist/ops/log.d.ts
- dist/ops/log1p.d.ts
- dist/ops/log_sigmoid.d.ts
- dist/ops/log_softmax.d.ts
- dist/ops/log_sum_exp.d.ts
- dist/ops/logical_and.d.ts
- dist/ops/logical_not.d.ts
- dist/ops/logical_or.d.ts
- dist/ops/logical_xor.d.ts
- dist/ops/loss_ops_utils.d.ts
- dist/ops/lower_bound.d.ts
- dist/ops/mat_mul.d.ts
- dist/ops/max.d.ts
- dist/ops/max_pool.d.ts
- dist/ops/max_pool_3d.d.ts
- dist/ops/max_pool_with_argmax.d.ts
- dist/ops/maximum.d.ts
- dist/ops/mean.d.ts
- dist/ops/meshgrid.d.ts
- dist/ops/min.d.ts
- dist/ops/minimum.d.ts
- dist/ops/mirror_pad.d.ts
- dist/ops/mod.d.ts
- dist/ops/moments.d.ts
- dist/ops/moving_average.d.ts
- dist/ops/mul.d.ts
- dist/ops/multi_rnn_cell.d.ts
- dist/ops/multinomial.d.ts
- dist/ops/neg.d.ts
- dist/ops/norm.d.ts
- dist/ops/not_equal.d.ts
- dist/ops/one_hot.d.ts
- dist/ops/ones.d.ts
- dist/ops/ones_like.d.ts
- dist/ops/operation.d.ts
- dist/ops/ops.d.ts
- dist/ops/outer_product.d.ts
- dist/ops/pad.d.ts
- dist/ops/pad1d.d.ts
- dist/ops/pad2d.d.ts
- dist/ops/pad3d.d.ts
- dist/ops/pad4d.d.ts
- dist/ops/pool.d.ts
- dist/ops/pow.d.ts
- dist/ops/prelu.d.ts
- dist/ops/print.d.ts
- dist/ops/prod.d.ts
- dist/ops/ragged_gather.d.ts
- dist/ops/ragged_range.d.ts
- dist/ops/ragged_tensor_to_tensor.d.ts
- dist/ops/ragged_to_dense_util.d.ts
- dist/ops/rand.d.ts
- dist/ops/random_gamma.d.ts
- dist/ops/random_normal.d.ts
- dist/ops/random_standard_normal.d.ts
- dist/ops/random_uniform.d.ts
- dist/ops/random_uniform_int.d.ts
- dist/ops/range.d.ts
- dist/ops/real.d.ts
- dist/ops/reciprocal.d.ts
- dist/ops/reduce_util.d.ts
- dist/ops/relu.d.ts
- dist/ops/relu6.d.ts
- dist/ops/reshape.d.ts
- dist/ops/reverse.d.ts
- dist/ops/reverse_1d.d.ts
- dist/ops/reverse_2d.d.ts
- dist/ops/reverse_3d.d.ts
- dist/ops/reverse_4d.d.ts
- dist/ops/rotate_util.d.ts
- dist/ops/round.d.ts
- dist/ops/rsqrt.d.ts
- dist/ops/scalar.d.ts
- dist/ops/scatter_nd.d.ts
- dist/ops/scatter_nd_util.d.ts
- dist/ops/search_sorted.d.ts
- dist/ops/segment_util.d.ts
- dist/ops/selu.d.ts
- dist/ops/selu_util.d.ts
- dist/ops/separable_conv2d.d.ts
- dist/ops/setdiff1d_async.d.ts
- dist/ops/sigmoid.d.ts
- dist/ops/sign.d.ts
- dist/ops/signal_ops_util.d.ts
- dist/ops/sin.d.ts
- dist/ops/sinh.d.ts
- dist/ops/slice.d.ts
- dist/ops/slice1d.d.ts
- dist/ops/slice2d.d.ts
- dist/ops/slice3d.d.ts
- dist/ops/slice4d.d.ts
- dist/ops/slice_util.d.ts
- dist/ops/softmax.d.ts
- dist/ops/softplus.d.ts
- dist/ops/space_to_batch_nd.d.ts
- dist/ops/sparse/sparse_fill_empty_rows_util.d.ts
- dist/ops/sparse/sparse_reshape_util.d.ts
- dist/ops/sparse/sparse_segment_reduction_util.d.ts
- dist/ops/sparse_to_dense.d.ts
- dist/ops/spectral/fft.d.ts
- dist/ops/spectral/ifft.d.ts
- dist/ops/spectral/irfft.d.ts
- dist/ops/spectral/rfft.d.ts
- dist/ops/split.d.ts
- dist/ops/split_util.d.ts
- dist/ops/sqrt.d.ts
- dist/ops/square.d.ts
- dist/ops/squared_difference.d.ts
- dist/ops/squeeze.d.ts
- dist/ops/stack.d.ts
- dist/ops/step.d.ts
- dist/ops/strided_slice.d.ts
- dist/ops/sub.d.ts
- dist/ops/sum.d.ts
- dist/ops/tan.d.ts
- dist/ops/tanh.d.ts
- dist/ops/tensor.d.ts
- dist/ops/tensor1d.d.ts
- dist/ops/tensor2d.d.ts
- dist/ops/tensor3d.d.ts
- dist/ops/tensor4d.d.ts
- dist/ops/tensor5d.d.ts
- dist/ops/tensor6d.d.ts
- dist/ops/tensor_scatter_update.d.ts
- dist/ops/tile.d.ts
- dist/ops/topk.d.ts
- dist/ops/transpose.d.ts
- dist/ops/truncated_normal.d.ts
- dist/ops/unique.d.ts
- dist/ops/unsorted_segment_sum.d.ts
- dist/ops/unstack.d.ts
- dist/ops/upper_bound.d.ts
- dist/ops/variable.d.ts
- dist/ops/where.d.ts
- dist/ops/where_async.d.ts
- dist/ops/zeros.d.ts
- dist/ops/zeros_like.d.ts
- dist/optimizers/adadelta_optimizer.d.ts
- dist/optimizers/adagrad_optimizer.d.ts
- dist/optimizers/adam_optimizer.d.ts
- dist/optimizers/adamax_optimizer.d.ts
- dist/optimizers/momentum_optimizer.d.ts
- dist/optimizers/optimizer.d.ts
- dist/optimizers/optimizer_constructors.d.ts
- dist/optimizers/rmsprop_optimizer.d.ts
- dist/optimizers/sgd_optimizer.d.ts
- dist/platforms/platform.d.ts
- dist/public/chained_ops/abs.d.ts
- dist/public/chained_ops/acos.d.ts
- dist/public/chained_ops/acosh.d.ts
- dist/public/chained_ops/add.d.ts
- dist/public/chained_ops/all.d.ts
- dist/public/chained_ops/any.d.ts
- dist/public/chained_ops/arg_max.d.ts
- dist/public/chained_ops/arg_min.d.ts
- dist/public/chained_ops/as1d.d.ts
- dist/public/chained_ops/as2d.d.ts
- dist/public/chained_ops/as3d.d.ts
- dist/public/chained_ops/as4d.d.ts
- dist/public/chained_ops/as5d.d.ts
- dist/public/chained_ops/as_scalar.d.ts
- dist/public/chained_ops/as_type.d.ts
- dist/public/chained_ops/asin.d.ts
- dist/public/chained_ops/asinh.d.ts
- dist/public/chained_ops/atan.d.ts
- dist/public/chained_ops/atan2.d.ts
- dist/public/chained_ops/atanh.d.ts
- dist/public/chained_ops/avg_pool.d.ts
- dist/public/chained_ops/batch_to_space_nd.d.ts
- dist/public/chained_ops/batchnorm.d.ts
- dist/public/chained_ops/broadcast_to.d.ts
- dist/public/chained_ops/cast.d.ts
- dist/public/chained_ops/ceil.d.ts
- dist/public/chained_ops/clip_by_value.d.ts
- dist/public/chained_ops/concat.d.ts
- dist/public/chained_ops/conv1d.d.ts
- dist/public/chained_ops/conv2d.d.ts
- dist/public/chained_ops/conv2d_transpose.d.ts
- dist/public/chained_ops/cos.d.ts
- dist/public/chained_ops/cosh.d.ts
- dist/public/chained_ops/cumprod.d.ts
- dist/public/chained_ops/cumsum.d.ts
- dist/public/chained_ops/depth_to_space.d.ts
- dist/public/chained_ops/depthwise_conv2d.d.ts
- dist/public/chained_ops/dilation2d.d.ts
- dist/public/chained_ops/div.d.ts
- dist/public/chained_ops/div_no_nan.d.ts
- dist/public/chained_ops/dot.d.ts
- dist/public/chained_ops/elu.d.ts
- dist/public/chained_ops/equal.d.ts
- dist/public/chained_ops/erf.d.ts
- dist/public/chained_ops/euclidean_norm.d.ts
- dist/public/chained_ops/exp.d.ts
- dist/public/chained_ops/expand_dims.d.ts
- dist/public/chained_ops/expm1.d.ts
- dist/public/chained_ops/fft.d.ts
- dist/public/chained_ops/flatten.d.ts
- dist/public/chained_ops/floor.d.ts
- dist/public/chained_ops/floorDiv.d.ts
- dist/public/chained_ops/gather.d.ts
- dist/public/chained_ops/greater.d.ts
- dist/public/chained_ops/greater_equal.d.ts
- dist/public/chained_ops/ifft.d.ts
- dist/public/chained_ops/irfft.d.ts
- dist/public/chained_ops/is_finite.d.ts
- dist/public/chained_ops/is_inf.d.ts
- dist/public/chained_ops/is_nan.d.ts
- dist/public/chained_ops/leaky_relu.d.ts
- dist/public/chained_ops/less.d.ts
- dist/public/chained_ops/less_equal.d.ts
- dist/public/chained_ops/local_response_normalization.d.ts
- dist/public/chained_ops/log.d.ts
- dist/public/chained_ops/log1p.d.ts
- dist/public/chained_ops/log_sigmoid.d.ts
- dist/public/chained_ops/log_softmax.d.ts
- dist/public/chained_ops/log_sum_exp.d.ts
- dist/public/chained_ops/logical_and.d.ts
- dist/public/chained_ops/logical_not.d.ts
- dist/public/chained_ops/logical_or.d.ts
- dist/public/chained_ops/logical_xor.d.ts
- dist/public/chained_ops/mat_mul.d.ts
- dist/public/chained_ops/max.d.ts
- dist/public/chained_ops/max_pool.d.ts
- dist/public/chained_ops/maximum.d.ts
- dist/public/chained_ops/mean.d.ts
- dist/public/chained_ops/min.d.ts
- dist/public/chained_ops/minimum.d.ts
- dist/public/chained_ops/mirror_pad.d.ts
- dist/public/chained_ops/mod.d.ts
- dist/public/chained_ops/mul.d.ts
- dist/public/chained_ops/neg.d.ts
- dist/public/chained_ops/norm.d.ts
- dist/public/chained_ops/not_equal.d.ts
- dist/public/chained_ops/one_hot.d.ts
- dist/public/chained_ops/ones_like.d.ts
- dist/public/chained_ops/pad.d.ts
- dist/public/chained_ops/pool.d.ts
- dist/public/chained_ops/pow.d.ts
- dist/public/chained_ops/prelu.d.ts
- dist/public/chained_ops/prod.d.ts
- dist/public/chained_ops/reciprocal.d.ts
- dist/public/chained_ops/relu.d.ts
- dist/public/chained_ops/relu6.d.ts
- dist/public/chained_ops/reshape.d.ts
- dist/public/chained_ops/reshape_as.d.ts
- dist/public/chained_ops/resize_bilinear.d.ts
- dist/public/chained_ops/resize_nearest_neighbor.d.ts
- dist/public/chained_ops/reverse.d.ts
- dist/public/chained_ops/rfft.d.ts
- dist/public/chained_ops/round.d.ts
- dist/public/chained_ops/rsqrt.d.ts
- dist/public/chained_ops/selu.d.ts
- dist/public/chained_ops/separable_conv2d.d.ts
- dist/public/chained_ops/sigmoid.d.ts
- dist/public/chained_ops/sign.d.ts
- dist/public/chained_ops/sin.d.ts
- dist/public/chained_ops/sinh.d.ts
- dist/public/chained_ops/slice.d.ts
- dist/public/chained_ops/softmax.d.ts
- dist/public/chained_ops/softplus.d.ts
- dist/public/chained_ops/space_to_batch_nd.d.ts
- dist/public/chained_ops/split.d.ts
- dist/public/chained_ops/sqrt.d.ts
- dist/public/chained_ops/square.d.ts
- dist/public/chained_ops/squared_difference.d.ts
- dist/public/chained_ops/squeeze.d.ts
- dist/public/chained_ops/stack.d.ts
- dist/public/chained_ops/step.d.ts
- dist/public/chained_ops/strided_slice.d.ts
- dist/public/chained_ops/sub.d.ts
- dist/public/chained_ops/sum.d.ts
- dist/public/chained_ops/tan.d.ts
- dist/public/chained_ops/tanh.d.ts
- dist/public/chained_ops/tile.d.ts
- dist/public/chained_ops/to_bool.d.ts
- dist/public/chained_ops/to_float.d.ts
- dist/public/chained_ops/to_int.d.ts
- dist/public/chained_ops/topk.d.ts
- dist/public/chained_ops/transpose.d.ts
- dist/public/chained_ops/unique.d.ts
- dist/public/chained_ops/unsorted_segment_sum.d.ts
- dist/public/chained_ops/unstack.d.ts
- dist/public/chained_ops/where.d.ts
- dist/public/chained_ops/zeros_like.d.ts
- dist/serialization.d.ts
- dist/tensor.d.ts
- dist/tensor_info.d.ts
- dist/tensor_types.d.ts
- dist/tensor_util.d.ts
- dist/test_util.d.ts
- dist/train.d.ts
- dist/types.d.ts
- dist/util.d.ts
- dist/util_base.d.ts
- dist/version.d.ts
Dependencies (7)
Dev Dependencies (2)
Peer Dependencies (0)
No peer dependencies.
Badge
To add a badge like this one to your package's README, use the codes available below.
You may also use Shields.io to create a custom badge linking to https://www.jsdocs.io/package/@tensorflow/tfjs-core.
- Markdown[](https://www.jsdocs.io/package/@tensorflow/tfjs-core)
- HTML<a href="https://www.jsdocs.io/package/@tensorflow/tfjs-core"><img src="https://img.shields.io/badge/jsDocs.io-reference-blue" alt="jsDocs.io"></a>
- Updated .
Package analyzed in 18253 ms. - Missing or incorrect documentation? Open an issue for this package.